defmodule AWS.SMS do
@moduledoc """
AWS Server Migration Service
AWS Server Migration Service (AWS SMS) makes it easier and faster for you to
migrate your on-premises workloads to AWS.
To learn more about AWS SMS, see the following resources:
* [AWS Server Migration Service product page](http://aws.amazon.com/server-migration-service/)
* [AWS Server Migration Service User Guide](https://docs.aws.amazon.com/server-migration-service/latest/userguide/)
"""
@doc """
Creates an application.
An application consists of one or more server groups. Each server group contains
one or more servers.
"""
def create_app(client, input, options \\ []) do
request(client, "CreateApp", input, options)
end
@doc """
Creates a replication job.
The replication job schedules periodic replication runs to replicate your server
to AWS. Each replication run creates an Amazon Machine Image (AMI).
"""
def create_replication_job(client, input, options \\ []) do
request(client, "CreateReplicationJob", input, options)
end
@doc """
Deletes the specified application.
Optionally deletes the launched stack associated with the application and all
AWS SMS replication jobs for servers in the application.
"""
def delete_app(client, input, options \\ []) do
request(client, "DeleteApp", input, options)
end
@doc """
Deletes the launch configuration for the specified application.
"""
def delete_app_launch_configuration(client, input, options \\ []) do
request(client, "DeleteAppLaunchConfiguration", input, options)
end
@doc """
Deletes the replication configuration for the specified application.
"""
def delete_app_replication_configuration(client, input, options \\ []) do
request(client, "DeleteAppReplicationConfiguration", input, options)
end
@doc """
Deletes the validation configuration for the specified application.
"""
def delete_app_validation_configuration(client, input, options \\ []) do
request(client, "DeleteAppValidationConfiguration", input, options)
end
@doc """
Deletes the specified replication job.
After you delete a replication job, there are no further replication runs. AWS
deletes the contents of the Amazon S3 bucket used to store AWS SMS artifacts.
The AMIs created by the replication runs are not deleted.
"""
def delete_replication_job(client, input, options \\ []) do
request(client, "DeleteReplicationJob", input, options)
end
@doc """
Deletes all servers from your server catalog.
"""
def delete_server_catalog(client, input, options \\ []) do
request(client, "DeleteServerCatalog", input, options)
end
@doc """
Disassociates the specified connector from AWS SMS.
After you disassociate a connector, it is no longer available to support
replication jobs.
"""
def disassociate_connector(client, input, options \\ []) do
request(client, "DisassociateConnector", input, options)
end
@doc """
Generates a target change set for a currently launched stack and writes it to an
Amazon S3 object in the customer’s Amazon S3 bucket.
"""
def generate_change_set(client, input, options \\ []) do
request(client, "GenerateChangeSet", input, options)
end
@doc """
Generates an AWS CloudFormation template based on the current launch
configuration and writes it to an Amazon S3 object in the customer’s Amazon S3
bucket.
"""
def generate_template(client, input, options \\ []) do
request(client, "GenerateTemplate", input, options)
end
@doc """
Retrieves information about the specified application.
"""
def get_app(client, input, options \\ []) do
request(client, "GetApp", input, options)
end
@doc """
Retrieves the application launch configuration associated with the specified
application.
"""
def get_app_launch_configuration(client, input, options \\ []) do
request(client, "GetAppLaunchConfiguration", input, options)
end
@doc """
Retrieves the application replication configuration associated with the
specified application.
"""
def get_app_replication_configuration(client, input, options \\ []) do
request(client, "GetAppReplicationConfiguration", input, options)
end
@doc """
Retrieves information about a configuration for validating an application.
"""
def get_app_validation_configuration(client, input, options \\ []) do
request(client, "GetAppValidationConfiguration", input, options)
end
@doc """
Retrieves output from validating an application.
"""
def get_app_validation_output(client, input, options \\ []) do
request(client, "GetAppValidationOutput", input, options)
end
@doc """
Describes the connectors registered with AWS SMS.
"""
def get_connectors(client, input, options \\ []) do
request(client, "GetConnectors", input, options)
end
@doc """
Describes the specified replication job or all of your replication jobs.
"""
def get_replication_jobs(client, input, options \\ []) do
request(client, "GetReplicationJobs", input, options)
end
@doc """
Describes the replication runs for the specified replication job.
"""
def get_replication_runs(client, input, options \\ []) do
request(client, "GetReplicationRuns", input, options)
end
@doc """
Describes the servers in your server catalog.
Before you can describe your servers, you must import them using
`ImportServerCatalog`.
"""
def get_servers(client, input, options \\ []) do
request(client, "GetServers", input, options)
end
@doc """
Allows application import from AWS Migration Hub.
"""
def import_app_catalog(client, input, options \\ []) do
request(client, "ImportAppCatalog", input, options)
end
@doc """
Gathers a complete list of on-premises servers.
Connectors must be installed and monitoring all servers that you want to import.
This call returns immediately, but might take additional time to retrieve all
the servers.
"""
def import_server_catalog(client, input, options \\ []) do
request(client, "ImportServerCatalog", input, options)
end
@doc """
Launches the specified application as a stack in AWS CloudFormation.
"""
def launch_app(client, input, options \\ []) do
request(client, "LaunchApp", input, options)
end
@doc """
Retrieves summaries for all applications.
"""
def list_apps(client, input, options \\ []) do
request(client, "ListApps", input, options)
end
@doc """
Provides information to AWS SMS about whether application validation is
successful.
"""
def notify_app_validation_output(client, input, options \\ []) do
request(client, "NotifyAppValidationOutput", input, options)
end
@doc """
Creates or updates the launch configuration for the specified application.
"""
def put_app_launch_configuration(client, input, options \\ []) do
request(client, "PutAppLaunchConfiguration", input, options)
end
@doc """
Creates or updates the replication configuration for the specified application.
"""
def put_app_replication_configuration(client, input, options \\ []) do
request(client, "PutAppReplicationConfiguration", input, options)
end
@doc """
Creates or updates a validation configuration for the specified application.
"""
def put_app_validation_configuration(client, input, options \\ []) do
request(client, "PutAppValidationConfiguration", input, options)
end
@doc """
Starts replicating the specified application by creating replication jobs for
each server in the application.
"""
def start_app_replication(client, input, options \\ []) do
request(client, "StartAppReplication", input, options)
end
@doc """
Starts an on-demand replication run for the specified application.
"""
def start_on_demand_app_replication(client, input, options \\ []) do
request(client, "StartOnDemandAppReplication", input, options)
end
@doc """
Starts an on-demand replication run for the specified replication job.
This replication run starts immediately. This replication run is in addition to
the ones already scheduled.
There is a limit on the number of on-demand replication runs that you can
request in a 24-hour period.
"""
def start_on_demand_replication_run(client, input, options \\ []) do
request(client, "StartOnDemandReplicationRun", input, options)
end
@doc """
Stops replicating the specified application by deleting the replication job for
each server in the application.
"""
def stop_app_replication(client, input, options \\ []) do
request(client, "StopAppReplication", input, options)
end
@doc """
Terminates the stack for the specified application.
"""
def terminate_app(client, input, options \\ []) do
request(client, "TerminateApp", input, options)
end
@doc """
Updates the specified application.
"""
def update_app(client, input, options \\ []) do
request(client, "UpdateApp", input, options)
end
@doc """
Updates the specified settings for the specified replication job.
"""
def update_replication_job(client, input, options \\ []) do
request(client, "UpdateReplicationJob", input, options)
end
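# Every public function above funnels into this single JSON-RPC style request:
# the operation name travels in the "X-Amz-Target" header and the input map is
# JSON-encoded into the POST body, which is then signed with SigV4.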
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "sms"}
host = build_host("sms", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AWSServerMigrationService_V2016_10_24.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
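# For example, with region "us-east-1", endpoint "amazonaws.com",
# proto "https", and port 443, build_host/2 and build_url/2 yield
# "https://sms.us-east-1.amazonaws.com:443/".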
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
# File: lib/aws/generated/sms.ex
defmodule Conform.Schema do
@moduledoc """
A schema is a keyword list which represents how to map, transform, and validate
configuration values parsed from the .conf file. The following is an explanation of
each key in the schema definition in order of appearance, and how to use them.
## Import
A list of application names (as atoms), which represent apps to load modules from
which you can then reference in your schema definition. This is how you import your
own custom Validator/Transform modules, or general utility modules for use in
validator/transform functions in the schema. For example, if you have an application
`:foo` which contains a custom Transform module, you would add it to your schema like so:
`[ import: [:foo], ..., transforms: ["myapp.some.setting": MyApp.SomeTransform]]`
## Extends
A list of application names (as atoms), which contain schemas that you want to extend
with this schema. By extending a schema, you effectively re-use definitions in the
extended schema. You may also override definitions from the extended schema by redefining them
in the extending schema. You use `:extends` like so:
`[ extends: [:foo], ... ]`
## Mappings
Mappings define how to interpret settings in the .conf when they are translated to
runtime configuration. They also define how the .conf will be generated, things like
documentation, @see references, example values, etc.
See the moduledoc for `Conform.Schema.Mapping` for more details.
## Transforms
Transforms are custom functions which are executed to build the value which will be
stored at the path defined by the key. Transforms have access to the current config
state via the `Conform.Conf` module, and can use that to build complex configuration
from a combination of other config values.
See the moduledoc for `Conform.Schema.Transform` for more details and examples.
## Validators
Validators are simple functions which take two arguments, the value to be validated,
and arguments provided to the validator (used only by custom validators). A validator
checks the value, and returns `:ok` if it is valid, `{:warn, message}` if it is valid,
but should be brought to the user's attention, or `{:error, message}` if it is invalid.
See the moduledoc for `Conform.Schema.Validator` for more details and examples.
"""
alias __MODULE__
@type schema :: __MODULE__
defstruct import: [],
extends: [],
mappings: [],
transforms: [],
validators: []
defmodule SchemaError do
@moduledoc """
This exception reflects an issue with the schema
"""
defexception message: "Invalid schema. Should be a keyword list with at least the :mappings key defined"
end
@doc """
Get the current app's schema path
"""
@spec schema_path() :: binary
def schema_path(), do: Mix.Project.config |> Keyword.get(:app) |> schema_path
def schema_path(app), do: Path.join([File.cwd!, "config", schema_filename(app)])
@doc """
get the current app's schema filename
"""
def schema_filename(app), do: "#{app}.schema.exs"
@doc """
Parses the schema at the provided path as quoted terms.
Returns {:ok, quoted} | {:error, {line, error, details}}
"""
@spec parse(String.t) :: {:ok, term} | {:error, {integer, binary, binary}}
def parse(binary) when is_binary(binary) do
case Code.string_to_quoted(binary) do
{:ok, {:__block__, _, [_, quoted]}} -> {:ok, quoted}
{:ok, quoted} -> {:ok, quoted}
{:error, _} = err -> err
end
end
@doc """
Parses the schema at the provided path as quoted terms.
Returns the quoted terms or raises SchemaError on failure.
"""
@spec parse!(String.t) :: term | no_return
def parse!(binary) when is_binary(binary) do
case parse(binary) do
{:ok, quoted} -> quoted
{:error, {line, error, details}} ->
raise SchemaError, message: "Invalid schema at line #{line}: #{error}#{details}."
end
end
@doc """
Load a schema from the provided path. Throws on error.
Used for schema evaluation only.
"""
@spec load!(binary | atom) :: schema
def load!(path) when is_binary(path) do
if File.exists?(path) do
path |> File.read! |> parse! |> from(path)
else
raise SchemaError, message: "Schema at #{path} doesn't exist!"
end
end
def load!(name) when is_atom(name), do: name |> schema_path |> load!
@doc """
Loads a schema from the provided path.
Returns {:ok, schema} | {:error, message}
"""
@spec load(binary | atom) :: {:ok, schema} | {:error, term}
def load(path) do
try do
{:ok, load!(path)}
rescue
err in SchemaError -> {:error, err.message}
end
end
# Ignore the documentation block if one is present
defp from({:__block__, _, [_, quoted]}, path), do: from(quoted, path)
defp from(quoted, path) do
# Load imports from archive if present
archive_path = String.replace(path, ".exs", ".ez")
load_archive(archive_path)
# Build schema
schema = %Schema{}
# Get and validate imports
schema = case Keyword.get(quoted, :import) do
nil -> schema
imports when is_list(imports) ->
imports = Enum.map(imports, fn i ->
case valid_import?(i) do
true -> i
false ->
Conform.Utils.warn "Schema imports #{i}, but #{i} could not be loaded."
nil
end
end) |> Enum.filter(fn nil -> false; _ -> true end)
%{schema | :import => imports}
end
# Get and validate mappings
schema = case Keyword.get(quoted, :mappings) do
nil -> raise SchemaError, message: "Schema must contain at least one mapping!"
mappings when is_list(mappings) ->
%{schema | :mappings => Enum.map(mappings, &Conform.Schema.Mapping.from_quoted/1)}
end
# Get and validate transforms
schema = case Keyword.get(quoted, :transforms) do
nil -> schema
transforms when is_list(transforms) ->
user_defined = Enum.map(transforms, &Conform.Schema.Transform.from_quoted/1)
%{schema | :transforms => user_defined}
end
# Get and validate validators
global_validators = Conform.Schema.Validator.load
schema = case Keyword.get(quoted, :validators) do
nil -> %{schema | :validators => global_validators}
validators when is_list(validators) ->
user_defined = Enum.map(validators, &Conform.Schema.Validator.from_quoted/1)
%{schema | :validators => user_defined ++ global_validators}
end
# Determine if we are extending any schemas in
# dependencies of this application. `extends` should be a list of application names
# as atoms. Given an application, we will fetch its schema, load it, and merge it
# on to our base schema. Definitions in this schema will then override those which are
# present in the schemas being extended.
case Keyword.get(quoted, :extends) do
nil -> schema
extends when is_list(extends) ->
# Load schemas
schemas = Enum.map(extends, fn
e when is_atom(e) ->
case get_extends_schema(e, path) do
nil ->
Conform.Utils.warn "Schema extends #{e}, but the schema for #{e} was not found."
nil
{schema_path, contents} ->
contents |> parse! |> from(schema_path)
end
e ->
Conform.Utils.warn "Invalid extends value: #{e}. Only application names as atoms are permitted."
nil
end) |> Enum.filter(fn nil -> false; _ -> true end)
# Merge them onto the base schema in the order provided
Enum.reduce(schemas, schema, fn s, acc ->
s = Map.drop(s, [:extends])
Map.merge(acc, s, fn
_, [], [] ->
[]
_, v1, v2 ->
cond do
Keyword.keyword?(v1) && Keyword.keyword?(v2) ->
Keyword.merge(v1, v2) |> Enum.map(fn {k, v} -> {k, put_in(v, [:persist], false)} end)
is_list(v1) && is_list(v2) ->
v1 |> Enum.concat(v2) |> Enum.uniq
true ->
v2
end
end)
end)
end
end
@doc """
Load the schemas for all dependencies of the current project,
and merge them into a single schema. Schemas are returned in
their quoted form.
"""
@spec coalesce() :: schema
def coalesce do
# Get schemas from all dependencies
proj_config = [build_path: Mix.Project.build_path, umbrella?: Mix.Project.umbrella?]
# Merge schemas for all deps
Mix.Dep.loaded([])
|> Enum.map(fn %Mix.Dep{app: app, opts: opts} ->
Mix.Project.in_project(app, opts[:dest], proj_config, fn _ -> load!(app) end)
end)
|> coalesce
end
@doc """
Given a collection of schemas, merge them into a single schema
"""
@spec coalesce([schema]) :: schema
def coalesce(schemas) do
Enum.reduce(schemas, empty, &merge/2)
end
@doc """
Merges two schemas. Conflicts are resolved by taking the value from `y`.
Expects the schema to be provided in its quoted form.
"""
@spec merge(schema, schema) :: schema
def merge(%Schema{} = x, %Schema{} = y) do
Dict.merge(x, y, fn _, v1, v2 ->
case Keyword.keyword?(v1) && Keyword.keyword?(v2) do
true -> Keyword.merge(v1, v2)
false -> v1 |> Enum.concat(v2) |> Enum.uniq
end
end)
end
@doc """
Saves a schema to the provided path
"""
@spec write(schema, binary) :: :ok | {:error, term}
def write(schema, path) do
File.write!(path, stringify(schema))
end
@doc """
Converts a schema in its quoted form and writes it to
the provided path
"""
@spec write_quoted(schema, binary) :: :ok | {:error, term}
def write_quoted(schema, path) do
File.write!(path, stringify(schema))
end
@doc """
Converts a schema to a prettified string. Expects the schema
to be in its quoted form.
"""
@spec stringify([term]) :: binary
def stringify(schema, with_moduledoc \\ true) do
string = if schema == Conform.Schema.empty do
schema
|> to_list
|> Inspect.Algebra.to_doc(%Inspect.Opts{pretty: true})
|> Inspect.Algebra.format(10)
|> Enum.join
else
schema
|> to_list
|> Conform.Utils.Code.stringify
end
case with_moduledoc do
true ->
"@moduledoc \"\"\"\n" <> @moduledoc <> "\"\"\"\n" <> string
false ->
string
end
end
defp to_list(%Schema{} = schema) do
schema |> Map.to_list |> Keyword.delete(:__struct__) |> Enum.map(&to_list/1)
end
defp to_list({k, v}) when is_list(v) do
{k, Enum.map(v, &to_list/1)}
end
defp to_list(%Conform.Schema.Validator{name: nil, definition: nil, validator: v}), do: v
defp to_list(%Conform.Schema.Validator{name: name, definition: v}), do: {String.to_atom(name), v}
defp to_list(%Conform.Schema.Transform{path: path, definition: nil, transform: t}), do: {String.to_atom(path), t}
defp to_list(%Conform.Schema.Transform{path: path, definition: t}), do: {String.to_atom(path), t}
defp to_list(%Conform.Schema.Mapping{name: name} = mapping) do
props = mapping
|> Map.to_list
|> Keyword.delete(:__struct__)
|> Keyword.delete(:name)
|> Keyword.delete(:persist)
|> Enum.filter(fn
{_, ignore} when ignore in [nil, "", []] -> false
_ -> true
end)
{String.to_atom(name), props}
end
defp to_list(v) when is_map(v) do
v |> Map.to_list |> Keyword.delete(:__struct__)
end
@doc """
Convert standard configuration to quoted schema format
"""
@spec from_config([] | [{atom, term}]) :: [{atom, term}]
def from_config([]), do: empty
def from_config(config) when is_list(config) do
to_schema(config)
end
def empty, do: %Schema{}
defp to_schema([]), do: %Schema{}
defp to_schema(config), do: to_schema(config, %Schema{})
defp to_schema([], schema), do: schema
defp to_schema([{app, settings} | config], schema) do
mappings = Enum.map(settings, fn {k, v} -> to_mapping("#{app}", k, v) end) |> List.flatten
to_schema(config, %{schema | :mappings => schema.mappings ++ mappings})
end
defp to_mapping(key, setting, value) do
case Keyword.keyword?(value) do
true ->
for {k, v} <- value, into: [] do
to_mapping("#{key}.#{setting}", k, v)
end
false ->
datatype = extract_datatype(value)
setting_name = "#{key}.#{setting}"
Conform.Schema.Mapping.from_quoted({:"#{setting_name}", [
doc: "Provide documentation for #{setting_name} here.",
to: setting_name,
datatype: datatype,
default: convert_to_datatype(datatype, value)
]})
end
end
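# For example (hypothetical config), `from_config([myapp: [port: 4000]])`
# yields a %Schema{} with a single "myapp.port" mapping whose datatype is
# :integer and whose default is 4000.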
defp extract_datatype(v) when is_atom(v), do: :atom
defp extract_datatype(v) when is_binary(v), do: :binary
defp extract_datatype(v) when is_boolean(v), do: :boolean
defp extract_datatype(v) when is_integer(v), do: :integer
defp extract_datatype(v) when is_float(v), do: :float
# First check if the list value type is a charlist, otherwise
# assume a list of whatever the first element value type is
defp extract_datatype([h|_]=v) when is_list(v) and h != [] do
case :io_lib.char_list(v) do
true -> :charlist
false ->
list_type = extract_datatype(h)
[list: list_type]
end
end
defp extract_datatype({_, v}), do: {:atom, extract_datatype(v)}
defp extract_datatype(_), do: :binary
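# For example: extract_datatype(:info)       #=> :atom
#              extract_datatype(["a", "b"])  #=> [list: :binary]
#              extract_datatype('hello')     #=> :charlist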
defp convert_to_datatype(:binary, v) when is_binary(v), do: v
defp convert_to_datatype(:binary, v) when not is_binary(v), do: nil
defp convert_to_datatype(_, v), do: v
defp valid_import?(i) when is_atom(i) do
case :code.lib_dir(i) do
{:error, _} -> false
path when is_list(path) -> true
end
end
defp valid_import?(_), do: false
defp get_extends_schema(app_name, src_schema_path) do
# Attempt loading from deps if Mix is available
schema_path = try do
paths = Mix.Dep.children
|> Enum.filter(fn %Mix.Dep{app: app} -> app == app_name end)
|> Enum.map(fn %Mix.Dep{opts: opts} ->
Keyword.get(opts, :dest, Keyword.get(opts, :path))
end)
|> Enum.filter(fn nil -> false; _ -> true end)
case paths do
[] -> nil
[app_path] -> Path.join([app_path, "config", "#{app_name}.schema.exs"])
end
rescue
_ -> nil
catch
_, _ -> nil
end
# Next try locating by application
schema_path = case schema_path do
nil ->
case :code.lib_dir(app_name) do
{:error, _} -> nil
path when is_list(path) ->
path = List.to_string(path)
case File.exists?(path <> ".ez") do
true -> Path.join([path <> ".ez", "#{app_name}", "config", "#{app_name}.schema.exs"])
false -> Path.join([path, "config", "#{app_name}.schema.exs"])
end
end
path when is_binary(path) ->
path
end
schema_path = case schema_path == nil || File.exists?(schema_path) == false do
true ->
# If that fails, try loading from archive, if present
archive_path = String.replace(src_schema_path, ".exs", ".ez")
case File.exists?(archive_path) do
false -> nil
true ->
case :erl_prim_loader.list_dir('#{archive_path}') do
:error -> nil
{:ok, apps} ->
case '#{app_name}' in apps do
true -> Path.join([archive_path, "#{app_name}", "config", "#{app_name}.schema.exs"])
false -> nil
end
end
end
_ -> schema_path
end
case schema_path do
nil -> nil
schema_path when is_binary(schema_path) ->
case File.exists?(schema_path) do
true -> {schema_path, File.read!(schema_path)}
false ->
case :erl_prim_loader.get_file('#{schema_path}') do
:error -> nil
{:ok, contents, _} -> {schema_path, contents}
end
end
end
end
defp load_archive(archive_path) do
case File.exists?(archive_path) do
true ->
{:ok, [_ | zip_files]} = :zip.list_dir('#{archive_path}')
apps = Enum.map(zip_files, fn {:zip_file, path, _, _, _, _} ->
path = to_string(path)
case :filename.extension(path) == ".app" do
true -> Path.dirname(path)
false -> []
end
end) |> List.flatten
Enum.each(apps, fn(app) ->
path = Path.join(archive_path, app) |> Path.expand
Code.prepend_path(path)
end)
false ->
:ok
end
end
end
# File: lib/conform/schema.ex
defmodule ApiWeb.ScheduleController do
@moduledoc """
Controller for Schedules. Filterable by:
* stop
* route
* direction ID
* service date
* trip
* stop sequence
"""
use ApiWeb.Web, :api_controller
alias State.Schedule
plug(ApiWeb.Plugs.ValidateDate)
plug(:date)
@filters ~w(date direction_id max_time min_time route stop stop_sequence route_type trip)s
@pagination_opts ~w(offset limit order_by)a
@includes ~w(stop trip prediction route)
def state_module, do: State.Schedule
def show_data(_conn, _params), do: []
swagger_path :index do
get(path(__MODULE__, :index))
description("""
**NOTE:** `filter[route]`, `filter[stop]`, or `filter[trip]` **MUST** be present for any schedules to be returned.
List of schedules. To get a realtime prediction instead of the scheduled times, use `/predictions`.
#{swagger_path_description("/data/{index}")}
## When a vehicle is scheduled to be at a stop
`/schedules?filter[stop]=STOP_ID`
## The schedule for one route
`/schedules?filter[route]=ROUTE_ID`
### When a route is open
Query for the `first` and `last` stops on the route.
`/schedules?filter[route]=ROUTE_ID&filter[stop_sequence]=first,last`
## The schedule for a whole trip
`/schedules?filter[trip]=TRIP_ID`
""")
common_index_parameters(__MODULE__, :schedule, :include_time)
include_parameters(@includes)
filter_param(:date, description: "Filter schedule by date that they are active.")
filter_param(:direction_id)
filter_param(:route_type, desc: "Must be used in conjunction with another filter.")
filter_param(
:time,
name: :min_time,
description:
"Time before which schedule should not be returned. To filter times after midnight use more than 24 hours. For example, min_time=24:00 will return schedule information for the next calendar day, since that service is considered part of the current service day. Additionally, min_time=00:00&max_time=02:00 will not return anything."
)
filter_param(
:time,
name: :max_time,
description:
"Time after which schedule should not be returned. To filter times after midnight use more than 24 hours. For example, min_time=24:00 will return schedule information for the next calendar day, since that service is considered part of the current service day. Additionally, min_time=00:00&max_time=02:00 will not return anything."
)
filter_param(:id, name: :route)
filter_param(:id, name: :stop)
filter_param(:id, name: :trip)
parameter(:"filter[stop_sequence]", :query, :string, """
Filter by the index of the stop in the trip. Symbolic values `first` and `last` can be used instead of \
numeric sequence number too.
""")
consumes("application/vnd.api+json")
produces("application/vnd.api+json")
response(200, "OK", Schema.ref(:Schedules))
response(400, "Bad Request", Schema.ref(:BadRequest))
response(403, "Forbidden", Schema.ref(:Forbidden))
response(429, "Too Many Requests", Schema.ref(:TooManyRequests))
end
def index_data(conn, params) do
with {:ok, filtered} <- Params.filter_params(params, @filters, conn),
{:ok, _includes} <- Params.validate_includes(params, @includes, conn) do
# must include at least one filter besides `route_type` and `date` (`date` is automatically included)
case format_filters(filtered, conn) do
%{route_type: _} = filters when map_size(filters) == 2 ->
{:error, :only_route_type}
filters when map_size(filters) > 1 ->
filters
|> Schedule.filter_by()
|> Schedule.filter_by_route_type(Map.get(filters, :route_type))
|> populate_extra_times(conn)
|> State.all(Params.filter_opts(params, @pagination_opts, conn))
_ ->
{:error, :filter_required}
end
else
{:error, _, _} = error -> error
end
end
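# For API versions before 2019-07-01, pickup-only and drop-off-only stops
# mirror the known time into the missing one: a stop with no pickup reuses its
# arrival time as the departure time, and a stop with no drop off reuses its
# departure time as the arrival time.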
def populate_extra_times(map, %{assigns: %{api_version: ver}}) when ver < "2019-07-01" do
for s <- map do
s = if s.pickup_type == 1, do: %Model.Schedule{s | departure_time: s.arrival_time}, else: s
if s.drop_off_type == 1, do: %Model.Schedule{s | arrival_time: s.departure_time}, else: s
end
end
def populate_extra_times(map, _), do: map
# Formats the filters we care about into map with parsed values
@spec format_filters(map, Plug.Conn.t()) :: map
defp format_filters(filters, conn) do
filters
|> Stream.flat_map(&do_format_filter/1)
|> Enum.into(%{})
|> expand_stops_filter(:stops, conn.assigns.api_version)
|> Map.put_new_lazy(:date, &Parse.Time.service_date/0)
end
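# For example (hypothetical request params), %{"stop" => "70061,70063",
# "min_time" => "7:30"} becomes %{stops: [...], min_time: 27_000, date: ...},
# with the stop IDs expanded via expand_stops_filter/3, 7:30 converted to
# 7 * 3600 + 30 * 60 = 27_000 seconds past midnight, and the date defaulting
# to today's service date.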
# Parse the keys we care about
@spec do_format_filter({String.t(), String.t()}) :: %{optional(atom()) => any()} | []
defp do_format_filter({key, string}) when key in ["trip", "route"] do
case Params.split_on_comma(string) do
[] ->
[]
ids ->
%{String.to_existing_atom("#{key}s") => ids}
end
end
defp do_format_filter({"stop", string}) do
case Params.split_on_comma(string) do
[] -> []
ids -> %{stops: ids}
end
end
defp do_format_filter({"direction_id", direction_id}) do
case Params.direction_id(%{"direction_id" => direction_id}) do
nil ->
[]
parsed_direction_id ->
%{direction_id: parsed_direction_id}
end
end
defp do_format_filter({"date", date}) do
case Date.from_iso8601(date) do
{:ok, date} ->
%{date: date}
_ ->
[]
end
end
defp do_format_filter({"stop_sequence", stop_sequence_str}) do
case Params.split_on_comma(stop_sequence_str) do
[] ->
[]
stop_sequence ->
formatted_stop_sequence =
stop_sequence
|> Stream.map(&format_stop/1)
|> Enum.reject(&is_nil/1)
if formatted_stop_sequence != [] do
%{stop_sequence: formatted_stop_sequence}
else
[]
end
end
end
defp do_format_filter({"route_type", route_types}) do
%{route_type: Params.route_types(%{"route_type" => route_types})}
end
defp do_format_filter({key, time}) when key in ["min_time", "max_time"] do
case time_to_seconds_past_midnight(time) do
nil ->
[]
time_in_seconds ->
%{String.to_existing_atom(key) => time_in_seconds}
end
end
defp do_format_filter(_), do: []
defp format_stop("first"), do: :first
defp format_stop("last"), do: :last
defp format_stop(stop) do
case Integer.parse(stop) do
{stop_id, ""} ->
stop_id
_ ->
nil
end
end
defp time_to_seconds_past_midnight(<<hour_bin::binary-2, ?:, minute_bin::binary-2>>) do
time_to_seconds_past_midnight(hour_bin, minute_bin)
end
defp time_to_seconds_past_midnight(<<hour_bin::binary-1, ?:, minute_bin::binary-2>>) do
time_to_seconds_past_midnight(hour_bin, minute_bin)
end
defp time_to_seconds_past_midnight(_) do
nil
end
defp time_to_seconds_past_midnight(hour_bin, minute_bin) do
with {hour, ""} <- Integer.parse(hour_bin),
{minute, ""} <- Integer.parse(minute_bin) do
hour * 3_600 + minute * 60
else
_ ->
nil
end
end
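# For example, "8:15" and "08:15" both parse to 29_700 seconds, and "25:30"
# parses to 91_800 seconds, i.e. 1:30 AM on the next calendar day of the same
# service day.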
@doc """
Assigns a datetime to the conn. If a valid date is passed as a param, that
value is used. Otherwise a default value of today is used.
"""
def date(%{params: params} = conn, []) do
{conn, date} =
with {:ok, %{"date" => date_string}} when date_string != nil <-
Params.filter_params(params, @filters, conn),
{:ok, parsed_date} <- Date.from_iso8601(date_string) do
{conn, parsed_date}
else
_ -> conn_service_date(conn)
end
conn
|> assign(:date, date)
|> assign(:date_seconds, DateHelpers.unix_midnight_seconds(date))
end
def swagger_definitions do
import PhoenixSwagger.JsonApi, except: [page: 1]
%{
ScheduleResource:
resource do
description(swagger_path_description("*"))
attributes do
arrival_time(
:string,
"""
Time when the trip arrives at the given stop. See \
[GTFS `stop_times.txt` `arrival_time`](https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#stop_timestxt)
Format is ISO8601.
""",
format: :"date-time",
example: "2017-08-14T15:04:00-04:00"
)
departure_time(
:string,
"""
Time when the trip departs the given stop. See \
[GTFS `stop_times.txt` `departure_time`](https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#stop_timestxt)
Format is ISO8601.
""",
format: :"date-time",
example: "2017-08-14T15:04:00-04:00"
)
stop_sequence(
:integer,
"""
The sequence the `stop_id` is arrived at during the `trip_id`. The stop sequence is \
monotonically increasing along the trip, but the `stop_sequence` along the `trip_id` are not \
necessarily consecutive. See \
[GTFS `stop_times.txt` `stop_sequence`](https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#stop_timestxt)
""",
example: 1
)
stop_headsign(
nullable(%Schema{type: :string}, true),
"""
Text identifying destination of the trip, overriding trip-level headsign if present.\
See [GTFS `stop_times.txt` `stop_headsign`](https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#stop_timestxt)
""",
example: "Foxboro via Back Bay"
)
pickup_type(
%Schema{type: :integer, enum: Enum.to_list(0..3)},
"""
How the vehicle departs from `stop_id`.
| Value | Description |
|-------|-----------------------------------------------|
| `0` | Regularly scheduled pickup |
| `1` | No pickup available |
| `2` | Must phone agency to arrange pickup |
| `3` | Must coordinate with driver to arrange pickup |
See \
[GTFS `stop_times.txt` `pickup_type`](https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#stop_timestxt)
""",
example: 0
)
drop_off_type(
%Schema{type: :integer, enum: Enum.to_list(0..3)},
"""
How the vehicle arrives at `stop_id`.
| Value | Description |
|-------|-----------------------------------------------|
| `0` | Regularly scheduled drop off |
| `1` | No drop off available |
| `2` | Must phone agency to arrange drop off |
| `3` | Must coordinate with driver to arrange drop off |
See \
[GTFS `stop_times.txt` `drop_off_type`](https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#stop_timestxt)
""",
example: 1
)
timepoint(
:boolean,
"""
| Value | `*/attributes/arrival_time` and `*/attributes/departure_time` |
|---------|---------------------------------------------------------------|
| `true` | Exact |
| `false` | Estimates |
See \
[GTFS `stop_times.txt` `timepoint`](https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#stop_timestxt)
""",
example: false
)
end
direction_id_attribute()
relationship(:route)
relationship(:trip)
relationship(:stop)
relationship(:prediction)
end,
Schedules: page(:ScheduleResource)
}
end
defp swagger_path_description(parent_pointer) do
"""
A schedule is the arrival drop off (`#{parent_pointer}/attributes/drop_off_type`) time \
(`#{parent_pointer}/attributes/arrival_time`) and departure pick up (`#{parent_pointer}/attributes/pickup_type`) \
time (`#{parent_pointer}/attributes/departure_time`) to/from a stop \
(`#{parent_pointer}/relationships/stop/data/id`) at a given sequence \
(`#{parent_pointer}/attributes/stop_sequence`) along \
a trip (`#{parent_pointer}/relationships/trip/data/id`) going in a direction \
(`#{parent_pointer}/attributes/direction_id`) on a route (`#{parent_pointer}/relationships/route/data/id`) when \
the trip is following a service (`#{parent_pointer}/relationships/service/data/id`) to determine when it is active.
See [GTFS `stop_times.txt`](https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#stop_timestxt) for base specification.
"""
end
end
# File: apps/api_web/lib/api_web/controllers/schedule_controller.ex
defmodule Expo.Mo do
@moduledoc """
`.mo` file handler
"""
alias Expo.Mo.InvalidFileError
alias Expo.Mo.Parser
alias Expo.Mo.UnsupportedVersionError
alias Expo.Translations
@type compose_options :: [
{:endianness, :little | :big},
{:use_fuzzy, boolean()},
{:statistics, boolean()}
]
@type parse_options :: [{:file, Path.t()}]
@type invalid_file_error :: {:error, :invalid_file}
@type unsupported_version_error ::
{:error, {:unsupported_version, major :: non_neg_integer(), minor :: non_neg_integer()}}
@type file_error :: {:error, File.posix()}
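# `.mo` files open with the magic number 0x950412de; the byte order in which
# that magic appears on disk is what distinguishes little- from big-endian
# files (see the doctests below).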
@doc """
Composes a `.mo` file from translations
### Examples
iex> %Expo.Translations{
...> headers: ["Last-Translator: Jane Doe"],
...> translations: [
...> %Expo.Translation.Singular{msgid: ["foo"], msgstr: ["bar"], comments: "A comment"}
...> ]
...> }
...> |> Expo.Mo.compose()
...> |> IO.iodata_to_binary()
<<222, 18, 4, 149, 0, 0, 0, 0, 2, 0, 0, 0, 28, 0, 0, 0, 44, 0, 0, 0, 0, 0, 0, 0,
60, 0, 0, 0, 0, 0, 0, 0, 60, 0, 0, 0, 3, 0, 0, 0, 61, 0, 0, 0, 25, 0, 0, 0,
65, 0, 0, 0, 3, 0, 0, 0, 91, 0, 0, 0, 0, 102, 111, 111, 0, 76, 97, 115, 116,
45, 84, 114, 97, 110, 115, 108, 97, 116, 111, 114, 58, 32, 74, 97, 110, 101,
32, 68, 111, 101, 0, 98, 97, 114, 0>>
"""
@spec compose(translations :: Translations.t(), opts :: compose_options()) :: iodata()
defdelegate compose(content, opts \\ []), to: Expo.Mo.Composer
@doc """
Parses a `.mo` file from a binary.
### Examples
iex> Expo.Mo.parse_binary(<<0xDE120495::size(4)-unit(8),
...> 0::little-unsigned-integer-size(2)-unit(8),
...> 0::little-unsigned-integer-size(2)-unit(8),
...> 0::little-unsigned-integer-size(4)-unit(8),
...> 28::little-unsigned-integer-size(4)-unit(8),
...> 28::little-unsigned-integer-size(4)-unit(8),
...> 28::little-unsigned-integer-size(4)-unit(8),
...> 0::little-unsigned-integer-size(4)-unit(8)>>)
{:ok, %Expo.Translations{headers: [], translations: []}}
"""
@spec parse_binary(content :: binary(), opts :: parse_options()) ::
{:ok, Translations.t()}
| invalid_file_error()
| unsupported_version_error()
def parse_binary(content, opts \\ []), do: Parser.parse(content, opts)
@doc """
Parses a string into an `Expo.Translations` struct, raising an exception if there are
any errors.
Works exactly like `parse_binary/1`, but returns a `Expo.Translations` struct
if there are no errors or raises a `Expo.Mo.InvalidFileError` error if there
are.
If the version of the `.mo` file is not supported, a
`Expo.Mo.UnsupportedVersionError` is raised.
## Examples
iex> Expo.Mo.parse_binary!(<<0xDE120495::size(4)-unit(8),
...> 0::little-unsigned-integer-size(2)-unit(8),
...> 0::little-unsigned-integer-size(2)-unit(8),
...> 0::little-unsigned-integer-size(4)-unit(8),
...> 28::little-unsigned-integer-size(4)-unit(8),
...> 28::little-unsigned-integer-size(4)-unit(8),
...> 28::little-unsigned-integer-size(4)-unit(8),
...> 0::little-unsigned-integer-size(4)-unit(8)>>)
%Expo.Translations{headers: [], translations: []}
iex> Expo.Mo.parse_binary!("invalid")
** (Expo.Mo.InvalidFileError) invalid file
"""
@spec parse_binary!(content :: binary(), options :: parse_options()) ::
Translations.t() | no_return
def parse_binary!(str, opts \\ []) do
case parse_binary(str, opts) do
{:ok, parsed} ->
parsed
{:error, :invalid_file} ->
options =
case opts[:file] do
nil -> []
path -> [file: path]
end
raise InvalidFileError, options
{:error, {:unsupported_version, major, minor}} ->
options = [major: major, minor: minor]
options =
case opts[:file] do
nil -> options
path -> [{:file, path} | options]
end
raise UnsupportedVersionError, options
end
end
@doc """
Parses the contents of a file into an `Expo.Translations` struct.
This function works similarly to `parse_binary/1` except that it takes a file
and parses the contents of that file. It can return:
* `{:ok, translations}`
* `{:error, :invalid_file}` if the contents are not a valid `.mo` file
* `{:error, {:unsupported_version, major, minor}}` if the version of the
`.mo` file is not supported
* `{:error, reason}` if there is an error with reading the file (this error
is one of the errors that can be returned by `File.read/1`)
## Examples
{:ok, mo} = Expo.Mo.parse_file "translations.mo"
mo.file
#=> "translations.mo"
Expo.Mo.parse_file "nonexistent"
#=> {:error, :enoent}
"""
@spec parse_file(path :: Path.t(), opts :: parse_options()) ::
{:ok, Translations.t()}
| invalid_file_error()
| unsupported_version_error()
| file_error()
def parse_file(path, opts \\ []) do
with {:ok, contents} <- File.read(path),
{:ok, po} <- Parser.parse(contents, Keyword.put_new(opts, :file, path)) do
{:ok, %{po | file: path}}
end
end
@doc """
Parses the contents of a file into an `Expo.Translations` struct, raising if there
are any errors.
Works like `parse_file/1`, except that it raises an `Expo.Mo.InvalidFileError`
if the file is invalid, an `Expo.Mo.UnsupportedVersionError` if the file
version is not supported, and a `File.Error` error if there's an error with
reading the file.
## Examples
Expo.Mo.parse_file! "nonexistent.mo"
#=> ** (File.Error) could not parse "nonexistent.mo": no such file or directory
"""
@spec parse_file!(Path.t(), opts :: parse_options()) :: Translations.t() | no_return
def parse_file!(path, opts \\ []) do
case parse_file(path, opts) do
{:ok, parsed} ->
parsed
{:error, :invalid_file} ->
raise InvalidFileError, file: path
{:error, {:unsupported_version, major, minor}} ->
raise UnsupportedVersionError,
major: major,
minor: minor,
file: Keyword.get(opts, :file, path)
{:error, reason} ->
raise File.Error, reason: reason, action: "parse", path: Keyword.get(opts, :file, path)
end
end
end
# File: lib/expo/mo.ex
defmodule PhoenixMDBootstrapForm do
@moduledoc """
Documentation for `PhoenixMDBootstrapForm` which provides helper methods for creating beautiful looking Material Design Bootstrap forms in Phoenix.
## Installation
This package can be installed by adding `phoenix_mdbootstrap_form` to your list of dependencies in `mix.exs`:
```elixir
def deps do
[
{:phoenix_mdbootstrap_form, "~> 0.1.2"}
]
end
```
You may also alias this module in `web.ex`, so it's shorter to type in templates.
```elixir
alias PhoenixMDBootstrapForm, as: MDF
```
## Usage
In order to change the markup of form elements to bootstrap-style, all you need is to prefix the regular methods you already have with `PhoenixMDBootstrapForm`, or `MDF` if you created an alias.
For example:
```elixir
<%= form_for @changeset, "/", fn f -> %>
<%= MDF.text_input f, :value %>
<%= MDF.submit f %>
<% end %>
```
Becomes bootstrap-styled:
```html
<form accept-charset="UTF-8" action="/" method="post">
<div class="form-group row">
<label class="col-form-label text-sm-right col-sm-2" for="record_value">
Value
</label>
<div class="col-sm-10">
<input class="form-control" id="record_value" name="record[value]" type="text">
</div>
</div>
<div class="form-group row">
<div class="col-sm-10 ml-auto">
<button class="btn" type="submit">Submit</button>
</div>
</div>
</form>
```
This library generates a [horizontal form](https://mdbootstrap.com/docs/jquery/forms/basic/) layout that collapses down on small screens.
You can always fall-back to default [Phoenix.HTML.Form](https://hexdocs.pm/phoenix_html/Phoenix.HTML.Form.html) methods if bootstrapped ones are not good enough.
Currently this module supports following methods:
* text_input
* file_input
* email_input
* password_input
* textarea
* telephone_input
* number_input
* select
* time_select
* date_select
* datetime_select
* multiple_select
* checkbox
* checkboxes
* radio_buttons
* submit
* static
[For quick reference you can look at this template](demo/lib/demo_web/templates/page/index.html.eex).
You can `mix phx.server` inside demo folder to see this reference template rendered.
### Labels
To set your own label you can do something like this:
```elixir
<%= MDF.text_input f, :value, label: [text: "Custom"] %>
```
### CSS Classes
To add your own css class to the input element / controls do this:
```elixir
<%= MDF.text_input f, :value, input: [class: "custom"] %>
```
### Help Text
You can add help text under the input. It could also be rendered template with
links, tables, and whatever else.
```elixir
<%= MDF.text_input f, :value, input: [help: "Help text"] %>
```
### Prepending and Appending Inputs
```elixir
<%= MDF.text_input f, :value, input: [prepend: "$", append: ".00"] %>
```
### Radio Buttons
You don't need to do multiple calls to create list of radio buttons. One method
will do them all:
```elixir
<%= MDF.radio_buttons f, :value, ["red", "green"] %>
```
or with custom labels:
```elixir
<%= MDF.radio_buttons f, :value, [{"R", "red"}, {"G", "green"}] %>
```
or rendered inline:
```elixir
<%= MDF.radio_buttons f, :value, ["red", "green", "blue"], input: [inline: true] %>
```
### Select
Works just like the standard `select` or `multiple_select` provided by Phoenix:
```elixir
<%= MDF.select f, :value, ["red", "green", "blue"] %>
```
or use a multiple select field:
```elixir
<%= MDF.multiple_select f, :value, ["red", "green", "blue"] %>
```
### Checkboxes
Very similar to `multiple_select` in functionality, you can render collection of
checkboxes. Other options are the same as for `radio_buttons`
```elixir
<%= MDF.checkboxes f, :value, ["red", "green", "blue"], selected: ["green"] %>
```
### Submit Buttons
Besides simple `MDF.submit f` you can define custom label and content that goes
next to the button. For example:
```elixir
<% cancel = link "Cancel", to: "/", class: "btn btn-link" %>
<%= MDF.submit f, "Smash", class: "btn-primary", alternative: cancel %>
```
### Static Elements
When you need to render a piece of content in the context of your form. For example:
```elixir
<%= MDF.static f, "Current Avatar", avatar_image_tag %>
```
### Form Errors
If changeset is invalid, form elements will have `.is-invalid` class added and
`.invalid-feedback` container will be appended with an error message.
In order to properly pull in i18n error messages specify `translate_error`
function that handles it:
```elixir
config :phoenix_mdbootstrap_form, [
translate_error_function: &MyApp.ErrorHelpers.translate_error/1
]
```
### Custom Grid and Label Alignment
By default `.col-sm-2` and `.col-sm-10` used for label and control colums respectively.
You can change that by passing `label_col` and `control_col` with `form_for` like this:
```elixir
<% opts = [label_col: "col-sm-4", control_col: "col-sm-8", label_align: "text-sm-left"] %>
<%= form_for @changeset, "/", opts, fn f -> %>
```
If you need to change it application-wide just edit your `config.exs` and play around with these:
```elixir
config :phoenix_mdbootstrap_form,
label_col_class: "col-form-label col-sm-2",
control_col_class: "col-sm-10",
label_align_class: "text-sm-right",
form_group_class: "form-group row"
```
### Credit
This repository has been forked from [GBH's phoenix_bootstrap_form](https://github.com/GBH/phoenix_bootstrap_form) and I just adjusted it for Material Design Bootstrap.
"""
alias Phoenix.HTML
alias Phoenix.HTML.{Tag, Form}
@label_col_class "col-form-label col-sm-2"
@control_col_class "col-sm-10"
@label_align_class "text-sm-right"
@form_group_class "form-group row"
defp special_select(form = %Form{}, field, icon, class, opts) do
input =
Tag.content_tag :div, class: control_col_class(form) do
is_valid_class = is_valid_class(form, field)
input_opts =
[class: "form-control #{is_valid_class} #{class}"] ++
Keyword.get(opts, :input, []) ++
Keyword.get(opts, :multiple, [])
prepend = Tag.content_tag(:i, "", class: "fas input-prefix #{icon}")
{help, input_opts} = Keyword.pop(input_opts, :help)
input =
draw_input(:text_input, form, field, nil, input_opts)
|> draw_input_group(prepend, nil)
help = draw_help(help)
error = draw_error_message(get_error(form, field))
[input, error, help]
end
Tag.content_tag :div, class: form_group_class(opts) do
[
draw_label(form, field, opts),
input
]
end
end
@doc "Creates a time-select field."
def time_select(form = %Form{}, field, opts \\ []) do
special_select(form, field, "fa-clock", "time-picker", opts)
end
@doc "Creates a date-select field."
def date_select(form = %Form{}, field, opts \\ []) do
special_select(form, field, "fa-calendar", "date-picker", opts)
end
@doc "Creates a datetime-select field."
def datetime_select(form = %Form{}, field, opts \\ []) do
special_select(form, field, "fa-calendar", "date-time-picker", opts)
end
@doc "Creates a select field."
def select(form = %Form{}, field, options, opts \\ []) do
draw_generic_input(:select, form, field, options, opts)
end
@doc "Creates a multiple-select field."
def multiple_select(form = %Form{}, field, options, opts \\ []) do
multi_opts = Keyword.put_new(opts, :multiple, multiple: true)
draw_generic_input(:select, form, field, options, multi_opts)
end
[
:text_input,
:file_input,
:email_input,
:password_input,
:textarea,
:telephone_input,
:number_input
]
|> Enum.each(fn method ->
@doc "Creates a simple form field."
def unquote(method)(form = %Form{}, field, opts \\ []) when is_atom(field) do
draw_generic_input(unquote(method), form, field, nil, opts)
end
end)
@doc "Creates a checkbox field."
def checkbox(form = %Form{}, field, opts \\ []) do
{label_opts, opts} = Keyword.pop(opts, :label, [])
{input_opts, _} = Keyword.pop(opts, :input, [])
{help, input_opts} = Keyword.pop(input_opts, :help)
label =
case Keyword.get(label_opts, :show, true) do
true -> Keyword.get(label_opts, :text, Form.humanize(field))
false -> ""
end
checkbox =
Form.checkbox(form, field, class: "form-check-input " <> is_valid_class(form, field))
for_attr = Form.input_id(form, field)
help = draw_help(help)
error = draw_error_message(get_error(form, field))
content =
Tag.content_tag :div, class: "#{control_col_class(form)} ml-auto" do
[
draw_form_check(checkbox, label, for_attr, error, input_opts[:inline]),
help
]
end
draw_form_group("", content, opts)
end
@doc "Creates multiple checkbox fields."
def checkboxes(form = %Form{}, field, values, opts \\ []) when is_list(values) do
values = add_labels_to_values(values)
{input_opts, opts} = Keyword.pop(opts, :input, [])
{help, input_opts} = Keyword.pop(input_opts, :help)
{selected, opts} = Keyword.pop(opts, :selected, [])
input_id = Form.input_id(form, field)
help = draw_help(help)
error = draw_error_message(get_error(form, field))
inputs =
values
|> Enum.with_index()
|> Enum.map(fn {{label, value}, index} ->
value = elem(HTML.html_escape(value), 1)
# error needs to show up only on last element
input_error =
if Enum.count(values) - 1 == index do
error
else
""
end
input_class = "form-check-input " <> is_valid_class(form, field)
value_id = value |> String.replace(~r/\s/, "")
input_id = input_id <> "_" <> value_id
input =
Tag.tag(
:input,
name: Form.input_name(form, field) <> "[]",
id: input_id,
type: "checkbox",
value: value,
class: input_class,
checked: Enum.member?(selected, value)
)
draw_form_check(
input,
label,
input_id,
input_error,
input_opts[:inline]
)
end)
content =
Tag.content_tag :div, class: "#{control_col_class(form)}" do
[inputs, help]
end
opts = Keyword.put_new(opts, :label, [])
opts = put_in(opts[:label][:span], true)
draw_form_group(
draw_label(form, field, opts),
content,
opts
)
end
@doc "Creates radio buttons."
def radio_buttons(form = %Form{}, field, values, opts \\ []) when is_list(values) do
values = add_labels_to_values(values)
{input_opts, opts} = Keyword.pop(opts, :input, [])
{help, input_opts} = Keyword.pop(input_opts, :help)
input_id = Form.input_id(form, field)
help = draw_help(help)
error = draw_error_message(get_error(form, field))
inputs =
values
|> Enum.with_index()
|> Enum.map(fn {{label, value}, index} ->
value = elem(HTML.html_escape(value), 1)
# error needs to show up only on last element
radio_error =
if Enum.count(values) - 1 == index do
error
else
""
end
radio_class = "form-check-input " <> is_valid_class(form, field)
value_id = value |> String.replace(~r/\s/, "")
input_id = input_id <> "_" <> value_id
draw_form_check(
Form.radio_button(form, field, value, class: radio_class),
label,
input_id,
radio_error,
input_opts[:inline]
)
end)
content =
Tag.content_tag :div, class: "#{control_col_class(form)}" do
[inputs, help]
end
opts = Keyword.put_new(opts, :label, [])
opts = put_in(opts[:label][:span], true)
draw_form_group(
draw_label(form, field, opts),
content,
opts
)
end
@doc "Creates submit button."
def submit(form = %Form{}, opts) when is_list(opts), do: draw_submit(form, nil, opts)
@doc "Creates submit button."
def submit(form = %Form{}, label), do: draw_submit(form, label, [])
@doc "Creates submit button."
def submit(form = %Form{}, label, opts), do: draw_submit(form, label, opts)
@doc "Creates submit button."
def submit(form = %Form{}), do: draw_submit(form, nil, [])
@doc "Creates static form field without any field required in the changeset."
def static(form = %Form{}, label, content) do
label =
Tag.content_tag(
:label,
label,
class: "#{label_col_class(form)} #{label_align_class(form)}"
)
content =
Tag.content_tag(:div, content, class: "form-control-plaintext #{control_col_class(form)}")
draw_form_group(label, content, [])
end
# -- Private methods ---------------------------------------------------------
defp label_col_class(form) do
default = Application.get_env(:phoenix_mdbootstrap_form, :label_col_class, @label_col_class)
Keyword.get(form.options, :label_col, default)
end
defp control_col_class(form) do
default =
Application.get_env(:phoenix_mdbootstrap_form, :control_col_class, @control_col_class)
Keyword.get(form.options, :control_col, default)
end
defp label_align_class(form) do
default =
Application.get_env(:phoenix_mdbootstrap_form, :label_align_class, @label_align_class)
Keyword.get(form.options, :label_align, default)
end
defp form_group_class(opts) do
default = Application.get_env(:phoenix_mdbootstrap_form, :form_group_class, @form_group_class)
Keyword.get(opts, :form_group, default)
end
defp merge_css_classes(opts) do
{classes, rest} = Keyword.split(opts, [:class])
class =
classes
|> Keyword.values()
|> Enum.join(" ")
[class: class] ++ rest
end
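# e.g. merge_css_classes(class: "form-control", class: "custom", id: "x")
#      #=> [class: "form-control custom", id: "x"]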
defp is_valid_class(form, field) do
case has_error?(form, field) do
true -> "is-invalid"
_ -> ""
end
end
defp has_error?(%Form{errors: errors}, field), do: Keyword.has_key?(errors, field)
defp has_error?(_, _), do: false
defp get_error(form, field) do
case has_error?(form, field) do
true ->
msg = form.errors[field] |> elem(0)
opts = form.errors[field] |> elem(1)
translate_error(msg, opts)
_ ->
nil
end
end
defp add_labels_to_values(values) when is_list(values) do
Enum.into(values, [], fn value ->
case value do
{k, v} -> {k, v}
v -> {Form.humanize(v), v}
end
end)
end
defp draw_generic_input(type, form, field, options, opts) do
draw_form_group(
draw_label(form, field, opts),
draw_control(type, form, field, options, opts),
opts
)
end
defp draw_control(:file_input = type, form, field, options, opts) do
Tag.content_tag :div, class: "custom-file #{control_col_class(form)}" do
is_valid_class = is_valid_class(form, field)
input_opts =
[class: "custom-file-input #{is_valid_class}"] ++
Keyword.get(opts, :input, [])
{prepend, input_opts} = Keyword.pop(input_opts, :prepend)
{append, input_opts} = Keyword.pop(input_opts, :append)
{help, input_opts} = Keyword.pop(input_opts, :help)
input =
draw_input(type, form, field, options, input_opts)
|> draw_input_group(prepend, append)
help = draw_help(help)
error = draw_error_message(get_error(form, field))
label = Tag.content_tag(:label, "", class: "custom-file-label")
[input, label, error, help]
end
end
defp draw_control(type, form, field, options, opts) do
Tag.content_tag :div, class: control_col_class(form) do
is_valid_class = is_valid_class(form, field)
input_opts =
[class: "form-control #{is_valid_class}"] ++
Keyword.get(opts, :input, []) ++
Keyword.get(opts, :multiple, [])
{prepend, input_opts} = Keyword.pop(input_opts, :prepend)
{append, input_opts} = Keyword.pop(input_opts, :append)
{help, input_opts} = Keyword.pop(input_opts, :help)
input =
draw_input(type, form, field, options, input_opts)
|> draw_input_group(prepend, append)
help = draw_help(help)
error = draw_error_message(get_error(form, field))
[input, error, help]
end
end
defp draw_input(:select, form, field, options, opts) do
Form.select(form, field, options, merge_css_classes(opts))
end
defp draw_input(type, form, field, nil, opts) do
apply(Form, type, [form, field, merge_css_classes(opts)])
end
defp draw_form_group(label, content, opts) do
Tag.content_tag :div, class: form_group_class(opts) do
[label, content]
end
end
defp draw_label(form, field, opts) when is_atom(field) do
label_opts = Keyword.get(opts, :label, [])
if Keyword.get(label_opts, :show, true) do
{text, label_opts} = Keyword.pop(label_opts, :text, Form.humanize(field))
label_opts = [class: "#{label_col_class(form)} #{label_align_class(form)}"] ++ label_opts
label_opts = merge_css_classes(label_opts)
{is_span, label_opts} = Keyword.pop(label_opts, :span, false)
if is_span do
Tag.content_tag(:span, text, label_opts)
else
Form.label(form, field, text, label_opts)
end
else
Tag.content_tag(:span, "")
end
end
defp draw_input_group(input, nil, nil), do: input
defp draw_input_group(input, prepend, append) do
Tag.content_tag :div, class: "input-group" do
[
draw_input_group_addon_prepend(prepend),
input,
draw_input_group_addon_append(append)
]
end
end
defp draw_input_group_addon_prepend(nil), do: ""
defp draw_input_group_addon_prepend(content) do
text = Tag.content_tag(:span, content, class: "input-group-text")
Tag.content_tag(:div, text, class: "input-group-prepend")
end
defp draw_input_group_addon_append(nil), do: ""
defp draw_input_group_addon_append(content) do
text = Tag.content_tag(:span, content, class: "input-group-text")
Tag.content_tag(:div, text, class: "input-group-append")
end
defp draw_help(nil), do: ""
defp draw_help(content) do
Tag.content_tag(:small, content, class: "form-text text-muted")
end
defp draw_submit(form = %Form{}, label, opts) do
{alternative, opts} = Keyword.pop(opts, :alternative, "")
opts = [class: "btn"] ++ opts
content =
Tag.content_tag :div, class: "#{control_col_class(form)} ml-auto" do
[Form.submit(label || "Submit", merge_css_classes(opts)), alternative]
end
draw_form_group("", content, opts)
end
defp draw_form_check(input, label, for_attr, error, is_inline) do
inline_class = if is_inline, do: "form-check-inline", else: ""
label = Tag.content_tag(:label, label, for: for_attr, class: "form-check-label")
Tag.content_tag :div, class: "form-check #{inline_class}" do
[input, label, error]
end
end
defp draw_error_message(nil), do: ""
defp draw_error_message(message) do
Tag.content_tag(:div, message, class: "invalid-feedback")
end
defp translate_error(msg, opts) do
default_fn = fn {msg, opts} ->
Enum.reduce(opts, msg, fn {key, value}, acc ->
String.replace(acc, "%{#{key}}", to_string(value))
end)
end
translate_error_fn =
Application.get_env(:phoenix_mdbootstrap_form, :translate_error_function, default_fn)
translate_error_fn.({msg, opts})
end
end
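# Not part of the original file: a minimal sketch of how the
# `:translate_error_function` hook read by `translate_error/2` above could be
# configured, assuming a hypothetical Gettext backend `MyAppWeb.Gettext`:
#
#     config :phoenix_mdbootstrap_form,
#       translate_error_function: fn {msg, opts} ->
#         Gettext.dgettext(MyAppWeb.Gettext, "errors", msg, Map.new(opts))
#       end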
# (source file: lib/phoenix_mdbootstrap_form.ex)
defmodule Infer.Ecto.Query do
@moduledoc """
Functions to dynamically generate Ecto query parts.
"""
alias Infer.Util
alias Infer.Evaluation, as: Eval
alias __MODULE__.Builder
import Ecto.Query, only: [dynamic: 1, dynamic: 2, from: 2]
defguard is_simple(val)
when is_integer(val) or is_float(val) or is_atom(val) or is_binary(val) or
is_boolean(val) or is_nil(val) or is_struct(val)
@lt_ops ~w(< lt less_than before)a
@lte_ops ~w(<= lte less_than_or_equal on_or_before at_or_before)a
@gte_ops ~w(>= gte greater_than_or_equal on_or_after at_or_after)a
@gt_ops ~w(> gt greater_than after)a
@all_ops @lt_ops ++ @lte_ops ++ @gte_ops ++ @gt_ops
defmodule TranslationError do
defexception [:queryable, :condition]
def message(e),
do: "Could not translate some conditions to SQL:\n#{inspect(e.condition, pretty: true)}"
end
@doc """
Add predicate-based filters to a queryable and return it.
"""
def where(queryable, condition, opts \\ []) when is_list(opts) do
eval = Eval.from_options(opts)
case apply_condition(queryable, condition, eval) do
{queryable, true} -> queryable
{queryable, condition} -> raise TranslationError, queryable: queryable, condition: condition
end
end
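# Illustrative usage (not in the original file), assuming a hypothetical
# `Post` Ecto schema with `:published` and `:view_count` fields:
#
#     Post
#     |> Infer.Ecto.Query.where(%{published: true, view_count: {:greater_than, 100}})
#     |> Repo.all()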
@doc """
Returns a 2-tuple with
1. the modified queryable with the given conditions applied as WHERE clauses
2. any remaining conditions that couldn't be added to the query
Returns `{query, true}` if all conditions could be added to the query.
"""
def apply_condition(queryable, condition, %Eval{} = eval) do
{builder, condition} =
queryable
|> Builder.init(eval)
|> apply_condition(condition)
{builder.root_query, condition}
end
# maps a condition and adds it to the current `root_query`
defp apply_condition(builder, condition) when is_map(condition) do
apply_condition(builder, {:all, condition})
end
defp apply_condition(builder, {:all, conditions}) do
Enum.reduce(conditions, {builder, []}, fn condition, {builder, remaining_conditions} ->
case map_condition(builder, condition) do
{builder, where} ->
query = builder.root_query
query = from(q in query, where: ^where)
builder = %{builder | root_query: query}
{builder, remaining_conditions}
:error ->
{builder, [condition | remaining_conditions]}
end
end)
|> case do
{builder, []} -> {builder, true}
{builder, conditions} -> {builder, {:all, conditions}}
end
end
defp apply_condition(builder, condition) do
case map_condition(builder, condition) do
{builder, where} ->
query = builder.root_query
query = from(q in query, where: ^where)
builder = %{builder | root_query: query}
{builder, true}
:error ->
{builder, condition}
end
end
# maps an Infer condition to an Ecto query condition
defp map_condition(builder, bool) when is_boolean(bool) do
{builder, bool}
end
defp map_condition(builder, {:not, condition}) do
case map_condition(builder, condition) do
:error -> :error
{builder, where} -> {builder, dynamic(not (^where))}
end
end
defp map_condition(builder, conditions) when is_map(conditions) do
map_condition(builder, {:all, conditions})
end
defp map_condition(builder, {:all, conditions}) do
Enum.reduce_while(conditions, {builder, true}, fn condition, {builder, acc_query} ->
case map_condition(builder, condition) do
:error -> {:halt, :error}
{builder, where} -> {:cont, {builder, combine_and(where, acc_query)}}
end
end)
end
defp map_condition(builder, conditions) when is_list(conditions) do
Enum.reduce_while(conditions, {builder, false}, fn condition, {builder, acc_query} ->
case map_condition(builder, condition) do
:error -> {:halt, :error}
{builder, where} -> {:cont, {builder, combine_or(where, acc_query)}}
end
end)
end
defp map_condition(builder, {:args, sub_condition}) do
case Infer.Engine.evaluate_condition(
{:args, sub_condition},
builder.eval.root_subject,
builder.eval
) do
{:ok, result, _} -> {builder, result}
end
end
defp map_condition(builder, {key, val}) when is_atom(key) do
case field_info(key, builder) do
:field ->
left = Builder.field(builder, key)
case val do
vals when is_list(vals) ->
groups =
vals
|> List.flatten()
|> Enum.group_by(fn
val when is_integer(val) -> :integer
val when is_float(val) -> :float
val when is_binary(val) or is_atom(val) -> :string
_other -> :other
end)
{other_vals, simple_groups} = Map.pop(groups, :other, [])
grouped_vals = Map.values(simple_groups) ++ other_vals
Enum.reduce_while(grouped_vals, {builder, false}, fn val, {builder, acc_query} ->
case val do
vals when is_list(vals) -> {builder, compare(left, :eq, vals, builder)}
val -> map_condition(builder, {key, val})
end
|> case do
:error -> {:halt, :error}
{builder, where} -> {:cont, {builder, combine_or(where, acc_query)}}
end
end)
{:not, val} ->
Builder.negate(builder, fn builder ->
with {builder, right} <- to_val(builder, val) do
{builder, compare(left, :eq, right, builder)}
end
end)
{op, val} when op in @all_ops ->
with {builder, right} <- to_val(builder, val) do
{builder, compare(left, op, right, builder)}
end
val ->
with {builder, right} <- to_val(builder, val) do
{builder, compare(left, :eq, right, builder)}
end
end
{:predicate, rules} ->
case rules_for_value(rules, val, builder) do
:error -> :error
condition -> map_condition(builder, condition)
end
{:assoc, :one, _assoc} ->
Builder.with_join(builder, key, fn builder ->
map_condition(builder, val)
end)
{:assoc, :many, assoc} ->
%{queryable: queryable, related_key: related_key, owner_key: owner_key} = assoc
as = Builder.current_alias(builder)
subquery =
from(q in queryable,
where: field(q, ^related_key) == field(parent_as(^as), ^owner_key)
)
Builder.step_into(builder, key, subquery, fn builder ->
map_condition(builder, val)
end)
end
end
# maps the right side of an Infer condition to an Ecto Query value
defp to_val(builder, {:ref, path}), do: reference_path(builder, path)
defp to_val(builder, val) when is_simple(val), do: {builder, val}
# returns a reference to a field as an Ecto Query value
defp reference_path(builder, path) do
Builder.from_root(builder, fn builder ->
do_ref(builder, path)
end)
end
defp do_ref(builder, [:args | _] = path) do
case Infer.Engine.resolve_source({:ref, path}, builder.eval) do
{:ok, result, _} -> {builder, result}
end
end
defp do_ref(builder, field) when is_atom(field), do: do_ref(builder, [field])
defp do_ref(builder, [field]) do
{builder, Builder.field(builder, field, true)}
end
defp do_ref(builder, [field | path]) do
case field_info(field, builder) do
{:assoc, :one, _assoc} -> Builder.with_join(builder, field, &do_ref(&1, path))
_other -> :error
end
end
defp combine_and(true, right), do: right
defp combine_and(left, true), do: left
defp combine_and(false, _right), do: false
defp combine_and(_left, false), do: false
defp combine_and(left, right), do: dynamic(^left and ^right)
defp combine_or(true, _right), do: true
defp combine_or(_left, true), do: true
defp combine_or(false, right), do: right
defp combine_or(left, false), do: left
defp combine_or(left, right), do: dynamic(^left or ^right)
defp compare(left, :eq, nil, %{negate?: false}),
do: dynamic(is_nil(^left))
defp compare(left, :eq, nil, %{negate?: true}),
do: dynamic(not is_nil(^left))
defp compare(left, :eq, vals, %{negate?: false}) when is_list(vals),
do: dynamic(^left in ^vals)
defp compare(left, :eq, vals, %{negate?: true}) when is_list(vals),
do: dynamic(^left not in ^vals)
defp compare(left, :eq, val, %{negate?: false}),
do: dynamic(^left == ^val)
defp compare(left, :eq, val, %{negate?: true}),
do: dynamic(^left != ^val)
defp compare(left, op, val, %{negate?: false}) when op in @lt_ops,
do: dynamic(^left < ^val)
defp compare(left, op, val, %{negate?: true}) when op in @lt_ops,
do: dynamic(^left >= ^val)
defp compare(left, op, val, %{negate?: false}) when op in @lte_ops,
do: dynamic(^left <= ^val)
defp compare(left, op, val, %{negate?: true}) when op in @lte_ops,
do: dynamic(^left > ^val)
defp compare(left, op, val, %{negate?: false}) when op in @gte_ops,
do: dynamic(^left >= ^val)
defp compare(left, op, val, %{negate?: true}) when op in @gte_ops,
do: dynamic(^left < ^val)
defp compare(left, op, val, %{negate?: false}) when op in @gt_ops,
do: dynamic(^left > ^val)
defp compare(left, op, val, %{negate?: true}) when op in @gt_ops,
do: dynamic(^left <= ^val)
defp field_info(predicate, %Builder{} = builder) do
type = Builder.current_type(builder)
case Util.rules_for_predicate(predicate, type, builder.eval) do
[] ->
case Util.Ecto.association_details(type, predicate) do
%_{cardinality: :one} = assoc ->
{:assoc, :one, assoc}
%_{cardinality: :many} = assoc ->
{:assoc, :many, assoc}
_other ->
case Util.Ecto.field_details(type, predicate) do
nil ->
raise ArgumentError,
"""
Unknown field #{inspect(predicate)} on #{inspect(type)}.
Path: #{inspect(builder.path)}
Types: #{inspect(builder.types)}
"""
_other ->
:field
end
end
rules ->
{:predicate, rules}
end
end
# maps a comparison of "predicate equals value" to an Infer condition
defp rules_for_value(rules, val, %{negate?: false}) do
vals = List.wrap(val)
rules
|> Enum.reverse()
|> Enum.reduce_while(false, fn
{condition, val}, acc when is_simple(val) ->
if val in vals do
{:cont, [condition, acc]}
else
{:cont, {:all, [{:not, condition}, acc]}}
end
_other, _acc ->
{:halt, :error}
end)
|> simplify_condition()
end
defp simplify_condition(conditions) when is_map(conditions) do
simplify_condition({:all, Enum.to_list(conditions)})
end
defp simplify_condition({:all, []}), do: true
defp simplify_condition({:all, conditions}) when is_list(conditions) do
conditions
# flatten
|> Enum.reduce([], fn
{:all, []}, acc ->
acc
{:all, other}, acc when is_list(other) ->
case simplify_condition({:all, other}) do
{:all, other} -> acc ++ other
other -> acc ++ [other]
end
other, acc ->
acc ++ [other]
end)
# shorten
|> Enum.reverse()
|> Enum.reduce_while([], fn condition, acc ->
case simplify_condition(condition) do
false -> {:halt, false}
true -> {:cont, acc}
condition -> {:cont, [condition | acc]}
end
end)
# wrap / unwrap
|> case do
[condition] -> condition
conditions when is_list(conditions) -> {:all, conditions}
other -> other
end
end
defp simplify_condition([]), do: false
defp simplify_condition(conditions) when is_list(conditions) do
conditions
# flatten
|> Enum.flat_map(fn
[] -> [false]
list when is_list(list) -> simplify_condition(list) |> wrap_condition()
other -> [other]
end)
# shorten
|> Enum.reverse()
|> Enum.reduce_while([], fn condition, acc ->
case simplify_condition(condition) do
true -> {:halt, true}
false -> {:cont, acc}
other -> {:cont, [other | acc]}
end
end)
# unwrap
|> case do
[condition] -> condition
other -> other
end
end
defp simplify_condition(condition), do: condition
defp wrap_condition(list) when is_list(list), do: list
defp wrap_condition(other), do: [other]
@doc """
Applies all known options to the given `queryable`
and returns it, along with all options that were unknown.
"""
def apply_options(queryable, opts) do
Enum.reduce(opts, {queryable, []}, fn
{:where, conditions}, {query, opts} -> {where(query, conditions), opts}
{:limit, limit}, {query, opts} -> {limit(query, limit), opts}
{:order_by, order}, {query, opts} -> {order_by(query, order), opts}
other, {query, opts} -> {query, [other | opts]}
end)
|> case do
{queryable, opts} -> {queryable, Enum.reverse(opts)}
end
end
@doc "Apply all options to the given `queryable`, raise on any unknown option."
def from_options(queryable, opts) do
{queryable, []} = apply_options(queryable, opts)
queryable
end
def limit(queryable, limit) do
from(q in queryable, limit: ^limit)
end
def order_by(queryable, field) when is_atom(field) do
from(q in queryable, order_by: field(q, ^field))
end
# see https://hexdocs.pm/ecto/Ecto.Query.html#dynamic/2-order_by
def order_by(queryable, fields) when is_list(fields) do
fields =
fields
|> Enum.map(fn
{direction, field} -> {direction, dynamic([q], field(q, ^field))}
field -> dynamic([q], field(q, ^field))
end)
from(q in queryable, order_by: ^fields)
end
def inspect(query, repo) do
IO.puts(to_sql(repo, query))
query
end
@doc "Returns generated SQL for given query with all params replaced"
def to_sql(repo, query) do
{sql, params} = repo.to_sql(:all, query)
params
|> Enum.with_index(1)
|> Enum.reverse()
|> Enum.reduce(sql, fn {param, i}, sql ->
String.replace(sql, "$#{i}", sql_escape(param))
end)
end
defp sql_escape(term, dquote \\ false)
defp sql_escape(true, _), do: "TRUE"
defp sql_escape(false, _), do: "FALSE"
defp sql_escape(nil, _), do: "NULL"
defp sql_escape(number, _) when is_integer(number) or is_float(number), do: to_string(number)
defp sql_escape(list, _) when is_list(list),
do: "'{#{Enum.map_join(list, ", ", &sql_escape(&1, true))}}'"
defp sql_escape(str, true) when is_binary(str), do: "\"#{String.replace(str, "\"", "\\\"")}\""
defp sql_escape(str, false) when is_binary(str), do: "'#{String.replace(str, "'", "''")}'"
defp sql_escape(other, dquote), do: other |> to_string() |> sql_escape(dquote)
end
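# Illustrative usage of `apply_options/2` (not in the original file), assuming
# a hypothetical `Post` Ecto schema:
#
#     {query, unknown_opts} =
#       Infer.Ecto.Query.apply_options(Post, where: %{published: true}, limit: 10, preload: [:author])
#     # `:preload` is not handled here, so unknown_opts == [preload: [:author]]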
# (source file: lib/infer/ecto/query.ex)
defmodule Expo.PluralForms.Evaluator do
@moduledoc false
alias Expo.PluralForms
@boolean_operators [:!=, :>, :<, :==, :>=, :<=, :&&, :||]
defmodule IntegerOperators do
@moduledoc false
# credo:disable-for-lines:28 Credo.Check.Readability.Specs
# credo:disable-for-lines:27 Credo.Check.Readability.FunctionNames
def left != right when Kernel.!=(left, right), do: 1
def _left != _right, do: 0
def left == right when Kernel.==(left, right), do: 1
def _left == _right, do: 0
def left > right when Kernel.>(left, right), do: 1
def _left > _right, do: 0
def left < right when Kernel.<(left, right), do: 1
def _left < _right, do: 0
def left <= right when Kernel.<=(left, right), do: 1
def _left <= _right, do: 0
def left >= right when Kernel.>=(left, right), do: 1
def _left >= _right, do: 0
# credo:disable-for-next-line Credo.Check.Warning.BoolOperationOnSameValues
def 1 && 1, do: 1
def _left && _right, do: 0
def 1 || _right, do: 1
def _left || 1, do: 1
def _left || _right, do: 0
end
@spec index(plural_forms :: PluralForms.plural(), n :: non_neg_integer()) :: non_neg_integer()
def index(plural, n)
def index(:n, n) when is_integer(n), do: n
def index(number, _n) when is_integer(number), do: number
def index({:if, condition, truthy, falsy}, n),
do: if(index(condition, n) == 1, do: index(truthy, n), else: index(falsy, n))
def index({:paren, content}, n), do: index(content, n)
for operator <- @boolean_operators do
def index({unquote(operator), left, right}, n),
do: IntegerOperators.unquote(operator)(index(left, n), index(right, n))
end
def index({:%, left, right}, n), do: rem(index(left, n), index(right, n))
@spec compile_index(plural_forms :: PluralForms.plural()) :: Macro.t()
def compile_index(plural) do
plural
|> unroll_nested_ifs_to_cond()
|> _compile_index()
end
defp unroll_nested_ifs_to_cond(plural)
defp unroll_nested_ifs_to_cond({:if, condition, truthy, falsy}) do
condition = unroll_nested_ifs_to_cond(condition)
truthy = unroll_nested_ifs_to_cond(truthy)
falsy = unroll_nested_ifs_to_cond(falsy)
# Only the false branch needs unrolling since only that is actually used
conditions =
case falsy do
{:cond, child_conditions} -> [{condition, truthy} | child_conditions]
other -> [{condition, truthy}, {1, other}]
end
{:cond, conditions}
end
defp unroll_nested_ifs_to_cond({:paren, plural}),
do: {:paren, unroll_nested_ifs_to_cond(plural)}
defp unroll_nested_ifs_to_cond({operator, left, right})
when operator in [:!=, :>, :<, :==, :%, :>=, :<=, :&&, :||],
do: {operator, unroll_nested_ifs_to_cond(left), unroll_nested_ifs_to_cond(right)}
defp unroll_nested_ifs_to_cond(:n), do: :n
defp unroll_nested_ifs_to_cond(number) when is_integer(number), do: number
defp _compile_index(plural)
defp _compile_index(:n) do
quote do
var!(n)
end
end
defp _compile_index(number) when is_integer(number), do: number
defp _compile_index({:cond, conditions}) do
conditions =
Enum.map(conditions, fn
{1, result} ->
{:->, [], [[true], _compile_index(result)]}
{condition, result} ->
{:->, [],
[
[
quote do
Kernel.==(unquote(_compile_index(condition)), 1)
end
],
_compile_index(result)
]}
end)
quote do
cond do: unquote(conditions)
end
end
defp _compile_index({:paren, content}), do: _compile_index(content)
defp _compile_index({:%, left, right}) do
quote do
rem(unquote(_compile_index(left)), unquote(_compile_index(right)))
end
end
for operator <- @boolean_operators do
defp _compile_index({unquote(operator) = operator, left, right}) do
quote do
IntegerOperators.unquote(operator)(
unquote(_compile_index(left)),
unquote(_compile_index(right))
)
end
end
end
end
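# Illustrative only (not in the original file): evaluating the common
# English-style rule "plural unless n == 1", written directly as the AST
# tuples this module consumes:
#
#     plural = {:if, {:!=, :n, 1}, 1, 0}
#     Expo.PluralForms.Evaluator.index(plural, 1) #=> 0
#     Expo.PluralForms.Evaluator.index(plural, 5) #=> 1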
# (source file: lib/expo/plural_forms/evaluator.ex)
defmodule Axon.Metrics do
@moduledoc """
Metric functions.
Metrics are used to measure the performance and compare
performance of models in easy-to-understand terms. Often
times, neural networks use surrogate loss functions such
as negative log-likelihood to indirectly optimize a certain
performance metric. Metrics such as accuracy, also called
the 0-1 loss, do not have useful derivatives (e.g. they
are information sparse), and are often intractable even
with low input dimensions.
Despite not being able to train specifically for certain
metrics, it's still useful to track these metrics to
monitor the performance of a neural network during training.
Metrics such as accuracy provide useful feedback during
training, whereas loss can sometimes be difficult to interpret.
All of the functions in this module are implemented as
numerical functions and can be JIT or AOT compiled with
any supported `Nx` compiler.
"""
import Nx.Defn
# Standard Metrics
@doc ~S"""
Computes the accuracy of the given predictions.
If the size of the last axis is 1, it performs a binary
accuracy computation with a threshold of 0.5. Otherwise,
computes categorical accuracy.
## Argument Shapes
* `y_true` - $\(d_0, d_1, ..., d_n\)$
* `y_pred` - $\(d_0, d_1, ..., d_n\)$
## Examples
iex> Axon.Metrics.accuracy(Nx.tensor([[1], [0], [0]]), Nx.tensor([[1], [1], [1]]))
#Nx.Tensor<
f32
0.3333333432674408
>
iex> Axon.Metrics.accuracy(Nx.tensor([[0, 1], [1, 0], [1, 0]]), Nx.tensor([[0, 1], [1, 0], [0, 1]]))
#Nx.Tensor<
f32
0.6666666865348816
>
iex> Axon.Metrics.accuracy(Nx.tensor([[0, 1, 0], [1, 0, 0]]), Nx.tensor([[0, 1, 0], [0, 1, 0]]))
#Nx.Tensor<
f32
0.5
>
"""
defn accuracy(y_true, y_pred) do
if elem(Nx.shape(y_pred), Nx.rank(y_pred) - 1) == 1 do
y_pred
|> Nx.greater(0.5)
|> Nx.equal(y_true)
|> Nx.mean()
else
y_true
|> Nx.argmax(axis: -1)
|> Nx.equal(Nx.argmax(y_pred, axis: -1))
|> Nx.mean()
end
end
@doc ~S"""
Computes the precision of the given predictions with
respect to the given targets.
## Argument Shapes
* `y_true` - $\(d_0, d_1, ..., d_n\)$
* `y_pred` - $\(d_0, d_1, ..., d_n\)$
## Options
* `:threshold` - threshold for truth value of the predictions.
Defaults to `0.5`
## Examples
iex> Axon.Metrics.precision(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))
#Nx.Tensor<
f32
0.6666666865348816
>
"""
defn precision(y_true, y_pred, opts \\ []) do
true_positives = true_positives(y_true, y_pred, opts)
false_positives = false_positives(y_true, y_pred, opts)
true_positives
|> Nx.divide(true_positives + false_positives + 1.0e-16)
end
@doc ~S"""
Computes the recall of the given predictions with
respect to the given targets.
## Argument Shapes
* `y_true` - $\(d_0, d_1, ..., d_n\)$
* `y_pred` - $\(d_0, d_1, ..., d_n\)$
## Options
* `:threshold` - threshold for truth value of the predictions.
Defaults to `0.5`
## Examples
iex> Axon.Metrics.recall(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))
#Nx.Tensor<
f32
0.6666666865348816
>
"""
defn recall(y_true, y_pred, opts \\ []) do
true_positives = true_positives(y_true, y_pred, opts)
false_negatives = false_negatives(y_true, y_pred, opts)
Nx.divide(true_positives, false_negatives + true_positives + 1.0e-16)
end
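# Not part of the original module: a minimal sketch of an F1 metric composed
# from the `precision/3` and `recall/3` functions above, reusing the same
# 1.0e-16 fuzz factor to avoid division by zero.
defn f1_score(y_true, y_pred, opts \\ []) do
  p = precision(y_true, y_pred, opts)
  r = recall(y_true, y_pred, opts)
  Nx.divide(2 * p * r, p + r + 1.0e-16)
end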
@doc """
Computes the number of true positive predictions with respect
to given targets.
## Options
* `:threshold` - threshold for truth value of predictions.
Defaults to `0.5`.
## Examples
iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])
iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])
iex> Axon.Metrics.true_positives(y_true, y_pred)
#Nx.Tensor<
u64
1
>
"""
defn true_positives(y_true, y_pred, opts \\ []) do
opts = keyword!(opts, threshold: 0.5)
thresholded_preds =
y_pred
|> Nx.greater(opts[:threshold])
thresholded_preds
|> Nx.equal(y_true)
|> Nx.logical_and(Nx.equal(thresholded_preds, 1))
|> Nx.sum()
end
@doc """
Computes the number of false negative predictions with respect
to given targets.
## Options
* `:threshold` - threshold for truth value of predictions.
Defaults to `0.5`.
## Examples
iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])
iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])
iex> Axon.Metrics.false_negatives(y_true, y_pred)
#Nx.Tensor<
u64
3
>
"""
defn false_negatives(y_true, y_pred, opts \\ []) do
opts = keyword!(opts, threshold: 0.5)
thresholded_preds =
y_pred
|> Nx.greater(opts[:threshold])
thresholded_preds
|> Nx.not_equal(y_true)
|> Nx.logical_and(Nx.equal(thresholded_preds, 0))
|> Nx.sum()
end
@doc """
Computes the number of true negative predictions with respect
to given targets.
## Options
* `:threshold` - threshold for truth value of predictions.
Defaults to `0.5`.
## Examples
iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])
iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])
iex> Axon.Metrics.true_negatives(y_true, y_pred)
#Nx.Tensor<
u64
1
>
"""
defn true_negatives(y_true, y_pred, opts \\ []) do
opts = keyword!(opts, threshold: 0.5)
thresholded_preds =
y_pred
|> Nx.greater(opts[:threshold])
thresholded_preds
|> Nx.equal(y_true)
|> Nx.logical_and(Nx.equal(thresholded_preds, 0))
|> Nx.sum()
end
@doc """
Computes the number of false positive predictions with respect
to given targets.
## Options
* `:threshold` - threshold for truth value of predictions.
Defaults to `0.5`.
## Examples
iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])
iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])
iex> Axon.Metrics.false_positives(y_true, y_pred)
#Nx.Tensor<
u64
2
>
"""
defn false_positives(y_true, y_pred, opts \\ []) do
opts = keyword!(opts, threshold: 0.5)
thresholded_preds =
y_pred
|> Nx.greater(opts[:threshold])
thresholded_preds
|> Nx.not_equal(y_true)
|> Nx.logical_and(Nx.equal(thresholded_preds, 1))
|> Nx.sum()
end
@doc ~S"""
Computes the sensitivity of the given predictions
with respect to the given targets.
## Argument Shapes
* `y_true` - $\(d_0, d_1, ..., d_n\)$
* `y_pred` - $\(d_0, d_1, ..., d_n\)$
## Options
* `:threshold` - threshold for truth value of the predictions.
Defaults to `0.5`
## Examples
iex> Axon.Metrics.sensitivity(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))
#Nx.Tensor<
f32
0.6666666865348816
>
"""
defn sensitivity(y_true, y_pred, opts \\ []) do
opts = keyword!(opts, threshold: 0.5)
recall(y_true, y_pred, opts)
end
@doc ~S"""
Computes the specificity of the given predictions
with respect to the given targets.
## Argument Shapes
* `y_true` - $\(d_0, d_1, ..., d_n\)$
* `y_pred` - $\(d_0, d_1, ..., d_n\)$
## Options
* `:threshold` - threshold for truth value of the predictions.
Defaults to `0.5`
## Examples
iex> Axon.Metrics.specificity(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))
#Nx.Tensor<
f32
0.0
>
"""
defn specificity(y_true, y_pred, opts \\ []) do
opts = keyword!(opts, threshold: 0.5)
thresholded_preds = Nx.greater(y_pred, opts[:threshold])
true_negatives =
thresholded_preds
|> Nx.equal(y_true)
|> Nx.logical_and(Nx.equal(thresholded_preds, 0))
|> Nx.sum()
false_positives =
thresholded_preds
|> Nx.not_equal(y_true)
|> Nx.logical_and(Nx.equal(thresholded_preds, 1))
|> Nx.sum()
Nx.divide(true_negatives, false_positives + true_negatives + 1.0e-16)
end
@doc ~S"""
Calculates the mean absolute error of predictions
with respect to targets.
$$l_i = \sum_i |\hat{y_i} - y_i|$$
## Argument Shapes
* `y_true` - $\(d_0, d_1, ..., d_n\)$
* `y_pred` - $\(d_0, d_1, ..., d_n\)$
## Examples
iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
iex> Axon.Metrics.mean_absolute_error(y_true, y_pred)
#Nx.Tensor<
f32
0.5
>
"""
defn mean_absolute_error(y_true, y_pred) do
y_true
|> Nx.subtract(y_pred)
|> Nx.abs()
|> Nx.mean()
end
# Combinators
@doc """
Returns a function which computes a running average given current average,
new observation, and current iteration.
## Examples
iex> cur_avg = 0.5
iex> iteration = 1
iex> y_true = Nx.tensor([[0, 1], [1, 0], [1, 0]])
iex> y_pred = Nx.tensor([[0, 1], [1, 0], [1, 0]])
iex> avg_acc = Axon.Metrics.running_average(&Axon.Metrics.accuracy/2)
iex> avg_acc.(cur_avg, [y_true, y_pred], iteration)
#Nx.Tensor<
f32
0.75
>
"""
def running_average(metric) do
&running_average_impl(&1, apply(metric, &2), &3)
end
defnp running_average_impl(avg, obs, i) do
avg
|> Nx.multiply(i)
|> Nx.add(obs)
|> Nx.divide(Nx.add(i, 1))
end
@doc """
Returns a function which computes a running sum given current sum,
new observation, and current iteration.
## Examples
iex> cur_sum = 12
iex> iteration = 2
iex> y_true = Nx.tensor([0, 1, 0, 1])
iex> y_pred = Nx.tensor([1, 1, 0, 1])
iex> fps = Axon.Metrics.running_sum(&Axon.Metrics.false_positives/2)
iex> fps.(cur_sum, [y_true, y_pred], iteration)
#Nx.Tensor<
s64
13
>
"""
def running_sum(metric) do
&running_sum_impl(&1, apply(metric, &2), &3)
end
defnp running_sum_impl(sum, obs, _) do
Nx.add(sum, obs)
end
end
# (source file: lib/axon/metrics.ex)
defmodule Nebulex.Adapters.Replicated do
@moduledoc ~S"""
Built-in adapter for replicated cache topology.
The replicated cache excels in its ability to handle data replication,
concurrency control and failover in a cluster, all while delivering
in-memory data access speeds. A clustered replicated cache is exactly
what it says it is: a cache that replicates its data to all cluster nodes.
There are several challenges to building a reliably replicated cache. The
first is how to get it to scale and perform well. Updates to the cache have
to be sent to all cluster nodes, and all cluster nodes have to end up with
the same data, even if multiple updates to the same piece of data occur at
the same time. Also, if a cluster node requests a lock, ideally it should
not have to get all cluster nodes to agree on the lock or at least do it in
a very efficient way (`:global` is used for this), otherwise it will scale
extremely poorly; yet in the case of a cluster node failure, all of the data
and lock information must be kept safely.
The best part of a replicated cache is its access speed. Since the data is
replicated to each cluster node, it is available for use without any waiting.
This is referred to as "zero latency access," and is perfect for situations
in which an application requires the highest possible speed in its data
access.
However, there are some limitations:
* <ins>Cost Per Update</ins> - Updating a replicated cache requires pushing
the new version of the data to all other cluster members, which will limit
scalability if there is a high frequency of updates per member.
* <ins>Cost Per Entry</ins> - The data is replicated to every cluster
member, so Memory Heap space is used on each member, which will impact
performance for large caches.
> Based on **"Distributed Caching Essential Lessons"** by **<NAME>**.
When used, the Cache expects the `:otp_app` and `:adapter` as options.
The `:otp_app` should point to an OTP application that has the cache
configuration. For example:
defmodule MyApp.ReplicatedCache do
use Nebulex.Cache,
otp_app: :my_app,
adapter: Nebulex.Adapters.Replicated
end
Optionally, you can configure the desired primary storage adapter with the
option `:primary_storage_adapter`; defaults to `Nebulex.Adapters.Local`.
defmodule MyApp.ReplicatedCache do
use Nebulex.Cache,
otp_app: :my_app,
adapter: Nebulex.Adapters.Replicated,
primary_storage_adapter: Nebulex.Adapters.Local
end
The configuration for the cache must be in your application environment,
usually defined in your `config/config.exs`:
config :my_app, MyApp.ReplicatedCache,
primary: [
gc_interval: 3_600_000,
backend: :shards
]
For more information about the usage, see `Nebulex.Cache` documentation.
## Options
This adapter supports the following options and all of them can be given via
the cache configuration:
* `:primary` - The options that will be passed to the adapter associated
with the local primary storage. These options will depend on the local
adapter to use.
* `:task_supervisor_opts` - Start-time options passed to
`Task.Supervisor.start_link/1` when the adapter is initialized.
* `:bootstrap_timeout` - a timeout in milliseconds that the bootstrap process
will wait after the cache supervision tree is started, so that the data can be
imported from remote nodes. Defaults to `1000`.
## Shared options
Almost all of the cache functions outlined in `Nebulex.Cache` module
accept the following options:
* `:timeout` - The time-out value in milliseconds for the command that
will be executed. If the timeout is exceeded, then the current process
will exit. For executing a command on remote nodes, this adapter uses
`Task.await/2` internally for receiving the result, so this option tells
how much time the adapter should wait for it. If the timeout is exceeded,
the task is shut down but the current process doesn't exit, only the
result associated with that task is skipped in the reduce phase.
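For example (illustrative only, using a cache module defined as above):

    MyApp.ReplicatedCache.put("key", "value", timeout: 5_000)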
## Extended API
This adapter provides some additional convenience functions to the
`Nebulex.Cache` API.
Retrieving the primary storage or local cache module:
MyCache.__primary__()
Retrieving the cluster nodes associated with the given cache name:
MyCache.nodes()
MyCache.nodes(:cache_name)
"""
# Provide Cache Implementation
@behaviour Nebulex.Adapter
@behaviour Nebulex.Adapter.Queryable
# Inherit default transaction implementation
use Nebulex.Adapter.Transaction
# Inherit default persistence implementation
use Nebulex.Adapter.Persistence
import Nebulex.Helpers
alias Nebulex.Cache.{Cluster, Stats}
alias Nebulex.RPC
## Adapter
@impl true
defmacro __before_compile__(env) do
otp_app = Module.get_attribute(env.module, :otp_app)
opts = Module.get_attribute(env.module, :opts)
primary = Keyword.get(opts, :primary_storage_adapter, Nebulex.Adapters.Local)
quote do
defmodule Primary do
@moduledoc """
This is the cache for the primary storage.
"""
use Nebulex.Cache,
otp_app: unquote(otp_app),
adapter: unquote(primary)
end
@doc """
A convenience function for getting the primary storage cache.
"""
def __primary__, do: Primary
@doc """
A convenience function for getting the cluster nodes.
"""
def nodes(name \\ __MODULE__), do: Cluster.get_nodes(name)
end
end
@impl true
def init(opts) do
# required cache name
cache = Keyword.fetch!(opts, :cache)
name = opts[:name] || cache
# maybe use stats
stat_counter = opts[:stat_counter] || Stats.init(opts)
# primary cache options
primary_opts =
opts
|> Keyword.get(:primary, [])
|> Keyword.put(:stat_counter, stat_counter)
# maybe put a name to primary storage
primary_opts =
if opts[:name],
do: [name: normalize_module_name([name, Primary])] ++ primary_opts,
else: primary_opts
# task supervisor to execute parallel and/or remote commands
task_sup_name = normalize_module_name([name, TaskSupervisor])
task_sup_opts = Keyword.get(opts, :task_supervisor_opts, [])
# bootstrap timeout in milliseconds
bootstrap_timeout = Keyword.get(opts, :bootstrap_timeout, 1000)
meta = %{
name: name,
primary_name: primary_opts[:name],
task_sup: task_sup_name,
stat_counter: stat_counter,
bootstrap_timeout: bootstrap_timeout
}
child_spec =
Nebulex.Adapters.Supervisor.child_spec(
name: normalize_module_name([name, Supervisor]),
strategy: :rest_for_one,
children: [
{cache.__primary__, primary_opts},
{Nebulex.Adapters.Replicated.Bootstrap, Map.put(meta, :cache, cache)},
{Task.Supervisor, [name: task_sup_name] ++ task_sup_opts}
]
)
# join the cache to the cluster
:ok = Cluster.join(name)
{:ok, child_spec, meta}
end
@impl true
def get(meta, key, opts) do
with_dynamic_cache(meta, :get, [key, opts])
end
@impl true
def get_all(meta, keys, opts) do
with_dynamic_cache(meta, :get_all, [keys, opts])
end
@impl true
def put(adapter_meta, key, value, _ttl, :put, opts) do
:ok = with_transaction(adapter_meta, :put, [key], [key, value, opts], opts)
true
end
def put(adapter_meta, key, value, _ttl, :put_new, opts) do
with_transaction(adapter_meta, :put_new, [key], [key, value, opts], opts)
end
def put(adapter_meta, key, value, _ttl, :replace, opts) do
with_transaction(adapter_meta, :replace, [key], [key, value, opts], opts)
end
@impl true
def put_all(adapter_meta, entries, _ttl, on_write, opts) do
keys = for {k, _} <- entries, do: k
action = if on_write == :put_new, do: :put_new_all, else: :put_all
with_transaction(adapter_meta, action, keys, [entries, opts], opts) || action == :put_all
end
@impl true
def delete(adapter_meta, key, opts) do
with_transaction(adapter_meta, :delete, [key], [key, opts], opts)
end
@impl true
def take(adapter_meta, key, opts) do
with_transaction(adapter_meta, :take, [key], [key, opts], opts)
end
@impl true
def incr(adapter_meta, key, incr, _ttl, opts) do
with_transaction(adapter_meta, :incr, [key], [key, incr, opts], opts)
end
@impl true
def has_key?(meta, key) do
with_dynamic_cache(meta, :has_key?, [key])
end
@impl true
def ttl(meta, key) do
with_dynamic_cache(meta, :ttl, [key])
end
@impl true
def expire(adapter_meta, key, ttl) do
with_transaction(adapter_meta, :expire, [key], [key, ttl])
end
@impl true
def touch(adapter_meta, key) do
with_transaction(adapter_meta, :touch, [key], [key])
end
@impl true
def size(meta) do
with_dynamic_cache(meta, :size, [])
end
@impl true
def flush(cache) do
# This operation locks all later write-like operations, but not the ones
# taking place at the moment the flush is performed, which may lead to
# inconsistency issues. TODO: come up with a better solution.
with_transaction(cache, :flush)
end
## Queryable
@impl true
def all(meta, query, opts) do
with_dynamic_cache(meta, :all, [query, opts])
end
@impl true
def stream(meta, query, opts) do
with_dynamic_cache(meta, :stream, [query, opts])
end
## Helpers
@doc """
Helper function to use dynamic cache for internal primary cache storage
when needed.
"""
def with_dynamic_cache(%{cache: cache, primary_name: nil}, action, args) do
apply(cache.__primary__, action, args)
end
def with_dynamic_cache(%{cache: cache, primary_name: primary_name}, action, args) do
cache.__primary__.with_dynamic_cache(primary_name, fn ->
apply(cache.__primary__, action, args)
end)
end
## Private Functions
defp with_transaction(
%{name: name} = adapter_meta,
action,
keys \\ [:"$global_lock"],
args \\ [],
opts \\ []
) do
# Encapsulation is being broken here since we are accessing to the internal
# table `:global_locks` managed by `:global`, but thus far, it was the
# simplest and fastest way to block all write-like operations when the
# `flush` action is performed or a new node is joined and the entries are
# being imported to it from another node. Perhaps find a better way to
# address these scenarios.
case :ets.lookup(:global_locks, {name, :"$global_lock"}) do
[_] ->
:ok = Process.sleep(1)
with_transaction(adapter_meta, action, keys, args, opts)
[] ->
transaction(
adapter_meta,
[keys: keys, nodes: Cluster.get_nodes(name)],
fn ->
multi_call(adapter_meta, action, args, opts)
end
)
end
end
defp multi_call(
%{name: name, task_sup: task_sup} = meta,
action,
args,
opts
) do
task_sup
|> RPC.multi_call(
Cluster.get_nodes(name),
__MODULE__,
:with_dynamic_cache,
[meta, action, args],
opts
)
|> handle_rpc_multi_call(action)
end
defp handle_rpc_multi_call({res, []}, _action), do: hd(res)
defp handle_rpc_multi_call({_, errors}, action) do
raise Nebulex.RPCMultiCallError, action: action, errors: errors
end
end
defmodule Nebulex.Adapters.Replicated.Bootstrap do
@moduledoc false
use GenServer
import Nebulex.Helpers
alias Nebulex.Adapters.Replicated
alias Nebulex.Cache.Cluster
alias Nebulex.Entry
@doc false
def start_link(%{name: name} = adapter_meta) do
GenServer.start_link(
__MODULE__,
adapter_meta,
name: normalize_module_name([name, Bootstrap])
)
end
@impl true
def init(%{bootstrap_timeout: timeout} = adapter_meta) do
timer_ref = Process.send_after(self(), :import, timeout)
{:ok, %{timer_ref: timer_ref, adapter_meta: adapter_meta}}
end
@impl true
def handle_info(:import, %{timer_ref: timer_ref, adapter_meta: adapter_meta} = state) do
_ = Process.cancel_timer(timer_ref)
:ok = import_from_nodes(adapter_meta)
{:noreply, state}
end
defp import_from_nodes(%{name: name, cache: cache} = meta) do
cluster_nodes = Cluster.get_nodes(name)
case cluster_nodes -- [node()] do
[] ->
:ok
nodes ->
Replicated.transaction(
meta,
[
keys: [:"$global_lock"],
nodes: cluster_nodes
],
fn ->
nodes
|> Enum.reduce_while([], &stream_entries(meta, &1, &2))
|> Enum.each(&cache.__primary__.put(&1.key, &1.value, ttl: Entry.ttl(&1)))
end
)
end
end
defp stream_entries(meta, node, acc) do
# FIXME: this is because coveralls does not check this as covered
# coveralls-ignore-start
stream_fun = fn ->
meta
|> Replicated.stream(:unexpired, return: :entry, page_size: 10)
|> Stream.map(& &1)
|> Enum.to_list()
end
# coveralls-ignore-stop
case :rpc.call(node, Kernel, :apply, [stream_fun, []]) do
{:badrpc, _} -> {:cont, acc}
entries -> {:halt, entries}
end
end
end
# (source file: lib/nebulex/adapters/replicated.ex)
defmodule Aecore.Chain.ChainState do
@moduledoc """
Module used for calculating the block and chain states.
The chain state is a map that tells us how many tokens each account has.
"""
require Logger
@doc """
Calculates the balance of each account mentioned
in the transactions of a single block, and returns a map with the
accounts as keys and their balances as values.
"""
@spec calculate_block_state(list(), integer()) :: map()
def calculate_block_state(txs, latest_block_height) do
empty_block_state = %{}
block_state = for transaction <- txs do
updated_block_state =
cond do
transaction.data.from_acc != nil ->
update_block_state(empty_block_state, transaction.data.from_acc,
-(transaction.data.value + transaction.data.fee),
transaction.data.nonce, transaction.data.lock_time_block, false)
true ->
empty_block_state
end
add_to_locked = latest_block_height + 1 <= transaction.data.lock_time_block
update_block_state(updated_block_state, transaction.data.to_acc, transaction.data.value,
0, transaction.data.lock_time_block, add_to_locked)
end
reduce_map_list(block_state)
end
@doc """
Calculates the state of the chain with the new block added
to the current state, and returns a map with the
accounts as keys and their balances as values.
"""
@spec calculate_chain_state(map(), map()) :: map()
def calculate_chain_state(block_state, chain_state) do
merge_states(block_state, chain_state)
end
@doc """
Builds a merkle tree from the passed chain state and
returns the root hash of the tree.
"""
@spec calculate_chain_state_hash(map()) :: binary()
def calculate_chain_state_hash(chain_state) do
merkle_tree_data =
for {account, data} <- chain_state do
{account, :erlang.term_to_binary(data)}
end
if Enum.empty?(merkle_tree_data) do
<<0::256>>
else
merkle_tree =
merkle_tree_data
|> List.foldl(:gb_merkle_trees.empty(), fn node, merkle_tree ->
:gb_merkle_trees.enter(elem(node, 0), elem(node, 1), merkle_tree)
end)
:gb_merkle_trees.root_hash(merkle_tree)
end
end
@spec calculate_total_tokens(map()) :: integer()
def calculate_total_tokens(chain_state) do
Enum.reduce(chain_state, {0, 0, 0}, fn({_account, data}, acc) ->
{total_tokens, total_unlocked_tokens, total_locked_tokens} = acc
locked_tokens =
Enum.reduce(data.locked, 0, fn(%{amount: amount}, locked_sum) ->
locked_sum + amount
end)
new_total_tokens = total_tokens + data.balance + locked_tokens
new_total_unlocked_tokens = total_unlocked_tokens + data.balance
new_total_locked_tokens = total_locked_tokens + locked_tokens
{new_total_tokens, new_total_unlocked_tokens, new_total_locked_tokens}
end)
end
@spec validate_chain_state(map()) :: boolean()
def validate_chain_state(chain_state) do
chain_state
|> Enum.map(fn{_account, data} -> Map.get(data, :balance, 0) >= 0 end)
|> Enum.all?()
end
@spec update_chain_state_locked(map(), integer()) :: map()
def update_chain_state_locked(chain_state, new_block_height) do
Enum.reduce(chain_state, %{}, fn({account, %{balance: balance, nonce: nonce, locked: locked}}, acc) ->
{unlocked_amount, updated_locked} =
Enum.reduce(locked, {0, []}, fn(%{amount: amount, block: lock_time_block}, {amount_update_value, updated_locked}) ->
cond do
lock_time_block > new_block_height ->
{amount_update_value, updated_locked ++ [%{amount: amount, block: lock_time_block}]}
lock_time_block == new_block_height ->
{amount_update_value + amount, updated_locked}
true ->
Logger.error(fn ->
"Update chain state locked:
new block height (#{new_block_height}) greater than lock time block (#{lock_time_block})"
end)
{amount_update_value, updated_locked}
end
end)
Map.put(acc, account, %{balance: balance + unlocked_amount, nonce: nonce, locked: updated_locked})
end)
end
@spec update_block_state(map(), binary(), integer(), integer(), integer(), boolean()) :: map()
defp update_block_state(block_state, account, value, nonce, lock_time_block, add_to_locked) do
block_state_filled_empty =
cond do
!Map.has_key?(block_state, account) ->
Map.put(block_state, account, %{balance: 0, nonce: 0, locked: []})
true ->
block_state
end
new_balance = if(add_to_locked) do
block_state_filled_empty[account].balance
else
block_state_filled_empty[account].balance + value
end
new_nonce = cond do
block_state_filled_empty[account].nonce < nonce ->
nonce
true ->
block_state_filled_empty[account].nonce
end
new_locked = if(add_to_locked) do
block_state_filled_empty[account].locked ++ [%{amount: value, block: lock_time_block}]
else
block_state_filled_empty[account].locked
end
new_account_state = %{balance: new_balance,
nonce: new_nonce,
locked: new_locked}
Map.put(block_state_filled_empty, account, new_account_state)
end
@spec reduce_map_list(list()) :: map()
defp reduce_map_list(list) do
List.foldl(list, %{}, fn x, acc ->
merge_states(x, acc)
end)
end
@spec merge_states(map(), map()) :: map()
defp merge_states(new_state, destination_state) do
Map.merge(new_state, destination_state, fn _key, v1, v2 ->
new_nonce = cond do
v1.nonce > v2.nonce ->
v1.nonce
v2.nonce > v1.nonce ->
v2.nonce
true ->
v1.nonce
end
%{balance: v1.balance + v2.balance,
nonce: new_nonce,
locked: v1.locked ++ v2.locked}
end)
end
end
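# Illustrative only (not in the original file): tokens locked until block 10
# are moved into the balance once that height is reached.
#
#     chain_state = %{"acc1" => %{balance: 100, nonce: 1, locked: [%{amount: 50, block: 10}]}}
#     Aecore.Chain.ChainState.update_chain_state_locked(chain_state, 10)
#     #=> %{"acc1" => %{balance: 150, nonce: 1, locked: []}}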
# (source file: apps/aecore/lib/aecore/chain/chain_state.ex)
use Croma
defmodule Antikythera.Xml do
el = inspect(__MODULE__.Element)
@moduledoc """
Convenient XML parser module wrapping [fast_xml](https://github.com/processone/fast_xml).
`decode/2` can parse XML into `#{el}.t`, and `encode/2` can serialize `#{el}.t` back to XML string.
`#{el}.t` is an XML element data structure, and it is a JSON-convertible struct.
You can safely convert elements to JSON using `Poison.encode/2` while keeping the order of appearance of children,
and also convert them back to `#{el}.t` with `Poison.decode/2` and `#{el}.new/1`.
Note that the order of attributes will not be preserved, since it is not significant.
See [here](https://www.w3.org/TR/xml/#sec-starttags).
Namespaces of tags (e.g. "ns" in `<ns:tag>`) are kept as-is in the `:name` of elements.
Namespace definitions (e.g. `xmlns:ns='http://example.com/ns'`) are treated as plain attributes,
and kept as-is in the `:attributes` of elements.
## `Access` behaviour
`#{el}` implements `Access` behaviour for convenient lookups and updates.
Following access patterns are available:
- `element[:name]`, `element[:attributes]`, `element[:children]`
- Fetch values of fields in dynamic lookup style.
- `element["@some_attr"]`
- Fetch value of "some_attr" in `:attributes` map.
- `element[:texts]`
- Fetch text (character data) children. It always returns list.
- `element["some_name"]`
- Fetch child elements with `name: "some_name"`. It always returns list.
You can also use these patterns in `Kernel.get_in/2` and its variants.
iex> xml = "<a>foo<b>bar</b>baz</a>"
iex> element = #{inspect(__MODULE__)}.decode!(xml)
%#{el}{name: "a", attributes: %{}, children: [
"foo",
%#{el}{name: "b", attributes: %{}, children: ["bar"]},
"baz",
]}
iex> get_in(element, [:texts])
["foo", "baz"]
iex> get_in(element, ["b", Access.at(0), :texts])
["bar"]
iex> get_and_update_in(element, [:children, Access.at(0)], fn _ -> :pop end)
{"foo",
%#{el}{name: "a", attributes: %{}, children: [
%#{el}{name: "b", attributes: %{}, children: ["bar"]},
"baz",
]}}
iex> update_in(element, [:children, Access.all()], fn
...> text when is_binary(text) -> %#{el}{name: "b", attributes: %{}, children: [text]}
...> e -> e
...> end)
%#{el}{name: "a", attributes: %{}, children: [
%#{el}{name: "b", attributes: %{}, children: ["foo"]},
%#{el}{name: "b", attributes: %{}, children: ["bar"]},
%#{el}{name: "b", attributes: %{}, children: ["baz"]},
]}
iex> update_in(element, ["@id"], fn _ -> "001" end)
%#{el}{name: "a", attributes: %{"id" => "001"}, children: [
"foo",
%#{el}{name: "b", attributes: %{}, children: ["bar"]},
"baz",
]}
Notes on updating with `Kernel.get_and_update_in/3` and its variants:
- Struct fields are static and cannot be popped.
- Custom access keys except "@some_attr" cannot be used in updating.
Use `:children` instead, in order to update children while preserving order of appearance.
"""
alias Croma.Result, as: R
defmodule Content do
alias Antikythera.Xml.Element
@type t :: String.t | Element.t
defun valid?(v :: term) :: boolean do
is_binary(v) or Element.valid?(v)
end
defun new(v :: term) :: R.t(t) do
s when is_binary(s) -> {:ok, s}
m when is_map(m) -> Element.new(m)
_ -> {:error, {:invalid_value, [__MODULE__]}}
end
end
defmodule Element do
use Croma.Struct, recursive_new?: true, fields: [
name: Croma.String,
attributes: Croma.Map,
children: Croma.TypeGen.list_of(Content),
]
@behaviour Access
# Access behaviour implementations
@impl true
def fetch(%__MODULE__{name: n} , :name ) , do: {:ok, n}
def fetch(%__MODULE__{attributes: a}, :attributes ) , do: {:ok, a}
def fetch(%__MODULE__{children: c} , :children ) , do: {:ok, c}
def fetch(%__MODULE__{attributes: a}, "@" <> attribute) , do: Map.fetch(a, attribute)
def fetch(%__MODULE__{children: c} , :texts ) , do: {:ok, Enum.filter(c, &is_binary/1)}
def fetch(%__MODULE__{children: c} , key ) when is_binary(key), do: {:ok, Enum.filter(c, &has_name?(&1, key))}
def fetch(%__MODULE__{} , _ ) , do: :error
defp has_name?(%__MODULE__{name: n}, n), do: true
defp has_name?(_, _), do: false
@impl true
def get_and_update(%__MODULE__{} = e, key, f) when key in [:name, :attributes, :children] do
case e |> Map.fetch!(key) |> f.() do
{get_value, new_value} -> {get_value, update_struct_field(e, key, new_value)}
:pop -> raise "Cannot pop struct field!"
end
end
def get_and_update(%__MODULE__{attributes: as} = e, "@" <> attribute, f) do
current_value = Map.get(as, attribute)
case f.(current_value) do
{get_value, new_attr} when is_binary(new_attr) -> {get_value , %__MODULE__{e | attributes: Map.put(as, attribute, new_attr)}}
:pop -> {current_value, %__MODULE__{e | attributes: Map.delete(as, attribute)}}
end
end
def get_and_update(_e, key, _f) do
raise ~s[#{inspect(__MODULE__)}.get_and_update/3 only accepts :name, :attributes, :children or "@attribute" as key for updating, got: #{inspect(key)}]
end
defp update_struct_field(%__MODULE__{} = e, :name , new_name ) when is_binary(new_name) , do: %__MODULE__{e | name: new_name}
defp update_struct_field(%__MODULE__{} = e, :attributes, new_attrs ) when is_map(new_attrs) , do: %__MODULE__{e | attributes: new_attrs}
defp update_struct_field(%__MODULE__{} = e, :children , new_children) when is_list(new_children), do: %__MODULE__{e | children: new_children}
@impl true
def pop(element, key) do
get_and_update(element, key, fn _ -> :pop end)
end
end
@type decode_option :: {:trim, boolean}
@doc """
Reads an XML string and parses it into `#{el}.t`.
Comments and the XML header will be discarded.
It can read XHTML documents as long as they are well-formed,
though it does not understand Document Type Definitions (DTD, header lines such as "<!DOCTYPE html PUBLIC ..."),
so you must remove them beforehand.
It tries to read a document with UTF-8 encoding, regardless of the "encoding" attribute in the header.
Options:
- `:trim` - Drop whitespace-only texts. Default `false`.
- There is no universal way to distinguish significant from insignificant whitespace,
so this option may alter the meaning of the original document. Use with caution.
- In [W3C recommendation](https://www.w3.org/TR/REC-xml/#sec-white-space),
it is stated that whitespace texts (character data) are basically significant and must be preserved.
"""
defun decode(xml_string :: v[String.t], opts :: v[[decode_option]] \\ []) :: R.t(Element.t) do
case :fxml_stream.parse_element(xml_string) do
{:error, _} = e -> e
record -> from_record(record, Keyword.get(opts, :trim, false)) |> R.wrap_if_valid(Element)
end
end
defunp from_record({:xmlel, name, attrs, children} :: :fxml.xmlel, trim :: v[boolean]) :: Element.t do
%Element{
name: name,
attributes: Map.new(attrs),
children: children(children, trim, []),
}
end
defp children([] , _ , acc), do: Enum.reverse(acc)
defp children([{:xmlcdata, text} | tail] , true, acc), do: children(tail, true , cons_trimmed(text, acc))
defp children([{:xmlcdata, text} | tail] , _ , acc), do: children(tail, false, [text | acc])
defp children([{:xmlel, _, _, _} = el | tail], trim, acc), do: children(tail, trim , [from_record(el, trim) | acc])
defp cons_trimmed(text, acc) do
case String.trim(text) do
"" -> acc # Nothing other than whitespaces; must be indents
_ -> [text | acc] # Otherwise, keep leading/trailing whitespaces since they may have meanings
end
end
@xml_header ~S(<?xml version='1.0' encoding='UTF-8'?>)
@type encode_option :: {:pretty | :with_header, boolean}
@doc """
Serializes `#{el}.t` into XML string.
Specifications:
- Trailing newline will not be generated.
- All single- and double-quotations in attribute values or entity values are escaped to
`'` and `"` respectively.
- All attribute values are SINGLE-quoted.
- Does not insert a whitespace before "/>" in element without children.
Options:
- `:pretty` - Pretty print with 2-space indents. Default `false`.
- Similar to the `:trim` option in `decode/2`, inserted whitespaces may be significant,
and thus can alter the meaning of the original document. Use with caution.
- It does not insert whitespaces into elements with [mixed content](https://www.w3.org/TR/REC-xml/#sec-mixed-content)
or their descendants, in order to reduce the probability of altering the meaning of the original document.
- `:with_header` - Prepend `#{@xml_header}\\n`. Default `false`.
"""
defun encode(xml_element :: v[Element.t], opts :: v[[encode_option]] \\ []) :: String.t do
body = xml_element |> to_record(Keyword.get(opts, :pretty, false), 0) |> :fxml.element_to_binary()
case opts[:with_header] do
true -> "#{@xml_header}\n" <> body
_ -> body
end
end
defunp to_record(content :: Content.t, pretty? :: boolean, level :: non_neg_integer) :: :fxml.xmlel do
(%Element{name: n, attributes: a, children: c}, true, level) -> {:xmlel, n, Map.to_list(a), prettified_children(c, level)}
(%Element{name: n, attributes: a, children: c}, _ , _ ) -> {:xmlel, n, Map.to_list(a), Enum.map(c, &to_record(&1, false, 0))}
(text, _, _) when is_binary(text) -> {:xmlcdata, text}
end
defp prettified_children([] , _level), do: []
defp prettified_children([text] , _level) when is_binary(text), do: [{:xmlcdata, text}] # If there is only a single text child, directly produce non-prettified record
defp prettified_children(children, level), do: map_to_record_and_interleave_whitespaces(children, level)
@indent_unit " "
defp map_to_record_and_interleave_whitespaces(children, level) do
{children, mixed?} = map_to_record(children, level)
interleave_whitespaces(children, level, mixed?)
end
defp map_to_record(children, level) do
Enum.map_reduce(children, false, fn
(text, _mixed?) when is_binary(text) -> {{:xmlcdata, text} , true }
(%Element{} = e, mixed?) -> {to_record(e, !mixed?, level + 1), mixed?}
end)
end
defp interleave_whitespaces(children, _level, true ), do: children
defp interleave_whitespaces(children, level, false) do
child_indent = {:xmlcdata, "\n" <> String.duplicate(@indent_unit, level + 1)}
close_tag_indent = {:xmlcdata, "\n" <> String.duplicate(@indent_unit, level)}
Enum.flat_map(children, &[child_indent, &1]) ++ [close_tag_indent]
end
R.define_bang_version_of([decode: 1, decode: 2])
end
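# Illustrative round-trip (not in the original file):
#
#     {:ok, element} = Antikythera.Xml.decode("<a id='1'><b>text</b></a>")
#     Antikythera.Xml.encode(element, pretty: true)
#     #=> "<a id='1'>\n  <b>text</b>\n</a>"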
# (source file: lib/type/xml.ex)
defmodule Numerix.Correlation do
@moduledoc """
Statistical correlation functions between two vectors.
"""
use Numerix.Tensor
import Numerix.LinearAlgebra
alias Numerix.{Common, Statistics}
@doc """
Calculates the Pearson correlation coefficient between two vectors.
"""
@spec pearson(Common.vector(), Common.vector()) :: Common.maybe_float()
def pearson(%Tensor{items: []}, _), do: nil
def pearson(_, %Tensor{items: []}), do: nil
def pearson(%Tensor{items: x}, %Tensor{items: y}) when length(x) != length(y), do: nil
def pearson(x = %Tensor{}, y = %Tensor{}) do
sum1 = sum(x)
sum2 = sum(y)
sum_of_squares1 = sum(pow(x, 2))
sum_of_squares2 = sum(pow(y, 2))
sum_of_products = dot(x, y)
size = Enum.count(x.items)
num = sum_of_products - sum1 * sum2 / size
density =
:math.sqrt(
(sum_of_squares1 - :math.pow(sum1, 2) / size) *
(sum_of_squares2 - :math.pow(sum2, 2) / size)
)
case density do
0.0 -> 0.0
_ -> num / density
end
end
def pearson(vector1, vector2) do
x = Tensor.new(vector1)
y = Tensor.new(vector2)
pearson(x, y)
end
@doc """
Calculates the weighted Pearson correlation coefficient between two vectors.
"""
@spec pearson(Common.vector(), Common.vector(), Common.vector()) :: Common.maybe_float()
def pearson(%Tensor{items: []}, _, _), do: nil
def pearson(_, %Tensor{items: []}, _), do: nil
def pearson(_, _, %Tensor{items: []}), do: nil
def pearson([], _, _), do: nil
def pearson(_, [], _), do: nil
def pearson(_, _, []), do: nil
def pearson(vector1, vector2, weights) do
weighted_covariance_xy = Statistics.weighted_covariance(vector1, vector2, weights)
weighted_covariance_xx = Statistics.weighted_covariance(vector1, vector1, weights)
weighted_covariance_yy = Statistics.weighted_covariance(vector2, vector2, weights)
weighted_covariance_xy / :math.sqrt(weighted_covariance_xx * weighted_covariance_yy)
end
end
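# Illustrative only (not in the original file): perfectly linearly related
# vectors have a Pearson coefficient of 1.0 (up to floating-point error).
#
#     Numerix.Correlation.pearson([1, 2, 3], [2, 4, 6])
#     #=> 1.0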
# (source file: lib/correlation.ex)
defmodule Datix.NaiveDateTime do
@moduledoc """
A `NaiveDateTime` parser using `Calendar.strftime` format strings.
"""
@doc """
Parses a datetime string according to the given `format`.
See the `Calendar.strftime` documentation for how to specify a format string.
On success, the `:ok` tuple always contains a `NaiveDateTime` built from the
parsed date and time.
## Options
* `:calendar` - the calendar to build the `Date`, defaults to `Calendar.ISO`
* `:preferred_date` - a string for the preferred format to show dates,
it can't contain the `%x` format and defaults to `"%Y-%m-%d"`
if the option is not received
* `:month_names` - a list of the month names, if the option is not received
it defaults to a list of month names in English
* `:abbreviated_month_names` - a list of abbreviated month names, if the
option is not received it defaults to a list of abbreviated month names in
English
* `:day_of_week_names` - a list of day names, if the option is not received
it defaults to a list of day names in English
* `:abbreviated_day_of_week_names` - a list of abbreviated day names, if the
option is not received it defaults to a list of abbreviated day names in
English
* `:preferred_time` - a string for the preferred format to show times,
it can't contain the `%X` format and defaults to `"%H:%M:%S"`
if the option is not received
* `:am_pm_names` - a keyword list with the names of the period of the day,
defaults to `[am: "am", pm: "pm"]`.
Time zone information is ignored.
## Examples
```elixir
iex> Datix.NaiveDateTime.parse("2021/01/10 12:14:24", "%Y/%m/%d %H:%M:%S")
{:ok, ~N[2021-01-10 12:14:24]}
iex> Datix.NaiveDateTime.parse("2018/06/27 11:23:55 CEST+0200", "%Y/%m/%d %H:%M:%S %Z%z")
{:ok, ~N[2018-06-27 11:23:55]}
```
"""
@spec parse(String.t(), String.t(), list()) ::
{:ok, NaiveDateTime.t()}
| {:error, :invalid_date}
| {:error, :invalid_input}
| {:error, {:parse_error, expected: String.t(), got: String.t()}}
| {:error, {:conflict, [expected: term(), got: term(), modifier: String.t()]}}
| {:error, {:invalid_string, [modifier: String.t()]}}
| {:error, {:invalid_integer, [modifier: String.t()]}}
| {:error, {:invalid_modifier, [modifier: String.t()]}}
def parse(naive_datetime_str, format_str, opts \\ []) do
with {:ok, data} <- Datix.strptime(naive_datetime_str, format_str, opts) do
new(data, opts)
end
end
@doc """
Parses a datetime string according to the given `format`, erroring out for
invalid arguments.
"""
@spec parse!(String.t(), String.t(), list()) :: NaiveDateTime.t()
def parse!(naive_datetime_str, format_str, opts \\ []) do
naive_datetime_str
|> Datix.strptime!(format_str, opts)
|> new(opts)
|> case do
{:ok, date} ->
date
{:error, reason} ->
raise ArgumentError, "cannot build naive-date-time, reason: #{inspect(reason)}"
end
end
@doc false
def new(data, opts) do
with {:ok, date} <- Datix.Date.new(data, opts),
{:ok, time} <- Datix.Time.new(data, opts) do
NaiveDateTime.new(date, time)
end
end
end
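An illustrative sketch of `parse!/3` alongside `parse/3` (same format string as the doctests above):

```elixir
Datix.NaiveDateTime.parse!("2021/01/10 12:14:24", "%Y/%m/%d %H:%M:%S")
#=> ~N[2021-01-10 12:14:24]

# parse/3 returns an error tuple on bad input; parse!/3 raises instead
# (via Datix.strptime!/3).
Datix.NaiveDateTime.parse("2021-01-10", "%Y/%m/%d %H:%M:%S")
#=> {:error, {:parse_error, ...}}   # shape per the @spec above
```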
# lib/datix/naive_date_time.ex
defmodule Bs.Game do
alias Bs.Game.PubSub
alias Bs.Game.Registry
alias Bs.Game.Supervisor
alias Bs.Game.Server
use GenServer
import GenServer, only: [call: 2]
defdelegate(handle_call(request, from, state), to: Server)
defdelegate(handle_cast(request, state), to: Server)
defdelegate(handle_info(request, state), to: Server)
defdelegate(init(args), to: Server)
defdelegate(subscribe(name), to: PubSub)
@moduledoc """
The Game is a GenServer that handles running a single Bs match.
"""
def start_link(args, opts \\ [])
def start_link(args, opts) do
GenServer.start_link(__MODULE__, args, opts)
end
def get_game_state(id) when is_binary(id) do
id |> do_ensure_started |> call(:get_game_state)
end
def next(id) when is_binary(id) do
id |> do_ensure_started |> call(:next)
end
def pause(id) when is_binary(id) do
id |> dispatch(&call(&1, :pause))
end
def prev(id) when is_binary(id) do
id |> dispatch(&call(&1, :prev))
end
def stop(id, reason \\ :normal)
def stop(id, reason) when is_binary(id) do
id |> dispatch(&GenServer.stop(&1, reason))
end
def restart(id) when is_binary(id) do
case ensure_started(id) do
{:ok, pid, :already_started} ->
ref = Process.monitor(pid)
GenServer.stop(pid)
receive do
{:DOWN, ^ref, _, ^pid, :normal} -> :ok
end
ensure_started(id)
{:ok, _pid, :started} ->
:ok
end
end
def resume(id) when is_binary(id) do
id |> do_ensure_started |> call(:resume)
end
def alive?(id) do
case Registry.lookup(id) do
[_] ->
true
_ ->
false
end
end
def find!(name) do
  case lookup_or_create(name) do
    {:ok, pid} when is_pid(pid) ->
      pid
    {:error, {:already_started, pid}} when is_pid(pid) ->
      pid
    {:error, err} ->
      raise "cannot find or start game #{inspect(name)}: #{inspect(err)}"
  end
end
def lookup_or_create(id) when is_binary(id) do
case Registry.lookup(id) do
[{pid, _}] ->
{:ok, pid}
_ ->
start(id)
end
end
def ensure_started(id) do
with [] <- Registry.lookup(id),
{:ok, pid} <- start(id) do
{:ok, pid, :started}
else
[{pid, _}] ->
{:ok, pid, :already_started}
{:error, {:already_started, pid}} ->
{:ok, pid, :already_started}
end
end
def start(id) when is_binary(id) do
Elixir.Supervisor.start_child(Supervisor, [
id,
[name: {:via, Elixir.Registry, {Registry, id}}]
])
end
defp do_ensure_started(id) do
{:ok, pid, _} = ensure_started(id)
pid
end
defp dispatch(id, fun) when is_binary(id) and is_function(fun) do
Registry.dispatch(id, fn [{pid, _}] -> apply(fun, [pid]) end)
end
end
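A hypothetical lifecycle walk-through (assumes `Bs.Game.Supervisor` and the `Registry` are already running under the host application):

```elixir
id = "game-123"

# Starts the game if needed, or reuses the running process.
{:ok, _pid, _started_or_already} = Bs.Game.ensure_started(id)
true = Bs.Game.alive?(id)

state = Bs.Game.get_game_state(id)

Bs.Game.pause(id)
Bs.Game.resume(id)
Bs.Game.stop(id)
```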
# lib/bs/game.ex
defmodule AWS.EKS do
@moduledoc """
Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes
it easy for you to run Kubernetes on AWS without needing to stand up or maintain
your own Kubernetes control plane.
Kubernetes is an open-source system for automating the deployment, scaling, and
management of containerized applications.
Amazon EKS runs up-to-date versions of the open-source Kubernetes software, so
you can use all the existing plugins and tooling from the Kubernetes community.
Applications running on Amazon EKS are fully compatible with applications
running on any standard Kubernetes environment, whether running in on-premises
data centers or public clouds. This means that you can easily migrate any
standard Kubernetes application to Amazon EKS without any code modification
required.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2017-11-01",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "eks",
global?: false,
protocol: "rest-json",
service_id: "EKS",
signature_version: "v4",
signing_name: "eks",
target_prefix: nil
}
end
@doc """
Associate encryption configuration to an existing cluster.
You can use this API to enable encryption on existing clusters which do not have
encryption already enabled. This allows you to implement a defense-in-depth
security strategy without migrating applications to new EKS clusters.
"""
def associate_encryption_config(%Client{} = client, cluster_name, input, options \\ []) do
url_path = "/clusters/#{URI.encode(cluster_name)}/encryption-config/associate"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Associate an identity provider configuration to a cluster.
If you want to authenticate identities using an identity provider, you can
create an identity provider configuration and associate it to your cluster.
After configuring authentication to your cluster you can create Kubernetes
`roles` and `clusterroles` to assign permissions to the roles, and then bind the
roles to the identities using Kubernetes `rolebindings` and
`clusterrolebindings`. For more information see [Using RBAC Authorization](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) in
the Kubernetes documentation.
"""
def associate_identity_provider_config(%Client{} = client, cluster_name, input, options \\ []) do
url_path = "/clusters/#{URI.encode(cluster_name)}/identity-provider-configs/associate"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Creates an Amazon EKS add-on.
Amazon EKS add-ons help to automate the provisioning and lifecycle management of
common operational software for Amazon EKS clusters. Amazon EKS add-ons can only
be used with Amazon EKS clusters running version 1.18 with platform version
`eks.3` or later because add-ons rely on the Server-side Apply Kubernetes
feature, which is only available in Kubernetes 1.18 and later.
"""
def create_addon(%Client{} = client, cluster_name, input, options \\ []) do
url_path = "/clusters/#{URI.encode(cluster_name)}/addons"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Creates an Amazon EKS control plane.
The Amazon EKS control plane consists of control plane instances that run the
Kubernetes software, such as `etcd` and the API server. The control plane runs
in an account managed by AWS, and the Kubernetes API is exposed via the Amazon
EKS API server endpoint. Each Amazon EKS cluster control plane is single-tenant
and unique and runs on its own set of Amazon EC2 instances.
The cluster control plane is provisioned across multiple Availability Zones and
fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also
provisions elastic network interfaces in your VPC subnets to provide
connectivity from the control plane instances to the nodes (for example, to
support `kubectl exec`, `logs`, and `proxy` data flows).
Amazon EKS nodes run in your AWS account and connect to your cluster's control
plane via the Kubernetes API server endpoint and a certificate file that is
created for your cluster.
Cluster creation typically takes several minutes. After you create an Amazon EKS
cluster, you must configure your Kubernetes tooling to communicate with the API
server and launch nodes into your cluster. For more information, see [Managing Cluster
Authentication](https://docs.aws.amazon.com/eks/latest/userguide/managing-auth.html)
and [Launching Amazon EKS nodes](https://docs.aws.amazon.com/eks/latest/userguide/launch-workers.html) in
the *Amazon EKS User Guide*.
"""
def create_cluster(%Client{} = client, input, options \\ []) do
url_path = "/clusters"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Creates an AWS Fargate profile for your Amazon EKS cluster.
You must have at least one Fargate profile in a cluster to be able to run pods
on Fargate.
The Fargate profile allows an administrator to declare which pods run on Fargate
and specify which pods run on which Fargate profile. This declaration is done
through the profile’s selectors. Each profile can have up to five selectors that
contain a namespace and labels. A namespace is required for every selector. The
label field consists of multiple optional key-value pairs. Pods that match the
selectors are scheduled on Fargate. If a to-be-scheduled pod matches any of the
selectors in the Fargate profile, then that pod is run on Fargate.
When you create a Fargate profile, you must specify a pod execution role to use
with the pods that are scheduled with the profile. This role is added to the
cluster's Kubernetes [Role Based Access Control](https://kubernetes.io/docs/admin/authorization/rbac/) (RBAC) for
authorization so that the `kubelet` that is running on the Fargate
infrastructure can register with your Amazon EKS cluster so that it can appear
in your cluster as a node. The pod execution role also provides IAM permissions
to the Fargate infrastructure to allow read access to Amazon ECR image
repositories. For more information, see [Pod Execution Role](https://docs.aws.amazon.com/eks/latest/userguide/pod-execution-role.html)
in the *Amazon EKS User Guide*.
Fargate profiles are immutable. However, you can create a new updated profile to
replace an existing profile and then delete the original after the updated
profile has finished creating.
If any Fargate profiles in a cluster are in the `DELETING` status, you must wait
for that Fargate profile to finish deleting before you can create any other
profiles in that cluster.
For more information, see [AWS Fargate Profile](https://docs.aws.amazon.com/eks/latest/userguide/fargate-profile.html)
in the *Amazon EKS User Guide*.
"""
def create_fargate_profile(%Client{} = client, cluster_name, input, options \\ []) do
url_path = "/clusters/#{URI.encode(cluster_name)}/fargate-profiles"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Creates a managed node group for an Amazon EKS cluster.
You can only create a node group with a Kubernetes version that matches the
cluster's current version. All node groups are created with the latest
AMI release version for the respective minor Kubernetes version of the cluster,
unless you deploy a custom AMI using a launch template. For more information
about using launch templates, see [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html).
An Amazon EKS managed node group is an Amazon EC2 Auto Scaling group and
associated Amazon EC2 instances that are managed by AWS for an Amazon EKS
cluster. Each node group uses a version of the Amazon EKS optimized Amazon Linux
2 AMI. For more information, see [Managed Node Groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html)
in the *Amazon EKS User Guide*.
"""
def create_nodegroup(%Client{} = client, cluster_name, input, options \\ []) do
url_path = "/clusters/#{URI.encode(cluster_name)}/node-groups"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Delete an Amazon EKS add-on.
When you remove the add-on, it will also be deleted from the cluster. You can
always manually start an add-on on the cluster using the Kubernetes API.
"""
def delete_addon(%Client{} = client, addon_name, cluster_name, input, options \\ []) do
url_path = "/clusters/#{URI.encode(cluster_name)}/addons/#{URI.encode(addon_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes the Amazon EKS cluster control plane.
If you have active services in your cluster that are associated with a load
balancer, you must delete those services before deleting the cluster so that the
load balancers are deleted properly. Otherwise, you can have orphaned resources
in your VPC that prevent you from being able to delete the VPC. For more
information, see [Deleting a Cluster](https://docs.aws.amazon.com/eks/latest/userguide/delete-cluster.html)
in the *Amazon EKS User Guide*.
If you have managed node groups or Fargate profiles attached to the cluster, you
must delete them first. For more information, see `DeleteNodegroup` and
`DeleteFargateProfile`.
"""
def delete_cluster(%Client{} = client, name, input, options \\ []) do
url_path = "/clusters/#{URI.encode(name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes an AWS Fargate profile.
When you delete a Fargate profile, any pods running on Fargate that were created
with the profile are deleted. If those pods match another Fargate profile, then
they are scheduled on Fargate with that profile. If they no longer match any
Fargate profiles, then they are not scheduled on Fargate and they may remain in
a pending state.
Only one Fargate profile in a cluster can be in the `DELETING` status at a time.
You must wait for a Fargate profile to finish deleting before you can delete any
other profiles in that cluster.
"""
def delete_fargate_profile(
%Client{} = client,
cluster_name,
fargate_profile_name,
input,
options \\ []
) do
url_path =
"/clusters/#{URI.encode(cluster_name)}/fargate-profiles/#{URI.encode(fargate_profile_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes an Amazon EKS node group for a cluster.
"""
def delete_nodegroup(%Client{} = client, cluster_name, nodegroup_name, input, options \\ []) do
url_path = "/clusters/#{URI.encode(cluster_name)}/node-groups/#{URI.encode(nodegroup_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Describes an Amazon EKS add-on.
"""
def describe_addon(%Client{} = client, addon_name, cluster_name, options \\ []) do
url_path = "/clusters/#{URI.encode(cluster_name)}/addons/#{URI.encode(addon_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Describes the Kubernetes versions that the add-on can be used with.
"""
def describe_addon_versions(
%Client{} = client,
addon_name \\ nil,
kubernetes_version \\ nil,
max_results \\ nil,
next_token \\ nil,
options \\ []
) do
url_path = "/addons/supported-versions"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
query_params =
if !is_nil(kubernetes_version) do
[{"kubernetesVersion", kubernetes_version} | query_params]
else
query_params
end
query_params =
if !is_nil(addon_name) do
[{"addonName", addon_name} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns descriptive information about an Amazon EKS cluster.
The API server endpoint and certificate authority data returned by this
operation are required for `kubelet` and `kubectl` to communicate with your
Kubernetes API server. For more information, see [Create a kubeconfig for Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html).
The API server endpoint and certificate authority data aren't available until
the cluster reaches the `ACTIVE` state.
"""
def describe_cluster(%Client{} = client, name, options \\ []) do
url_path = "/clusters/#{URI.encode(name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns descriptive information about an AWS Fargate profile.
"""
def describe_fargate_profile(
%Client{} = client,
cluster_name,
fargate_profile_name,
options \\ []
) do
url_path =
"/clusters/#{URI.encode(cluster_name)}/fargate-profiles/#{URI.encode(fargate_profile_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns descriptive information about an identity provider configuration.
"""
def describe_identity_provider_config(%Client{} = client, cluster_name, input, options \\ []) do
url_path = "/clusters/#{URI.encode(cluster_name)}/identity-provider-configs/describe"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Returns descriptive information about an Amazon EKS node group.
"""
def describe_nodegroup(%Client{} = client, cluster_name, nodegroup_name, options \\ []) do
url_path = "/clusters/#{URI.encode(cluster_name)}/node-groups/#{URI.encode(nodegroup_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns descriptive information about an update against your Amazon EKS cluster
or associated managed node group.
When the status of the update is `Succeeded`, the update is complete. If an
update fails, the status is `Failed`, and an error detail explains the reason
for the failure.
"""
def describe_update(
%Client{} = client,
name,
update_id,
addon_name \\ nil,
nodegroup_name \\ nil,
options \\ []
) do
url_path = "/clusters/#{URI.encode(name)}/updates/#{URI.encode(update_id)}"
headers = []
query_params = []
query_params =
if !is_nil(nodegroup_name) do
[{"nodegroupName", nodegroup_name} | query_params]
else
query_params
end
query_params =
if !is_nil(addon_name) do
[{"addonName", addon_name} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Disassociates an identity provider configuration from a cluster.
If you disassociate an identity provider from your cluster, users included in
the provider can no longer access the cluster. However, you can still access the
cluster with AWS IAM users.
"""
def disassociate_identity_provider_config(
%Client{} = client,
cluster_name,
input,
options \\ []
) do
url_path = "/clusters/#{URI.encode(cluster_name)}/identity-provider-configs/disassociate"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Lists the available add-ons.
"""
def list_addons(
%Client{} = client,
cluster_name,
max_results \\ nil,
next_token \\ nil,
options \\ []
) do
url_path = "/clusters/#{URI.encode(cluster_name)}/addons"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Lists the Amazon EKS clusters in your AWS account in the specified Region.
"""
def list_clusters(%Client{} = client, max_results \\ nil, next_token \\ nil, options \\ []) do
url_path = "/clusters"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Lists the AWS Fargate profiles associated with the specified cluster in your AWS
account in the specified Region.
"""
def list_fargate_profiles(
%Client{} = client,
cluster_name,
max_results \\ nil,
next_token \\ nil,
options \\ []
) do
url_path = "/clusters/#{URI.encode(cluster_name)}/fargate-profiles"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Lists the identity provider configurations for your cluster.
"""
def list_identity_provider_configs(
%Client{} = client,
cluster_name,
max_results \\ nil,
next_token \\ nil,
options \\ []
) do
url_path = "/clusters/#{URI.encode(cluster_name)}/identity-provider-configs"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Lists the Amazon EKS managed node groups associated with the specified cluster
in your AWS account in the specified Region.
Self-managed node groups are not listed.
"""
def list_nodegroups(
%Client{} = client,
cluster_name,
max_results \\ nil,
next_token \\ nil,
options \\ []
) do
url_path = "/clusters/#{URI.encode(cluster_name)}/node-groups"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
List the tags for an Amazon EKS resource.
"""
def list_tags_for_resource(%Client{} = client, resource_arn, options \\ []) do
url_path = "/tags/#{URI.encode(resource_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Lists the updates associated with an Amazon EKS cluster or managed node group in
your AWS account, in the specified Region.
"""
def list_updates(
%Client{} = client,
name,
addon_name \\ nil,
max_results \\ nil,
next_token \\ nil,
nodegroup_name \\ nil,
options \\ []
) do
url_path = "/clusters/#{URI.encode(name)}/updates"
headers = []
query_params = []
query_params =
if !is_nil(nodegroup_name) do
[{"nodegroupName", nodegroup_name} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
query_params =
if !is_nil(addon_name) do
[{"addonName", addon_name} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Associates the specified tags to a resource with the specified `resourceArn`.
If existing tags on a resource are not specified in the request parameters, they
are not changed. When a resource is deleted, the tags associated with that
resource are deleted as well. Tags that you create for Amazon EKS resources do
not propagate to any other resources associated with the cluster. For example,
if you tag a cluster with this operation, that tag does not automatically
propagate to the subnets and nodes associated with the cluster.
"""
def tag_resource(%Client{} = client, resource_arn, input, options \\ []) do
url_path = "/tags/#{URI.encode(resource_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes specified tags from a resource.
"""
def untag_resource(%Client{} = client, resource_arn, input, options \\ []) do
url_path = "/tags/#{URI.encode(resource_arn)}"
headers = []
{query_params, input} =
[
{"tagKeys", "tagKeys"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Updates an Amazon EKS add-on.
"""
def update_addon(%Client{} = client, addon_name, cluster_name, input, options \\ []) do
url_path = "/clusters/#{URI.encode(cluster_name)}/addons/#{URI.encode(addon_name)}/update"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Updates an Amazon EKS cluster configuration.
Your cluster continues to function during the update. The response output
includes an update ID that you can use to track the status of your cluster
update with the `DescribeUpdate` API operation.
You can use this API operation to enable or disable exporting the Kubernetes
control plane logs for your cluster to CloudWatch Logs. By default, cluster
control plane logs aren't exported to CloudWatch Logs. For more information, see
[Amazon EKS Cluster Control Plane Logs](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html)
in the *Amazon EKS User Guide*.
CloudWatch Logs ingestion, archive storage, and data scanning rates apply to
exported control plane logs. For more information, see [Amazon CloudWatch Pricing](http://aws.amazon.com/cloudwatch/pricing/).
You can also use this API operation to enable or disable public and private
access to your cluster's Kubernetes API server endpoint. By default, public
access is enabled, and private access is disabled. For more information, see
[Amazon EKS Cluster Endpoint Access Control](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html)
in the *Amazon EKS User Guide*.
At this time, you cannot update the subnets or security group IDs for an
existing cluster.
Cluster updates are asynchronous, and they should finish within a few minutes.
During an update, the cluster status moves to `UPDATING` (this status transition
is eventually consistent). When the update is complete (either `Failed` or
`Successful`), the cluster status moves to `Active`.
"""
def update_cluster_config(%Client{} = client, name, input, options \\ []) do
url_path = "/clusters/#{URI.encode(name)}/update-config"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Updates an Amazon EKS cluster to the specified Kubernetes version.
Your cluster continues to function during the update. The response output
includes an update ID that you can use to track the status of your cluster
update with the `DescribeUpdate` API operation.
Cluster updates are asynchronous, and they should finish within a few minutes.
During an update, the cluster status moves to `UPDATING` (this status transition
is eventually consistent). When the update is complete (either `Failed` or
`Successful`), the cluster status moves to `Active`.
If your cluster has managed node groups attached to it, all of your node groups’
Kubernetes versions must match the cluster’s Kubernetes version in order to
update the cluster to a new Kubernetes version.
"""
def update_cluster_version(%Client{} = client, name, input, options \\ []) do
url_path = "/clusters/#{URI.encode(name)}/updates"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Updates an Amazon EKS managed node group configuration.
Your node group continues to function during the update. The response output
includes an update ID that you can use to track the status of your node group
update with the `DescribeUpdate` API operation. Currently you can update the
Kubernetes labels for a node group or the scaling configuration.
"""
def update_nodegroup_config(
%Client{} = client,
cluster_name,
nodegroup_name,
input,
options \\ []
) do
url_path =
"/clusters/#{URI.encode(cluster_name)}/node-groups/#{URI.encode(nodegroup_name)}/update-config"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Updates the Kubernetes version or AMI version of an Amazon EKS managed node
group.
You can update a node group using a launch template only if the node group was
originally deployed with a launch template. If you need to update a custom AMI
in a node group that was deployed with a launch template, then update your
custom AMI, specify the new ID in a new version of the launch template, and then
update the node group to the new version of the launch template.
If you update without a launch template, then you can update to the latest
available AMI version of a node group's current Kubernetes version by not
specifying a Kubernetes version in the request. You can update to the latest AMI
version of your cluster's current Kubernetes version by specifying your
cluster's Kubernetes version in the request. For more information, see [Amazon EKS optimized Amazon Linux 2 AMI
versions](https://docs.aws.amazon.com/eks/latest/userguide/eks-linux-ami-versions.html)
in the *Amazon EKS User Guide*.
You cannot roll back a node group to an earlier Kubernetes version or AMI
version.
When a node in a managed node group is terminated due to a scaling action or
update, the pods in that node are drained first. Amazon EKS attempts to drain
the nodes gracefully and will fail if it is unable to do so. You can `force` the
update if Amazon EKS is unable to drain the nodes as a result of a pod
disruption budget issue.
"""
def update_nodegroup_version(
%Client{} = client,
cluster_name,
nodegroup_name,
input,
options \\ []
) do
url_path =
"/clusters/#{URI.encode(cluster_name)}/node-groups/#{URI.encode(nodegroup_name)}/update-version"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
end
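A hedged sketch of driving this generated module with aws-elixir (the `AWS.Client.create/3` call and the response shapes are assumptions based on that library's conventions, not part of this file):

```elixir
# Hypothetical credentials and region.
client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-west-2")

# Successful calls return {:ok, decoded_body, raw_response}.
{:ok, %{"clusters" => names}, _raw} = AWS.EKS.list_clusters(client)

{:ok, %{"cluster" => cluster}, _raw} =
  AWS.EKS.describe_cluster(client, hd(names))
```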
# lib/aws/generated/eks.ex
defmodule Bounds.Chunked do
@moduledoc false
defstruct [:bounds, :step]
def new(%Bounds{lower: lower, upper: upper} = bounds, step, partial_strategy) when is_integer(step) and step >= 1 do
case {rem(upper - lower, step), partial_strategy} do
{0, _} ->
%Bounds.Chunked{bounds: bounds, step: step}
{_, :discard} ->
%Bounds.Chunked{bounds: bounds, step: step}
{n, :return} when n > 0 ->
last_part = %Bounds{lower: upper - n, upper: upper}
%Bounds.ExtendedEnumerable{most: %Bounds.Chunked{bounds: bounds, step: step}, extra: last_part}
{n, :extend} when n > 0 ->
last_part = %Bounds{lower: upper - n, upper: upper - n + step}
%Bounds.ExtendedEnumerable{most: %Bounds.Chunked{bounds: bounds, step: step}, extra: last_part}
end
end
end
defimpl Enumerable, for: Bounds.Chunked do
alias Bounds
alias Bounds.Chunked
def count(%Chunked{bounds: %Bounds{lower: lower, upper: upper}, step: step}) do
{:ok, div(upper - lower, step)}
end
def member?(%Chunked{bounds: %Bounds{lower: lower, upper: upper}, step: step}, %Bounds{lower: a, upper: b}) do
{:ok, (a >= lower) and (b <= upper) and ((b - a) == step) and (rem(a - lower, step) == 0) and (rem(b - lower, step) == 0)}
end
def reduce(%Chunked{bounds: %Bounds{lower: lower, upper: upper}, step: step}, acc, fun) do
reduce(lower, upper, step, acc, fun)
end
defp reduce(_lower, _upper, _step, {:halt, acc}, _fun), do: {:halted, acc}
defp reduce(lower, upper, step, {:suspend, acc}, fun),
  do: {:suspended, acc, &reduce(lower, upper, step, &1, fun)}
defp reduce(lower, upper, step, {:cont, acc}, _fun) when lower + step > upper, do: {:done, acc}
defp reduce(lower, upper, step, {:cont, acc}, fun) when lower + step <= upper do
next = lower + step
val = %Bounds{lower: lower, upper: next}
reduce(next, upper, step, fun.(val, acc), fun)
end
def slice(%Chunked{bounds: %Bounds{lower: lower, upper: upper}, step: step_size}) do
count = div(upper - lower, step_size)
slicer = fn offset, len ->
new_lower = lower + (offset * step_size)
new_upper = :erlang.min(new_lower + (len * step_size), upper)
slicer_accum(new_lower, new_upper, step_size, [])
end
{:ok, count, slicer}
end
defp slicer_accum(lower, upper, step_size, acc) do
prev = upper - step_size
if prev < lower do
acc
else
val = %Bounds{lower: prev, upper: upper}
slicer_accum(lower, prev, step_size, [val | acc])
end
end
end
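A sketch of the three partial strategies (assumes a plain `%Bounds{}` struct as used above; the `:return`/`:extend` results also rely on `Bounds.ExtendedEnumerable`, which is not shown here):

```elixir
b = %Bounds{lower: 0, upper: 10}

Bounds.Chunked.new(b, 4, :discard) |> Enum.to_list()
#=> [%Bounds{lower: 0, upper: 4}, %Bounds{lower: 4, upper: 8}]

# :return keeps the 2-element remainder as a short final chunk...
Bounds.Chunked.new(b, 4, :return) |> Enum.to_list()
#=> [..., %Bounds{lower: 8, upper: 10}]

# ...while :extend pads the final chunk out to a full step.
Bounds.Chunked.new(b, 4, :extend) |> Enum.to_list()
#=> [..., %Bounds{lower: 8, upper: 12}]
```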
# lib/bounds/chunked.ex
defmodule ProperCase do
@moduledoc """
An Elixir library that converts keys in maps between `snake_case` and `camel_case`
"""
import String, only: [first: 1, replace: 4, downcase: 1, upcase: 1]
@doc """
Converts all the keys in a map to `camelCase` if mode is `:lower` or `CamelCase` if mode is `:upper`.
If the map is a struct with no `Enumerable` implementation,
the struct is considered to be a single value.
"""
def to_camel_case(any), do: to_camel_case(any, :lower)
def to_camel_case(map, mode) when is_map(map) do
try do
for {key, val} <- map,
into: %{},
do: {camel_case(key, mode), to_camel_case(val, mode)}
rescue
# Not Enumerable
Protocol.UndefinedError -> map
end
end
def to_camel_case(list, mode) when is_list(list) do
list
|> Enum.map(&to_camel_case(&1, mode))
end
def to_camel_case(final_val, _mode), do: final_val
@doc """
Converts all the keys in a map to `snake_case`.
If the map is a struct with no `Enumerable` implementation,
the struct is considered to be a single value.
"""
def to_snake_case(map) when is_map(map) do
try do
for {key, val} <- map,
into: %{},
do: {snake_case(key), to_snake_case(val)}
rescue
# Not Enumerable
Protocol.UndefinedError -> map
end
end
def to_snake_case(list) when is_list(list) do
list
|> Enum.map(&to_snake_case/1)
end
def to_snake_case(other_types), do: other_types
@doc """
Converts an atom to a `camelCase` string
"""
def camel_case(key), do: camel_case(key, :lower)
def camel_case(key, mode) when is_atom(key) do
key
|> Atom.to_string
|> camel_case(mode)
end
def camel_case(val, _mode) when is_number(val), do: val
@doc """
Converts a string to `camelCase`
"""
def camel_case("_" <> rest, mode) do
"_#{camel_case(rest, mode)}"
end
def camel_case(key, :lower) when is_binary(key) do
first_char = key |> first
key
|> Macro.camelize
|> replace(upcase(first_char), downcase(first_char), global: false)
end
def camel_case(key, :upper) when is_binary(key) do
key
|> Macro.camelize
end
@doc """
Converts an atom to a `snake_case` string.
"""
def snake_case(val) when is_atom(val) do
val
|> Atom.to_string
|> Macro.underscore
end
def snake_case(val) when is_number(val) do
val
end
@doc """
Converts a string to `snake_case`
"""
def snake_case(val) do
val |> Macro.underscore
end
end
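A round-trip sketch; the expected values follow directly from the clauses above:

```elixir
ProperCase.to_camel_case(%{"user_name" => %{"first_name" => "Ada"}})
#=> %{"userName" => %{"firstName" => "Ada"}}

ProperCase.to_camel_case(%{"user_name" => "ada"}, :upper)
#=> %{"UserName" => "ada"}

ProperCase.to_snake_case(%{"userName" => [%{"firstName" => "Ada"}]})
#=> %{"user_name" => [%{"first_name" => "Ada"}]}
```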
# lib/proper_case.ex
defmodule CubDB.Btree.KeyRange do
@moduledoc false
# `CubDB.Btree.KeyRange` is a module implementing the `Enumerable` protocol to
# iterate through a range of entries on a Btree bounded by a minimum and
# maximum key. The bounds can be exclusive or inclusive: bounds are either
# `nil` or tuples of `{key, boolean}`, where the boolean indicates whether the
# bound is inclusive or not. This is primarily used for selection operations.
alias CubDB.Btree
alias CubDB.Btree.KeyRange
@type bound :: {Btree.key(), boolean} | nil
@type t :: %KeyRange{btree: Btree.t(), min_key: bound, max_key: bound, reverse: boolean}
@enforce_keys [:btree]
defstruct btree: nil, min_key: nil, max_key: nil, reverse: false
@spec new(Btree.t(), bound, bound, boolean) :: KeyRange.t()
def new(btree, min_key \\ nil, max_key \\ nil, reverse \\ false) do
%KeyRange{btree: btree, min_key: min_key, max_key: max_key, reverse: reverse}
end
end
defimpl Enumerable, for: CubDB.Btree.KeyRange do
alias CubDB.Btree
alias CubDB.Store
alias CubDB.Btree.KeyRange
@leaf Btree.__leaf__()
@branch Btree.__branch__()
@value Btree.__value__()
@deleted Btree.__deleted__()
def reduce(key_range, cmd_acc, fun) do
%KeyRange{btree: btree, min_key: min_key, max_key: max_key, reverse: reverse} = key_range
Btree.Enumerable.reduce(btree, cmd_acc, fun, &get_children(min_key, max_key, reverse, &1, &2))
end
def count(_), do: {:error, __MODULE__}
def member?(%KeyRange{min_key: {min, true}, max_key: _}, {key, _}) when key < min do
{:ok, false}
end
def member?(%KeyRange{min_key: _, max_key: {max, true}}, {key, _}) when key > max do
{:ok, false}
end
def member?(%KeyRange{min_key: {min, false}, max_key: _}, {key, _}) when key <= min do
{:ok, false}
end
def member?(%KeyRange{min_key: _, max_key: {max, false}}, {key, _}) when key >= max do
{:ok, false}
end
def member?(%KeyRange{btree: btree}, {key, value}) do
case Btree.fetch(btree, key) do
{:ok, ^value} -> {:ok, true}
_ -> {:ok, false}
end
end
def member?(_, _), do: {:ok, false}
def slice(_), do: {:error, __MODULE__}
defp get_children(min_key, max_key, reverse, {@branch, locs}, store) do
children =
locs
|> Enum.chunk_every(2, 1)
|> Enum.filter(fn
[{key, _}, {next_key, _}] -> filter_branch(min_key, max_key, key, next_key)
[{key, _}] -> filter_branch(nil, max_key, key, nil)
end)
|> Enum.map(fn [{k, loc} | _] ->
{k, Store.get_node(store, loc)}
end)
if reverse, do: Enum.reverse(children), else: children
end
defp get_children(min_key, max_key, reverse, {@leaf, locs}, store) do
children =
locs
|> Enum.filter(fn {key, _} ->
filter_leaf(min_key, max_key, key)
end)
|> Enum.map(fn {k, loc} ->
{k, Store.get_node(store, loc)}
end)
|> Enum.filter(fn {_, node} ->
node != @deleted
end)
if reverse, do: Enum.reverse(children), else: children
end
defp get_children(_, _, _, {@value, v}, _), do: v
defp filter_branch(nil, nil, _, _), do: true
defp filter_branch(nil, {max, true}, key, _), do: key <= max
defp filter_branch(nil, {max, false}, key, _), do: key < max
defp filter_branch({min, _}, nil, _, next_key), do: next_key > min
defp filter_branch({min, _}, {max, true}, key, next_key), do: key <= max && next_key > min
defp filter_branch({min, _}, {max, false}, key, next_key), do: key < max && next_key > min
defp filter_leaf(nil, nil, _), do: true
defp filter_leaf({min, true}, nil, key), do: key >= min
defp filter_leaf({min, false}, nil, key), do: key > min
defp filter_leaf(nil, {max, true}, key), do: key <= max
defp filter_leaf(nil, {max, false}, key), do: key < max
defp filter_leaf({min, true}, {max, true}, key), do: key >= min && key <= max
defp filter_leaf({min, false}, {max, true}, key), do: key > min && key <= max
defp filter_leaf({min, true}, {max, false}, key), do: key >= min && key < max
defp filter_leaf({min, false}, {max, false}, key), do: key > min && key < max
end
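A conceptual sketch of the bound semantics (`btree` stands for an existing `CubDB.Btree` handle; `{key, true}` is an inclusive bound, `{key, false}` an exclusive one):

```elixir
# All entries with :a <= key < :m, iterated in reverse key order.
range = CubDB.Btree.KeyRange.new(btree, {:a, true}, {:m, false}, true)
entries = Enum.to_list(range)
```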
# lib/cubdb/btree/key_range.ex
defmodule Phoenix.Socket do
@moduledoc ~S"""
Holds state for every channel, pointing to its transport,
pubsub server and more.
## Socket Fields
* `id` - The string id of the socket
* `assigns` - The map of socket assigns, default: `%{}`
* `channel` - The channel module where this socket originated
* `channel_pid` - The channel pid
* `endpoint` - The endpoint module where this socket originated
* `joined` - If the socket has effectively joined the channel
* `pubsub_server` - The registered name of the socket's PubSub server
* `ref` - The latest ref sent by the client
* `topic` - The string topic, ie `"rooms:123"`
* `transport` - The socket's transport, ie: `Phoenix.Transports.WebSocket`
* `transport_pid` - The pid of the socket's transport process
## Channels
Channels allow you to route pubsub events to channel handlers in your application.
By default, Phoenix supports both `:websocket` and `:longpoll` transports.
See the `Phoenix.Channel.Transport` documentation for more information on writing
your own transports. Channels are defined within a socket handler, using the
`channel/2` macro, as seen below.
## Socket Behaviour
Socket handlers are mounted in Endpoints and must define two callbacks:
* `connect/2` - receives the socket params and authenticates the connection.
Often used to wire up default `%Phoenix.Socket{}` assigns
for all channels.
* `id/1` - receives the socket returned by `connect/2`, and returns the
string id of this connection. Used for forcing a disconnect for
connection and all child channels. For sockets requiring no
authentication, `nil` can be returned.
Callback examples:
defmodule MyApp.UserSocket do
use Phoenix.Socket
channel "rooms:*", MyApp.RoomChannel
def connect(params, socket) do
{:ok, assign(socket, :user_id, params["user_id"])}
end
def id(socket), do: "users_socket:#{socket.assigns.user_id}"
end
...
# disconnect all user's socket connections and their multiplexed channels
MyApp.Endpoint.broadcast("users_socket:" <> user.id, "disconnect")
## Transport Configuration
Transports are defined and configured within socket handlers. By default,
Phoenix defines the `:websocket` and `:longpoll` transports automatically with
overridable options. Check the transport modules for transport specific
options. A list of allowed origins can be specified in the `:origins` key for
the `:websocket` and `:longpoll` transports. This will restrict clients based
on the given Origin header.
transport :longpoll, Phoenix.Transports.LongPoll,
origins: ["//example.com", "http://example.com", "https://example.com"]
transport :websocket, Phoenix.Transports.WebSocket,
origins: ["//example.com", "http://example.com", "https://example.com"]
If no such header is sent no verification will be performed. If the
Origin header does not match the list of allowed origins a 403 Forbidden
response will be sent to the client. See `transport/3` for more information.
"""
use Behaviour
alias Phoenix.Socket
alias Phoenix.Socket.Helpers
defcallback connect(params :: map, Socket.t) :: {:ok, Socket.t} | :error
defcallback id(Socket.t) :: String.t | nil
@default_transports [:websocket, :longpoll]
defmodule InvalidMessageError do
@moduledoc """
Raised when the socket message is invalid.
"""
defexception [:message]
end
@type t :: %Socket{id: nil,
assigns: %{},
channel: atom,
channel_pid: pid,
endpoint: atom,
joined: boolean,
pubsub_server: atom,
ref: term,
topic: String.t,
transport: atom,
transport_pid: pid}
defstruct id: nil,
assigns: %{},
channel: nil,
channel_pid: nil,
endpoint: nil,
joined: false,
pubsub_server: nil,
ref: nil,
topic: nil,
transport: nil,
transport_pid: nil
defmacro __using__(_) do
quote do
@behaviour Phoenix.Socket
import unquote(__MODULE__)
Module.register_attribute(__MODULE__, :phoenix_channels, accumulate: true)
@phoenix_transports %{}
@before_compile unquote(__MODULE__)
end
end
defmacro __before_compile__(env) do
transports = Module.get_attribute(env.module, :phoenix_transports)
channel_defs =
env.module
|> Module.get_attribute(:phoenix_channels)
|> Helpers.defchannels(transports)
transport_defs =
for {name, {mod, conf}} <- transports do
quote do
def __transport__(name) when name in [unquote(name), unquote(to_string(name))] do
{unquote(mod), unquote(conf)}
end
end
end
quote do
def __transports__, do: unquote(Macro.escape(transports))
unquote(transport_defs)
def __transport__(_name), do: :unsupported
unquote(channel_defs)
end
end
@doc """
Adds key/value pair to socket assigns.
## Examples
iex> socket.assigns[:token]
nil
iex> socket = assign(socket, :token, "bar")
iex> socket.assigns[:token]
"bar"
"""
def assign(socket = %Socket{}, key, value) do
update_in socket.assigns, &Map.put(&1, key, value)
end
@doc """
Defines a channel matching the given topic and transports.
* `topic_pattern` - The string pattern, ie "rooms:*", "users:*", "system"
* `module` - The channel module handler, ie `MyApp.RoomChannel`
* `opts` - The optional list of options, see below
## Options
* `:via` - the transport adapters to accept on this channel.
Defaults `[:websocket, :longpoll]`
## Examples
channel "topic1:*", MyChannel
channel "topic2:*", MyChannel, via: [:websocket]
channel "topic", MyChannel, via: [:longpoll]
## Topic Patterns
The `channel` macro accepts topic patterns in two flavors. A splat argument
can be provided as the last character to indicate a "topic:subtopic" match. If
a plain string is provided, only that topic will match the channel handler.
Most use-cases will use the "topic:*" pattern to allow more versatile topic
scoping.
See `Phoenix.Channel` for more information
"""
defmacro channel(topic_pattern, module, opts \\ []) do
# Tear the alias to simply store the root in the AST.
# This will make Elixir unable to track the dependency
# between endpoint <-> socket and avoid recompiling the
# endpoint (alongside the whole project) whenever the
# socket changes.
module = tear_alias(module)
quote do
@phoenix_channels {
unquote(topic_pattern),
unquote(module),
unquote(Keyword.put_new(opts, :via, @default_transports))
}
end
end
defp tear_alias({:__aliases__, meta, [h|t]}) do
alias = {:__aliases__, meta, [h]}
quote do
Module.concat([unquote(alias)|unquote(t)])
end
end
defp tear_alias(other), do: other
@doc """
Defines a transport with configuration.
## Examples
# customize default `:websocket` transport options
transport :websocket, Phoenix.Transports.WebSocket,
timeout: 10_000
# define separate transport, using websocket handler
transport :websocket_slow_clients, Phoenix.Transports.WebSocket,
timeout: 60_000
"""
defmacro transport(name, module, config \\ []) do
quote do
@phoenix_transports Phoenix.Socket.Helpers.register_transport(
@phoenix_transports, unquote(name), unquote(module), unquote(config))
end
end
end
defmodule Phoenix.Socket.Message do
@moduledoc """
Defines a message dispatched over transport to channels and vice-versa.
The message format requires the following keys:
* `topic` - The string topic or topic:subtopic pair namespace, ie "messages", "messages:123"
* `event`- The string event name, ie "phx_join"
* `payload` - The message payload
* `ref` - The unique string ref
"""
defstruct topic: nil, event: nil, payload: nil, ref: nil
@doc """
Converts a map with string keys into a message struct.
Raises `Phoenix.Socket.InvalidMessageError` if not valid.
"""
def from_map!(map) when is_map(map) do
try do
%Phoenix.Socket.Message{
topic: Map.fetch!(map, "topic"),
event: Map.fetch!(map, "event"),
payload: Map.fetch!(map, "payload"),
ref: Map.fetch!(map, "ref")
}
rescue
err in [KeyError] ->
raise Phoenix.Socket.InvalidMessageError, message: "missing key #{inspect err.key}"
end
end
end
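A quick sketch of `from_map!/1`; the required keys are exactly those listed in the moduledoc:

```elixir
Phoenix.Socket.Message.from_map!(%{
  "topic" => "rooms:123",
  "event" => "phx_join",
  "payload" => %{},
  "ref" => "1"
})
#=> %Phoenix.Socket.Message{topic: "rooms:123", event: "phx_join", payload: %{}, ref: "1"}

# A missing key raises Phoenix.Socket.InvalidMessageError.
Phoenix.Socket.Message.from_map!(%{"topic" => "rooms:123"})
```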
defmodule Phoenix.Socket.Reply do
@moduledoc """
Defines a reply sent from channels to transports.
The message format requires the following keys:
* `topic` - The string topic or topic:subtopic pair namespace, ie "messages", "messages:123"
* `status` - The reply status as an atom
* `payload` - The reply payload
* `ref` - The unique string ref
"""
defstruct topic: nil, status: nil, payload: nil, ref: nil
end
defmodule Phoenix.Socket.Broadcast do
@moduledoc """
Defines a message sent from pubsub to channels and vice-versa.
The message format requires the following keys:
* `topic` - The string topic or topic:subtopic pair namespace, ie "messages", "messages:123"
* `event`- The string event name, ie "phx_join"
* `payload` - The message payload
"""
defstruct topic: nil, event: nil, payload: nil
end
# lib/phoenix/socket.ex
defprotocol Vow.Conformable do
@moduledoc """
Protocol for conforming values against vows, and for unforming conformed
values back into their original representation.
"""
alias Vow.ConformError
@fallback_to_any true
@type conformed :: term
@type result :: {:ok, conformed} | {:error, [ConformError.Problem.t()]}
@doc """
Given a vow and a value, return an error if the value does not match
the vow, otherwise returns the (potentially) destructured value.
The other parameters are for tracking composed conform calls:
* path - the set of keys used to `Access` the current vow
from the parent vow
* via - the set of `Vow.Ref` navigated to get to the current vow
* route - the set of keys used to `Access` the current value from
the parent value
"""
@spec conform(t, [term], [Vow.Ref.t()], [term], term) :: result
def conform(vow, path, via, route, val)
@doc """
Given a vow and a conformed value, returns the original unconformed value,
otherwise return an `Vow.UnformError`.
"""
@spec unform(t, conformed) :: {:ok, val :: term} | {:error, Vow.UnformError.t()}
def unform(vow, conformed_value)
@doc """
Returns `true` if the vow is a `Vow.RegexOperator`, otherwise returns `false`.
"""
@spec regex?(t) :: boolean
def regex?(vow)
end
defimpl Vow.Conformable, for: Function do
@moduledoc false
import Vow.FunctionWrapper, only: [wrap: 1]
alias Vow.ConformError
@impl Vow.Conformable
def conform(vow, path, via, route, val) when is_function(vow, 1) do
case safe_execute(vow, val) do
{:ok, true} ->
{:ok, val}
{:ok, false} ->
{:error, [ConformError.new_problem(vow, path, via, route, val)]}
{:ok, _} ->
{:error,
[
ConformError.new_problem(
vow,
path,
via,
route,
val,
"Non-boolean return values are invalid"
)
]}
{:error, reason} ->
{:error, [ConformError.new_problem(vow, path, via, route, val, reason)]}
end
end
def conform(_vow, path, via, route, val) do
{:error, [ConformError.new_problem(wrap(&is_function(&1, 1)), path, via, route, val)]}
end
@impl Vow.Conformable
def unform(_vow, val) do
{:ok, val}
end
@impl Vow.Conformable
def regex?(_vow), do: false
@spec safe_execute((term -> term), term) :: {:ok, term} | {:error, term}
defp safe_execute(fun, val) do
{:ok, fun.(val)}
rescue
reason -> {:error, reason}
catch
:exit, reason -> {:error, {:exit, reason}}
caught -> {:error, caught}
end
end
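Sketch of the `Function` implementation's behavior, read off the clauses above (a 1-arity predicate acts as the vow; path/via/route start empty):

```elixir
Vow.Conformable.conform(&is_integer/1, [], [], [], 42)
#=> {:ok, 42}

Vow.Conformable.conform(&is_integer/1, [], [], [], "nope")
#=> {:error, [problem]}   # one ConformError problem for the failed predicate

# Non-boolean returns and raised/caught errors are reported as problems too.
Vow.Conformable.conform(fn _ -> :maybe end, [], [], [], 42)
#=> {:error, [problem]}
```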
defimpl Vow.Conformable, for: List do
@moduledoc false
import Vow.FunctionWrapper, only: [wrap: 1]
import Vow.Utils, only: [compatible_form?: 2, improper_info: 1]
alias Vow.{ConformError, ConformError.Problem}
@impl Vow.Conformable
def conform(vow, path, via, route, val)
when is_list(val) and length(vow) == length(val) do
vow
|> Enum.zip(val)
|> Enum.with_index()
|> Enum.reduce({:ok, []}, &conform_reducer(&1, &2, path, via, route))
|> case do
{:error, problems} -> {:error, problems}
{:ok, conformed} -> {:ok, Enum.reverse(conformed)}
end
end
def conform(vow, path, via, route, val) when is_list(val) do
list_info = {improper_info(vow), improper_info(val)}
conform_non_similar(list_info, vow, path, via, route, val)
end
def conform(_vow, path, via, route, val) do
{:error, [ConformError.new_problem(&is_list/1, path, via, route, val)]}
end
@impl Vow.Conformable
def unform(vow, val)
when is_list(val) and length(vow) == length(val) do
Enum.reduce(Enum.zip(vow, val), {:ok, []}, fn
_, {:error, reason} ->
{:error, reason}
{v, val}, {:ok, acc} ->
case @protocol.unform(v, val) do
{:ok, unformed} -> {:ok, [unformed | acc]}
{:error, reason} -> {:error, reason}
end
end)
end
def unform(vow, val) when is_list(val) do
case {improper_info(vow), improper_info(val)} do
{{true, n}, {true, n}} ->
unform_improper_impl(vow, val)
_ ->
{:error, %Vow.UnformError{vow: vow, val: val}}
end
end
def unform(vow, val) do
{:error, %Vow.UnformError{vow: vow, val: val}}
end
@impl Vow.Conformable
def regex?(_vow), do: false
@spec conform_reducer(
{{Vow.t(), term}, non_neg_integer},
@protocol.result,
[term],
[Vow.Ref.t()],
[term]
) :: @protocol.result
defp conform_reducer({{vow, val}, i}, {:ok, acc}, path, via, route) do
case @protocol.conform(vow, [i | path], via, [i | route], val) do
{:error, ps} -> {:error, ps}
{:ok, ch} -> {:ok, [ch | acc]}
end
end
defp conform_reducer({{vow, val}, i}, {:error, pblms}, path, via, route) do
case @protocol.conform(vow, [i | path], via, [i | route], val) do
{:error, ps} -> {:error, pblms ++ ps}
{:ok, _} -> {:error, pblms}
end
end
@spec conform_non_similar({info, info}, Vow.t(), [term], [Vow.Ref.t()], [term], term) ::
@protocol.result
when info: {boolean, non_neg_integer}
defp conform_non_similar({{true, n}, {true, n}}, vow, path, via, route, val) do
conform_improper(vow, path, via, route, val, 0)
end
defp conform_non_similar({{false, _}, {false, _}}, vow, path, via, route, val) do
pred = wrap(&(length(&1) == length(vow)))
{:error, [Problem.new(pred, path, via, route, val)]}
end
defp conform_non_similar(_, vow, path, via, route, val) do
pred = wrap(&compatible_form?(vow, &1))
{:error, [Problem.new(pred, path, via, route, val)]}
end
@spec unform_improper_impl(
nonempty_improper_list(Vow.t(), Vow.t()),
nonempty_improper_list(term, term)
) ::
nonempty_improper_list(term, term) | term
defp unform_improper_impl([hv | tv], [hval | tval]) do
with {:ok, uh} <- @protocol.unform(hv, hval),
{:ok, ut} <- unform_improper_impl(tv, tval) do
{:ok, [uh | ut]}
else
{:error, reason} -> {:error, reason}
end
end
defp unform_improper_impl(vow, val) do
@protocol.unform(vow, val)
end
@type position :: non_neg_integer | :__improper_tail__
@spec conform_improper(
nonempty_improper_list(Vow.t(), Vow.t()) | Vow.t(),
[term],
[Vow.Ref.t()],
[term],
nonempty_improper_list(term, term) | term,
position
) ::
{:ok, term} | {:error, [ConformError.Problem.t()]}
defp conform_improper([sh | st], path, via, route, [vh | vt], pos) do
head = @protocol.conform(sh, [pos | path], via, [pos | route], vh)
tail = conform_improper(st, path, via, route, vt, pos + 1)
conform_improper_tail({head, tail})
end
defp conform_improper(vow, path, via, route, val, pos) do
@protocol.conform(vow, [pos | path], via, [pos | route], val)
end
@spec conform_improper_tail({@protocol.result, @protocol.result}) :: @protocol.result
defp conform_improper_tail({{:ok, ch}, {:ok, ct}}), do: {:ok, [ch | ct]}
defp conform_improper_tail({{:error, hps}, {:error, tps}}), do: {:error, hps ++ tps}
defp conform_improper_tail({_, {:error, ps}}), do: {:error, ps}
defp conform_improper_tail({{:error, ps}, _}), do: {:error, ps}
end
defimpl Vow.Conformable, for: Tuple do
@moduledoc false
alias Vow.ConformError
@impl Vow.Conformable
def conform(vow, path, via, route, val) when is_tuple(val) do
{ls, lv} = {Tuple.to_list(vow), Tuple.to_list(val)}
case @protocol.List.conform(ls, path, via, route, lv) do
{:ok, list} -> {:ok, List.to_tuple(list)}
{:error, problems} -> {:error, problems}
end
end
def conform(_vow, path, via, route, val) do
{:error, [ConformError.new_problem(&is_tuple/1, path, via, route, val)]}
end
@impl Vow.Conformable
def unform(vow, val) when is_tuple(val) do
{ls, lv} = {Tuple.to_list(vow), Tuple.to_list(val)}
case @protocol.List.unform(ls, lv) do
{:ok, list} -> {:ok, List.to_tuple(list)}
{:error, reason} -> {:error, reason}
end
end
def unform(vow, val) do
{:error, %Vow.UnformError{vow: vow, val: val}}
end
@impl Vow.Conformable
def regex?(_vow), do: false
end
defimpl Vow.Conformable, for: Map do
@moduledoc false
import Vow.FunctionWrapper
alias Vow.ConformError
@type result :: {:ok, Vow.Conformable.conformed()} | {:error, [ConformError.Problem.t()]}
@impl Vow.Conformable
def conform(vow, path, via, route, val) when is_map(val) do
Enum.reduce(
vow,
{:ok, val},
conform_reducer(path, via, route, val)
)
end
def conform(_vow, path, via, route, val) do
{:error, [ConformError.new_problem(&is_map/1, path, via, route, val)]}
end
@impl Vow.Conformable
def unform(vow, val) when is_map(val) do
Enum.reduce(vow, {:ok, val}, unform_reducer(val))
end
def unform(vow, val) do
{:error, %Vow.UnformError{vow: vow, val: val}}
end
@impl Vow.Conformable
def regex?(_vow), do: false
@spec conform_reducer([term], [Vow.Ref.t()], [term], map) :: ({term, Vow.t()}, result -> result)
defp conform_reducer(path, via, route, val) do
&conform_reducer(path, via, route, val, &1, &2)
end
@spec conform_reducer([term], [Vow.Ref.t()], [term], map, {term, Vow.t()}, result) :: result
defp conform_reducer(path, via, route, val, {k, s}, {:ok, c}) do
if Map.has_key?(val, k) do
case @protocol.conform(s, [k | path], via, [k | route], Map.get(val, k)) do
{:ok, conformed} -> {:ok, Map.put(c, k, conformed)}
{:error, problems} -> {:error, problems}
end
else
{:error,
[
ConformError.new_problem(
wrap(&Map.has_key?(&1, k), k: k),
path,
via,
route,
val
)
]}
end
end
defp conform_reducer(path, via, route, val, {k, s}, {:error, ps}) do
if Map.has_key?(val, k) do
case @protocol.conform(s, [k | path], via, [k | route], Map.get(val, k)) do
{:ok, _conformed} -> {:error, ps}
{:error, problems} -> {:error, ps ++ problems}
end
else
{:error,
[
ConformError.new_problem(
wrap(&Map.has_key?(&1, k), k: k),
path,
via,
route,
val
)
]}
end
end
@spec unform_reducer(map) :: ({term, Vow.t()}, result -> result)
defp unform_reducer(val) do
fn x, acc -> unform_reducer(val, x, acc) end
end
@spec unform_reducer(map, {term, Vow.t()}, result) :: result
defp unform_reducer(_value, _item, {:error, reason}) do
{:error, reason}
end
defp unform_reducer(val, {k, v}, {:ok, acc}) do
if Map.has_key?(val, k) do
case @protocol.unform(v, Map.get(val, k)) do
{:ok, unformed} -> {:ok, Map.put(acc, k, unformed)}
{:error, reason} -> {:error, reason}
end
else
# a key absent from the value leaves the accumulator unchanged
{:ok, acc}
end
end
end
defimpl Vow.Conformable, for: MapSet do
@moduledoc false
import Vow.FunctionWrapper, only: [wrap: 1]
alias Vow.ConformError
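# Descriptive note: a MapSet vow accepts either another MapSet that is a
# subset of the vow, or a single term that is a member of the vow; anything
# else yields a conform problem.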
@impl Vow.Conformable
def conform(vow, path, via, route, %MapSet{} = val) do
if MapSet.subset?(val, vow) do
{:ok, val}
else
{:error,
[
ConformError.new_problem(
wrap(&MapSet.subset?(&1, vow)),
path,
via,
route,
val
)
]}
end
end
def conform(vow, path, via, route, val) do
if MapSet.member?(vow, val) do
{:ok, val}
else
{:error,
[
ConformError.new_problem(
wrap(&MapSet.member?(vow, &1)),
path,
via,
route,
val
)
]}
end
end
@impl Vow.Conformable
def unform(_vow, val) do
{:ok, val}
end
@impl Vow.Conformable
def regex?(_vow), do: false
end
defimpl Vow.Conformable, for: Regex do
@moduledoc false
import Vow.FunctionWrapper, only: [wrap: 1]
alias Vow.ConformError
@impl Vow.Conformable
def conform(vow, path, via, route, val) when is_bitstring(val) do
if Regex.match?(vow, val) do
{:ok, val}
else
{:error,
[
ConformError.new_problem(
wrap(&Regex.match?(vow, &1)),
path,
via,
route,
val
)
]}
end
end
def conform(_vow, path, via, route, val) do
{:error, [ConformError.new_problem(&is_bitstring/1, path, via, route, val)]}
end
@impl Vow.Conformable
def unform(_vow, val) do
{:ok, val}
end
@impl Vow.Conformable
def regex?(_vow), do: false
end
defimpl Vow.Conformable, for: Range do
@moduledoc false
import Vow.FunctionWrapper, only: [wrap: 1]
alias Vow.{ConformError, ConformError.Problem}
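# Descriptive note: a Range vow accepts another Range whose endpoints both
# fall inside the vow, or a single integer that is a member of the vow.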
@impl Vow.Conformable
def conform(vow, path, via, route, _.._ = val) do
member = {Enum.member?(vow, val.first), Enum.member?(vow, val.last)}
conform_range(member, vow, path, via, route, val)
end
def conform(vow, path, via, route, val) when is_integer(val) do
if Enum.member?(vow, val) do
{:ok, val}
else
pred = wrap(&Enum.member?(vow, &1))
{:error, [Problem.new(pred, path, via, route, val)]}
end
end
def conform(_vow, path, via, route, val) do
{:error, [ConformError.new_problem(&is_integer/1, path, via, route, val)]}
end
@impl Vow.Conformable
def unform(_vow, val) do
{:ok, val}
end
@impl Vow.Conformable
def regex?(_vow), do: false
@spec conform_range({boolean, boolean}, Vow.t(), [term], [Vow.Ref.t()], [term], term) ::
Vow.Conformable.result()
def conform_range({true, true}, _vow, _path, _via, _route, val) do
{:ok, val}
end
def conform_range({true, false}, vow, path, via, route, val) do
pred = wrap(&Enum.member?(vow, &1.last))
{:error, [Problem.new(pred, path, via, route, val)]}
end
def conform_range({false, true}, vow, path, via, route, val) do
# the failing endpoint in this clause is `first`, not `last`
pred = wrap(&Enum.member?(vow, &1.first))
{:error, [Problem.new(pred, path, via, route, val)]}
end
end
def conform_range({false, false}, vow, path, via, route, val) do
pred1 = wrap(&Enum.member?(vow, &1.first))
pred2 = wrap(&Enum.member?(vow, &1.last))
{:error,
[
Problem.new(pred1, path, via, route, val),
Problem.new(pred2, path, via, route, val)
]}
end
end
defimpl Vow.Conformable, for: Date.Range do
@moduledoc false
import Vow.FunctionWrapper, only: [wrap: 1]
import Vow.Conformable.Range, only: [conform_range: 6]
alias Vow.ConformError.Problem
@impl Vow.Conformable
def conform(vow, path, via, route, %Date.Range{} = val) do
member = {Enum.member?(vow, val.first), Enum.member?(vow, val.last)}
conform_range(member, vow, path, via, route, val)
end
def conform(vow, path, via, route, %Date{} = val) do
if Enum.member?(vow, val) do
{:ok, val}
else
pred = wrap(&Enum.member?(vow, &1))
{:error, [Problem.new(pred, path, via, route, val)]}
end
end
def conform(_vow, path, via, route, val) do
pred = wrap(&match?(%Date{}, &1))
{:error, [Problem.new(pred, path, via, route, val)]}
end
@impl Vow.Conformable
def unform(_vow, val) do
{:ok, val}
end
@impl Vow.Conformable
def regex?(_vow), do: false
end
defimpl Vow.Conformable, for: Any do
@moduledoc false
import Vow.FunctionWrapper, only: [wrap: 1]
alias Vow.ConformError
@impl Vow.Conformable
def conform(%{__struct__: mod} = struct, path, via, route, %{__struct__: mod} = val) do
case @protocol.Map.conform(
Map.delete(struct, :__struct__),
path,
via,
route,
Map.delete(val, :__struct__)
) do
{:ok, conformed} -> {:ok, Map.put(conformed, :__struct__, mod)}
{:error, reason} -> {:error, reason}
end
end
def conform(%{__struct__: _} = vow, path, via, route, %{__struct__: _} = val) do
problem =
ConformError.new_problem(
wrap(&(&1.__struct__ == vow.__struct__)),
path,
via,
route,
val
)
case @protocol.Map.conform(
Map.delete(vow, :__struct__),
path,
via,
route,
Map.delete(val, :__struct__)
) do
{:ok, _conformed} -> {:error, [problem]}
{:error, problems} -> {:error, [problem | problems]}
end
end
def conform(%{__struct__: _}, path, via, route, val) do
{:error,
[
ConformError.new_problem(
wrap(&Map.has_key?(&1, :__struct__)),
path,
via,
route,
val
)
]}
end
def conform(vow, path, via, route, val) do
if vow == val do
{:ok, val}
else
{:error, [ConformError.new_problem(wrap(&(&1 == vow)), path, via, route, val)]}
end
end
@impl Vow.Conformable
def unform(%{__struct__: mod} = vow, %{__struct__: mod} = val) do
case @protocol.Map.unform(Map.delete(vow, :__struct__), Map.delete(val, :__struct__)) do
{:error, reason} -> {:error, reason}
{:ok, unformed} -> {:ok, Map.put(unformed, :__struct__, mod)}
end
end
def unform(%{__struct__: _} = vow, val) do
{:error, %Vow.UnformError{vow: vow, val: val}}
end
def unform(_vow, val) do
{:ok, val}
end
@impl Vow.Conformable
def regex?(_vow), do: false
end
|
lib/vow/conformable.ex
| 0.735642 | 0.409516 |
conformable.ex
|
starcoder
|
defmodule HTS221.CTRLReg1 do
@moduledoc """
Control the power state, data rate, and update strategy of the HTS221
"""
import Bitwise
@power_mode_active 0x80
@wait_for_reading 0x04
@one_Hz 0x01
@seven_Hz 0x02
@twelve_point_five_Hz 0x03
@type power_mode() :: :down | :active
@type block_data_update() :: :continuous | :wait_for_reading
@type output_data_rate() :: :one_shot | :one_Hz | :seven_Hz | :twelve_point_five_Hz
@type t() :: %__MODULE__{
power_mode: power_mode(),
block_data_update: block_data_update(),
output_data_rate: output_data_rate()
}
defstruct power_mode: :down, block_data_update: :continuous, output_data_rate: :one_shot
@doc """
Parse the binary into a `HTS221.CTRLReg1` structure
"""
@spec from_binary(binary()) :: t()
def from_binary(
<<power_mode::size(1), _reserved::size(4), block_data_update::size(1), odr::size(2)>>
) do
%__MODULE__{
power_mode: power_mode_from_bit(power_mode),
block_data_update: block_data_update_from_bit(block_data_update),
output_data_rate: output_data_rate_from_int(odr)
}
end
@doc """
Turn the `HTS221.CTRLReg1` structure into a binary to be sent to the transport
layer
"""
@spec to_binary(t()) :: binary()
def to_binary(ctrl_reg1) do
<<0x20, fields_to_byte(ctrl_reg1)>>
end
defp power_mode_from_bit(0), do: :down
defp power_mode_from_bit(1), do: :active
defp block_data_update_from_bit(0), do: :continuous
defp block_data_update_from_bit(1), do: :wait_for_reading
defp output_data_rate_from_int(0), do: :one_shot
defp output_data_rate_from_int(1), do: :one_Hz
defp output_data_rate_from_int(2), do: :seven_Hz
defp output_data_rate_from_int(3), do: :twelve_point_five_Hz
defp fields_to_byte(ctrl_reg1) do
0
|> mask_with_field(:power_mode, ctrl_reg1.power_mode)
|> mask_with_field(:block_data_update, ctrl_reg1.block_data_update)
|> mask_with_field(:output_data_rate, ctrl_reg1.output_data_rate)
end
defp mask_with_field(int, :power_mode, :active), do: int ||| @power_mode_active
defp mask_with_field(int, :block_data_update, :wait_for_reading), do: int ||| @wait_for_reading
defp mask_with_field(int, :output_data_rate, :one_Hz), do: int ||| @one_Hz
defp mask_with_field(int, :output_data_rate, :seven_Hz), do: int ||| @seven_Hz
defp mask_with_field(int, :output_data_rate, :twelve_point_five_Hz),
do: int ||| @twelve_point_five_Hz
defp mask_with_field(int, _, field) when field in [:down, :continuous, :one_shot], do: int
defimpl HTS221.Register do
alias HTS221.{CTRLReg1, IORead, IOWrite}
def read(_ctrl_reg1) do
{:ok,
%IORead{
register: 0x20,
length: 1
}}
end
def write(ctrl_reg1) do
binary = CTRLReg1.to_binary(ctrl_reg1)
{:ok, IOWrite.new(binary)}
end
end
end
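# Usage sketch (illustrative; 0x85 encodes active power mode, block data
# update on, and a 1 Hz output data rate):
#
#   reg = HTS221.CTRLReg1.from_binary(<<0x85>>)
#   #=> %HTS221.CTRLReg1{power_mode: :active,
#   #     block_data_update: :wait_for_reading, output_data_rate: :one_Hz}
#   HTS221.CTRLReg1.to_binary(reg)
#   #=> <<0x20, 0x85>>   # register address followed by the payload byte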
|
lib/hts221/ctrl_reg1.ex
| 0.757346 | 0.69416 |
ctrl_reg1.ex
|
starcoder
|
defmodule JWT.Jwa do
@moduledoc """
Choose a cryptographic algorithm to be used for a JSON Web Signature (JWS)
see http://tools.ietf.org/html/rfc7518
"""
alias JWT.Algorithm.Ecdsa
alias JWT.Algorithm.Hmac
alias JWT.Algorithm.Rsa
@algorithms ~r/(HS|RS|ES)(256|384|512)?/i
@doc """
Return a Message Authentication Code (MAC) for a particular `algorithm`
## Example
iex> key = "<KEY>"
...> JWT.Jwa.sign("HS256", key, "signing_input")
<<90, 34, 44, 252, 147, 130, 167, 173, 86, 191, 247, 93, 94, 12, 200, 30, 173, 115, 248, 89, 246, 222, 4, 213, 119, 74, 70, 20, 231, 194, 104, 103>>
"""
def sign(algorithm, key, signing_input) do
{module, sha_bits} = destructured_alg(algorithm)
apply(module, :sign, [sha_bits, key, signing_input])
end
@doc """
Predicate to validate that `mac` does verify by `algorithm`
## Example
iex> mac = <<90, 34, 44, 252, 147, 130, 167, 173, 86, 191, 247, 93, 94, 12, 200, 30, 173, 115, 248, 89, 246, 222, 4, 213, 119, 74, 70, 20, 231, 194, 104, 103>>
...> key = "<KEY>"
...> JWT.Jwa.verify?(mac, "HS256", key, "signing_input")
true
"""
def verify?(mac, algorithm, key, signing_input) do
{module, sha_bits} = destructured_alg(algorithm)
apply(module, :verify?, [mac, sha_bits, key, signing_input])
end
@doc """
Return a tuple with a valid encryption module and sha_bits; raise if `string` is not a supported algorithm
## Example
iex> JWT.Jwa.destructured_alg("HS256")
{JWT.Algorithm.Hmac, :sha256}
"""
def destructured_alg(string) do
validated_alg(Regex.run(@algorithms, string))
end
defp validated_alg(captures) when length(captures) == 3 do
[_, alg, sha_bits] = captures
{alg_module(String.downcase(alg)), sha_prefixed(sha_bits)}
end
defp validated_alg(_), do: raise("Unrecognized algorithm")
defp alg_module("hs"), do: Hmac
defp alg_module("rs"), do: Rsa
defp alg_module("es"), do: Ecdsa
defp sha_prefixed(sha_bits), do: String.to_atom("sha" <> sha_bits)
end
|
lib/jwt/jwa.ex
| 0.873073 | 0.446193 |
jwa.ex
|
starcoder
|
defmodule UnblockMeSolver.Generator do
@moduledoc false
@doc """
Generates a 5 by 5 problem with 1 block.
Ideally used as a test case; the solution block is unobstructed.
## Examples
iex> UnblockMeSolver.Generator.trivial()
[
[nil, nil, nil, nil, nil],
[nil, nil, nil, nil, nil],
['A', 'A', nil, nil, nil],
[nil, nil, nil, nil, nil],
[nil, nil, nil, nil, nil]
]
"""
def trivial do
[
[nil, nil, nil, nil, nil],
[nil, nil, nil, nil, nil],
['A', 'A', nil, nil, nil],
[nil, nil, nil, nil, nil],
[nil, nil, nil, nil, nil]
]
end
@doc """
Generates a 5 by 5 problem with 2 blocks.
Ideally used as a test case or a demo; the solution block is obstructed by another block.
## Examples
iex> UnblockMeSolver.Generator.simple()
[
[nil, nil, nil, nil, nil],
[nil, nil, nil, 'B', nil],
['A', 'A', nil, 'B', nil],
[nil, nil, nil, nil, nil],
[nil, nil, nil, nil, nil]
]
"""
def simple do
[
[nil, nil, nil, nil, nil],
[nil, nil, nil, 'B', nil],
['A', 'A', nil, 'B', nil],
[nil, nil, nil, nil, nil],
[nil, nil, nil, nil, nil]
]
end
@doc """
Generates a 5 by 5 problem with randomly placed blocks.
There is no guarantee that the problem will be solvable.
"""
def random do
trivial()
|> add_blocks(['B'])
end
def add_blocks(problem, blocks) do
if Enum.empty?(blocks) do
problem
else
[ block | remaining_blocks ] = blocks
problem
|> UnblockMeSolver.Move.Helper.rotate_cw
|> add_block(block)
|> add_blocks(remaining_blocks)
|> UnblockMeSolver.Move.Helper.rotate_ccw
end
end
@doc """
Inserts a block into a problem
## Examples
UnblockMeSolver.Generator.add_block([[nil, nil, :A, nil, nil]], :B) could equal
[[:B, :B, :A, nil, nil]]
"""
def add_block(problem, block) do
index = Enum.random(0..(Enum.count(problem) - 1))
new_row = problem
|> Enum.at(index)
|> insert_block(block)
problem
|> List.replace_at(index, new_row)
end
@doc """
Inserts a block into a run of at least 2 contiguous empty cells in a row.
## Examples
UnblockMeSolver.Generator.insert_block([nil, nil, :A, nil, nil], :B) could equal
[:B, :B, :A, nil, nil]
UnblockMeSolver.Generator.insert_block([nil, nil, nil, :A, nil, nil], :B) could equal
[:B, :B, nil, :A, nil, nil]
"""
def insert_block(row, block) do
chunks = row
|> Enum.chunk_by(fn x -> x != nil end)
index = Enum.find_index(chunks, fn chunk -> Enum.any?(chunk, fn x -> x == nil end) end)
chunk = Enum.find(chunks, fn chunk -> Enum.any?(chunk, fn x -> x == nil end) end)
max_length = Enum.count(chunk)
nums = Enum.random(2..max_length)
chunks
|> List.replace_at(index, mylist(nums, block, max_length))
|> List.flatten
end
@doc """
Generates a list of x values. The remainder of the space is filled with nil (unless another default is specified).
If y is less than x, an empty list is returned
## Examples
iex> UnblockMeSolver.Generator.mylist(2, :A, 1)
[]
iex> UnblockMeSolver.Generator.mylist(2, :A, 2)
[:A, :A]
iex> UnblockMeSolver.Generator.mylist(2, :A, 3)
[:A, :A, nil]
iex> UnblockMeSolver.Generator.mylist(2, :A, 4)
[:A, :A, nil, nil]
"""
def mylist(x, value, y, default \\ nil) do
cond do
x == y -> Enum.map(1..x, fn _ -> value end)
y < x -> []
true ->
Enum.map(1..x, fn _ -> value end) ++
Enum.map((x+1)..y, fn _ -> default end)
end
end
end
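# Usage sketch: generate a random (possibly unsolvable) puzzle and check
# that the 'A' solution block is present somewhere in the grid.
#
#   problem = UnblockMeSolver.Generator.random()
#   Enum.any?(problem, fn row -> 'A' in row end)
#   #=> true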
|
lib/unblock_me_solver/generator.ex
| 0.704262 | 0.582105 |
generator.ex
|
starcoder
|
defmodule Robotica.Plugins.Lifx.Animate do
@moduledoc """
Lifx animation
"""
alias Robotica.Devices.Lifx, as: RLifx
require Logger
alias Robotica.Devices.Lifx.HSBKA
@type hsbkas :: {integer(), list(HSBKA.t()), integer()}
@spec animate(map(), integer()) :: list(hsbkas)
defp animate(animation, number) do
animate_frames(animation.frames, number)
end
@spec animate_frames(list(), integer()) :: list(hsbkas)
defp animate_frames([], _), do: []
defp animate_frames([frame | tail], number) do
frame_count =
case frame.repeat do
nil -> 1
frame_count -> frame_count
end
head = animate_frame_repeat(frame, number, frame_count, 0)
tail = animate_frames(tail, number)
head ++ tail
end
@spec animate_frame_repeat(map(), integer(), integer(), integer()) :: list(hsbkas)
defp animate_frame_repeat(_, _, frame_count, frame_n) when frame_n >= frame_count do
[]
end
defp animate_frame_repeat(frame, number, frame_count, frame_n) do
case RLifx.get_colors_from_command(number, frame, frame_n) do
{:ok, colors} ->
hsbks = {65_535, colors, frame.sleep}
[hsbks | animate_frame_repeat(frame, number, frame_count, frame_n + 1)]
{:error, error} ->
Logger.error("Got error in lifx get_colors_from_command: #{inspect(error)}")
animate_frame_repeat(frame, number, frame_count, frame_n + 1)
end
end
@spec animate_repeat(RLifx.callback(), list(hsbkas), integer | nil, integer) :: :ok
defp animate_repeat(_, _, repeat_count, repeat_n)
when not is_nil(repeat_count) and repeat_n >= repeat_count do
:ok
end
defp animate_repeat(sender, list_hsbkas, repeat_count, repeat_n) do
Enum.each(list_hsbkas, fn hsbkas ->
{power, colors, sleep} = hsbkas
sender.(power, colors)
Process.sleep(sleep)
end)
animate_repeat(sender, list_hsbkas, repeat_count, repeat_n + 1)
end
@spec go(RLifx.callback(), integer(), map()) :: :ok
def go(sender, number, animation) do
repeat_count =
case animation.frames do
[] -> 0
_ -> animation.repeat
end
list_hsbks = animate(animation, number)
animate_repeat(sender, list_hsbks, repeat_count, 0)
end
end
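# Usage sketch (the shape of `animation` is inferred from this module, not
# a documented contract; `colors` stands in for whatever fields
# RLifx.get_colors_from_command/3 expects on a frame):
#
#   sender = fn _power, _colors -> :ok end
#   animation = %{
#     repeat: 2,                     # nil would repeat forever
#     frames: [%{repeat: 1, sleep: 500, colors: ...}]
#   }
#   Robotica.Plugins.Lifx.Animate.go(sender, 16, animation)
#   #=> :ok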
|
robotica/lib/robotica/plugins/lifx/animate.ex
| 0.777384 | 0.4206 |
animate.ex
|
starcoder
|
defmodule News.Util.RandomColor do
# Quick and dirty port of https://github.com/davidmerfield/randomColor
@color_bounds [
{:monochrome, [0,360], [[0,0],[100,0]]},
{:red, [-26,18], [[20,100],[30,92],[40,89],[50,85],[60,78],[70,70],[80,60],[90,55],[100,50]]},
{:orange, [19,46], [[20,100],[30,93],[40,88],[50,86],[60,85],[70,70],[100,70]]},
{:yellow, [47,62], [[25,100],[40,94],[50,89],[60,86],[70,84],[80,82],[90,80],[100,75]]},
{:green, [63,178], [[30,100],[40,90],[50,85],[60,81],[70,74],[80,64],[90,50],[100,40]]},
{:blue, [179, 257], [[20,100],[30,86],[40,80],[50,74],[60,60],[70,52],[80,44],[90,39],[100,35]]},
{:purple, [258, 282], [[20,100],[30,87],[40,79],[50,70],[60,65],[70,59],[80,52],[90,45],[100,42]]},
{:pink, [283, 334], [[20,100],[30,90],[40,86],[60,84],[80,80],[90,75],[100,73]]},
]
@color_bounds_keys Enum.map(@color_bounds, &(elem(&1, 0)))
defstruct hue: :random, luminosity: :random
def get(state \\ %__MODULE__{}) do
:random.seed(:erlang.now)
state = if state.hue == :monochrome, do: %__MODULE__{state | luminosity: 0}, else: state
hue = get_hue(state)
saturation = get_saturation(hue, state)
brightness = get_brightness(hue, saturation, state)
get_color(hue, saturation, brightness)
end
defp define_color({color, hue_range, lower_bounds}) do
s_min = lower_bounds |> List.first |> List.first
s_max = lower_bounds |> List.last |> List.first
b_min = lower_bounds |> List.last |> Enum.at(1)
b_max = lower_bounds |> List.first |> Enum.at(1)
%{
color: color,
lower_bounds: lower_bounds,
hue_range: hue_range,
saturation_range: [s_min, s_max],
brightness_range: [b_min, b_max],
}
end
defp get_hue(state) do
range = get_hue_range(state.hue)
hue = random_within(range)
# wrap negative hues (red's range starts at -26) back into [0, 360)
if hue < 0, do: 360 + hue, else: hue
end
defp get_hue_range(number) when is_integer(number) and number < 360 and number > 0, do: [number, number]
defp get_hue_range(color) when color in @color_bounds_keys, do: elem(List.keyfind(@color_bounds, color, 0), 1)
defp get_hue_range(_), do: [0, 334]
defp get_hue_color_info(hue) when is_integer(hue) do
hue = if hue >= 334 and hue <= 360, do: hue - 360, else: hue
color = Enum.find(@color_bounds, nil, fn {_, [hue0, hue1], _} -> hue >= hue0 and hue <= hue1 end)
if color, do: define_color(color), else: nil
end
def get_saturation(_hue, %__MODULE__{luminosity: l}) when is_integer(l), do: l
def get_saturation(hue, state) do
[s_min, s_max] = get_saturation_range(hue) # TODO
range = case state.luminosity do
:random -> [0, 100]
:bright -> [55, s_max]
:dark -> [s_max - 10, s_max]
:light -> [s_min, 55]
end
random_within(range)
end
defp get_saturation_range(hue) do
get_hue_color_info(hue).saturation_range
end
defp get_brightness(hue, saturation, state) do
b_min = get_minimum_brightness(hue, saturation)
b_max = 100
range = case state.luminosity do
:dark -> [b_min, b_min + 20]
:light -> [(b_max + b_min)/2, b_max]
# :bright gets the full computed brightness range
:bright -> [b_min, b_max]
:random -> [0, 100]
end
random_within(range)
end
defp get_minimum_brightness(hue, saturation) do
lower_bounds = get_hue_color_info(hue).lower_bounds
minimum = Enum.map(Enum.with_index(lower_bounds), fn({[s1, v1], index}) ->
[s2, v2] = Enum.at(lower_bounds, index + 1) || [999999,999999]
if s2 && v2 && saturation >= s1 and saturation <= s2 do
m = (v2 - v1)/(s2 - s1)
b = v1 - m*s1
m*saturation + b
end
end)
|> Enum.filter(fn(x) -> x != nil end)
|> List.first
minimum || 0
end
defp get_color(hue, saturation, brightness) do
rgb = hsv_to_rgb(hue, saturation, brightness)
dark = brightness < 65
[hue: hue, saturation: saturation, brightness: brightness, rgb: rgb, dark: dark]
end
defp hsv_to_rgb(h, s, v) do
h = if h == 0, do: 1, else: h
h = if h == 360, do: 359, else: h
h = h / 360
s = s / 100
v = v / 100
h_i = trunc(h*6)
f = h * 6 - h_i
p = v * (1 - s)
q = v * (1 - f*s)
t = v * (1 - (1 - f)*s)
[r, g, b] = case h_i do
0 -> [v, t, p]
1 -> [q, v, p]
2 -> [p, v, t]
3 -> [p, q, v]
4 -> [t, p, v]
5 -> [v, p, q]
end
[trunc(r*255), trunc(g*255), trunc(b*255)]
end
# uniform integer in [start, stop]
defp random_within([start, stop]) do
trunc(start + :random.uniform() * (stop + 1 - start))
end
end
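# Usage sketch (output values are random; shown for shape only):
#
#   News.Util.RandomColor.get(%News.Util.RandomColor{hue: :blue, luminosity: :bright})
#   #=> [hue: 215, saturation: 71, brightness: 82, rgb: [61, 112, 209], dark: false]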
|
lib/news/util/random_color.ex
| 0.53437 | 0.572006 |
random_color.ex
|
starcoder
|
defmodule ConciergeSite.TimeHelper do
@moduledoc """
Time functions for subscription views
"""
import Phoenix.HTML.Form, only: [select: 4, time_select: 3]
import Phoenix.HTML.Tag, only: [content_tag: 2, content_tag: 3]
alias Calendar.Strftime
@doc """
Takes a time struct and returns HH:MM AM/PM
"""
@spec format_time(DateTime.t() | nil, String.t(), boolean) :: String.t()
def format_time(time, format \\ "%l:%M %p", strip_leading_zero? \\ false)
def format_time(nil, _, _), do: ""
def format_time(time, format, strip_leading_zero?) do
formatted_time =
time
|> time_to_string()
|> format_time_string(format)
if strip_leading_zero?, do: Regex.replace(~r/^0?/, formatted_time, ""), else: formatted_time
end
@doc """
Takes a time string in format HH:MM:SS and returns HH:MM AM/PM
"""
def format_time_string(time_string, format \\ "%I:%M %p") do
time_string
|> String.split(":")
|> Enum.map(&String.to_integer/1)
|> List.to_tuple()
|> Time.from_erl!()
|> Strftime.strftime!(format)
end
@doc """
Converts a Time.t to a string with the H:M:S format
"""
@spec time_to_string(Time.t() | nil) :: String.t() | nil
def time_to_string(nil), do: nil
def time_to_string(time), do: Strftime.strftime!(time, "%H:%M:%S")
@spec trip_time_select(Phoenix.HTML.Form.t(), atom, DateTime.t() | Time.t()) ::
Phoenix.HTML.Safe.t()
def trip_time_select(form, field, time) do
content_tag :div, class: "form__time" do
time_select(form, field, builder: &time_select_builder(&1, field, time))
end
end
defp time_select_builder(builder, field, time) do
content_tag :div do
[
content_tag(:label, "Hour", for: "form__time_hour", class: "sr-only"),
builder.(
:hour,
required: true,
options: zero_padded_numbers().(1..12),
value: format_time(time, "%I", true),
data: [type: "time"]
),
content_tag(:span, ":"),
content_tag(:label, "Minute", for: "form__time_minute", class: "sr-only"),
builder.(
:minute,
required: true,
value: format_time(time, "%M", true),
data: [type: "time"]
),
" ",
content_tag(:label, "AM or PM", for: "form__time_am_pm", class: "sr-only"),
select(
:trip,
:am_pm,
[AM: "AM", PM: "PM"],
required: true,
value: format_time(time, "%p"),
id: "trip_#{field}_am_pm",
name: "trip[#{field}][am_pm]",
data: [type: "time"]
)
]
end
end
defp zero_padded_numbers do
&Enum.map(&1, fn i ->
pre = if i < 10, do: "0"
{"#{pre}#{i}", i}
end)
end
end
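# Usage sketch:
#
#   ConciergeSite.TimeHelper.format_time_string("14:30:00")
#   #=> "02:30 PM"
#   ConciergeSite.TimeHelper.time_to_string(~T[08:15:00])
#   #=> "08:15:00"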
|
apps/concierge_site/lib/views/time_helper.ex
| 0.74872 | 0.439747 |
time_helper.ex
|
starcoder
|
defmodule CSSEx.Helpers.EEX do
@moduledoc false
import CSSEx.Parser, only: [open_current: 2, close_current: 1, add_error: 2]
import CSSEx.Helpers.Shared,
only: [
inc_col: 1,
inc_col: 2,
inc_line: 1,
inc_line: 2,
inc_no_count: 1,
file_and_line_opts: 1
]
import CSSEx.Helpers.Error, only: [error_msg: 1]
@line_terminators CSSEx.Helpers.LineTerminators.code_points()
@white_space CSSEx.Helpers.WhiteSpace.code_points()
defstruct line: 0, column: 0, level: 0, acc: "", no_count: 0
def parse(rem, data) do
case do_parse(rem, open_current(data, :eex), %__MODULE__{}) do
{:ok, {_, _} = result} -> result
{:error, _new_data} = error -> error
end
end
def finish(rem, data, %{acc: eex_block, line: s_line}) do
acc = IO.chardata_to_string(eex_block)
final = eval_with_bindings(acc, data)
new_final = :lists.flatten([to_charlist(final), ?$, 0, ?$, 0, ?$ | rem])
:erlang.garbage_collect()
new_data =
data
|> inc_line(s_line)
|> inc_no_count()
|> close_current()
{:ok, {new_final, new_data}}
rescue
error ->
{:error, add_error(%{data | line: s_line}, error_msg({:eex, error}))}
end
def do_parse([], data, %{column: col, line: line}) do
new_data =
data
|> inc_line(line)
|> inc_col(col)
{:error, new_data}
end
def do_parse('<% end %>' ++ rem, data, %{acc: acc} = state) do
%{state | acc: [acc | '<% end %>']}
|> inc_col(9)
|> inc_level(-1)
|> case do
%{level: 0} = new_state -> finish(rem, data, new_state)
new_state -> do_parse(rem, data, new_state)
end
end
def do_parse('<%' ++ rem, data, %{acc: acc, level: level} = state) do
new_state =
state
|> inc_col(2)
|> inc_level()
do_parse(rem, data, %{new_state | acc: [acc | '<%'], level: level + 1})
end
def do_parse('do %>' ++ rem, data, %{acc: acc} = state) do
new_state =
state
|> inc_col(5)
do_parse(rem, data, %{new_state | acc: [acc | 'do %>']})
end
def do_parse('%>' ++ rem, data, %{acc: acc} = state) do
%{state | acc: [acc | '%>']}
|> inc_col(2)
|> inc_level(-1)
|> case do
%{level: 0} = new_state -> finish(rem, data, new_state)
new_state -> do_parse(rem, data, new_state)
end
end
Enum.each(@line_terminators, fn char ->
def do_parse([unquote(char) | rem], data, %{acc: acc} = state),
do: do_parse(rem, data, inc_line(%{state | acc: [acc, unquote(char)]}))
end)
Enum.each(@white_space, fn char ->
def do_parse([unquote(char) | rem], data, %{acc: acc} = state),
do: do_parse(rem, data, inc_col(%{state | acc: [acc, unquote(char)]}))
end)
def do_parse([char | rem], data, %{acc: acc} = state),
do: do_parse(rem, data, inc_col(%{state | acc: [acc, char]}))
def replace_and_extract_assigns(acc, matches, %{assigns: assigns, local_assigns: local_assigns}) do
Enum.reduce_while(matches, {acc, []}, fn <<"@::", name::binary>> = full,
{eex_block, bindings} ->
case Map.get(local_assigns, name) || Map.get(assigns, name) do
nil ->
{:halt, {:error, {:not_declared, :var, name}}}
val ->
{:cont,
{
String.replace(eex_block, full, fn <<"@::", name::binary>> ->
<<"@", name::binary>>
end),
[{String.to_atom(name), val} | bindings]
}}
end
end)
end
def inc_level(%{level: level} = state, amount \\ 1),
do: %{state | level: level + amount}
def eval_with_bindings(acc, data),
do:
EEx.eval_string(
acc,
[assigns: build_bindings(data)],
file_and_line_opts(data)
)
def build_bindings(%{assigns: a, local_assigns: la}),
do:
Enum.map(
Map.merge(a, la),
fn {k, v} -> {String.to_atom(k), v} end
)
end
|
lib/helpers/eex.ex
| 0.636579 | 0.415996 |
eex.ex
|
starcoder
|
defmodule ElixirCardDealer do
@moduledoc """
Provides methods for creating and handling a deck of cards.
"""
@doc """
Greets user.
"""
def hello do
"Hello there, Player."
end
@doc """
Creates and returns a new deck of 52 cards.
"""
def create_deck do
values = ["Ace", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine", "Ten", "Jack", "Queen", "King"]
suits = ["Clubs", "Diamonds", "Hearts", "Spades"]
# List comprehension (works as a map)
for suit <- suits, value <- values do
"#{value} of #{suit}"
end
end
@doc """
Returns a shuffled deck of cards.
"""
def shuffle(deck) do
Enum.shuffle(deck)
end
@doc """
Ascertains whether a deck contains a given card.
## Examples
iex> deck = ElixirCardDealer.create_deck
iex> ElixirCardDealer.contains?(deck, "Ace of Spades")
true
"""
def contains?(deck, hand) do
Enum.member?(deck, hand)
end
@doc """
Divides a deck of cards into a hand and the remaining deck.
The `hand_size` argument specifies how many cards the hand should contain.
## Examples
iex> deck = ElixirCardDealer.create_deck
iex> {hand, _remaining} = ElixirCardDealer.deal(deck, 1)
iex> hand
["Ace of Clubs"]
"""
def deal(deck, hand_size) do
Enum.split(deck, hand_size)
end
@doc """
Saves a deck of cards to the filesystem.
"""
def save(deck, filename) do
binary = :erlang.term_to_binary(deck)
File.write(filename, binary)
end
@doc """
Loads a deck of cards from the filesystem.
"""
def load(filename) do
case File.read(filename) do
{:ok, binary} -> :erlang.binary_to_term binary
{:error, _reason} -> "The file '#{filename}' does not exist."
end
end
@doc """
Creates, shuffles, and divides a deck of cards into a hand.
The `hand_size` argument specifies how many cards the hand should contain.
"""
def create_hand(hand_size) do
ElixirCardDealer.create_deck
|> ElixirCardDealer.shuffle
|> ElixirCardDealer.deal(hand_size)
end
end
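# Usage sketch: a 52-card deck dealt into a 5-card hand leaves 47 cards.
#
#   {hand, rest} = ElixirCardDealer.create_hand(5)
#   length(hand) #=> 5
#   length(rest) #=> 47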
|
lib/elixir_card_dealer.ex
| 0.845799 | 0.572424 |
elixir_card_dealer.ex
|
starcoder
|
defmodule Bitcoinex.Transaction do
@moduledoc """
Bitcoin on-chain transaction structure.
Supports serialization of transactions.
"""
alias Bitcoinex.Transaction
alias Bitcoinex.Transaction.In
alias Bitcoinex.Transaction.Out
alias Bitcoinex.Transaction.Witness
alias Bitcoinex.Utils
alias Bitcoinex.Transaction.Utils, as: TxUtils
defstruct [
:version,
:vbytes,
:inputs,
:outputs,
:witnesses,
:lock_time
]
@doc """
Returns the TxID of the given transaction.
TxID is sha256(sha256(nVersion | txins | txouts | nLockTime))
"""
def transaction_id(txn) do
legacy_txn = TxUtils.serialize_legacy(txn)
{:ok, legacy_txn} = Base.decode16(legacy_txn, case: :lower)
Base.encode16(
<<:binary.decode_unsigned(
Utils.double_sha256(legacy_txn),
:big
)::little-size(256)>>,
case: :lower
)
end
@doc """
Decodes a transaction in a hex encoded string into binary.
"""
def decode(tx_hex) when is_binary(tx_hex) do
case Base.decode16(tx_hex, case: :lower) do
{:ok, tx_bytes} ->
case parse(tx_bytes) do
{:ok, txn} ->
{:ok, txn}
:error ->
{:error, :parse_error}
end
:error ->
{:error, :decode_error}
end
end
# Extracts and parses a transaction from the head of a binary
defp parse_one(tx_bytes) do
<<version::little-size(32), remaining::binary>> = tx_bytes
{is_segwit, remaining} =
case remaining do
<<1::size(16), segwit_remaining::binary>> ->
{:segwit, segwit_remaining}
_ ->
{:not_segwit, remaining}
end
# Inputs.
{in_counter, remaining} = TxUtils.get_counter(remaining)
{inputs, remaining} = In.parse_inputs(in_counter, remaining)
# Outputs.
{out_counter, remaining} = TxUtils.get_counter(remaining)
{outputs, remaining} = Out.parse_outputs(out_counter, remaining)
before_witness_bytes = byte_size(remaining)
# If flag 0001 is present, this indicates an attached segregated witness structure.
{witnesses, remaining} =
if is_segwit == :segwit do
Witness.parse_witness(in_counter, remaining)
else
{nil, remaining}
end
# discounted witness bytes = all of the witness segment
# plus segwit marker & segwit flag bytes
witness_byte_size = 2 + before_witness_bytes - byte_size(remaining)
<<lock_time::little-size(32), remaining::binary>> = remaining
initial_byte_size = byte_size(tx_bytes)
remaining_byte_size = byte_size(remaining)
total_byte_size = initial_byte_size - remaining_byte_size
# calculate size in vbytes
vbytes =
if is_segwit == :segwit do
non_witness_byte_size = total_byte_size - witness_byte_size
non_witness_byte_size + (witness_byte_size / 4)
else
total_byte_size
end
txn = %Transaction{
version: version,
vbytes: vbytes,
inputs: inputs,
outputs: outputs,
witnesses: witnesses,
lock_time: lock_time
}
cond do
byte_size(remaining) < 0 ->
:error
byte_size(remaining) > 0 ->
{:ok, txn, remaining}
true ->
{:ok, txn}
end
end
# returns transaction
defp parse(tx_bytes) do
case (parse_one(tx_bytes)) do
{:ok, txn} ->
{:ok, txn}
{:ok, _txn, _remaining} ->
:error
:error ->
:error
end
end
def parse_list(counter, txns), do: do_parse_list(txns, [], counter)
defp do_parse_list(remaining, txns, 0), do: {Enum.reverse(txns), remaining}
defp do_parse_list(remaining, txns, count) do
case parse_one(remaining) do
{:ok, txn} ->
do_parse_list(<<>>, [txn | txns], count - 1)
{:ok, txn, remaining} ->
do_parse_list(remaining, [txn | txns], count - 1)
end
end
end
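# Usage sketch (`raw_hex` is a placeholder for a lowercase hex-encoded
# transaction, e.g. from `bitcoin-cli getrawtransaction <txid>`):
#
#   {:ok, txn} = Bitcoinex.Transaction.decode(raw_hex)
#   txn.vbytes                                # virtual size, in vbytes
#   Bitcoinex.Transaction.transaction_id(txn) # big-endian txid as hex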
defmodule Bitcoinex.Transaction.Utils do
@moduledoc """
Utilities for when dealing with transaction objects.
"""
alias Bitcoinex.Transaction.In
alias Bitcoinex.Transaction.Out
@doc """
Returns the Variable Length Integer used in serialization.
Reference: https://en.bitcoin.it/wiki/Protocol_documentation#Variable_length_integer
"""
@spec get_counter(binary) :: {non_neg_integer(), binary()}
def get_counter(<<counter::little-size(8), vec::binary>>) do
case counter do
# 0xFD followed by the length as uint16_t
0xFD ->
<<len::little-size(16), vec::binary>> = vec
{len, vec}
# 0xFE followed by the length as uint32_t
0xFE ->
<<len::little-size(32), vec::binary>> = vec
{len, vec}
# 0xFF followed by the length as uint64_t
0xFF ->
<<len::little-size(64), vec::binary>> = vec
{len, vec}
_ ->
{counter, vec}
end
end
@doc """
Serializes a transaction without the witness structure.
"""
def serialize_legacy(txn) do
version = <<txn.version::little-size(32)>>
tx_in_count = serialize_compact_size_unsigned_int(length(txn.inputs))
inputs = In.serialize_inputs(txn.inputs)
tx_out_count = serialize_compact_size_unsigned_int(length(txn.outputs))
outputs = Out.serialize_outputs(txn.outputs)
lock_time = <<txn.lock_time::little-size(32)>>
Base.encode16(
version <>
tx_in_count <>
inputs <>
tx_out_count <>
outputs <>
lock_time,
case: :lower
)
end
@doc """
Returns the serialized variable length integer.
"""
def serialize_compact_size_unsigned_int(compact_size) do
cond do
compact_size >= 0 and compact_size <= 0xFC ->
<<compact_size::little-size(8)>>
compact_size <= 0xFFFF ->
<<0xFD>> <> <<compact_size::little-size(16)>>
compact_size <= 0xFFFFFFFF ->
<<0xFE>> <> <<compact_size::little-size(32)>>
compact_size <= 0xFFFFFFFFFFFFFFFF ->
<<0xFF>> <> <<compact_size::little-size(64)>>
end
end
end
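# Worked example of the variable-length integer round trip:
#
#   alias Bitcoinex.Transaction.Utils, as: TxUtils
#   TxUtils.serialize_compact_size_unsigned_int(300)
#   #=> <<0xFD, 0x2C, 0x01>>    # 0xFD marker + uint16 little-endian
#   TxUtils.get_counter(<<0xFD, 0x2C, 0x01, 0xAB>>)
#   #=> {300, <<0xAB>>}         # count plus the remaining bytes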
defmodule Bitcoinex.Transaction.Witness do
@moduledoc """
Witness structure part of an on-chain transaction.
"""
alias Bitcoinex.Transaction.Witness
alias Bitcoinex.Transaction.Utils, as: TxUtils
defstruct [
:txinwitness
]
@doc """
Witness accepts a binary and deserializes it.
"""
@spec witness(binary) :: %Bitcoinex.Transaction.Witness{
:txinwitness => [any()] | 0
}
def witness(witness_bytes) do
{stack_size, witness_bytes} = TxUtils.get_counter(witness_bytes)
{witness, _} =
if stack_size == 0 do
{%Witness{txinwitness: 0}, witness_bytes}
else
{stack_items, witness_bytes} = parse_stack(witness_bytes, [], stack_size)
{%Witness{txinwitness: stack_items}, witness_bytes}
end
witness
end
def parse_witness(0, remaining), do: {nil, remaining}
def parse_witness(counter, witnesses) do
parse(witnesses, [], counter)
end
defp parse(remaining, witnesses, 0), do: {Enum.reverse(witnesses), remaining}
defp parse(remaining, witnesses, count) do
{stack_size, remaining} = TxUtils.get_counter(remaining)
{witness, remaining} =
if stack_size == 0 do
{%Witness{txinwitness: 0}, remaining}
else
{stack_items, remaining} = parse_stack(remaining, [], stack_size)
{%Witness{txinwitness: stack_items}, remaining}
end
parse(remaining, [witness | witnesses], count - 1)
end
defp parse_stack(remaining, stack_items, 0), do: {Enum.reverse(stack_items), remaining}
defp parse_stack(remaining, stack_items, stack_size) do
{item_size, remaining} = TxUtils.get_counter(remaining)
<<stack_item::binary-size(item_size), remaining::binary>> = remaining
parse_stack(
remaining,
[Base.encode16(stack_item, case: :lower) | stack_items],
stack_size - 1
)
end
end
defmodule Bitcoinex.Transaction.In do
@moduledoc """
Transaction Input part of an on-chain transaction.
"""
alias Bitcoinex.Transaction.In
alias Bitcoinex.Transaction.Utils, as: TxUtils
defstruct [
:prev_txid,
:prev_vout,
:script_sig,
:sequence_no
]
def serialize_inputs(inputs) do
serialize_input(inputs, <<""::binary>>)
end
defp serialize_input([], serialized_inputs), do: serialized_inputs
defp serialize_input(inputs, serialized_inputs) do
[input | inputs] = inputs
{:ok, prev_txid} = Base.decode16(input.prev_txid, case: :lower)
prev_txid =
prev_txid
|> :binary.decode_unsigned(:big)
|> :binary.encode_unsigned(:little)
|> Bitcoinex.Utils.pad(32, :trailing)
{:ok, script_sig} = Base.decode16(input.script_sig, case: :lower)
script_len = TxUtils.serialize_compact_size_unsigned_int(byte_size(script_sig))
serialized_input =
prev_txid <>
<<input.prev_vout::little-size(32)>> <>
script_len <> script_sig <> <<input.sequence_no::little-size(32)>>
serialize_input(inputs, <<serialized_inputs::binary>> <> serialized_input)
end
def parse_inputs(counter, inputs) do
parse(inputs, [], counter)
end
defp parse(remaining, inputs, 0), do: {Enum.reverse(inputs), remaining}
defp parse(
<<prev_txid::binary-size(32), prev_vout::little-size(32), remaining::binary>>,
inputs,
count
) do
{script_len, remaining} = TxUtils.get_counter(remaining)
<<script_sig::binary-size(script_len), sequence_no::little-size(32), remaining::binary>> =
remaining
input = %In{
prev_txid:
Base.encode16(<<:binary.decode_unsigned(prev_txid, :big)::little-size(256)>>, case: :lower),
prev_vout: prev_vout,
script_sig: Base.encode16(script_sig, case: :lower),
sequence_no: sequence_no
}
parse(remaining, [input | inputs], count - 1)
end
end
defmodule Bitcoinex.Transaction.Out do
@moduledoc """
Transaction Output part of an on-chain transaction.
"""
alias Bitcoinex.Transaction.Out
alias Bitcoinex.Transaction.Utils, as: TxUtils
defstruct [
:value,
:script_pub_key
]
def serialize_outputs(outputs) do
serialize_output(outputs, <<""::binary>>)
end
defp serialize_output([], serialized_outputs), do: serialized_outputs
defp serialize_output(outputs, serialized_outputs) do
[output | outputs] = outputs
{:ok, script_pub_key} = Base.decode16(output.script_pub_key, case: :lower)
script_len = TxUtils.serialize_compact_size_unsigned_int(byte_size(script_pub_key))
serialized_output = <<output.value::little-size(64)>> <> script_len <> script_pub_key
serialize_output(outputs, <<serialized_outputs::binary>> <> serialized_output)
end
def output(out_bytes) do
<<value::little-size(64), out_bytes::binary>> = out_bytes
{script_len, out_bytes} = TxUtils.get_counter(out_bytes)
<<script_pub_key::binary-size(script_len), _::binary>> = out_bytes
%Out{value: value, script_pub_key: Base.encode16(script_pub_key, case: :lower)}
end
def parse_outputs(counter, outputs) do
parse(outputs, [], counter)
end
defp parse(remaining, outputs, 0), do: {Enum.reverse(outputs), remaining}
defp parse(<<value::little-size(64), remaining::binary>>, outputs, count) do
{script_len, remaining} = TxUtils.get_counter(remaining)
<<script_pub_key::binary-size(script_len), remaining::binary>> = remaining
output = %Out{
value: value,
script_pub_key: Base.encode16(script_pub_key, case: :lower)
}
parse(remaining, [output | outputs], count - 1)
end
end
|
server/bitcoinex/lib/transaction.ex
| 0.839734 | 0.407628 |
transaction.ex
|
starcoder
|
defmodule Retry.DelayStreams do
@moduledoc """
This module provide a set of helper functions that produce delay streams for
use with `retry`.
"""
@doc """
Returns a stream of delays that increase exponentially.
Example
retry with: exponential_backoff do
# ...
end
"""
@spec exponential_backoff(pos_integer(), pos_integer()) :: Enumerable.t()
def exponential_backoff(initial_delay \\ 10, factor \\ 2) do
Stream.unfold(initial_delay, fn last_delay ->
{last_delay, round(last_delay * factor)}
end)
end
@doc """
Returns a stream in which each element of `delays` is randomly adjusted to a number
between 1 and the original delay.
Example
retry with: exponential_backoff() |> jitter() do
# ...
end
"""
@spec jitter(Enumerable.t()) :: Enumerable.t()
def jitter(delays) do
Stream.map(delays, fn delay ->
delay
|> trunc
|> random_uniform
end)
end
@doc """
Returns a stream of delays that increase linearly.
Example
retry with: linear_backoff(50, 2) do
# ...
end
"""
@spec linear_backoff(pos_integer(), pos_integer()) :: Enumerable.t()
def linear_backoff(initial_delay, factor) do
Stream.unfold(0, fn failures ->
next_d = initial_delay + failures * factor
{next_d, failures + 1}
end)
end
@doc """
Returns a constant stream of delays.
Example
retry with: constant_backoff(50) do
# ...
end
"""
@spec constant_backoff(pos_integer()) :: Enumerable.t()
def constant_backoff(delay \\ 100) do
Stream.repeatedly(fn -> delay end)
end
@doc """
Returns a stream in which each element of `delays` is randomly adjusted by no
more than `proportion` of the delay.
Example
retry with: exponential_backoff() |> randomize do
# ...
end
Produces an exponentially increasing delay stream where each delay is randomly
adjusted to be within 10 percent of the original value
"""
@spec randomize(Enumerable.t(), float()) :: Enumerable.t()
def randomize(delays, proportion \\ 0.1) do
Stream.map(delays, fn d ->
max_delta = round(d * proportion)
shift = random_uniform(2 * max_delta) - max_delta
case d + shift do
n when n <= 0 -> 0
n -> n
end
end)
end
@doc """
Returns a stream that is the same as `delays` except that the delays never
exceed `max`. This allows capping the delay between attempts to some max value.
Example
retry with: exponential_backoff() |> cap(10_000) do
# ...
end
Produces an exponentially increasing delay stream until the delay reaches 10
seconds at which point it stops increasing
"""
@spec cap(Enumerable.t(), pos_integer()) :: Enumerable.t()
def cap(delays, max) do
Stream.map(
delays,
fn
d when d <= max -> d
_ -> max
end
)
end
@doc """
Returns a delay stream that is the same as `delays` except it limits the total
life span of the stream to `time_budget`. This calculation takes the execution
time of the block being retried into account.
The execution of the code within the block will not be interrupted, so
the total time of execution may run over the `time_budget` depending on how
long a single try will take.
Optionally, you can specify a minimum delay so the smallest value doesn't go
below the threshold.
Example
retry with: exponential_backoff() |> expiry(1_000) do
# ...
end
Produces a delay stream that ends after 1 second has elapsed since its
creation.
"""
@spec expiry(Enumerable.t(), pos_integer(), pos_integer()) :: Enumerable.t()
def expiry(delays, time_budget, min_delay \\ 100) do
end_t = :os.system_time(:milli_seconds) + time_budget
Stream.transform(delays, :normal, fn preferred_delay, status ->
now_t = :os.system_time(:milli_seconds)
remaining_t = Enum.max([end_t - now_t, min_delay])
cond do
# time expired!
:at_end == status ->
{:halt, status}
# one last try
preferred_delay >= remaining_t or remaining_t == min_delay ->
{[remaining_t], :at_end}
true ->
{[preferred_delay], status}
end
end)
end
defp random_uniform(n) when n <= 0, do: 0
defp random_uniform(n), do: :rand.uniform(n)
end
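# Composition sketch: the streams above are ordinary Enumerables, so a
# policy can be previewed without `retry` (values vary because of the
# random adjustment):
#
#   import Retry.DelayStreams
#   exponential_backoff(10) |> randomize(0.2) |> cap(200) |> Enum.take(6)
#   #=> [10, 22, 38, 85, 157, 200]   # illustrative output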
|
lib/retry/delay_streams.ex
| 0.939872 | 0.652131 |
delay_streams.ex
|
starcoder
|
defmodule ElixirMock do
@moduledoc """
This module contains functions and macros for creating mocks from real modules. It also contains utilities for
verifying that calls were made to functions in the mocks.
"""
# TODO: This module has too many public functions and macros that should really be private
require Logger
@doc false
defmacro inject_monitored_real_functions(real_module, real_functions) do
quote bind_quoted: [real_module: real_module, real_functions: real_functions] do
real_functions
|> Enum.reject(fn {fn_name, _, _} -> fn_name in [:module_info, :__info__, :is_record] end)
|> Enum.map(fn {fn_name, arity, call_through} ->
args = case arity do
0 -> []
_ -> 1..arity |> Enum.map(&(Macro.var(:"arg_#{&1}", __MODULE__)))
end
def unquote(:"#{fn_name}")(unquote_splicing(args)) do
watcher_proc = MockWatcher.get_watcher_name_for(__MODULE__)
GenServer.call(watcher_proc, {:record_call, unquote(fn_name), unquote(args)})
if unquote(call_through) do
unquote(real_module).unquote(fn_name)(unquote_splicing(args))
else
nil
end
end
end)
end
end
@doc """
Creates a mock module from a real module allowing custom definitons for some or all of the functions on the mock.
Mock behaviour can be tuned in a number of different ways depending on your needs. The next few sections enumerate the
tuning options available with examples. We will use the inbuilt `List` module as our base module for these examples.
## Feature: Overriding functions
Creating a mock from the `List` module and overriding its `List.first/1` function.
```
require ElixirMock
import ElixirMock
with_mock(list_mock) = defmock_of List do
def first(_list), do: :mock_response_from_first
end
list_mock.first([1, 2]) == :mock_response_from_first
#=> true
```
## Feature: Delegating calls to the real module with `:call_through`
When a function in a mock defintion returns the atom `:call_through`, ElixirMock will forward all calls made to that
function to the corresponding function on the real module. All calls are still recorded by the mock and are inspectable
with the `assert_called/1` and `refute_called/1` macros.
```
require ElixirMock
import ElixirMock
with_mock(list_mock) = defmock_of List do
def first(_list), do: :call_through
end
list_mock.first([1, 2]) == List.first([1, 2]) == 1
#=> true
```
## Feature: Delegating unspecified function calls to the real module
Sometimes, you only want to stub out specific functions on modules but leave other functions behaving as defined on the
original module. This could be because the overridden functions have side-effects you don't want to deal with in your
tests or because you want to alter the behaviour of just those functions so you can test code that depends on them.
ElixirMock provides the `@call_through_undeclared_functions` mock attribute to help with this. Mocks defined with this
attribute set to `true` will forward calls made to undeclared functions to the real module. Mocks defined without this
attribute simply return `nil` when calls are made to undeclared functions.
All functions calls, whether defined on the mock on not, are still recorded by the mock and are inspectable with the
`assert_called/1` and `refute_called/1` macros.
In the example below, the `List.first/1` function is overridden but the `List.last/1` function retains its original behaviour.
```
require ElixirMock
import ElixirMock
with_mock(list_mock) = defmock_of List do
@call_through_undeclared_functions true
def first(_list), do: :mock_response_from_first
end
list_mock.first([1, 2]) == :mock_response_from_first
list_mock.last([1, 2] == List.last([1, 2]) == 2
#=> true
```
## Info: Mock functions only override function heads of the same arity in the real module.
```
defmodule Real do
def x, do: {:arity, 0}
def x(_arg), do: {:arity, 1}
end
with_mock(mock) = defmock_of Real do
def x, do: :overridden_x
end
mock.x == :overridden_x
#=> true
mock.x(:some_arg) == nil
#=> true
```
## Notes
- An `ElixirMock.MockDefinitionError` is raised if a _public_ function that does not exist in the real module is
declared on the mock.
- Mocks allow private functions to be defined on them. These functions needn't be defined on the real module. In fact,
private functions are not imported from the real module into the mock at all.
- Please refer to the [Getting started guide](getting_started.html) for a broader enumeration of the
characteristics of ElixirMock's mocks.
"""
defmacro defmock_of(real_module, do: nil) do
quote do
ElixirMock.create_mock(unquote(real_module))
end
end
@doc """
Creates a mock module from a real module just like `defmock_of/2` but additionally allows a context map to be injected
into the mock definition.
The context injected in the mock is accessible to the functions within the mock definition via the
`ElixirMock.Mock.context/2` function. It takes in the context key and a mock and looks up the key's value in the
context map passed to mock when it was defined. An `ArgumentError` is thrown if the key doesn't exist in the context
map.
Being able to pass context into mocks is very useful when you need to fix the behaviour of a mock using some values
declared outside the mock's definition.
Example:
```
require ElixirMock
import ElixirMock
fixed_first = 100
with_mock(list_mock) = defmock_of List, %{fixed_first: fixed_first} do
def first(_list), do: ElixirMock.Mock.context(:fixed_first, __MODULE__)
end
list_mock.first([1, 2]) == fixed_first
#=> true
```
For more on the options available within mock definitions, see `defmock_of/2`
"""
defmacro defmock_of(real_module, context \\ {:%{}, [], []}, do: mock_ast) do
call_through_unstubbed_fns = call_through_unstubbed_functions?(mock_ast)
mock_fns = extract_mock_fns(mock_ast)
stubs = Enum.map mock_fns, fn {_fn_type, {name, arity}} -> {name, arity} end
quote do
real_module = unquote(real_module)
mock_fns = unquote(mock_fns)
context = Macro.escape(unquote(context))
stubs = Macro.escape(unquote(stubs))
mock_ast = unquote(Macro.escape(mock_ast))
call_through_unstubbed_fns = unquote(call_through_unstubbed_fns)
mock_module_name = random_module_name()
verify_mock_structure(mock_fns, real_module)
{:ok, _pid} = MockWatcher.start_link(mock_module_name)
contents = mock_contents(real_module, mock_ast, stubs, context, call_through_unstubbed_fns)
Module.create(mock_module_name, contents, Macro.Env.location(__ENV__))
end
end
@doc false
defmacro create_mock(real_module, mock_module_name \\ nil, call_through \\ false) do
quote do
mock_module_name = unquote(mock_module_name) || random_module_name()
{:ok, _pid} = MockWatcher.start_link(mock_module_name)
contents = mock_contents(unquote(real_module), unquote(call_through))
Module.create(mock_module_name, contents, Macro.Env.location(__ENV__))
end
end
def mock_of(real_module, call_through \\ false)
@doc """
Creates mock from real module with all functions on real module defined on the the mock.
By default, all functions on the mock return nil. The behaviour of the module the mock is defined from remains intact.
```
defmodule MyRealModule do
def function_one(_), do: :real_result
end
require ElixirMock
import ElixirMock
my_mock = mock_of MyRealModule
# functions on mock return nil
my_mock.function_one(1) == nil
#=> true
# the real module is still intact
MyRealModule.function_one(1) == :real_result
#=> true
```
### `call_through`
When `:call_through` is provided, functions defined on the mock delegate all calls to the corresponding functions on the
real module.
```
transparent_mock = mock_of MyRealModule, :call_through
transparent_mock.function_one(1) == MyRealModule.function_one(1) == :real_result
#=> true
```
"""
@spec mock_of(module, atom) :: ElixirMock.Mock.mock
def mock_of(real_module, :call_through),
do: mock_of(real_module, true)
def mock_of(real_module, call_through) do
mod_name = random_module_name()
create_mock(real_module, mod_name, call_through)
mod_name
end
@doc """
Verifies that a function on a mock was not called.
```
defmodule MyTest do
use ExUnit.Case
require ElixirMock
import ElixirMock
test "verifies that function on mock was not called" do
mock = mock_of List
mock.first [1, 2]
refute_called mock.first(:some_other_arg) # passes
refute_called mock.first([1, 2]) # fails!
end
end
```
_Note that the function call expressions passed to the macro are not executed. Rather, they are deconstructed to get the function
name and the arguments. The function name and arguments are then used to find the call in the mock's recorded list of calls._
When `refute_called/1` is given a matcher, the macro makes the test pass if the matcher evaluates to false for *all*
recorded calls. See `ElixirMock.Matchers` for more juicy details on Matchers.
```
defmodule MyTest do
use ExUnit.Case
require ElixirMock
import ElixirMock
alias ElixirMock.Matchers
test "verifies that function on mock was not called" do
mock = mock_of List
mock.first [1, 2]
refute_called mock.first(Matchers.any(:number)) # passes
refute_called mock.first(Matchers.any(:list)) # fails!
end
end
```
"""
defmacro refute_called({{:., _, [mock_ast, fn_name]}, _, args} = _function_call_expression) do
quote bind_quoted: [mock_ast: mock_ast, fn_name: fn_name, args: args] do
{mock_module, _} = Code.eval_quoted(mock_ast)
{called, _existing_calls} = mock_module.__elixir_mock__call_exists(fn_name, args)
call_string = build_call_string(fn_name, args)
refute called, "Did not expect #{call_string} to be called but it was."
end
end
@doc """
Verifies that a function on a mock was called.
```
defmodule MyTest do
use ExUnit.Case
require ElixirMock
import ElixirMock
test "verifies that function on mock was called" do
mock = mock_of List
mock.first [1, 2]
assert_called mock.first([1, 2]) # passes
assert_called mock.first(:some_other_arg) # fails!
end
end
```
_Note that the function call expressions passed to the macro are not executed. Rather, they are deconstructed to get the function
name and the arguments. The function name and arguments are then used to find the call in the mock's recorded list of calls._
When `assert_called/1` is given a matcher, the macro makes the test pass if the matcher
evaluates to true for any recorded call. See `ElixirMock.Matchers` for more juicy details on Matchers.
```
defmodule MyTest do
use ExUnit.Case
require ElixirMock
import ElixirMock
alias ElixirMock.Matchers
test "verifies that function on mock was called" do
mock = mock_of List
mock.first [1, 2]
assert_called mock.first(Matchers.any(:list)) # passes
assert_called mock.first(Matchers.any(:atom)) # fails!
end
end
```
"""
defmacro assert_called({{:., _, [mock_ast, fn_name]}, _, args} = _function_call_expression) do
quote bind_quoted: [mock_ast: mock_ast, fn_name: fn_name, args: args] do
{mock_module, _} = Code.eval_quoted(mock_ast)
{called, existing_calls} = mock_module.__elixir_mock__call_exists(fn_name, args)
call_string = build_call_string(fn_name, args)
existing_calls_string = build_calls_string(existing_calls)
failure_message = "Expected #{call_string} to have been called but it was not found among calls:
* #{existing_calls_string}"
assert called, failure_message
end
end
@doc """
A light wrapper that assigns names to mocks created with the `defmock_of/3` and `defmock_of/2` macros.
This is necessary because `defmock_of/3` and `defmock_of/2` return random mock module names wrapped in an ast tuple.
This little macro helps you give the random mock module a human-friendly name.
Example:
```
require ElixirMock
import ElixirMock
with_mock(my_custom_mock) = defmock_of List do end
# you can then use 'my_custom_mock' as a normal module
my_custom_mock.first([1, 2])
#=> nil
```
"""
defmacro with_mock(mock_name) do
quote do
{_, unquote(mock_name), _, _}
end
end
@doc false
def build_call_string(fn_name, args) do
args_string = args |> Enum.map(&(inspect &1)) |> Enum.join(", ")
"#{fn_name}(#{args_string})"
end
@doc false
def build_calls_string([]), do: "#{inspect []}"
@doc false
def build_calls_string(calls) do
calls
|> Enum.map(fn {func, args_list} -> build_call_string(func, args_list) end)
|> Enum.join("\n * ")
end
@doc false
defmacro inject_elixir_mock_utilities(context) do
quote do
@watcher_proc MockWatcher.get_watcher_name_for(__MODULE__)
@mock_context unquote(context)
def __elixir_mock__call_exists(fn_name, args) do
GenServer.call(@watcher_proc, {:call_exists, fn_name, args})
end
def __elixir_mock__reset do
:ok = GenServer.call(@watcher_proc, :clear_calls)
end
def __elixir_mock__list_calls,
do: GenServer.call(@watcher_proc, :list_calls)
def __elixir_mock__mock_context(key) when is_atom(key) do
if Map.has_key?(@mock_context, key) do
Map.get(@mock_context, key)
else
raise ArgumentError, "#{inspect key} not found in mock context #{inspect @mock_context}"
end
end
end
end
@doc false
def verify_mock_structure(mock_fns, real_module) do
real_functions = real_module.module_info(:exports)
invalid_stubs =
mock_fns
|> Enum.filter(fn {fn_type, _} -> fn_type == :def end)
|> Enum.filter(fn {:def, stub} -> not(stub in real_functions) end)
|> Enum.map(fn {:def, stub} -> stub end)
if not Enum.empty?(invalid_stubs) do
ElixirMock.MockDefinitionError.raise_it(invalid_stubs, real_module)
end
end
@doc false
def random_module_name, do: :"#{UUID.uuid4(:hex)}"
@doc false
def mock_contents(real_module, mock_ast, stubs, context, call_through_unstubbed_fns) do
quote do
require ElixirMock
unquote(mock_ast |> inject_elixir_mock_function_utilities |> apply_stub_call_throughs(real_module))
ElixirMock.inject_monitored_real_functions(unquote(real_module),
unquote(real_module).module_info(:exports)
|> Enum.filter(fn {fn_name, arity} -> not({fn_name, arity} in unquote(stubs)) end)
|> Enum.map(fn {fn_name, arity} -> {fn_name, arity, unquote(call_through_unstubbed_fns)} end))
ElixirMock.inject_elixir_mock_utilities(unquote(context))
end
end
@doc false
def mock_contents(real_module, call_through) do
quote do
require ElixirMock
ElixirMock.inject_monitored_real_functions(unquote(real_module),
unquote(real_module).module_info(:exports)
|> Enum.map(fn {fn_name, arity} -> {fn_name, arity, unquote(call_through)} end))
ElixirMock.inject_elixir_mock_utilities(%{})
end
end
defp call_through_unstubbed_functions?({:__block__, _, contents}) do
contents
|> Enum.filter(fn {member_type, _, _} -> member_type == :@ end)
|> Enum.any?(fn {_, _, [{attr_name, _, [attr_val]}]} ->
attr_name == :call_through_undeclared_functions and attr_val == true
end)
end
defp call_through_unstubbed_functions?(_non_block_mock), do: false
defp random_arg_name, do: :"elixir_mock_unignored__#{UUID.uuid4(:hex)}"
defp build_fn_spec(fn_type, fn_name, args) do
arity = case args do
nil -> 0
list -> length(list)
end
{fn_type, {fn_name, arity}}
end
defp extract_mock_fns({:def, _, [{fn_name, _, args}, _]}),
do: [build_fn_spec(:def, fn_name, args)]
defp extract_mock_fns({:defp, _, [{fn_name, _, args}, _]}),
do: [build_fn_spec(:defp, fn_name, args)]
defp extract_mock_fns({:__block__, _, content_ast}) do
content_ast
|> Enum.filter(fn({member_type, _, _}) -> member_type in [:def, :defp] end)
|> Enum.map(fn({fn_type, _, [{fn_name, _, args}, _]}) ->
build_fn_spec(fn_type, fn_name, args)
end)
end
defp cleanup_ignored_args(nil), do: nil
defp cleanup_ignored_args(args) do
Enum.map args, fn
{:_, context, nil} -> {random_arg_name(), context, nil}
used_argument -> used_argument
end
end
defp inject_elixir_mock_utility_lines(lines, fn_name, args) when is_list(lines) do
{:__block__, [], storage_call_lines} = quote do
watcher_proc = MockWatcher.get_watcher_name_for(__MODULE__)
GenServer.call(watcher_proc, {:record_call, unquote(fn_name), unquote(args)})
end
[do: {:__block__, [], storage_call_lines ++ lines}]
end
defp inject_elixir_mock_function_utilities({:def, _, [{fn_name, _, args}, _]} = fn_ast) do
clean_args = cleanup_ignored_args(args)
Macro.postwalk(fn_ast, fn
[do: plain_value] -> inject_elixir_mock_utility_lines([plain_value], fn_name, clean_args)
[do: {:__block__, _, lines}] -> inject_elixir_mock_utility_lines(lines, fn_name, clean_args)
{^fn_name, context, _} -> {fn_name, context, clean_args}
anything_else -> anything_else
end)
end
defp inject_elixir_mock_function_utilities({:__block__, _, _} = block) do
Macro.postwalk block, fn
{:def, _, _} = fn_ast -> inject_elixir_mock_function_utilities(fn_ast)
anything_else -> anything_else
end
end
defp apply_stub_call_throughs({:def, _, [{fn_name, _, args}, _]} = fn_ast, real_module) do
clean_args = if is_nil(args) do [] else args end
call_through_ast = quote do
unquote(real_module).unquote(fn_name)(unquote_splicing(clean_args))
end
Macro.postwalk fn_ast, fn
:call_through -> call_through_ast
anything_else -> anything_else
end
end
defp apply_stub_call_throughs({:defp, _, _} = private_mock_fn_ast, _real_module), do: private_mock_fn_ast
defp apply_stub_call_throughs({:__block__, _, content_ast}, real_module) do
content_ast
|> Enum.filter(fn({member_type, _, _}) -> member_type in [:def, :defp] end)
|> Enum.map(fn(fn_ast) -> apply_stub_call_throughs(fn_ast, real_module) end)
end
end
|
lib/elixir_mock.ex
| 0.61855 | 0.847527 |
elixir_mock.ex
|
starcoder
|
defmodule NatsEx.Connection do
@moduledoc """
A GenServer implementing a connection to Natsd server.
You can set options for Nats in your config.exs. For example
```elixir
config :nats_ex, host: "localhost", port: 4222
```
Supported options are:
- `username`: Username, if auth is required
- `password`: Password, if auth is required
- `host`: Host for Nats server. Defaults to `localhost`
- `port`: Port for Nats server. Defaults to `4222`
For example,
iex> {:ok, conn} = NatsEx.Connection.connection
{:ok, #PID<0.153.0>}
iex> NatsEx.Connection.sub("foo")
:ok
iex> NatsEx.Connection.pub("foo", "hey")
:ok
iex> flush()
{:nats_ex, :msg, "foo", nil, "hey"} # See `NatsEx.Connection.sub/2` for more details about message format
"""
require Logger
import NatsEx.Protocol
use GenServer, restart: :transient
alias NatsEx.{ProcessGroup, SidCounter}
@doc false
def start_link(init_arg) do
GenServer.start_link(__MODULE__, init_arg, [])
end
@doc """
Opens a connection
"""
@spec connection() :: DynamicSupervisor.on_start_child()
def connection() do
DynamicSupervisor.start_child(NatsEx.ConnectionSup, __MODULE__)
end
@doc """
For publishing.
`reply_to` is optional. Returns `:ok`.
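
For example (illustrative; assumes `conn` obtained from `connection/0`):

    NatsEx.Connection.pub(conn, "foo", "hello", "reply.subject")
    #=> :ok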
"""
@spec pub(pid, String.t(), String.t(), String.t() | nil) :: :ok
def pub(conn, subject, payload, reply_to \\ nil) do
GenServer.call(conn, {:pub, subject, reply_to, payload})
end
@doc """
For subscribing to any subject
`queue_group` is optional
When a new message arrives, the subscribed process receives a
message of the form `{:nats_ex, :msg, subject, reply_to_subject, payload}`.
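
For example (illustrative; assumes `conn` obtained from `connection/0`):

    :ok = NatsEx.Connection.sub(conn, "updates")
    # a message published to "updates" later arrives in this process's mailbox as:
    # {:nats_ex, :msg, "updates", nil, "payload"}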
"""
@spec sub(pid, String.t(), String.t() | nil) :: :ok
def sub(conn, subject, queue_group \\ nil) do
sid = SidCounter.inc()
:ok = GenServer.call(conn, {:sub, self(), subject, sid, queue_group})
Registry.register(:sids, {self(), conn, subject}, sid)
:ok
end
defp reg_unpub_gproc(sid, num_of_msgs) do
:ets.insert(:unsub_ets, {{:unsub, sid}, num_of_msgs})
end
@doc """
For unsubscribing from a certain subject.
`num_of_msgs` is the max number of messages received, after which it automatically unsubscribes.
It is optional.
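
For example (illustrative; assumes this process earlier called `sub(conn, "updates")`):

    # deliver five more messages, then unsubscribe automatically
    :ok = NatsEx.Connection.unsub(conn, "updates", 5)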
"""
@spec unsub(pid, String.t(), integer | nil) :: :ok
def unsub(conn, subject, num_of_msgs \\ nil) do
[{_, sid}] = Registry.lookup(:sids, {self(), conn, subject})
if num_of_msgs == nil do
ProcessGroup.leave(sid, self())
reg_unpub_gproc(sid, 0)
else
# Storing number of messages, after which it has to unsubscribe
reg_unpub_gproc(sid, num_of_msgs)
end
GenServer.cast(conn, {:unsub, self(), sid, num_of_msgs})
end
# Server callbacks
@doc false
@impl GenServer
def init(_init_arg) do
{host, port} = get_host_port()
{:ok, {:hostent, _, _, _, _, [ip_addr | _]}} =
:inet.gethostbyname(host |> String.to_charlist())
{:ok, socket} = :gen_tcp.connect(ip_addr, port, active: false, mode: :binary, packet: :line)
{:ok, info_mesg} = :gen_tcp.recv(socket, 0)
# Decode info
info =
info_mesg
|> parse_info_mesg
|> String.trim_trailing()
|> Jason.decode!()
# Build connect message
connect_mesg =
info
|> require_auth?
|> build_connect_message
:gen_tcp.send(socket, connect_mesg)
:inet.setopts(socket, active: :once)
{:ok, %{socket: socket, info: info}}
end
@spec require_auth?(map) :: boolean
def require_auth?(info) do
info
|> Map.get("auth_required", false)
end
@spec get_host_port() :: {String.t(), integer}
defp get_host_port do
host = Application.get_env(:nats_ex, :host) || "localhost"
port = Application.get_env(:nats_ex, :port) || 4222
{host, port}
end
@spec get_auth_credentials() :: {String.t() | nil, String.t() | nil}
defp get_auth_credentials() do
username = Application.get_env(:nats_ex, :username)
password = Application.get_env(:nats_ex, :password)
{username, password}
end
@spec build_connect_message(boolean) :: String.t()
defp build_connect_message(true) do
get_auth_credentials()
|> case do
{username, password} when username != nil and password != nil ->
msg =
%{
verbose: false,
pedantic: false,
ssl_required: false,
lang: "elixir",
version: "0.1.0",
user: username,
pass: password
}
|> Jason.encode!()
"CONNECT #{msg}\r\n"
_ ->
raise("Authentication is required. You have to set username and password")
end
end
defp build_connect_message(false) do
~s(CONNECT {"verbose": false, "pedantic": false, "ssl_required": false, "lang": "elixir"}\r\n)
end
@doc false
# Handler for publish call
@impl GenServer
def handle_call({:pub, subject, reply_to, payload}, _from, %{socket: socket} = state) do
# Makes a publish string
pub_message = make_pub_message(subject, reply_to, payload)
:gen_tcp.send(socket, pub_message)
{:reply, :ok, state}
end
@doc false
def handle_call({:sub, from, subject, sid, queue_group}, _from, %{socket: socket} = state) do
sub_message = make_sub_message(subject, sid, queue_group)
# Creating since pg2 doesn't automatically create the process group
ProcessGroup.create(sid)
ProcessGroup.create({:conn, self()})
# Join a process group named with `sid`
ProcessGroup.join(sid, from)
# For maintaining subscribed processes for this connections
ProcessGroup.join({:conn, self()}, from)
:gen_tcp.send(socket, sub_message)
{:reply, :ok, state}
end
@doc false
@impl GenServer
def handle_cast({:unsub, _from, sid, num_of_msgs}, %{socket: socket} = state) do
unsub_mesg = make_unsub_message(sid, num_of_msgs)
:gen_tcp.send(socket, unsub_mesg)
{:noreply, state}
end
@doc false
# Handle tcp messages
@impl GenServer
def handle_info({:tcp, _, "MSG " <> msg}, %{socket: socket} = state) do
{subject, rep_to, sid, bytes} = parse_message(msg)
:inet.setopts(socket, packet: :raw)
# Adding 2 for "/r/n"
{:ok, payload} = :gen_tcp.recv(socket, String.to_integer(bytes) + 2)
payload = parse_payload(payload)
:unsub_ets
|> :ets.lookup({:unsub, String.to_integer(sid)})
|> send_subscriber_message(sid, subject, rep_to, payload)
:inet.setopts(socket, packet: :line)
:inet.setopts(socket, active: :once)
{:noreply, state}
end
@doc false
def handle_info({:tcp, _, "-ERR " <> error}, %{socket: socket} = state) do
Logger.warn("Received Error from Nats Server: #{error}")
:inet.setopts(socket, active: :once)
{:noreply, state}
end
@doc false
def handle_info({:tcp, socket, "PING\r\n"}, state) do
:gen_tcp.send(socket, "PONG\r\n")
:inet.setopts(socket, active: :once)
{:noreply, state}
end
@doc false
def handle_info({:tcp_closed, _}, state) do
Logger.warn("Nats Connection closed by the server")
ProcessGroup.create({:conn, self()})
{:conn, self()}
|> ProcessGroup.get_local_members()
|> Enum.each(fn member ->
send(member, {:nats_ex, :conn_down})
end)
{:stop, :normal, state}
end
@doc """
Sends messages to subscribers.
Checks if the process is supposed to unsubscribe after the message is received.
"""
def send_subscriber_message([{_, 1}], _sid, _subject, _rep_to, _payload) do
:ok
end
def send_subscriber_message([{_, _num_of_msgs}], sid, subject, rep_to, payload) do
sid_int = String.to_integer(sid)
# Decreasing the number of messages until the process has to unsub
:ets.update_counter(:unsub_ets, {:unsub, sid_int}, -1)
sid
|> String.to_integer()
|> ProcessGroup.get_local_members()
|> Enum.each(fn member ->
send(member, {:nats_ex, :msg, subject, rep_to, payload})
end)
end
# When this function is called, it means that the subscriber didn't
# send a unsub request.
def send_subscriber_message([], sid, subject, rep_to, payload) do
sid
|> String.to_integer()
|> ProcessGroup.get_local_members()
|> Enum.each(fn member ->
send(member, {:nats_ex, :msg, subject, rep_to, payload})
end)
end
end
|
lib/nats_ex/connection.ex
| 0.869742 | 0.592401 |
connection.ex
|
starcoder
|
defmodule IdleAnimations.Ant do
use GenServer, restart: :temporary
@moduledoc "A langton's ant idle animation"
@fps 20
@max_steps 10_000
@min_changed_between_interesting_inspection 5
@steps_between_interesting_inspection 16
defmodule State do
use TypedStruct
typedstruct enforce: true do
field(:id, String.t())
field(:ant_direction, :up | :down | :left | :right, default: :up)
field(:ant_position, {non_neg_integer(), non_neg_integer()}, default: Screen.centre_pos())
field(:state_matrix, Matrix.t(Pixel.t()))
field(:last_state_matrix, Matrix.t(Pixel.t()))
field(:ruleset, Ant.RuleSet.t())
field(:fading_out, boolean(), default: false)
field(:fader, Fader.t(), default: Fader.new(20))
field(:steps, non_neg_integer(), default: 0)
end
end
defmodule RuleSet do
use TypedStruct
typedstruct enforce: true do
field(:rule_map, %{required(Pixel.t()) => (Ant.State.t() -> Ant.State.t())})
field(:default_state, Pixel.t())
end
end
def start_link(options) do
{screen_x, screen_y} = Screen.dims()
ruleset = get_ruleset()
state_matrix = Matrix.of_dims(screen_x, screen_y, ruleset.default_state)
state = %State{
id: Keyword.get(options, :game_id),
state_matrix: state_matrix,
last_state_matrix: state_matrix,
ruleset: ruleset
}
GenServer.start_link(__MODULE__, state, options)
end
@impl true
def init(state) do
tick_request()
{:ok, state}
end
@impl true
def handle_info(:tick, state) do
render(state)
update_fn = state.ruleset.rule_map[Matrix.at(state.state_matrix, state.ant_position)]
state =
%State{state | steps: state.steps + 1, fader: Fader.step(state.fader)}
|> update_fn.()
|> is_interesting()
if state.steps < @max_steps and not (state.fading_out and Fader.done(state.fader)) do
tick_request()
{:noreply, state}
else
{:stop, :normal, state}
end
end
@impl true
def handle_cast(:terminate, state) do
{:noreply, start_fading_out(state)}
end
@impl true
def terminate(_reason, state) do
Coordinator.notify_idle_animation_terminated(state.id)
end
defp start_fading_out(%State{} = state) do
%State{state | fading_out: true, fader: %Fader{state.fader | direction: :dec}}
end
defp get_ruleset do
pattern = Enum.random([:random, :original, :random, :random, :random, :random])
case pattern do
:original ->
%RuleSet{
default_state: Pixel.empty(),
rule_map: %{
Pixel.empty() => fn s ->
s |> rotate(:left) |> set_colour(Pixel.white()) |> step(1)
end,
Pixel.white() => fn s ->
s |> rotate(:right) |> set_colour(Pixel.empty()) |> step(1)
end
}
}
# :square_thing_0 ->
# %RuleSet{
# default_state: Pixel.empty(),
# rule_map: %{
# Pixel.empty() => fn s ->
# s |> rotate(:right) |> rotate(:right) |> set_colour(Pixel.white()) |> step(3)
# end,
# Pixel.white() => fn s -> s |> rotate(:right) |> set_colour(Pixel.red()) |> step(2) end,
# Pixel.red() => fn s ->
# s |> rotate(:right) |> rotate(:right) |> set_colour(Pixel.blue()) |> step(3)
# end,
# Pixel.blue() => fn s -> s |> rotate(:left) |> set_colour(Pixel.empty()) |> step(1) end
# }
# }
:random ->
random_ruleset()
end
end
defp is_interesting(%State{} = state) do
if rem(state.steps, @steps_between_interesting_inspection) == 0 do
number_changed = Matrix.diff(state.last_state_matrix, state.state_matrix) |> length()
state = %State{state | last_state_matrix: state.state_matrix}
if number_changed < @min_changed_between_interesting_inspection,
do: start_fading_out(state),
else: state
else
state
end
end
@possible_random_colours [
Pixel.white(),
Pixel.red(),
Pixel.blue(),
Pixel.green(),
Pixel.magenta(),
Pixel.cyan()
]
@possible_colour_presets [
Enum.map(
[
"#55CDFC",
"#FFFFFF",
"#F7A8B8"
],
&Pixel.from_hex/1
),
Enum.map(
[
"#D60270",
"#9B4F96",
"#0038A8"
],
&Pixel.from_hex/1
),
Enum.map(
[
"#FF1C8D",
"#FFD700",
"#1AB3FF"
],
&Pixel.from_hex/1
),
Enum.map(
[
"#FCF431",
"#FCFCFC",
"#9D59D2",
"#282828"
],
&Pixel.from_hex/1
),
Enum.map(
[
"#ff0018",
"#ffa52c",
"#ffff41",
"#008018",
"#86007d"
],
&Pixel.from_hex/1
),
Enum.map(
[
"#D62900",
"#FF9B55",
"#FFFFFF",
"#D461A6",
"#A50062"
],
&Pixel.from_hex/1
),
Enum.map(
[
"#22639B",
"#06CDE0",
"#0DB3CF",
"#1498BE",
"#1B7EAC"
],
&Pixel.from_hex/1
)
]
defp generate_rotate() do
n = Enum.random(0..3)
ExclusiveRange.erange(0..n)
|> Enum.map(fn _ -> fn s -> rotate(s, :right) end end)
|> Enum.reduce(
fn s -> s end,
fn f, r ->
fn s -> s |> f.() |> r.() end
end
)
end
defp generate_rule(states) do
states
|> Enum.map(fn col ->
rotate_f = generate_rotate()
# col = Enum.random(states)
steps = Enum.random(0..2)
fn s -> s |> rotate_f.() |> set_colour(col) |> step(steps) end
end)
|> Enum.reduce(fn f, r -> fn s -> s |> f.() |> r.() end end)
end
defp random_ruleset do
preset_or_random = Enum.random([:preset, :preset, :random])
# preset_or_random = :preset
states =
case preset_or_random do
:preset ->
[Pixel.empty() | Enum.random(@possible_colour_presets)]
:random ->
num_rules = Enum.random(2..length(@possible_random_colours))
[Pixel.empty() | Enum.take_random(@possible_random_colours, num_rules)]
end
rules =
states
|> Enum.map(&{&1, generate_rule(states)})
|> Enum.into(%{})
%RuleSet{
default_state: Pixel.empty(),
rule_map: rules
}
end
defp rotate(state, dir) do
dir =
case {dir, state.ant_direction} do
{:left, :up} -> :left
{:left, :right} -> :up
{:left, :down} -> :right
{:left, :left} -> :down
{:right, :up} -> :right
{:right, :right} -> :down
{:right, :down} -> :left
{:right, :left} -> :up
end
%State{state | ant_direction: dir}
end
defp set_colour(state, colour) do
%State{state | state_matrix: Matrix.draw_at(state.state_matrix, state.ant_position, colour)}
end
defp step(state, amount) do
{screen_x, screen_y} = Screen.dims()
{dx, dy} =
case state.ant_direction do
:up -> {0, -1 * amount}
:down -> {0, 1 * amount}
:left -> {-1 * amount, 0}
:right -> {1 * amount, 0}
end
{x, y} = state.ant_position
new_pos = {Integer.mod(x + dx, screen_x), Integer.mod(y + dy, screen_y)}
%State{state | ant_position: new_pos}
end
defp tick_request do
Process.send_after(self(), :tick, Integer.floor_div(1000, @fps))
end
defp render(state) do
frame_vals =
Matrix.reduce(state.state_matrix, [], fn x, y, s, acc ->
[{x, y, {s.r, s.g, s.b}} | acc]
end)
frame =
Screen.blank()
|> NativeMatrix.set_from_list(frame_vals)
|> NativeMatrix.mul(Fader.percentage(state.fader))
Screen.update_frame(frame)
end
end
|
web/lib/infolab_light_games/idle_animations/langtons_ant.ex
| 0.777596 | 0.459015 |
langtons_ant.ex
|
starcoder
|
defmodule Scidata.IMDBReviews do
@moduledoc """
Module for downloading the [Large Movie Review Dataset](https://ai.stanford.edu/~amaas/data/sentiment/).
"""
@base_url "http://ai.stanford.edu/~amaas/data/sentiment/"
@dataset_file "aclImdb_v1.tar.gz"
alias Scidata.Utils
@type train_sentiment :: :pos | :neg | :unsup
@type test_sentiment :: :pos | :neg
@doc """
Downloads the IMDB reviews training dataset or fetches it locally.
`example_types` specifies which examples in the dataset should be returned
according to each example's label: `:pos` for positive examples, `:neg` for
negative examples, and `:unsup` for unlabeled examples. If no `example_types`
are provided, `:pos` and `:neg` examples are fetched.
## Options
* `:base_url` - Dataset base URL.
Defaults to `"http://ai.stanford.edu/~amaas/data/sentiment/"`
* `:dataset_file` - Dataset filename.
Defaults to `"aclImdb_v1.tar.gz"`
* `:cache_dir` - Cache directory.
Defaults to `System.tmp_dir!()`
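
## Examples

    # illustrative; the first call downloads and caches the dataset archive
    %{review: reviews, sentiment: sentiments} =
      Scidata.IMDBReviews.download(example_types: [:pos, :neg])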
"""
@spec download(example_types: [train_sentiment]) :: %{
review: [binary(), ...],
sentiment: [1 | 0 | nil]
}
def download(opts \\ []), do: download_dataset(:train, opts)
@doc """
Downloads the IMDB reviews test dataset or fetches it locally.
`example_types` is the same as in `download/1`, but `:unsup` is
unavailable because all unlabeled examples are in the training set.
Accepts the same options as `download/1`.
"""
@spec download_test(example_types: [test_sentiment]) :: %{
review: [binary(), ...],
sentiment: [1 | 0]
}
def download_test(opts \\ []), do: download_dataset(:test, opts)
defp download_dataset(dataset_type, opts) do
example_types = opts[:example_types] || [:pos, :neg]
base_url = opts[:base_url] || @base_url
dataset_file = opts[:dataset_file] || @dataset_file
files = Utils.get!(base_url <> dataset_file, opts).body
regex = ~r"#{dataset_type}/(#{Enum.join(example_types, "|")})/"
{inputs, labels} =
for {fname, contents} <- files,
List.to_string(fname) =~ regex,
reduce: {[], []} do
{inputs, labels} ->
{[contents | inputs], [get_label(fname) | labels]}
end
%{review: inputs, sentiment: labels}
end
defp get_label(fname) do
fname = List.to_string(fname)
cond do
fname =~ "pos" -> 1
fname =~ "neg" -> 0
fname =~ "unsup" -> nil
end
end
end
|
lib/scidata/imdb_reviews.ex
| 0.870267 | 0.799286 |
imdb_reviews.ex
|
starcoder
|
defmodule VintageNetMobile.Modem.TelitLE910 do
@behaviour VintageNetMobile.Modem
@moduledoc """
Telit LE910 support
```elixir
VintageNet.configure(
"ppp0",
%{
type: VintageNetMobile,
vintage_net_mobile: %{
modem: VintageNetMobile.Modem.TelitLE910,
service_providers: [%{apn: "wireless.twilio.com"}]
}
}
)
```
If multiple service providers are configured, this implementation only
attempts to connect to the first one.
Example of supported properties
```elixir
iex> VintageNet.get_by_prefix ["interface", "ppp0"]
[
{["interface", "ppp0", "addresses"],
[
%{
address: {100, 79, 181, 147},
family: :inet,
netmask: {255, 255, 255, 255},
prefix_length: 32,
scope: :universe
}
]},
{["interface", "ppp0", "config"],
%{
type: VintageNetMobile,
vintage_net_mobile: %{
modem: VintageNetMobile.Modem.TelitLE910,
service_providers: [%{apn: "super"}, %{apn: "wireless.twilio.com"}]
}
}},
{["interface", "ppp0", "connection"], :internet},
{["interface", "ppp0", "hw_path"], "/devices/virtual"},
{["interface", "ppp0", "lower_up"], true},
{["interface", "ppp0", "mobile", "cid"], 123098825},
{["interface", "ppp0", "mobile", "lac"], 32773},
{["interface", "ppp0", "mobile", "signal_4bars"], 3},
{["interface", "ppp0", "mobile", "signal_asu"], 19},
{["interface", "ppp0", "mobile", "signal_dbm"], -75},
{["interface", "ppp0", "present"], true},
{["interface", "ppp0", "state"], :configured},
{["interface", "ppp0", "type"], VintageNetMobile}
]
```
"""
alias VintageNet.Interface.RawConfig
alias VintageNetMobile.{Chatscript, ExChat, PPPDConfig, SignalMonitor}
@impl VintageNetMobile.Modem
def normalize(config) do
config
end
@impl VintageNetMobile.Modem
def add_raw_config(raw_config, %{vintage_net_mobile: mobile} = _config, opts) do
ifname = raw_config.ifname
files = [{Chatscript.path(ifname, opts), Chatscript.default(mobile)}]
at_tty = Map.get(mobile, :at_tty, "ttyUSB2")
ppp_tty = Map.get(mobile, :ppp_tty, "ttyUSB3")
child_specs = [
{ExChat, [tty: at_tty, speed: 9600]},
{SignalMonitor, [ifname: ifname, tty: at_tty]}
]
%RawConfig{
raw_config
| files: files,
child_specs: child_specs
}
|> PPPDConfig.add_child_spec(ppp_tty, 9600, opts)
end
end
|
lib/vintage_net_mobile/modem/telit_LE910.ex
| 0.787278 | 0.57821 |
telit_LE910.ex
|
starcoder
|
defmodule ResultEx do
@moduledoc """
ResultEx is a module for handling functions returning a `t:ResultEx.t/0`.
This module is inspired by the F# Result module, and [Railway Oriented Programming](https://fsharpforfunandprofit.com/rop/) as explained by Scott Wlaschin.
A result can be either the tuple {:ok, term} where term will be the expected return value of a function,
or the tuple {:error, term} where term will be an explanation of what went wrong while executing a function.
Using this module, it is possible to combine functions that return a `t:ResultEx.t/0` with functions that take the value contained by the ok variant.
If one of the functions returns an error variant, subsequent functions expecting an ok result are prevented from executing.
Functions can also be connected that will only execute in the case of an error.
## Examples
iex> defmodule ResultExExample do
...>
...> def divide(0, _), do: {:error, :zero_division_exception}
...> def divide(0.0, _), do: {:error, :zero_division_exception}
...> def divide(x, y), do: ResultEx.return(x / y)
...> def subtract(x, y), do: ResultEx.return(x - y)
...>
...> end
...>
...> ResultExExample.divide(4, 2)
...> |> ResultEx.bind(fn x -> ResultExExample.subtract(x, 2) end)
{:ok, 0.0}
iex> ResultExExample.divide(4, 2)
...> |> ResultEx.bind(fn x -> ResultExExample.subtract(x, 2) end)
...> |> ResultEx.bind(fn x -> ResultExExample.divide(x, 2) end)
...> |> ResultEx.bind(fn x -> ResultExExample.subtract(x, 2) end)
{:error, :zero_division_exception}
iex> ResultExExample.divide(0, 2)
...> |> ResultEx.or_else(2)
2
iex> ResultExExample.divide(0, 2)
...> |> ResultEx.or_else_with(fn _err -> {:ok, 0} end)
{:ok, 0}
"""
@type t ::
{:ok, term}
| {:error, term}
@doc """
Elevates a value to a `t:ResultEx.t/0` type.
## Examples
iex> ResultEx.return(1)
{:ok, 1}
"""
@spec return(term) :: t
def return(value), do: {:ok, value}
@doc """
Runs a function against the `t:ResultEx.t/0`s value.
If the `t:ResultEx.t/0` is an error, the function will not be executed.
## Examples
iex> result = {:ok, 1}
...> ResultEx.map(result, &(&1 + 1))
{:ok, 2}
iex> result = {:error, "Oops"}
...> ResultEx.map(result, &(&1 + 1))
{:error, "Oops"}
"""
@spec map(t, (term -> term)) :: t
def map({:ok, value}, fun) do
{:ok, fun.(value)}
end
def map(result, _), do: result
@doc """
Partially applies `ResultEx.map/2` with the passed function.
"""
@spec map((term -> term)) :: (t -> t)
def map(fun) do
fn result -> map(result, fun) end
end
@doc """
Executes or partially executes the function given as value of the first `t:ResultEx.t/0`,
and applies it with the value of the second `t:ResultEx.t/0`.
If the function has an arity greater than 1, the returned `t:ResultEx.t/0` value will be the function partially applied.
(The function name is 'appl' rather than 'apply' to prevent import conflicts with 'Kernel.apply')
## Examples
iex> value_result = {:ok, 1}
...> function_result = {:ok, fn value -> value + 1 end}
...> ResultEx.appl(function_result, value_result)
{:ok, 2}
iex> {:ok, fn value1, value2, value3 -> value1 + value2 + value3 end}
...> |> ResultEx.appl({:ok, 1})
...> |> ResultEx.appl({:ok, 2})
...> |> ResultEx.appl({:ok, 3})
{:ok, 6}
iex> {:error, "no such function"}
...> |> ResultEx.appl({:ok, 1})
...> |> ResultEx.appl({:ok, 1})
...> |> ResultEx.appl({:ok, 1})
{:error, "no such function"}
iex> {:ok, fn value1, value2, value3 -> value1 + value2 + value3 end}
...> |> ResultEx.appl({:ok, 1})
...> |> ResultEx.appl({:ok, 1})
...> |> ResultEx.appl({:error, "no such value"})
{:error, "no such value"}
"""
@spec appl(t, t) :: t
def appl({:ok, fun}, {:ok, value}) do
case :erlang.fun_info(fun, :arity) do
{_, 0} ->
{:error, "ResultEx.appl: arity error"}
_ ->
{:ok, curry(fun, value)}
end
end
def appl({:error, _} = error, _), do: error
def appl(_, {:error, _} = error), do: error
@doc """
Applies a function with the value of the `t:ResultEx.t/0`.
The passed function is expected to return a `t:ResultEx.t/0`.
This can be useful for chaining functions together that elevate values into `t:ResultEx.t/0`s.
## Examples
iex> divide = fn
...> 0 -> {:error, "Zero division"}
...> n -> {:ok, n / 2}
...> end
...> divide.(4)
...> |> ResultEx.bind(divide)
{:ok, 1.0}
iex> divide = fn
...> 0 -> {:error, "Zero division"}
...> n -> {:ok, n / 2}
...> end
...> divide.(0)
...> |> ResultEx.bind(divide)
{:error, "Zero division"}
"""
@spec bind(t, (term -> t)) :: t
def bind({:ok, value}, fun) do
fun.(value)
end
def bind(result, _), do: result
@doc """
Partially applies `ResultEx.bind/2` with the passed function.
"""
@spec bind((term -> t)) :: (t -> t)
def bind(fun) do
fn result -> bind(result, fun) end
end
@doc """
Unwraps the `t:ResultEx.t/0` to return its value.
Throws an error if the `t:ResultEx.t/0` is an error.
## Examples
iex> ResultEx.return(5)
...> |> ResultEx.unwrap!()
5
"""
@spec unwrap!(t) :: term
def unwrap!({:ok, value}), do: value
def unwrap!({:error, error}), do: throw(error)
@doc """
Unwraps the `t:ResultEx.t/0` to return its value.
The second argument will be a specific error message to throw when the `t:ResultEx.t/0` is an Error.
## Examples
iex> ResultEx.return(5)
...> |> ResultEx.expect!("The value was not what was expected")
5
"""
@spec expect!(t, String.t()) :: term
def expect!({:ok, value}, _), do: value
def expect!(_, message), do: throw(message)
@doc """
Unwraps the `t:ResultEx.t/0` to return its value.
If the `t:ResultEx.t/0` is an error, it will return the default value passed as second argument instead.
## Examples
iex> ResultEx.return(5)
...> |> ResultEx.or_else(4)
5
iex> {:error, "Oops"}
...> |> ResultEx.or_else(4)
4
"""
@spec or_else(t, term) :: term
def or_else({:ok, value}, _), do: value
def or_else(_, default), do: default
@doc """
Unwraps the `t:ResultEx.t/0` to return its value.
If the `t:ResultEx.t/0` is an error, the given function will be applied with the unwrapped error instead.
## Examples
iex> ResultEx.return(5)
...> |> ResultEx.or_else_with(fn err -> IO.inspect(err) end)
5
iex> {:error, "Oops"}
...> |> ResultEx.or_else_with(fn err -> err <> "!" end)
"Oops!"
"""
@spec or_else_with(t, (term -> term)) :: term
def or_else_with({:ok, value}, _), do: value
def or_else_with({:error, error}, fun), do: fun.(error)
@doc """
Flatten nested `t:ResultEx.t/0`s into a single `t:ResultEx.t/0`.
## Examples
iex> ResultEx.return(5)
...> |> ResultEx.return()
...> |> ResultEx.return()
...> |> ResultEx.flatten()
{:ok, 5}
iex> {:ok, {:ok, {:error, "Oops"}}}
...> |> ResultEx.flatten()
{:error, "Oops"}
"""
@spec flatten(t) :: t
def flatten({:ok, {:ok, _} = inner_result}) do
flatten(inner_result)
end
def flatten({:ok, {:error, _} = error}), do: error
def flatten({:ok, _} = result), do: result
def flatten({:error, _} = error), do: error
@doc """
Flattens an `t:Enum.t/0` of `t:ResultEx.t/0`s into a `t:ResultEx.t/0` of enumerables.
## Examples
iex> [{:ok, 1}, {:ok, 2}, {:ok, 3}]
...> |> ResultEx.flatten_enum()
{:ok, [1, 2, 3]}
iex> [{:ok, 1}, {:error, "Oops"}, {:ok, 3}]
...> |> ResultEx.flatten_enum()
{:error, "Oops"}
iex> %{a: {:ok, 1}, b: {:ok, 2}, c: {:ok, 3}}
...> |> ResultEx.flatten_enum()
{:ok, %{a: 1, b: 2, c: 3}}
iex> %{a: {:ok, 1}, b: {:error, "Oops"}, c: {:ok, 3}}
...> |> ResultEx.flatten_enum()
{:error, "Oops"}
"""
@spec flatten_enum(Enum.t()) :: t
def flatten_enum(%{} = enum) do
Enum.reduce(enum, {:ok, %{}}, fn
{key, {:ok, value}}, {:ok, result} ->
Map.put(result, key, value)
|> return
_, {:error, _} = error ->
error
{_, {:error, _} = error}, _ ->
error
end)
end
def flatten_enum(enum) when is_list(enum) do
Enum.reduce(enum, {:ok, []}, fn
{:ok, value}, {:ok, result} ->
{:ok, [value | result]}
_, {:error, _} = error ->
error
{:error, _} = error, _ ->
error
end)
|> map(&Enum.reverse/1)
end
def flatten_enum(_), do: {:error, "ResultEx.flatten_enum Unknown Type"}
@doc """
Converts the `t:ResultEx.t/0` to an Option.
An Option is a {:some, term} tuple pair, or the :none atom.
## Examples
iex> ResultEx.return(5)
...> |> ResultEx.to_option()
{:some, 5}
iex> {:error, "Oops"}
...> |> ResultEx.to_option()
:none
"""
@spec to_option(t) :: {:some, term} | :none
def to_option({:ok, value}) do
{:some, value}
end
def to_option({:error, _}), do: :none
@spec curry(fun, term) :: term
defp curry(fun, arg1), do: apply_curry(fun, [arg1])
@spec apply_curry(fun, [term]) :: term
defp apply_curry(fun, args) do
{_, arity} = :erlang.fun_info(fun, :arity)
if arity == length(args) do
apply(fun, Enum.reverse(args))
else
fn arg -> apply_curry(fun, [arg | args]) end
end
end
end
|
lib/result_ex.ex
| 0.909987 | 0.755141 |
result_ex.ex
|
starcoder
|
defmodule LaFamiglia.Mechanics.Buildings do
@moduledoc """
All build times are specified in microseconds.
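
For example (an illustrative calculation, not a doctest): `:building_1` at
level 2 takes `(200 + 500 * 2 + :math.pow(2, 1.8)) |> round()` = 1203 seconds,
i.e. `1203 * 1_000_000` microseconds.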
"""
alias LaFamiglia.Building
def buildings do
[
%Building{
id: 1,
key: :building_1,
build_time: fn level ->
((200 + 500 * level + :math.pow(level, 1.8)) |> round) * 1_000_000
end,
costs: fn level ->
%{
resource_1: level * 1 + 1,
resource_2: level * 1 + 1,
resource_3: level * 1 + 1
}
end,
maxlevel: 10,
defense: fn _level -> 10 end,
points: fn level -> :math.pow(level, 1.5) end
},
%Building{
id: 2,
key: :building_2,
build_time: fn level ->
((200 + 1000 * level + :math.pow(level, 2.6)) |> round) * 1_000_000
end,
costs: fn level ->
%{
resource_1: level * 1 + 1,
resource_2: level * 1 + 1,
resource_3: level * 1 + 1
}
end,
maxlevel: 16,
defense: fn _level -> 0 end,
points: fn level -> :math.pow(level, 1.5) end
},
%Building{
id: 3,
key: :building_3,
build_time: fn level -> ((100 + :math.pow(level, 1.8)) |> round) * 1_000_000 end,
costs: fn level ->
%{
resource_1: (20 + :math.pow(level, 2)) |> round,
resource_2: (20 + :math.pow(level, 2)) |> round,
resource_3: (20 + :math.pow(level, 2)) |> round
}
end,
maxlevel: 24,
defense: fn _level -> 0 end,
points: fn level -> :math.pow(level, 2) end
},
%Building{
id: 4,
key: :building_4,
build_time: fn level -> ((100 + :math.pow(level, 1.8)) |> round) * 1_000_000 end,
costs: fn level ->
%{
resource_1: (20 + :math.pow(level, 2)) |> round,
resource_2: (20 + :math.pow(level, 2)) |> round,
resource_3: (20 + :math.pow(level, 2)) |> round
}
end,
maxlevel: 24,
defense: fn _level -> 0 end,
points: fn level -> :math.pow(level, 2) end
},
%Building{
id: 5,
key: :building_5,
build_time: fn level -> ((100 + :math.pow(level, 1.8)) |> round) * 1_000_000 end,
costs: fn level ->
%{
resource_1: (20 + :math.pow(level, 2)) |> round,
resource_2: (20 + :math.pow(level, 2)) |> round,
resource_3: (20 + :math.pow(level, 2)) |> round
}
end,
maxlevel: 24,
defense: fn _level -> 0 end,
points: fn level -> :math.pow(level, 2) end
},
%Building{
id: 6,
key: :building_6,
build_time: fn level ->
((200 + 400 * level + :math.pow(level, 2.2)) |> round) * 1_000_000
end,
costs: fn level ->
%{
resource_1: (20 + :math.pow(level, 2.5)) |> round,
resource_2: (20 + :math.pow(level, 2.5)) |> round,
resource_3: (20 + :math.pow(level, 2.5)) |> round
}
end,
maxlevel: 16,
defense: fn _level -> 0 end,
points: fn level -> :math.pow(level, 2.5) |> round end
}
]
end
end
|
lib/mechanics/buildings.ex
| 0.61231 | 0.513729 |
buildings.ex
|
starcoder
|
defmodule GenRegex.Generator do
@moduledoc """
Regex generator module and struct. This is the intermediate step between interpreting parsed ASTs and generating strings.
For expressions that match an indefinite maximum number of repetitions (e.g. `a+`, `a*`), a default maximum of 100 repetitions is used.
In the future this will be configurable.
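
For example (an illustrative sketch; generator structs are normally produced
by the GenRegex parser rather than built by hand):

    %GenRegex.Generator{type: :word, min: 2, max: nil, value: ["ab"]}
    |> GenRegex.Generator.generate()
    #=> "abab..." (between 2 and 100 repetitions of "ab")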
"""
defstruct type: nil,
min: 1,
max: 1,
value: nil
alias __MODULE__
@max_reps 100
def generate(expr), do: generate(expr, nil)
def generate(list, parent) when is_list(list) do
list
|> Enum.map(&(generate(&1, parent)))
|> Enum.join("")
end
# ==================
# REPETITION CLAUSES
# ==================
def generate(%Generator{min: nil} = gen, _), do: generate(%Generator{gen | min: 0}, nil)
def generate(%Generator{max: nil, min: min} = gen, _) do
case @max_reps > min do
true ->
generate(%Generator{gen | max: @max_reps}, nil)
false ->
generate(%Generator{gen | max: min+1}, nil)
end
end
def generate(%Generator{min: min, max: max, type: type} = gen, _)
when min != 1 and max != 1
do
reps = Enum.random(min..max)
gen_reps("", gen, type, reps)
end
# ==================
# REGULAR CLAUSES
# ==================
def generate(%Generator{type: :word, value: value}, _parent) do
value
|> List.wrap()
|> Enum.map(&generate(&1, :word))
|> Enum.join("")
end
def generate(%Generator{type: :option, value: value}, _parent) do
value
|> Enum.random()
|> generate()
end
def generate(%Generator{type: :set, value: value}, _parent) do
value
|> Enum.random()
|> generate(:set)
end
def generate(%Generator{type: :negset, value: value}, _parent) do
excluded_chars =
value
|> Enum.map(&(generate(&1, :negset)))
|> List.flatten()
|> MapSet.new()
GenRegex.RandomString.all()
|> String.codepoints()
|> MapSet.new()
|> MapSet.difference(excluded_chars)
|> Enum.random()
end
def generate(%Generator{type: :range, value: value}, :negset) do
value
|> do_generate_range()
end
def generate(%Generator{type: :range, value: value}, :set) do
value
|> do_generate_range()
|> Enum.random()
end
def generate(%Generator{type: :range, value: value}, _parent) do
value
|> do_generate_range()
|> String.Chars.List.to_string
end
defp do_generate_range(value) do
value
|> Enum.to_list()
|> Enum.map(&(String.Chars.List.to_string([&1])))
|> List.flatten()
end
def generate(:wildcard, :set), do: "."
def generate(:wildcard, :negset), do: "."
def generate(%Generator{type: :wildcard}, :set), do: "."
def generate(%Generator{type: :wildcard}, :negset), do: "."
def generate(%Generator{type: :wildcard}, _parent) do
GenRegex.RandomString.generate(1)
end
# ==================
# CATCH-ALL CLAUSE
# ==================
def generate(str, _parent), do: to_string(str)
# ==================
# PRIVATE FUNCTIONS
# ==================
defp gen_reps(acc, _generator, _parent, count)
when count <= 0, do: acc
defp gen_reps(acc, generator, parent, count),
do: gen_reps(
acc <> generate(%Generator{generator | min: 1, max: 1}, parent),
generator,
parent,
count - 1
)
end
|
lib/grammar/generator.ex
| 0.601711 | 0.492493 |
generator.ex
|
starcoder
|
defmodule Commanded.Command do
@moduledoc ~S"""
Creates an `Ecto.Schema.embedded_schema` that supplies a command with all the validation power of the `Ecto.Changeset` data structure.
defmodule CreateAccount do
use Commanded.Command,
username: :string,
email: :string,
age: :integer,
aliases: {{:array, :string}}
def handle_validate(changeset) do
changeset
|> Changeset.validate_required([:username, :email, :age])
|> Changeset.validate_format(:email, ~r/@/)
|> Changeset.validate_number(:age, greater_than: 12)
end
end
iex> CreateAccount.new(username: "chris", email: "<EMAIL>", age: 5, aliases: ["christopher", "kris"])
%CreateAccount{username: "chris", email: "<EMAIL>", age: 5, aliases: ["christopher", "kris"]}
iex> CreateAccount.validate(%{username: "chris", email: "<EMAIL>", age: 5, aliases: ["christopher", "kris"]})
#Ecto.Changeset<action: nil, changes: %{age: 5, aliases: ["christopher", "kris"], email: "<EMAIL>", username: "chris"}, errors: [age: {"must be greater than %{number}", [validation: :number, kind: :greater_than, number: 12]}], data: #CreateAccount<>, valid?: false>
iex> CreateAccount.validate(%{email: "emailson", age: 5})
#Ecto.Changeset<action: nil, changes: %{age: 5, email: "emailson"}, errors: [age: {"must be greater than %{number}", [validation: :number, kind: :greater_than, number: 12]}, email: {"has invalid format", [validation: :format]}, username: {"can't be blank", [validation: :required]}], data: #CreateAccount<>, valid?: false>
"""
@doc """
Optional callback to define validation rules
"""
@callback handle_validate(Ecto.Changeset.t()) :: Ecto.Changeset.t()
defmacro __using__(schema) do
quote do
use Ecto.Schema
import Ecto.Schema, only: [embedded_schema: 1, field: 2, field: 3]
import Commanded.Command
alias Ecto.Changeset
@behaviour Commanded.Command
@primary_key false
embedded_schema do
Enum.map(unquote(schema), fn
{name, {{_, _} = composite_type, opts}} -> field(name, field_type(composite_type), opts)
{name, {{_, _} = composite_type}} -> field(name, field_type(composite_type))
{name, {type, opts}} -> field(name, field_type(type), opts)
{name, type} -> field(name, field_type(type))
end)
end
def new(), do: %__MODULE__{}
def new(source)
def new(%{__struct__: _} = source) do
source
|> Map.from_struct()
|> new()
end
def new(source) when is_list(source) do
source
|> Enum.into(%{})
|> new()
end
def new(source) when is_map(source) do
source |> create()
end
use ExConstructor, :create
def validate(command) when is_map(command) do
command
|> cast()
|> handle_validate()
end
def handle_validate(%Ecto.Changeset{} = changeset), do: changeset
defoverridable handle_validate: 1
@cast_keys unquote(schema) |> Enum.into(%{}) |> Map.keys()
defp cast(attrs) do
Ecto.Changeset.cast(%__MODULE__{}, attrs, @cast_keys)
end
end
end
def field_type(:binary_id), do: Ecto.UUID
def field_type(:array) do
raise "`:array` is not a valid Ecto.Type\nIf you are using a composite data type, wrap the type definition like this `{{:array, :string}}`"
end
def field_type(type), do: type
end
|
lib/commanded/command.ex
| 0.803559 | 0.460289 |
command.ex
|
starcoder
|
defmodule BoatServer.Prometheus do
use Prometheus.Metric
require Logger
def setup do
Gauge.new(
name: :wind_speed_apparent,
labels: [:boat, :source],
help: "Apparent wind speed"
)
Gauge.new(
name: :wind_angle_apparent,
labels: [:boat, :source],
help: "Apparent wind angle"
)
Gauge.new(
name: :wind_angle_apparent_deg,
labels: [:boat, :source],
help: "Apparent wind angle in degrees"
)
Gauge.new(
name: :position_latitude,
labels: [:boat, :source],
help: "Latitude of current position"
)
Gauge.new(
name: :position_longitude,
labels: [:boat, :source],
help: "Longitude of current position"
)
Gauge.new(
name: :position_altitude,
labels: [:boat, :source],
help: "Altitude of current position"
)
Gauge.new(
name: :course_over_ground_true,
labels: [:boat, :source],
help: "Current true course over ground"
)
Gauge.new(
name: :speed_over_ground,
labels: [:boat, :source],
help: "Current speed over ground"
)
end
def instrument(%{"navigation" => nav, "environment" => env, "name" => name}) do
wind_speed = convert(:ms_to_kn, env["wind"]["speedApparent"]["value"])
wind_deg = convert(:rad_to_degree, env["wind"]["angleApparent"]["value"])
course = convert(:rad_to_degree, nav["courseOverGroundTrue"]["value"])
boat_speed = convert(:ms_to_kn, nav["speedOverGround"]["value"])
Gauge.set([name: :wind_speed_apparent, labels: [name, "signalk"]], wind_speed)
Gauge.set([name: :wind_angle_apparent_deg, labels: [name, "signalk"]], wind_deg)
Gauge.set([name: :wind_angle_apparent, labels: [name, "signalk"]], env["wind"]["angleApparent"]["value"])
Gauge.set([name: :position_longitude, labels: [name, "signalk"]], nav["position"]["longitude"])
Gauge.set([name: :position_latitude, labels: [name, "signalk"]], nav["position"]["latitude"])
Gauge.set([name: :position_altitude, labels: [name, "signalk"]], nav["position"]["altitude"])
Gauge.set([name: :course_over_ground_true, labels: [name, "signalk"]], course)
Gauge.set([name: :speed_over_ground, labels: [name, "signalk"]], boat_speed)
end
defp convert(:ms_to_kn, value) do
value * 1.9438444924406
end
# Converts radians to degrees, normalized so the heading is 0°:
# port maps to -180..0 and starboard to 0..180.
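# For example, 270° (3π/2 rad) becomes 270 - 360 = -90°, i.e. 90° to port.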
defp convert(:rad_to_degree, value) do
deg = value * (180/:math.pi())
case deg >= 180 do
true -> deg - 360
false -> deg
end
end
end
|
lib/boat_server/prometheus.ex
| 0.629319 | 0.409634 |
prometheus.ex
|
starcoder
|
defmodule GraphQL.QueryBuilder do
@moduledoc """
Functions to simplify the creation of GraphQL queries.
The easiest way to use these functions is to `import` this module directly,
this way you'll have all you need to build a query.
## Helper functions
- `query/4` - creates a new "query" operation
- `mutation/4` - creates a new "mutation" operation
- `field/3` - creates a new field (optionals: variables and subfields)
- `fragment/1` - creates a reference to a fragment
- `fragment/3`- creates a fragment
- `inline_fragment/2` - creates an inline fragment
## Writing queries and mutations
As an example, consider the following GraphQL request:
```
query UserQuery($id: Integer = 1) {
user (id: $id) {
id
email
...personFields
}
}
fragment personFields on Person {
firstName
lastName
}
```
Using the functions in this module, you can create a representation of this
query in this way:
```
q = query("UserQuery", %{id: {"Integer", 1}}, [
field(:user, %{}, [
field(:id)
field(:email),
fragment("personFields")
])
], [
fragment("personFields", "Person", [
field("firstName"),
field("lastName")
])
])
```
"""
alias GraphQL.{Node, Query, Variable}
@doc """
Creates a new `GraphQL.Query` struct, for a `:query` operation.
"""
@spec query(String.t(), map(), list(), list()) :: Query.t()
def query(name, variables, fields, fragments \\ []) do
build(:query, name, variables, fields, fragments)
end
@doc """
Creates a new `GraphQL.Query` struct, for a `:mutation` operation
"""
@spec mutation(String.t(), map(), list(), list()) :: Query.t()
def mutation(name, variables, fields, fragments \\ []) do
build(:mutation, name, variables, fields, fragments)
end
defp build(operation, name, variables, fields, fragments) do
%Query{
operation: operation,
name: name,
fields: fields,
fragments: fragments,
variables: parse_variables(variables)
}
end
@doc """
Creates a field.
When rendered, it will have the following body:
1. A simple field, no arguments or sub fields
```
fieldName
```
2. A field with an alias
```
fieldAlias: fieldName
```
3. A field with arguments
```
fieldName(arg: value)
```
4. A field with sub fields
```
fieldName {
subField
}
```
5. A field an alias, arguments and sub fields
```
fieldAlias: fieldName (arg: value) {
subField
}
```
## Examples
iex> field(:some_field)
%GraphQL.Node{node_type: :field, name: :some_field}
iex> field({:some_field, "fieldAlias"})
%GraphQL.Node{node_type: :field, name: :some_field, alias: "fieldAlias"}
iex> field("anotherField", %{}, [field(:id)])
%GraphQL.Node{node_type: :field, name: "anotherField", nodes: [%GraphQL.Node{node_type: :field, name: :id}]}
"""
@spec field(Node.name() | Node.name_and_alias(), map(), Keyword.t(Node.t())) :: Node.t()
def field(name, args \\ nil, fields \\ nil, directives \\ nil) do
args = if(args == %{}, do: nil, else: args)
Node.field(name, args, fields, directives)
end
@doc """
Creates a `GraphQL.Variable` struct.
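
## Examples

    iex> var(:id, "Integer", 1)
    %GraphQL.Variable{name: :id, type: "Integer", default_value: 1}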
"""
@spec var(any(), any(), any()) :: Variable.t()
def var(name, type, value \\ nil) do
%Variable{name: name, type: type, default_value: value}
end
@spec enum(String.t()) :: {:enum, String.t()}
def enum(name) do
{:enum, name}
end
@doc """
Creates a reference to a fragment. Use it inside a field.
When rendered, it will generate the following body:
```
...fragmentName
```
## Examples
iex> fragment(:fields)
%GraphQL.Node{node_type: :fragment_ref, name: :fields}
"""
@spec fragment(String.t()) :: Node.t()
def fragment(name) do
Node.fragment(name)
end
@doc """
Creates a fragment. Use it on the query level.
When rendered, it will generate the following body:
```
... fragmentName on Type {
field1
field2
}
```
## Examples
iex> fragment("personFields", "Person", [field(:name)])
%GraphQL.Node{node_type: :fragment, name: "personFields", type: "Person", nodes: [%GraphQL.Node{node_type: :field, name: :name}]}
"""
@spec fragment(String.t(), String.t(), list()) :: Node.t()
def fragment(name, type, fields) do
Node.fragment(name, type, fields)
end
@doc """
Creates an inline fragment. Use it inside a field.
When rendered, it will generate the following body:
```
... on Type {
field1
field2
}
```
## Examples
iex> inline_fragment("Person", [field(:name)])
%GraphQL.Node{node_type: :inline_fragment, type: "Person", nodes: [%GraphQL.Node{node_type: :field, name: :name}]}
"""
@spec inline_fragment(String.t(), list()) :: Node.t()
def inline_fragment(type, fields) do
Node.inline_fragment(type, fields)
end
# Variables
defp parse_variables(vars) do
Enum.map(vars, &parse_variable/1)
end
defp parse_variable({name, {type, default}}) do
%Variable{name: name, type: type, default_value: default}
end
defp parse_variable({name, type}) do
%Variable{name: name, type: type, default_value: nil}
end
end
|
lib/graphql/query_builder.ex
| 0.905241 | 0.941223 |
query_builder.ex
|
starcoder
|
defmodule Lilictocat.Github do
@github_api Application.get_env(:lilictocat, :github_api, Lilictocat.Github.API)
@moduledoc """
this module consumes the github API, and makes the necessary transformations to use the data
"""
@doc """
returns a list of organiztions.
## Examples
iex> Lilictocat.Github.organizations()
["dominaria inc"]
"""
@spec organizations() :: list()
def organizations() do
Enum.map(@github_api.get_organizations(), fn org -> org.login end)
end
@doc """
returns a list of repositories.
## Examples
iex> Lilictocat.Github.organization_repos()
[
%{owner: %{login: "dominaria inc"}, name: "zoombie", archived: true},
%{owner: %{login: "dominaria inc"}, name: "goblin", archived: false}
]
"""
@spec organization_repos() :: Enumerable.t()
def organization_repos() do
organizations()
|> List.first()
|> @github_api.get_organization_repos()
|> Task.async_stream(fn repo ->
%{owner: repo.owner.login, name: repo.name, archived: repo.archived}
end)
end
@doc """
returns a list of repositories with options.
## Examples
iex> Lilictocat.Github.organization_repos(ignore_archived: true)
[
%{owner: %{login: "dominaria inc"}, name: "goblin", archived: false}
]
"""
@spec organization_repos(list()) :: Enumerable.t()
def organization_repos(options) do
case options do
[ignore_archived: true] ->
Stream.filter(organization_repos(), fn {:ok, repo} -> !repo.archived end)
[ignore_archived: false] ->
organization_repos()
[] ->
organization_repos()
_ ->
[]
end
end
@doc """
returns a list of pull requests with status open.
## Examples
iex> Lilictocat.Github.open_pull_requests_of_organization()
[
%{
created_at: "2020-07-23T17:41:20Z",
html_url: "https://link_pr.com/1",
number: 1,
base: %{repo: %{full_name: "dominaria_inc/zoombie"}}
},
%{
created_at: "2020-08-23T17:41:20Z",
html_url: "https://link_pr.com/2",
number: 2,
base: %{repo: %{full_name: "dominaria_inc/goblin"}}
}
]
iex> Lilictocat.Github.open_pull_requests_of_organization(ignore_archived: true)
[
%{
created_at: "2020-08-23T17:41:20Z",
html_url: "https://link_pr.com/2",
number: 2,
base: %{repo: %{full_name: "dominaria_inc/goblin"}}
}
]
"""
@spec open_pull_requests_of_organization(list()) :: Enumerable.t()
def open_pull_requests_of_organization(arguments \\ []) do
organization_repos(arguments)
|> Task.async_stream(fn {:ok, repo} -> @github_api.get_open_pulls(repo.owner, repo.name) end)
|> Stream.filter(fn {:ok, pr} -> !Enum.empty?(pr) end)
|> Stream.flat_map(fn {:ok, pr_list} -> pr_list end)
|> Stream.map(&convert_pr/1)
end
@doc """
returns a boolean indicating whether the pull request is still without reviews.
## Examples
iex> Lilictocat.Github.pull_request_without_review?(%{project: "dominaria_inc/goblin", number: 21})
true
"""
@spec pull_request_without_review?(map()) :: boolean()
def pull_request_without_review?(%{project: project, number: number}) do
Enum.empty?(@github_api.get_reviews_of_pr(project, number))
end
defp convert_pr(pr) do
%{
project: pr.base.repo.full_name,
number: pr.number,
link: pr.html_url,
created_at: parse_date(pr.created_at)
}
end
defp parse_date(string) do
{:ok, datetime, 0} = DateTime.from_iso8601(string)
datetime
end
end
|
lib/lilictocat/github.ex
| 0.536799 | 0.437763 |
github.ex
|
starcoder
|
defmodule Membrane.H264 do
@moduledoc """
This module provides format definition for H264 video stream
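
An example format struct (illustrative values):

    %Membrane.H264{
      width: 1280,
      height: 720,
      framerate: {30, 1},
      stream_format: :byte_stream,
      profile: :high
    }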
"""
@typedoc """
Width of single frame in pixels.
Allowed values may be restricted by used encoding parameters, for example, when using
4:2:0 chroma subsampling dimensions must be divisible by 2.
"""
@type width_t :: pos_integer()
@typedoc """
Height of single frame in pixels.
Allowed values may be restricted by used encoding parameters, for example, when using
4:2:0 chroma subsampling dimensions must be divisible by 2.
"""
@type height_t :: pos_integer()
@typedoc """
Number of frames per second. To avoid using floating point numbers,
it is described by 2 integers number of frames per timeframe in seconds.
For example, NTSC's framerate of ~29.97 fps is represented by `{30_000, 1001}`
"""
@type framerate_t :: {frames :: pos_integer, seconds :: pos_integer}
@typedoc """
Describes h264 stream format.
Byte-stream format (often reffered to as 'Annex B' because it is defined in Annex B
of [ITU-T H.264 Recommendation](http://www.itu.int/rec/T-REC-H.264-201704-I/en))
is suitable for writing to file or streaming with MPEG-TS.
In this format each NAL unit is preceded by three or four-byte start code (`0x(00)000001`)
that helps to identify boundaries.
avc1 and avc3 are described by ISO/IEC 14496-15. In such stream NALUs lack the start codes,
but are preceded with their length. Avc streams are more suitable for placing in containers
(e.g. they are used by QuickTime (.mov), MP4, Matroska and FLV). Avc1 and avc3 differ in how PPS and SPS
(Picture Parameter Set and Sequence Parameter Set) are transported.
"""
@type stream_format_t :: :avc1 | :avc3 | :byte_stream
@typedoc """
Describes whether and how buffers are aligned.
`:au` means each buffer contains one Access Unit - all the NAL units required to decode
a single frame of video
`:nal` aligned stream ensures that no NAL unit is split between buffers, but it is possible that
NALUs required for one frame are in different buffers
`:none` means the stream hasn't been parsed and is not aligned.
"""
@type alignment_t :: :au | :nal | :none
@typedoc """
When alignment is set to `:au`, determines whether buffers have NALu info attached in metadata.
If true, each buffer contains the NAL units list under `metadata.h264.nalus`. The list consists of
maps with the following entries:
- `prefixed_poslen: {pos, len}` - position and length of the NALu within the payload
- `unprefixed_poslen: {pos, len}` - as above, but omits Annex B prefix
- `metadata: metadata` - metadata that would be merged into the buffer metadata
if `alignment` was `:nal`.
"""
@type nalu_in_metadata_t :: boolean()
@typedoc """
Profiles defining constraints for encoders and requirements from decoders decoding such stream
"""
@type profile_t ::
:constrained_baseline
| :baseline
| :main
| :high
| :high_10
| :high_422
| :high_444
| :high_10_intra
| :high_422_intra
| :high_444_intra
@type t :: %__MODULE__{
width: width_t(),
height: height_t(),
framerate: framerate_t(),
stream_format: stream_format_t(),
alignment: alignment_t(),
nalu_in_metadata?: nalu_in_metadata_t(),
profile: profile_t()
}
@enforce_keys [:width, :height, :framerate, :stream_format, :profile]
defstruct @enforce_keys ++ [alignment: :au, nalu_in_metadata?: false]
end
|
lib/membrane_h264_format/h264.ex
| 0.926078 | 0.684073 |
h264.ex
|
starcoder
|
defmodule Gamenect.Lobby do
use Gamenect.Web, :model
schema "lobbies" do
field :title, :string
field :finished_at, Ecto.DateTime
field :status, :integer, default: 1
field :password, :string
field :max_players, :integer
belongs_to :game, Gamenect.Game
belongs_to :host, Gamenect.User
timestamps()
end
@doc """
Builds a changeset based on the `struct` and `params`.
"""
def changeset(struct, params \\ %{}) do
struct
|> cast(params, [:title, :finished_at, :status, :password, :max_players])
|> validate_required([:title])
|> validate_inclusion(:max_players, 1..255)
end
def create_changeset(struct, params \\ %{}) do
struct
|> cast(params, [:title, :password, :max_players, :game_id, :host_id])
|> assoc_constraint(:game)
|> assoc_constraint(:host)
|> validate_required([:title, :game_id])
|> validate_inclusion(:max_players, 1..255)
end
def ordered_by(query, nil, _order) do
query
end
@doc """
`ordered_by/3` handles the creation of the `Ecto.Query` part that is responsible for sorting.
It is a convenience function that uses `field` and `order` to build an `Ecto.Query` for sorting by
`field` with order type `order(ASC|DESC)`.
This involves casting strings to atoms, as this function is expected to be used in a controller,
thus the received variables are strings from the phoenix/plug `conn` (`Plug.Conn`) `params`.
iex> ordered_by(:query, "title", "asc")
from p in query, order_by: [asc: ^field]
"""
def ordered_by(query, field, order) when is_bitstring field do
case order do
order when is_atom(order) ->
ordered_by query, String.to_atom(field), order
_ ->
ordered_by query, String.to_atom(field), String.to_atom(order)
end
end
def ordered_by(query, field, order) when is_atom field do
case order do
:none ->
ordered_by_desc query, field
:desc ->
ordered_by_desc query, field
:asc ->
ordered_by_asc query, field
_ ->
ordered_by_desc query, field
end
end
def ordered_by_desc(query, field) when is_atom field do
from p in query,
order_by: [desc: ^field]
end
def ordered_by_asc(query, field) when is_atom field do
from p in query,
order_by: [asc: ^field]
end
def open?(query) do
status query, :open
end
def status(query, status) do
case status do
:open ->
from p in query,
where: p.status == 1
:closed ->
from p in query,
where: p.status == 0
:finished ->
from p in query,
where: p.status == 2
_ ->
query
end
end
end
|
project/gamenect/web/models/lobby.ex
| 0.717705 | 0.556219 |
lobby.ex
|
starcoder
|
defmodule Dovetail.Deliver do
@moduledoc """
Use dovecot's LDA [deliver] to deliver mail.
"""
alias Dovetail.Config
require Logger
require EEx
@timeout 5000
@default_exec_path Path.join(Config.dovecot_path(),
"libexec/dovecot/deliver")
@doc """
Deliver the email to the dovecot user.
## Options
* `:exec_path :: String.t` the path to the deliver executable.
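
## Examples

    # illustrative; assumes a configured dovecot user "jane"
    email =
      Dovetail.Deliver.new_email(
        from: "sender@example.com",
        to: "jane@example.com",
        subject: "Hello",
        date: Dovetail.Deliver.DateTimeOffset.now(),
        message_id: Dovetail.Deliver.new_message_id(),
        body: "Hello, world!"
      )
    :ok = Dovetail.Deliver.call("jane", email)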
"""
@spec call(String.t, String.t, Keyword.t) :: :ok | {:error, any}
def call(username, email, options \\ [])
when is_binary(username) and is_binary(email) and is_list(options) do
exec_path = Keyword.get(options, :exec_path, @default_exec_path)
args = ["-c", Config.conf_path(), "-e", "-d", username]
true = :erlang.open_port({:spawn_executable, exec_path},
[:use_stdio | [args: args]])
|> send_email(email)
|> :erlang.port_close()
:ok
end
# Date: Fri, 21 Nov 1997 09:55:06 -0600
@email_template """
From: <%= @from %>
To: <%= @to %>
Subject: <%= @subject %>
Date: <%= @date %>
Message-ID: <%= @message_id %>
<%= @body %>
"""
EEx.function_from_string :def, :new_email, @email_template, [:assigns]
def new_message_id do
{:ok, host} = :inet.gethostname()
"#{:erlang.unique_integer()}@#{host}.com"
end
# Private Functions
defmodule DateTimeOffset do
defstruct [:datetime, :offset]
@type t :: %__MODULE__{datetime: :calendar.datetime, offset: integer}
@spec now :: t
def now do
# For now, return universal time and an time zone adjust of 0
%__MODULE__{datetime: :calendar.universal_time(),
offset: 0}
end
end
@spec send_email(Port.t, String.t) :: Port.t
defp send_email(port, email) do
true = :erlang.port_command(port, email)
port
end
end
defimpl String.Chars, for: Dovetail.Deliver.DateTimeOffset do
alias Dovetail.Deliver.DateTimeOffset
def to_string(%DateTimeOffset{datetime: {{year, month, day} = date, time},
offset: 0}) do
# Example: Wed Feb 10 11:23:57 2016
join([:calendar.day_of_the_week(date) |> weekday_to_string(),
month_to_string(month), int_to_string(day),
time_to_string(time), Integer.to_string(year)], " ")
end
@weekdays ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
for {weekday, index} <- Enum.with_index(@weekdays) do
defp weekday_to_string(unquote(index + 1)), do: unquote(weekday)
end
@months ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
for {month, index} <- Enum.with_index(@months) do
defp month_to_string(unquote(index + 1)), do: unquote(month)
end
defp time_to_string({hours, minutes, seconds}) do
join([int_to_string(hours),
int_to_string(minutes),
int_to_string(seconds)],
":")
end
@spec join([String.t], String.t) :: String.t
defp join(strings, spacer) do
join(Enum.reverse(strings), spacer, "")
end
defp join([], _spacer, acc), do: acc
defp join([string], spacer, acc) do
join([], spacer, string <> acc)
end
defp join([string | strings], spacer, acc) do
join(strings, spacer, spacer <> string <> acc)
end
@spec int_to_string(integer, integer) :: String.t
defp int_to_string(int, padding \\ 2) when is_integer(int) do
Integer.to_string(int) |> String.rjust(padding, ?0)
end
end
|
lib/dovetail/deliver.ex
| 0.639398 | 0.480418 |
deliver.ex
|
starcoder
|
defmodule Kitt.Message.ICA do
@moduledoc """
Defines the structure and instantiation function
for creating a J2735-compliant Intersection Collision Alert message.
An ICA is the alert message type emitted to DSRC-capable vehicles
entering the vicinity of an intersection in which a collision has
occurred.
"""
alias Kitt.Message.BSM.CoreData
@typedoc "Defines the structure of an IntersectionCollision message and the data elements comprising its fields"
@type t :: %__MODULE__{
msgCnt: non_neg_integer(),
id: non_neg_integer(),
timeStamp: Kitt.Types.minute_of_year(),
partOne: CoreData.t(),
path: Kitt.Types.path_history(),
pathPrediction: Kitt.Types.path_prediction(),
intersectionID: Kitt.Types.intersection_reference_id(),
laneNumber: approach_or_lane(),
eventFlag: Kitt.Types.event_flag(),
regional: [Kitt.Types.regional_extension()]
}
@type approach_or_lane ::
{:approach, non_neg_integer()}
| {:lane, non_neg_integer()}
@derive Jason.Encoder
@enforce_keys [:msgCnt, :id, :intersectionID, :laneNumber, :eventFlag]
defstruct [
:eventFlag,
:id,
:intersectionID,
:laneNumber,
:msgCnt,
:partOne,
:path,
:pathPrediction,
:regional,
:timeStamp
]
@doc """
Produces an `ICA` message struct from an equivalent map or keyword input
"""
@spec new(map() | keyword()) :: t()
def new(message) do
{_, core_data_struct} =
Map.get_and_update(message, :partOne, fn core_data ->
case core_data do
nil -> {nil, nil}
core_data -> {core_data, CoreData.new(core_data)}
end
end)
struct(__MODULE__, core_data_struct)
end
@doc """
Returns the `ICA` identifying integer
"""
@spec type_id() :: non_neg_integer()
def type_id(), do: :DSRC.intersectionCollision()
@doc """
Returns the `ICA` identifying atom recognized by the ASN1 spec
"""
@spec type() :: atom()
def type(), do: :IntersectionCollision
end
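# A construction sketch (added illustration; the field values below are made
# up, not real J2735 data, and a real message would also populate
# intersectionID, eventFlag, etc. per Kitt.Types):
defmodule Kitt.Message.ICA.Example do
  def build do
    Kitt.Message.ICA.new(%{
      msgCnt: 1,
      id: 1_234,
      timeStamp: 500_000,
      laneNumber: {:lane, 2}
    })
  end
end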
|
lib/kitt/message/ica.ex
| 0.83545 | 0.603231 |
ica.ex
|
starcoder
|
defmodule MeshxRpc.Client do
@moduledoc """
Convenience module on top of `MeshxRpc.Client.Pool`.
This module leverages the `Kernel.use/2` macro to simplify interaction with the `MeshxRpc.Client.Pool` module:
* current module name is used as pool id,
* pool options can be specified with `use/2` clause.
Please refer to `MeshxRpc.Client.Pool` documentation for details.
Example RPC client module:
```elixir
# lib/client.ex
defmodule Example1.Client do
use MeshxRpc.Client,
address: {:tcp, {127, 0, 0, 1}, 12_345},
telemetry_prefix: [:example1, __MODULE__],
pool_opts: [size: 20, max_overflow: 5],
idle_reconnect: 60 * 60 * 1000
def echo(args), do: call(:echo, args)
end
```
Start with application supervisor:
```elixir
# lib/example1/application.ex
def start(_type, _args) do
Supervisor.start_link([Example1.Client],
strategy: :one_for_one,
name: Example1.Supervisor
)
end
```
Run RPC calls:
```elixir
iex(1)> Example1.Client.echo("hello world")
"hello world"
iex(2)> Example1.Client.call(:echo, "hello world")
"hello world"
iex(3)> MeshxRpc.Client.Pool.call(Example1.Client, :echo, "hello world")
"hello world"
```
"""
defmacro __using__(opts) do
quote bind_quoted: [opts: opts] do
@opts opts
@pool_mod MeshxRpc.Client.Pool
def child_spec(arg_opts \\ []) do
opts = Keyword.merge(@opts, arg_opts)
@pool_mod.child_spec(__MODULE__, opts)
end
def cast(request, args \\ [], timeout \\ :infinity, retry \\ 5, retry_sleep \\ 100),
do: @pool_mod.cast(__MODULE__, request, args, timeout, retry, retry_sleep)
def call(request, args \\ [], timeout \\ :infinity, retry \\ 5, retry_sleep \\ 100),
do: @pool_mod.call(__MODULE__, request, args, timeout, retry, retry_sleep)
def call!(request, args \\ [], timeout \\ :infinity, retry \\ 5, retry_sleep \\ 100),
do: @pool_mod.call!(__MODULE__, request, args, timeout, retry, retry_sleep)
end
end
@optional_callbacks child_spec: 1, cast: 5, call: 5, call!: 5
@doc """
Returns a specification to start a RPC client workers pool under a supervisor.
"""
@callback child_spec(opts :: Keyword.t()) :: Supervisor.child_spec()
@doc """
Sends an asynchronous RPC cast `request` to the server.
"""
@callback cast(
request :: atom(),
args :: list(),
timeout :: timeout(),
retry :: pos_integer(),
retry_sleep :: non_neg_integer()
) ::
:ok
@doc """
Makes a synchronous RPC call `request` to the server and waits for its reply.
"""
@callback call(
request :: atom(),
args :: list(),
timeout :: timeout(),
retry :: pos_integer(),
retry_sleep :: non_neg_integer()
) ::
term() | {:error_rpc, reason :: term()}
@doc """
Same as `c:call/5`, will reraise remote exception locally.
"""
@callback call!(
request :: atom(),
args :: list(),
timeout :: timeout(),
retry :: pos_integer(),
retry_sleep :: non_neg_integer()
) ::
term() | {:error_rpc, reason :: term()}
end
|
lib/client/client.ex
| 0.89607 | 0.562417 |
client.ex
|
starcoder
|
defmodule Data.GeoJSON do
def to_feature_lists(%Map.Parsed{} = map) do
%{
routes: as_feat_collection(routes(map) ++ routeless_ways(map)),
articles: as_feat_collection(articles(map)),
markers: as_feat_collection(markers(map))
}
end
defp markers(map) do
map.nodes()
|> Map.Element.filter_by_tag(:type, ["marker", "warning"])
|> Enum.map(&as_geojson(&1))
end
defp articles(map) do
map.ways()
|> Map.Element.filter_by_tag(:type, "article")
|> Enum.map(&as_geojson(&1))
|> Enum.reject(&is_nil/1)
end
defp routeless_ways(map) do
map.ways()
|> Map.Element.filter_by_tag(:type, ["detour", "planned"])
|> Enum.map(&as_geojson(&1))
end
defp routes(map) do
map.relations()
|> Map.values()
|> add_overlap_info(:alltag)
|> add_overlap_info(:freizeit)
|> Enum.map(&as_geojson(&1))
end
# renders for nodes
defp as_geojson(%Map.Node{} = n) do
props = Map.merge(%{id: n.id}, n.tags)
%{
type: "Feature",
properties: props,
geometry: %{
type: "Point",
coordinates: as_geojson_coord(n)
}
}
end
# renders for ways
defp as_geojson(%Map.Way{tags: %{type: "article", hide_from_map: true}}), do: nil
defp as_geojson(
w = %Map.Way{
tags: %{
type: "article",
name: name,
article_title: title,
article_type: type
}
}
) do
coords = Enum.map(w.nodes, &as_geojson_coord(&1))
props = %{
type: "article",
name: name,
title: title,
icon: Map.get(w.tags, :article_icon) || type
}
%{
type: "Feature",
properties: props,
geometry: %{
type: "Polygon",
coordinates: [coords]
}
}
end
defp as_geojson(w = %Map.Way{tags: %{type: "article"}}) do
raise "A way tagged as an article is missing some required fields. Normally these are auto-inserted from the article's yaml. Does the article exist? \n #{inspect(w.tags)}"
end
defp as_geojson(%Map.Way{} = w) do
coords = Enum.map(w.nodes, &as_geojson_coord(&1))
props =
w.tags
|> Map.take([
:name,
:text,
:bridge,
:type,
:offset,
:overlap_index,
:route_id | Map.Way.style_tags()
])
%{
type: "Feature",
properties: props,
geometry: %{
type: "LineString",
coordinates: coords
}
}
end
# renders for relations
defp as_geojson(%Map.Relation{tags: %{ref: "" <> _rest}} = r) do
route = Route.from_relation(r)
extra_rel_tags = %{
color: route.color(),
route_id: route.id(),
type: route.type()
}
r
|> Map.Relation.ways()
|> Enum.map(&Map.Element.keep_only_tags(&1, [:oneway, :offset, :overlap_index, :color]))
|> Enum.map(&Map.Element.add_new_tags(&1, extra_rel_tags))
|> Enum.map(&as_geojson/1)
end
defp as_geojson(%Map.Relation{} = r) do
IO.puts(:stderr, "skipping unsupported relation: #{r.id}")
nil
end
defp as_geojson_coord(%{lon: lon, lat: lat}) do
[lon, lat]
end
defp as_feat_collection(feats) do
%{
type: "FeatureCollection",
features: feats
}
end
# hash from {length_of_rels, index_of_current_rel} → desired_offset_in_geojson
@offsets %{
{2, 0} => 1,
{2, 1} => -1
}
defp add_overlap_info(relations, type) do
{rels_to_modify, rels_to_keep} =
Enum.split_with(relations, fn rel ->
route = Route.from_relation(rel)
route && route.type() == type
end)
# create map from way IDs to relation IDs that include that way,
# additionally collecting their roles
ways_to_rels =
rels_to_modify
|> Enum.sort_by(fn %Map.Relation{id: id} -> id end)
|> Enum.reduce(%{}, fn rel, acc ->
rel
|> Map.Relation.way_members()
|> Enum.reduce(acc, fn %{role: role, ref: %Map.Way{id: wid}}, acc ->
%{rels: rels, roles: roles} = acc[wid] || %{rels: [], roles: []}
Map.put(acc, wid, %{rels: [rel.id | rels], roles: [role | roles]})
end)
end)
# add offset/oneway to tags of ways
Enum.map(rels_to_modify, fn rel ->
modify_ways(rel, fn way ->
rels = ways_to_rels[way.id][:rels]
roles = Enum.uniq(ways_to_rels[way.id][:roles])
index = Enum.find_index(rels, fn rel_id -> rel_id == rel.id end)
oneway = length(roles) == 1 && hd(roles) in ["forward", "backward"]
offset = @offsets[{length(rels), index}] || 0
tags = %{offset: offset}
tags = if length(rels) >= 2, do: Map.put(tags, :overlap_index, index), else: tags
tags = if oneway, do: Map.put(tags, :oneway, true), else: tags
Map.Element.add_new_tags(way, tags)
end)
end) ++ rels_to_keep
end
defp modify_ways(%Map.Relation{} = r, fun) do
members =
Enum.map(r.members, fn %{ref: ref} = member ->
updated = if is_struct(ref, Map.Way), do: fun.(ref), else: ref
%{member | ref: updated}
end)
%{r | members: members}
end
end
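# Shape sketch (added illustration): the node rendering above produces plain
# maps in standard GeoJSON form; hand-built here with hypothetical values:
defmodule Data.GeoJSON.ShapeExample do
  def marker_feature do
    %{
      type: "Feature",
      properties: %{id: 1, type: "marker"},
      geometry: %{type: "Point", coordinates: [9.99, 53.55]}
    }
  end
end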
|
lib/data/geojson.ex
| 0.649467 | 0.428233 |
geojson.ex
|
starcoder
|
defmodule ExAws.Vocabulary do
@moduledoc """
Operations for AWS Transcribe Vocabulary Endpoints
"""
import ExAws.Utils, only: [camelize_keys: 2]
@version "2017-10-26"
@doc """
Creates a vocabulary
Doc: <https://docs.aws.amazon.com/transcribe/latest/dg/API_CreateVocabulary.html>
Example:
```
ExAws.Vocabulary.create_vocabulary("VocabularyName", language_code: "en-US", phrases: ["Hello", "World"])
```
"""
@type create_vocabulary_opts :: [
language_code: binary,
phrases: list(binary),
vocabulary_file_uri: binary
]
@spec create_vocabulary(
name :: binary,
opts :: create_vocabulary_opts
) :: ExAws.Operation.JSON.t()
def create_vocabulary(name, opts \\ []) do
params =
%{
"VocabularyName" => name
}
|> Map.merge(normalize_opts(opts))
request(:create_vocabulary, params)
end
@doc """
Lists vocabularies
Doc: <https://docs.aws.amazon.com/transcribe/latest/dg/API_ListVocabularies.html>
Examples:
```
# List vocabularies
ExAws.Vocabulary.list_vocabularies()
# List vocabularies by name
ExAws.Vocabulary.list_vocabularies(name_contains: "name")
```
"""
@type list_vocabularies_opts :: [
name_contains: binary,
max_results: integer,
next_token: binary,
state: binary
]
@spec list_vocabularies(opts :: list_vocabularies_opts) :: ExAws.Operation.JSON.t()
def list_vocabularies(opts \\ []) do
request(:list_vocabularies, normalize_opts(opts))
end
@doc """
Returns information about a vocabulary
Doc: <https://docs.aws.amazon.com/transcribe/latest/dg/API_GetVocabulary.html>
Example:
```
ExAws.Vocabulary.get_vocabulary("Vocabulary1")
```
"""
@spec get_vocabulary(name :: binary) :: ExAws.Operation.JSON.t()
def get_vocabulary(name) do
params = %{"VocabularyName" => name}
request(:get_vocabulary, params)
end
@doc """
Deletes the named vocabulary
Doc: <https://docs.aws.amazon.com/transcribe/latest/dg/API_DeleteVocabulary.html>
Example:
```
ExAws.Vocabulary.delete_vocabulary("Vocabulary1")
```
"""
@spec delete_vocabulary(name :: binary) :: ExAws.Operation.JSON.t()
def delete_vocabulary(name) do
params = %{"VocabularyName" => name}
request(:delete_vocabulary, params)
end
@doc """
Updates the named vocabulary
All words or a file must be specified.
Doc: <https://docs.aws.amazon.com/transcribe/latest/dg/API_UpdateVocabulary.html>
Example:
```
ExAws.Vocabulary.update_vocabulary("Vocabulary1", phrases: ["Hello", "World"])
```
"""
@type update_vocabulary_opts :: [
language_code: binary,
phrases: list(binary),
vocabulary_file_uri: binary
]
@spec update_vocabulary(name :: binary, opts :: update_vocabulary_opts) ::
ExAws.Operation.JSON.t()
def update_vocabulary(name, opts \\ []) do
params =
%{
"VocabularyName" => name
}
|> Map.merge(normalize_opts(opts))
request(:update_vocabulary, params)
end
defp request(action, params) do
operation =
action
|> Atom.to_string()
|> Macro.camelize()
ExAws.Operation.JSON.new(:transcribe, %{
data: Map.merge(%{"Version" => @version}, params),
headers: [
{"X-Amz-Target", "Transcribe.#{operation}"},
{"content-type", "application/x-amz-json-1.1"}
]
})
end
defp normalize_opts(opts) do
opts
|> Enum.into(%{})
|> camelize_keys(deep: false)
end
end
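# Execution sketch (added illustration; assumes ExAws is configured with
# valid AWS credentials and a region):
defmodule ExAws.Vocabulary.Example do
  def fetch do
    "Vocabulary1"
    |> ExAws.Vocabulary.get_vocabulary()
    |> ExAws.request()
  end
end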
|
lib/ex_aws/vocabulary.ex
| 0.878647 | 0.736543 |
vocabulary.ex
|
starcoder
|
defmodule AntlUtilsEcto.Queryable do
@moduledoc """
Superpower your schemas.
"""
@callback queryable() :: Ecto.Queryable.t()
@callback include(Ecto.Queryable.t(), list) :: Ecto.Queryable.t()
@callback filter(Ecto.Queryable.t(), keyword) :: Ecto.Queryable.t()
@callback order_by(Ecto.Queryable.t(), list | keyword) :: Ecto.Queryable.t()
@callback paginate(Ecto.Queryable.t(), pos_integer, pos_integer) :: Ecto.Queryable.t()
@callback search(Ecto.Queryable.t(), binary) :: Ecto.Queryable.t()
@callback select_fields(Ecto.Queryable.t(), list()) :: Ecto.Queryable.t()
defmacro __using__(opts) do
quote do
@behaviour unquote(__MODULE__)
@before_compile unquote(__MODULE__)
import Ecto.Query, only: [dynamic: 2, where: 2]
@searchable_fields Keyword.get(unquote(opts), :searchable_fields, [:id])
def queryable(), do: Keyword.get(unquote(opts), :base_schema, __MODULE__)
defoverridable queryable: 0
def searchable_fields(), do: @searchable_fields
@spec include(Ecto.Queryable.t(), list()) :: Ecto.Queryable.t()
def include(queryable, includes) when is_list(includes),
do: Enum.reduce(includes, queryable, &include_assoc(&2, &1))
def filter(queryable, filters),
do: Enum.reduce(filters, queryable, &filter_by_field(&2, &1))
def order_by(queryable, order_bys),
do: unquote(__MODULE__).order_by(queryable, order_bys)
def paginate(queryable, page_size, page_number),
do: unquote(__MODULE__).paginate(queryable, page_size, page_number)
def search(queryable, search_query, metadata \\ [], searchable_fields \\ @searchable_fields)
def search(queryable, nil, _metadata, _searchable_fields), do: queryable
def search(queryable, "", _metadata, _searchable_fields), do: queryable
def search(queryable, search_query, metadata, searchable_fields)
when is_binary(search_query) and is_list(metadata),
do: where(queryable, ^search_where_query(search_query, metadata, searchable_fields))
def select_fields(queryable, fields),
do: unquote(__MODULE__).select_fields(queryable, fields)
defp search_where_query(search_query, [], searchable_fields)
when is_list(searchable_fields) do
searchable_fields
|> Enum.reduce(Ecto.Query.dynamic(false), &search_by_field(&2, {&1, search_query}))
end
defp search_where_query(search_query, metadata, searchable_fields)
when length(metadata) > 0 and is_list(searchable_fields) do
searchable_fields
|> Enum.reduce(
Ecto.Query.dynamic(false),
&search_by_field(&2, {&1, search_query}, metadata)
)
end
end
end
defmacro __before_compile__(_env) do
quote do
defp include_assoc(queryable, _), do: queryable
defp filter_by_field(queryable, field),
do: unquote(__MODULE__).filter_by_field(queryable, field)
defp search_by_field(dynamic, field),
do: unquote(__MODULE__).search_by_field(dynamic, field)
defp search_by_field(dynamic, field, metadata),
do: unquote(__MODULE__).search_by_field(dynamic, field, metadata)
end
end
import Ecto.Query, only: [dynamic: 2]
@spec filter_by_field(any, {any, any}) :: Ecto.Query.t()
def filter_by_field(queryable, {key, value}) do
queryable |> AntlUtilsEcto.Query.where(key, value)
end
@spec order_by(Ecto.Queryable.t(), list) :: Ecto.Queryable.t()
def order_by(queryable, []), do: queryable
def order_by(queryable, order_bys) when is_list(order_bys) do
queryable |> Ecto.Query.order_by(^order_bys)
end
@spec paginate(any, pos_integer(), pos_integer()) :: Ecto.Query.t()
def paginate(queryable, page_size, page_number) do
queryable |> AntlUtilsEcto.Paginator.paginate(page_size, page_number)
end
@spec search_by_field(Ecto.Query.DynamicExpr.t(), {any, binary}) :: Ecto.Query.DynamicExpr.t()
def search_by_field(dynamic, {key, value}) do
like_value = "%#{String.replace(value, "%", "\\%")}%"
dynamic([q], ^dynamic or like(type(fragment("?", field(q, ^key)), :string), ^like_value))
end
@spec search_by_field(Ecto.Query.DynamicExpr.t(), {any, binary}, list()) ::
Ecto.Query.DynamicExpr.t()
def search_by_field(dynamic, {key, value}, metadata) when is_list(metadata) do
like_value = "%#{String.replace(value, "%", "\\%")}%"
dynamic([q], ^dynamic or like(type(fragment("?", field(q, ^key)), :string), ^like_value))
end
@spec select_fields(Ecto.Queryable.t(), nil | list) :: Ecto.Queryable.t()
def select_fields(queryable, nil), do: queryable
def select_fields(queryable, []), do: queryable
def select_fields(queryable, fields) when is_list(fields),
do: queryable |> Ecto.Query.select(^fields)
end
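# Adoption sketch (added illustration; MyApp.Post and MyApp.Repo are
# hypothetical):
defmodule MyApp.PostQueryable do
  use AntlUtilsEcto.Queryable,
    base_schema: MyApp.Post,
    searchable_fields: [:title]
end

# Composing the generated helpers:
# MyApp.PostQueryable.queryable()
# |> MyApp.PostQueryable.filter(status: "published")
# |> MyApp.PostQueryable.search("elixir")
# |> MyApp.Repo.all()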
|
lib/queryable.ex
| 0.705176 | 0.403361 |
queryable.ex
|
starcoder
|
defmodule Cryppo.Rsa4096 do
@moduledoc """
Encryption strategy RSA with 4096-bit keys and some RSA-specific functions
For encryption and decryption please use functions in module `Cryppo`.
This module also contains logic for PEMs, signing and verification.
"""
# Key length 4096
# Exponents: 65537
# Padding: rsa_pkcs1_oaep_padding
use Cryppo.EncryptionStrategy,
strategy_name: "Rsa4096",
# 4096 is the key size in ruby Cryppo
key_length: 4_096,
key_derivation_possible: false
alias Cryppo.RsaSignature
@typedoc """
Erlang type for RSA private keys
The native Erlang type for RSA private keys in module [`public_key`](https://erlang.org/doc/man/public_key.html)
are Erlang records visible from Elixir as tuples with 11 terms the first term being atom `:RSAPrivateKey`
"""
@type rsa_private_key() ::
{:RSAPrivateKey, integer, integer, integer, integer, integer, integer, integer, integer,
integer, any}
@typedoc """
Erlang type for RSA public keys
The native Erlang type for RSA public keys in module [`public_key`](https://erlang.org/doc/man/public_key.html)
are Erlang records visible from Elixir as tuples with 3 terms the first term being atom `:RSAPublicKey`
"""
@type rsa_public_key() :: {:RSAPublicKey, integer, integer}
@typedoc """
RSA keys in PEM format
"""
@type pem() :: String.t()
# 65537 is the default in OpenSSL, and hence in ruby Cryppo
@exponent 65_537
# rsa_pkcs1_oaep_padding is the padding in ruby Cryppo
@padding :rsa_pkcs1_oaep_padding
@spec generate_key :: EncryptionKey.t()
@impl true
def generate_key do
{:rsa, key_length(), @exponent}
|> :public_key.generate_key()
|> EncryptionKey.new(__MODULE__)
end
@spec encrypt(binary, EncryptionKey.t()) ::
{:ok, binary, EncryptionArtefacts.t()} | :encryption_error
@impl EncryptionStrategy
def encrypt(data, %EncryptionKey{key: private_key})
when is_binary(data) and elem(private_key, 0) == :RSAPrivateKey and
tuple_size(private_key) == 11 do
public_key = private_key_to_public_key(private_key)
encrypt(data, EncryptionKey.new(public_key, __MODULE__))
end
def encrypt(data, %EncryptionKey{key: public_key})
when is_binary(data) and elem(public_key, 0) == :RSAPublicKey and
tuple_size(public_key) == 3 do
encrypted = data |> :public_key.encrypt_public(public_key, rsa_padding: @padding)
{:ok, encrypted, %EncryptionArtefacts{}}
rescue
_e in ErlangError ->
{:encryption_error,
"the input data to encrypt is likely bigger than Rsa4096 + rsa_pkcs1_oaep_padding can handle"}
e ->
e
end
def encrypt(_, _), do: :encryption_error
@doc """
Extracts a public key from a private key
Extracts a public key from a `Cryppo.EncryptionKey` struct with an RSA private key or from an
RSA private key in the native Erlang type `t:rsa_private_key/0`
## Examples
With a `Cryppo.EncryptionKey` struct:
iex> public_key = "Rsa4096"
...> |> Cryppo.generate_encryption_key()
...> |> Cryppo.Rsa4096.private_key_to_public_key()
...> elem(public_key, 0)
:RSAPublicKey
With a native Erlang key:
iex> public_key = {:rsa, 4_096, 65_537}
...> |> :public_key.generate_key()
...> |> Cryppo.Rsa4096.private_key_to_public_key()
...> elem(public_key, 0)
:RSAPublicKey
"""
@spec private_key_to_public_key(rsa_private_key() | EncryptionKey.t()) :: rsa_public_key()
def private_key_to_public_key(%EncryptionKey{
encryption_strategy_module: __MODULE__,
key: private_key
}),
do: private_key_to_public_key(private_key)
def private_key_to_public_key(private_key)
when is_tuple(private_key) and elem(private_key, 0) == :RSAPrivateKey and
tuple_size(private_key) == 11 do
public_modulus = private_key |> elem(2)
public_exponent = private_key |> elem(3)
{:RSAPublicKey, public_modulus, public_exponent}
end
@doc """
Converts an RSA key to PEM format.
Can convert
* a `Cryppo.EncryptionKey` struct
* a public key as native Erlang type `t:rsa_public_key/0`
* a private key as native Erlang type `t:rsa_private_key/0`
## Examples
With a `Cryppo.EncryptionKey` struct
iex> "Rsa4096" |> Cryppo.generate_encryption_key() |> Cryppo.Rsa4096.to_pem()
With a public key as native Erlang type `t:rsa_public_key/0`
iex> "Rsa4096"
...> |> Cryppo.generate_encryption_key()
...> |> Cryppo.Rsa4096.private_key_to_public_key()
...> |> Cryppo.Rsa4096.to_pem()
With a private key as native Erlang type `t:rsa_private_key/0`
iex> encryption_key = Cryppo.generate_encryption_key("Rsa4096")
iex> Cryppo.Rsa4096.to_pem(encryption_key.key)
"""
@spec to_pem(EncryptionKey.t() | rsa_private_key() | rsa_public_key()) :: {:ok, pem()}
def to_pem(%EncryptionKey{key: key}),
do: to_pem(key)
def to_pem(key)
when is_tuple(key) and (elem(key, 0) == :RSAPrivateKey or elem(key, 0) == :RSAPublicKey) do
pem_entry = key |> elem(0) |> :public_key.pem_entry_encode(key)
{:ok, :public_key.pem_encode([pem_entry])}
end
@doc """
Loads and initializes a `Cryppo.EncryptionKey` struct from a string with a PEM.
## Examples
iex> pem = "-----BEGIN RSA PRIVATE KEY-----\\n" <>
...> "<KEY>" <>
...> "<KEY>" <>
...> "<KEY>" <>
...> "<KEY>" <>
...> <KEY>" <>
...> "<KEY>" <>
...> "K4gndHnXD5QkKNcTdFq64ef23R6AY0XEGkiRLDXZZA09hDIACgSSfk1Qbo0SJSvU\\n" <>
...> "TAR8A6clAkEA1vkWJ5qUo+xuIZB+2604LRco1GYAj5/fZ2kvUMjbOdCFgFaDVzJY\\n" <>
...> "X2pzLkk7RZNgPvXcRAgX7FlWmm4jwZzQywJARrHeSCMRx7DqF0PZUQaXmorYU7uw\\n" <>
...> "<KEY>" <>
...> "ujk6i1l94kaC9LB59sXnqQMSS<KEY>rAMZxFF6/<KEY>FIk+CxiRX\\n" <>
...> "<KEY>" <>
...> "vR9vuGcUcIDcWKOl05t4D35F5A/DskP6dGYA1cuWNg==\\n" <>
...> "-----END RSA PRIVATE KEY-----\\n\\n"
...> {:ok, _encryption_key} = Cryppo.Rsa4096.from_pem(pem)
"""
@spec from_pem(pem) :: {:ok, EncryptionKey.t()} | {:error, :invalid_encryption_key}
def from_pem(pem) when is_binary(pem) do
case :public_key.pem_decode(pem) do
[pem_entry] ->
encryption_key = %EncryptionKey{
encryption_strategy_module: __MODULE__,
key: :public_key.pem_entry_decode(pem_entry)
}
{:ok, encryption_key}
_ ->
{:error, :invalid_encryption_key}
end
end
@spec decrypt(EncryptedData.t(), EncryptionKey.t()) :: {:ok, binary} | :decryption_error
@impl EncryptionStrategy
def decrypt(%EncryptedData{encrypted_data: encrypted_data}, %EncryptionKey{key: private_key})
when is_binary(encrypted_data) and elem(private_key, 0) == :RSAPrivateKey and
tuple_size(private_key) == 11 do
decrypted = :public_key.decrypt_private(encrypted_data, private_key, rsa_padding: @padding)
{:ok, decrypted}
rescue
ErlangError -> :decryption_error
end
def decrypt(_, _), do: :decryption_error
@doc """
Signs data with a private key
The private key can be one of the following:
* a `Cryppo.EncryptionKey` struct
* a private key as native Erlang type `t:rsa_private_key/0`
* a PEM with a private RSA key
## Examples
With a `Cryppo.EncryptionKey` struct:
iex> encryption_key = Cryppo.generate_encryption_key("Rsa4096")
iex> _signature = %Cryppo.RsaSignature{} = Cryppo.Rsa4096.sign("data to sign", encryption_key)
With a private key as native Erlang type `t:rsa_private_key/0`
iex> private_key = :public_key.generate_key({:rsa, 4_096, 65_537})
iex> _signature = %Cryppo.RsaSignature{} = Cryppo.Rsa4096.sign("data to sign", private_key)
With a PEM
iex> pem = "-----BEGIN RSA PRIVATE KEY-----\\n" <>
...> "<KEY>" <>
...> "tn396BzDTdQ16HuuZ+eN+K<KEY>" <>
...> "sWooM3mwnSvMPWWnBj1c+0tbO7zfur5wQdzBl66HrHgHt+Bz6f+dDj+aVwIDAQAB\\n" <>
...> "AoGAMHh3rihgrW9+h07dGF1baOoyzm6hCoTSkguefn0K0B5DLdSm7FHu+jp0pBqI\\n" <>
...> <KEY>" <>
...> "<KEY>" <>
...> "<KEY>" <>
...> "<KEY>" <>
...> "<KEY>" <>
...> "<KEY>" <>
...> "ujk6i1l94kaC9LB59sXnqQMSSLDlTBt9OSqB3rAMZxFF6/KGoDGKpBfFIk+CxiRX\\n" <>
...> "kT22vUleyt3lBNPK3QJAEr56asvREcIDFkbs7Ebjev4U1PL58w78ipp49Ti5FiwH\\n" <>
...> "vR9vuGcUcIDcWKOl05t4D35F5A/DskP6dGYA1cuWNg==\\n" <>
...> "-----END RSA PRIVATE KEY-----\\n\\n"
...> _signature = %Cryppo.RsaSignature{} = Cryppo.Rsa4096.sign("data to sign", pem)
"""
@spec sign(binary, rsa_private_key() | EncryptionKey.t() | pem()) ::
RsaSignature.t() | {:error, :invalid_encryption_key} | {:error, String.t()}
def sign(data, _maybe_pem) when is_binary(data) and byte_size(data) > 512 do
{:error, "cannot sign more than 512 bytes"}
end
def sign(data, maybe_pem) when is_binary(data) and is_binary(maybe_pem) do
with {:ok, encryption_key} <- from_pem(maybe_pem) do
sign(data, encryption_key)
end
end
def sign(data, %EncryptionKey{encryption_strategy_module: __MODULE__, key: private_key}),
do: sign(data, private_key)
def sign(data, private_key_erlang_tuple)
when is_binary(data) and is_tuple(private_key_erlang_tuple) and
elem(private_key_erlang_tuple, 0) == :RSAPrivateKey and
tuple_size(private_key_erlang_tuple) == 11 do
signature = :public_key.sign(data, :sha256, private_key_erlang_tuple)
%RsaSignature{signature: signature, data: data}
end
@doc """
Verifies an RSA signature with a public key
The key for verification can be pretty much any format and type, private keys are also accepted:
* native Erlang types `t:rsa_private_key/0` and `t:rsa_public_key/0`
* `Cryppo.EncryptionKey` structs
* PEMs
## Examples
With a public key in the Erlang format:
iex> encryption_key = Cryppo.generate_encryption_key("Rsa4096")
iex> signature = Cryppo.Rsa4096.sign("data to sign", encryption_key)
iex> public_key = Cryppo.Rsa4096.private_key_to_public_key(encryption_key)
iex> Cryppo.Rsa4096.verify(signature, public_key)
true
With a private key in the Erlang format:
iex> encryption_key = Cryppo.generate_encryption_key("Rsa4096")
iex> signature = Cryppo.Rsa4096.sign("data to sign", encryption_key)
iex> Cryppo.Rsa4096.verify(signature, encryption_key.key)
true
With a `Cryppo.EncryptionKey` struct:
iex> encryption_key = Cryppo.generate_encryption_key("Rsa4096")
iex> signature = Cryppo.Rsa4096.sign("data to sign", encryption_key)
iex> Cryppo.Rsa4096.verify(signature, encryption_key)
true
With a PEM
iex> pem_with_private_key = "-----BEGIN RSA PRIVATE KEY-----\\n" <>
...> "<KEY>" <>
...> "<KEY>" <>
...> "sWooM3mwnSvMPWWnBj1c+<KEY>" <>
...> "AoGAMHh3rihgrW9+h07dGF1baOoyzm6hCoTSkguefn0K0B5DLdSm7FHu+jp0pBqI\\n" <>
...> "/gHvolEFSZdMbarYOrUMf4BPlRSarCjjxf/beV4Pj/UQrCkDmNBBVJp33Sy8HEdb\\n" <>
...> "Wrzk+k8NcAS<KEY>S/<KEY>" <>
...> "<KEY>" <>
...> "<KEY>" <>
...> "<KEY>" <>
...> "<KEY>" <>
...> "ujk6i1l94kaC<KEY>" <>
...> "<KEY>" <>
...> "vR9vuGcUcIDcWKOl05t4D35F5A/DskP6dGYA1cuWNg==\\n" <>
...> "-----END RSA PRIVATE KEY-----\\n\\n"
...> signature = Cryppo.Rsa4096.sign("data to sign", pem_with_private_key)
...> {:ok, key} = Cryppo.Rsa4096.from_pem(pem_with_private_key)
...> {:ok, pem_with_public_key} = key
...> |> Cryppo.Rsa4096.private_key_to_public_key()
...> |> Cryppo.Rsa4096.to_pem()
...> Cryppo.Rsa4096.verify(signature, pem_with_public_key)
true
"""
@spec verify(RsaSignature.t(), rsa_public_key | rsa_private_key | EncryptionKey.t() | pem) ::
boolean() | {:error, :invalid_encryption_key}
def verify(%RsaSignature{data: data, signature: signature}, public_key),
do: verify(data, signature, public_key)
@spec verify(binary, binary, rsa_public_key | rsa_private_key | EncryptionKey.t() | pem) ::
boolean() | {:error, :invalid_encryption_key}
defp verify(data, signature, maybe_pem) when is_binary(maybe_pem) do
with {:ok, encryption_key} <- from_pem(maybe_pem),
do: verify(data, signature, encryption_key)
end
defp verify(data, signature, %EncryptionKey{
encryption_strategy_module: __MODULE__,
key: private_key
}),
do: verify(data, signature, private_key)
defp verify(data, signature, private_key)
when is_tuple(private_key) and elem(private_key, 0) == :RSAPrivateKey do
public_key = private_key_to_public_key(private_key)
verify(data, signature, public_key)
end
defp verify(data, signature, public_key)
when is_binary(data) and is_binary(signature) and is_tuple(public_key) and
elem(public_key, 0) == :RSAPublicKey do
:public_key.verify(data, :sha256, signature, public_key)
end
@spec build_encryption_key(any) :: {:ok, EncryptionKey.t()} | {:error, :invalid_encryption_key}
@impl EncryptionStrategy
def build_encryption_key(private_key_in_erlang_format)
when is_tuple(private_key_in_erlang_format) and
elem(private_key_in_erlang_format, 0) == :RSAPrivateKey and
tuple_size(private_key_in_erlang_format) == 11 do
{:ok, EncryptionKey.new(private_key_in_erlang_format, __MODULE__)}
end
def build_encryption_key(maybe_pem) when is_binary(maybe_pem),
do: from_pem(maybe_pem)
def build_encryption_key(_), do: {:error, :invalid_encryption_key}
end
|
lib/cryppo/rsa4096.ex
| 0.865494 | 0.542863 |
rsa4096.ex
|
starcoder
|
defmodule Delugex.MessageStore.Postgres do
@moduledoc """
This is the real implementation of MessageStore. It will execute the needed
queries on Postgres through Postgrex by calling the functions provided in
[ESP](https://github.com/Carburetor/ESP/tree/master/app/config/functions/stream).
Each function simply passes the required arguments to the corresponding SQL function and converts any returned value.
Whenever a stream name is expected, please use the %StreamName struct and
make sure to convert it to string.
"""
use Supervisor
use Delugex.MessageStore
import Delugex.MessageStore,
only: [
is_version: 1,
is_expected_version: 1,
is_batch_size: 1
]
alias Delugex.StreamName
alias Delugex.Event
alias Delugex.Event.Raw
alias Delugex.Event.Metadata
alias Delugex.MessageStore.Postgres.Repo
@wrong_version "Wrong expected version:"
@stream_read_batch_sql """
select * from get_stream_messages(
_stream_name := $1::varchar,
_position := $2::bigint,
_batch_size := $3::bigint
)
"""
@category_read_batch_sql """
select * from get_category_messages(
_category_name := $1::varchar,
_position := $2::bigint,
_batch_size := $3::bigint
)
"""
@stream_read_last_sql "select * from get_last_message(_stream_name := $1)"
@write_sql """
select * from write_message(
_id := $1::varchar,
_stream_name := $2::varchar,
_type := $3::varchar,
_data := $4::jsonb,
_metadata := $5::jsonb,
_expected_version := $6::bigint
)
"""
@version_sql "select * from stream_version(_stream_name := $1::varchar)"
def start_link do
Supervisor.start_link(__MODULE__, nil, name: __MODULE__)
end
def start_link(_arg), do: start_link()
@impl Supervisor
def init(_arg) do
notify_config =
Delugex.MessageStore.Postgres.Repo.config()
|> Keyword.put(:name, Postgrex.Notifications)
children = [
Delugex.MessageStore.Postgres.Repo,
%{
id: Postgrex.Notifications,
start: {
Postgrex.Notifications,
:start_link,
[notify_config]
}
}
]
Supervisor.init(children, strategy: :rest_for_one)
end
@impl Delugex.MessageStore
@doc """
Write has an optional expected_version argument. This argument could be one of:
- nil: no version expected
- no_stream: no message ever written to this stream, the Postgres
stream_version position will return null (max(position) is null if no rows
are present)
- An integer (0+): Representing the expected version
"""
def write!(%Event{} = event, expected_version \\ nil)
when is_expected_version(expected_version) do
expected_version = to_number_version(expected_version)
params = encode_event(event)
params = params ++ [expected_version]
query(@write_sql, params).rows
|> rows_to_single_result
rescue
error in Postgrex.Error -> as_known_error!(error)
end
@impl Delugex.MessageStore
@doc """
- `events` list of events to write
- `stream_name` stream where events will be written to (will overwrite
any stream_name provided in the events)
- optional `expected_version` argument. This argument could be one of:
- `nil`: no version expected
- `:no_stream`: no message ever written to this stream, the Postgres
stream_version position will return null (max(position) is null if no
rows are present)
- An integer (0+): Representing the expected version
"""
def write_batch!(
events,
stream_name,
expected_version \\ nil
)
when is_list(events) and is_expected_version(expected_version) do
insertables =
events
|> Stream.map(fn event -> Map.put(event, :stream_name, stream_name) end)
|> Stream.with_index()
|> Stream.map(fn {event, index} ->
case index do
0 -> {event, expected_version}
_ -> {event, nil}
end
end)
{:ok, final_version} =
Repo.transaction(fn ->
Enum.reduce(insertables, nil, fn {event, expected_version}, _ ->
write!(event, expected_version)
end)
end)
final_version
end
@impl Delugex.MessageStore
@doc """
Retrieves the last message of the stream identified by stream_name (based on greatest position).
"""
def read_last(stream_name) do
stream_name = StreamName.to_string(stream_name)
query(@stream_read_last_sql, [stream_name]).rows
|> rows_to_events
|> List.last()
end
@impl Delugex.MessageStore
@doc """
Retrieves events from the stream or category identified by stream_name, in batches of 10 by default.
"""
def read_batch(stream_name, position \\ 0, batch_size \\ 10)
when is_version(position) and is_batch_size(batch_size) do
sql =
case StreamName.category?(stream_name) do
true -> @category_read_batch_sql
false -> @stream_read_batch_sql
end
stream_name = StreamName.to_string(stream_name)
query(sql, [stream_name, position, batch_size]).rows
|> rows_to_events
end
@impl Delugex.MessageStore
@doc """
Retrieves the last message position, or :no_stream if none are present
"""
def read_version(stream_name) do
stream_name = StreamName.to_string(stream_name)
version =
query(@version_sql, [stream_name]).rows
|> rows_to_single_result
case version do
nil -> :no_stream
_ -> version
end
end
@impl Delugex.MessageStore
@doc """
Receives notifications as GenServer casts. Two types of notifications are
received:
- `{:notification, connection_pid, ref, channel, payload}` with a notify
from Postgres (check
[Postgrex documentation](https://hexdocs.pm/postgrex/Postgrex.Notifications.html#listen/3))
- `{:reminder}` which is received every X seconds
"""
def listen(stream_name, opts \\ []) do
stream_name = StreamName.to_string(stream_name)
Repo.listen(stream_name, opts)
end
@impl Delugex.MessageStore
@doc """
Stops notifications
"""
def unlisten(ref, opts \\ []) do
Repo.unlisten(ref, opts)
end
defp to_number_version(:no_stream), do: -1
defp to_number_version(nil), do: nil
defp to_number_version(expected_version), do: expected_version
defp query(raw_sql, parameters) do
Repo
|> Ecto.Adapters.SQL.query!(raw_sql, parameters)
end
defp encode_event(%Event{
id: id,
stream_name: stream_name,
type: type,
data: data,
metadata: metadata
}) do
id = cast_uuid_as_string(id)
stream_name = StreamName.to_string(stream_name)
[id, stream_name, type, data, metadata]
end
defp rows_to_single_result([[value]]), do: value
defp rows_to_events(rows) do
rows
|> Enum.map(&row_to_event_raw/1)
end
defp row_to_event_raw([
id,
stream_name,
type,
position,
global_position,
data,
metadata,
time
]) do
id = cast_uuid_as_string(id)
%Raw{
id: decode_id(id),
stream_name: decode_stream_name(stream_name),
type: type,
position: position,
global_position: global_position,
data: decode_data(data),
metadata: decode_metadata(metadata),
time: decode_naive_date_time(time)
}
end
defp symbolize(map) do
map
|> Map.new(fn {k, v} -> {String.to_existing_atom(k), v} end)
end
defp as_known_error!(error) do
message = to_string(error.postgres.message)
cond do
String.starts_with?(message, @wrong_version) ->
raise Delugex.MessageStore.ExpectedVersionError, message: message
true ->
raise error
end
end
defp cast_uuid_as_string(id) do
Ecto.UUID.cast!(id)
end
defp decode_stream_name(text_stream_name) do
decoder =
__MODULE__
|> Delugex.Config.get(:stream_name, [])
|> Keyword.get(:decoder, Delugex.Stream.Name)
decoder.decode(text_stream_name)
end
defp decode_metadata(map) do
metadata =
map
|> decode_json()
|> symbolize()
struct(Metadata, metadata)
end
defp decode_data(map) do
map
|> decode_json()
|> symbolize()
end
defp decode_naive_date_time(time) do
# NaiveDateTime.from_iso8601!(time)
time
end
defp decode_id(id) do
cast_uuid_as_string(id)
end
defp decode_json(text) do
decoder =
__MODULE__
|> Delugex.Config.get(:json, [])
|> Keyword.get(:decoder, Jason)
decoder.decode!(text)
end
end
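# Write sketch (added illustration; assumes the message-store SQL functions
# are installed, Delugex is configured, and `stream` is a stream-name struct
# built elsewhere, e.g. by the Delugex.Stream.Name decoder):
defmodule Delugex.MessageStore.Postgres.Example do
  alias Delugex.MessageStore.Postgres

  def deposit(stream) do
    event = %Delugex.Event{
      id: Ecto.UUID.generate(),
      stream_name: stream,
      type: "Deposited",
      data: %{amount: 10},
      metadata: %{}
    }

    # :no_stream asserts this is the first event written to the stream.
    Postgres.write!(event, :no_stream)
  end
end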
|
lib/delugex/message_store/postgres.ex
| 0.770853 | 0.493592 |
postgres.ex
|
starcoder
|
defmodule MotleyHue do
@moduledoc """
An Elixir utility for calculating the following color combinations:
* Complementary - Two colors that are on opposite sides of the color wheel
* Analogous - Three colors that are side by side on the color wheel
* Monochromatic - A spectrum of shades, tones and tints of one base color
* Triadic - Three colors that are evenly spaced on the color wheel
* Tetradic - Four colors that are evenly spaced on the color wheel
"""
@doc """
Returns the provided color and its two analogous (adjacent) colors along a given direction of the HSV color wheel.
Adjacency is defined by a 30° offset in hue value and an analogous set must reside within a 90° section of the color wheel.
## Examples
iex> MotleyHue.analagous("FF0000")
["FF0000", "FF8000", "FFFF00"]
iex> MotleyHue.analagous("FF0000", :counter_clockwise)
["FF0000", "FF0080", "FF00FF"]
"""
@spec analagous(binary | map, :clockwise | :counter_clockwise) :: list | {:error, binary}
def analagous(color, direction \\ :clockwise)
def analagous(color, direction) when direction in [:clockwise, :counter_clockwise] do
base = Chameleon.convert(color, Chameleon.HSV)
case base do
{:error, err} ->
{:error, err}
base ->
1..2
|> Enum.map(fn i ->
hue_offset = i * 30
hue =
case direction do
:clockwise ->
calculate_degree(base.h + hue_offset)
:counter_clockwise ->
calculate_degree(base.h - hue_offset)
end
Chameleon.HSV.new(hue, base.s, base.v)
end)
|> then(&format_response(color, &1))
end
end
@doc """
Returns the provided color and its complement.
Note that the complementary color can be calculated either by taking the hue 180° away (i.e., opposite) on the HSV color wheel
or by finding the RGB value that, when combined with the provided color, yields white (i.e., rgb(255, 255, 255)).
The default approach is to use the HSV hue offset, but either can be calculated by passing `:hsv` or `:rgb` as the model argument.
## Examples
iex> MotleyHue.complimentary("FF0000")
["FF0000", "00FFFF"]
iex> MotleyHue.complimentary("008080", :hsv)
["008080", "800000"]
iex> MotleyHue.complimentary("008080", :rgb)
["008080", "FF7F7F"]
"""
@spec complimentary(binary | map, :hsv | :rgb) :: list | {:error, binary}
def complimentary(color, model \\ :hsv)
def complimentary(color, :hsv) do
even(color, 2)
end
def complimentary(color, :rgb) do
base = Chameleon.convert(color, Chameleon.RGB)
case base do
{:error, err} ->
{:error, err}
base ->
compliment = Chameleon.RGB.new(255 - base.r, 255 - base.g, 255 - base.b)
format_response(color, [compliment])
end
end
@doc """
Returns the requested count of colors, including the provided color, distributed along the color wheel.
Ideal for use as a color palette where adjacent colors need to be easily differentiated from one another (e.g., categorical or other non-quantitative data).
## Examples
iex> MotleyHue.contrast("FF0000", 7)
["FF0000", "FFFF00", "00FF00", "00FFFF", "0000FF", "FF00FF", "FF8000"]
iex> MotleyHue.contrast("FF0000", 13)
["FF0000", "FFFF00", "00FF00", "00FFFF", "0000FF", "FF00FF", "FF8000", "80FF00", "00FF80", "0080FF", "8000FF", "FF0080", "FF4000"]
"""
@spec contrast(binary | map, integer) :: list | {:error, binary}
def contrast(_color, count) when count < 2,
do: {:error, "Count must be a positive integer greater than or equal to 2"}
def contrast(color, count) when count <= 6, do: even(color, count)
def contrast(color, count) when is_integer(count) do
base = Chameleon.convert(color, Chameleon.HSV)
case base do
{:error, err} ->
{:error, err}
base ->
1..(count - 1)
|> Enum.map(fn i ->
div = div(i, 6)
degree_offset = round(360 / 6)
base_offset = i * degree_offset
rotation_offset = -360 * div + safe_divide(degree_offset, 2 * div)
hue_offset = round(base_offset + rotation_offset)
hue = calculate_degree(base.h + hue_offset)
Chameleon.HSV.new(hue, base.s, base.v)
end)
|> then(&format_response(color, &1))
end
end
@doc """
Returns the requested count of colors, including the provided color, evenly spaced along the color wheel.
## Examples
iex> MotleyHue.even("FF0000", 5)
["FF0000", "CCFF00", "00FF66", "0066FF", "CC00FF"]
"""
@spec even(binary | map, integer) :: list | {:error, binary}
def even(_color, count) when count < 2,
do: {:error, "Count must be a positive integer greater than or equal to 2"}
def even(color, count) when is_integer(count) do
base = Chameleon.convert(color, Chameleon.HSV)
case base do
{:error, err} ->
{:error, err}
base ->
degree_offset = round(360 / count)
1..(count - 1)
|> Enum.map(fn i ->
hue_offset = i * degree_offset
hue = calculate_degree(base.h + hue_offset)
Chameleon.HSV.new(hue, base.s, base.v)
end)
|> then(&format_response(color, &1))
end
end
@doc """
Returns a gradient of the size of the provided count bookended by the two provided colors.
## Examples
iex> MotleyHue.gradient("FF0000", "008080", 5)
["FF0000", "DF00A7", "6000BF", "00289F", "008080"]
"""
@spec gradient(binary | map, binary | map, integer) :: list | {:error, binary}
def gradient(_color1, _color2, count) when count < 3,
do: {:error, "Count must be a positive integer greater than or equal to 3"}
def gradient(color1, color2, count) when is_integer(count) do
base1 = Chameleon.convert(color1, Chameleon.HSV)
base2 = Chameleon.convert(color2, Chameleon.HSV)
case {base1, base2} do
{{:error, err}, _} ->
{:error, err}
{_, {:error, err}} ->
{:error, err}
{base1, base2} ->
hue_diff = base1.h - base2.h
saturation_diff = base1.s - base2.s
value_diff = base1.v - base2.v
hue_degree_offset = hue_diff |> safe_divide(count - 1)
saturation_percent_offset = saturation_diff |> safe_divide(count - 1)
value_percent_offset = value_diff |> safe_divide(count - 1)
1..(count - 2)
|> Enum.map(fn i ->
hue_offset = i * hue_degree_offset
hue = calculate_degree(base1.h + hue_offset)
saturation_offset = i * saturation_percent_offset
saturation = base1.s - saturation_offset
value_offset = i * value_percent_offset
value = base1.v - value_offset
Chameleon.HSV.new(hue, saturation, value)
end)
|> then(&format_response(color1, color2, &1))
end
end
@doc """
Returns the provided color and its monochromatic color spectrum towards black.
The number of results is configurable with each color equally spaced from the previous value.
## Examples
iex> MotleyHue.monochromatic("FF0000")
["FF0000", "AB0000", "570000"]
iex> MotleyHue.monochromatic("FF0000", 5)
["FF0000", "CC0000", "990000", "660000", "330000"]
"""
@spec monochromatic(binary | map, integer) :: list | {:error, binary}
def monochromatic(color, count \\ 3)
def monochromatic(_color, count) when count < 2,
do: {:error, "Count must be a positive integer greater than or equal to 2"}
def monochromatic(color, count) when is_integer(count) do
base = Chameleon.convert(color, Chameleon.HSV)
case base do
{:error, err} ->
{:error, err}
base ->
step = div(100, count)
Range.new(0, 100, step)
|> Enum.slice(1..(count - 1))
|> Enum.map(fn value_offset ->
value = round(base.v - value_offset)
Chameleon.HSV.new(base.h, base.s, value)
end)
|> then(&format_response(color, &1))
end
end
@doc """
Returns the provided color and its three tetradic colors, which are the colors 90°, 180°, and 270° offset from the given color's hue value on the HSV color wheel.
## Examples
iex> MotleyHue.tetradic("FF0000")
["FF0000", "80FF00", "00FFFF", "8000FF"]
"""
@spec tetradic(binary | map) :: list | {:error, binary}
def tetradic(color) do
even(color, 4)
end
@doc """
Returns the provided color and its two triadic colors, which are the colors 120° and 240° offset from the given color's hue value on the HSV color wheel.
## Examples
iex> MotleyHue.triadic("FF0000")
["FF0000", "00FF00", "0000FF"]
"""
@spec triadic(binary | map) :: list | {:error, binary}
def triadic(color) do
even(color, 3)
end
defp calculate_degree(degree) when degree >= 0, do: degree |> round() |> rem(360)
defp calculate_degree(degree), do: degree |> round() |> then(&(360 + rem(&1, 360)))
defp format_response(color, matches) when is_struct(color) do
[color]
|> Kernel.++(matches)
|> Enum.map(&Chameleon.convert(&1, color.__struct__))
end
defp format_response(color, matches) when is_binary(color) do
case Chameleon.Util.derive_input_struct(color) do
{:ok, %Chameleon.Hex{} = derived_color} ->
case color do
"#" <> _ -> derived_color |> format_response(matches) |> Enum.map(&"##{&1.hex}")
_ -> derived_color |> format_response(matches) |> Enum.map(& &1.hex)
end
{:ok, derived_color} ->
format_response(derived_color, matches)
{:error, err} ->
{:error, err}
end
end
defp format_response(color1, color2, matches) when is_struct(color1) do
[color1]
|> Kernel.++(matches)
|> Kernel.++([color2])
|> Enum.map(&Chameleon.convert(&1, color1.__struct__))
end
defp format_response(color1, color2, matches) when is_binary(color2) do
case Chameleon.Util.derive_input_struct(color2) do
{:ok, %Chameleon.Hex{} = derived_color2} ->
format_response(color1, derived_color2, matches)
{:ok, derived_color2} ->
format_response(color1, derived_color2, matches)
{:error, err} ->
{:error, err}
end
end
defp format_response(color1, color2, matches) when is_binary(color1) do
case Chameleon.Util.derive_input_struct(color1) do
{:ok, %Chameleon.Hex{} = derived_color1} ->
case color1 do
"#" <> _ ->
format_response(derived_color1, color2, matches) |> Enum.map(&"##{&1.hex}")
_ ->
format_response(derived_color1, color2, matches) |> Enum.map(& &1.hex)
end
{:ok, derived_color1} ->
format_response(derived_color1, color2, matches)
{:error, err} ->
{:error, err}
end
end
defp safe_divide(_, 0), do: 0
defp safe_divide(num, dem), do: num / dem
end
|
lib/motley_hue.ex
| 0.95374 | 0.666307 |
motley_hue.ex
|
starcoder
|
defmodule BitcoinPriceScraper.RateLimiter do
use GenStage
alias BitcoinPriceScraper.Upbit
alias __MODULE__.Producer
defmodule Producer do
defstruct [:limits_per_second, :pending]
def new(limits_per_second) do
%__MODULE__{
limits_per_second: limits_per_second,
# Holds events whose candle requests failed, so they can be retried on the next pass
pending: []
}
end
end
def start_link() do
GenStage.start_link(__MODULE__, :ok)
end
def init(_) do
{:consumer, %{}}
end
def handle_subscribe(:producer, opts, from, producers) do
limits_per_second = Keyword.fetch!(opts, :limits_per_second)
producers =
producers
|> Map.put(from, Producer.new(limits_per_second))
|> ask_and_schedule(from)
# Return :manual so that we control when demand is sent to the producer.
{:manual, producers}
end
def handle_events(events, from, producers) do
IO.puts("handle_events - #{to_string(NaiveDateTime.utc_now())}, count: #{Enum.count(events)}")
:telemetry.execute([:upbit, :quotation, :request, :new], %{count: Enum.count(events)})
{_success, failed} = request_candles(events)
producers =
Map.update!(producers, from, fn exist ->
# This function only handles new requests; it does not retry previously failed ones.
# Append the newly failed requests so they can be retried later
%{exist | pending: exist.pending ++ failed}
end)
{:noreply, [], producers}
end
def handle_info({:ask, from}, producers) do
{:noreply, [], ask_and_schedule(producers, from)}
end
def handle_info({:retry, from}, producers) do
{:noreply, [], retry_events(producers, from)}
end
defp request_candles(events) do
events
|> Enum.split_with(fn e ->
before_request = System.monotonic_time()
case Upbit.candles("KRW-BTC", e, 200) do
{:ok, %{body: body, status: status, headers: headers}} ->
remaining_req =
Enum.find_value(headers, fn h ->
case h do
{"remaining-req", remain} -> remain
_ -> nil
end
end)
IO.puts(
"status: #{status}, candle count: #{Enum.count(body)}, remaining-req: #{remaining_req}"
)
:telemetry.execute([:upbit, :quotation, :response, :success], %{count: 1})
:telemetry.execute(
[:upbit, :quotation, :request, :success, :duration, :milliseconds],
%{duration: System.monotonic_time() - before_request}
)
true
error ->
IO.inspect(error)
:telemetry.execute([:upbit, :quotation, :response, :failed], %{count: 1})
:telemetry.execute(
[:upbit, :quotation, :request, :failed, :duration, :milliseconds],
%{duration: System.monotonic_time() - before_request}
)
false
end
end)
end
defp ask_and_schedule(producers, from) do
case producers do
%{^from => %{limits_per_second: limits_per_second, pending: pending}} ->
GenStage.ask(from, max(limits_per_second - Enum.count(pending), 0))
# The API limits calls per second, so schedule the next ask in one second
Process.send_after(self(), {:ask, from}, :timer.seconds(1))
if not Enum.empty?(pending) do
Process.send_after(self(), {:retry, from}, :timer.seconds(1))
end
producers
%{} ->
producers
end
end
defp retry_events(producers, from) do
if not Enum.empty?(producers[from].pending) do
IO.puts(
"retry count: #{Enum.count(producers[from].pending)}, detail: #{
inspect(producers[from].pending)
}"
)
# Re-send the candle requests that previously failed
:telemetry.execute(
[:upbit, :quotation, :request, :retry],
%{count: Enum.count(producers[from].pending)}
)
{_success, pending} = request_candles(producers[from].pending)
producers =
Map.update!(producers, from, fn exist ->
%{exist | pending: pending}
end)
producers
else
producers
end
end
end
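# Wiring sketch (added illustration; MyApp.CandleTimeProducer is a
# hypothetical GenStage producer emitting datetimes to fetch):
defmodule BitcoinPriceScraper.RateLimiter.Example do
  def start do
    {:ok, producer} = GenStage.start_link(MyApp.CandleTimeProducer, :ok)
    {:ok, consumer} = BitcoinPriceScraper.RateLimiter.start_link()

    # :limits_per_second is read by handle_subscribe/4 to cap manual demand.
    GenStage.sync_subscribe(consumer, to: producer, limits_per_second: 10)
  end
end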
|
lib/bitcoin_price_scraper/rate_limiter.ex
| 0.568296 | 0.444685 |
rate_limiter.ex
|
starcoder
|
defmodule JSON.Parser.Unicode do
@moduledoc """
Implements a JSON Unicode Parser for Bitstring values
"""
use Bitwise
@doc """
parses a valid chain of escaped unicode and returns the string representation,
plus the remainder of the string
## Examples
iex> JSON.Parser.parse ""
{:error, :unexpected_end_of_buffer}
iex> JSON.Parser.parse "face0ff"
{:error, {:unexpected_token, "face0ff"}}
iex> JSON.Parser.parse "-hello"
{:error, {:unexpected_token, "-hello"}}
"""
def parse(<<?\\, ?u, json::binary>>), do: parse_escaped_unicode_codepoint(json, 0, 0)
def parse(<<>>), do: {:error, :unexpected_end_of_buffer}
def parse(json), do: {:error, {:unexpected_token, json}}
# Parsing surrogate pairs
# http://unicodebook.readthedocs.org/unicode_encodings.html
# Inspired by Poison's function
defp parse_escaped_unicode_codepoint(
<<?d, hex::utf8, f1, f2, ?\\, ?u, ?d, hex2::utf8, s1, s2, json::binary>>,
_,
0
)
when hex >= 56 do
first_part = (List.to_integer([?d, hex, f1, f2], 16) &&& 1023) <<< 10
second_part = List.to_integer([?d, hex2, s1, s2], 16) &&& 1023
complete = 0x10000 + first_part + second_part
{:ok, <<complete::utf8>>, json}
end
# parse_escaped_unicode_codepoint tries to parse
# a valid hexadecimal (composed of 4 characters) value that potentially
# represents a unicode codepoint
defp parse_escaped_unicode_codepoint(json, acc, chars_parsed) when 4 === chars_parsed do
{:ok, <<acc::utf8>>, json}
end
defp parse_escaped_unicode_codepoint(<<hex::utf8, json::binary>>, acc, chars_parsed)
when hex in ?0..?9 do
parse_escaped_unicode_codepoint(json, 16 * acc + hex - ?0, chars_parsed + 1)
end
defp parse_escaped_unicode_codepoint(<<hex::utf8, json::binary>>, acc, chars_parsed)
when hex in ?a..?f do
parse_escaped_unicode_codepoint(json, 16 * acc + 10 + hex - ?a, chars_parsed + 1)
end
defp parse_escaped_unicode_codepoint(<<hex::utf8, json::binary>>, acc, chars_parsed)
when hex in ?A..?F do
parse_escaped_unicode_codepoint(json, 16 * acc + 10 + hex - ?A, chars_parsed + 1)
end
defp parse_escaped_unicode_codepoint(<<>>, _, _), do: {:error, :unexpected_end_of_buffer}
defp parse_escaped_unicode_codepoint(json, _, _), do: {:error, {:unexpected_token, json}}
end
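# Parse sketch (added illustration): a plain 4-digit escape and a surrogate
# pair; the pair decodes to a single codepoint, U+1F600:
defmodule JSON.Parser.Unicode.Example do
  def run do
    {:ok, "A", ""} = JSON.Parser.Unicode.parse("\\u0041")
    {:ok, "😀", ""} = JSON.Parser.Unicode.parse("\\ud83d\\ude00")
    :ok
  end
end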
|
node_modules/@snyk/snyk-hex-plugin/elixirsrc/deps/json/lib/json/parser/unicode.ex
| 0.81309 | 0.418103 |
unicode.ex
|
starcoder
|
defmodule Akd.Publish.Release do
@moduledoc """
A native Hook module that comes shipped with Akd.
This module uses `Akd.Hook`.
Provides a set of operations to copy a release to its destination and publish it (by uncompressing the release tarball).
# Options:
* `run_ensure`: `boolean`. Specifies whether to run the ensure commands.
* `ignore_failure`: `boolean`. Specifies whether to continue if this hook fails.
* `scp_options`: `string`. Options to use with `scp` when copying the release between hosts.
# Defaults:
* `run_ensure`: `true`
* `ignore_failure`: `false`
* `scp_options`: `""`
"""
use Akd.Hook
alias Akd.{Deployment, Destination, DestinationResolver}
@default_opts [run_ensure: true, ignore_failure: false, scp_options: ""]
@doc """
Callback implementation for `get_hooks/2`.
This function returns a list of operations that can be used to publish a release
on the `publish_to` destination of a deployment.
## Examples
iex> deployment = %Akd.Deployment{mix_env: "prod",
...> build_at: Akd.Destination.local("."),
...> publish_to: Akd.Destination.local("."),
...> name: "name",
...> vsn: "0.1.1"}
iex> Akd.Publish.Release.get_hooks(deployment, [])
[%Akd.Hook{ensure: [%Akd.Operation{cmd: "rm ./name-0.1.1.tar.gz",
cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}}], ignore_failure: false,
main: [%Akd.Operation{cmd: "cp ./_build/prod/name-0.1.1.tar.gz .\\n",
cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}}], rollback: [], run_ensure: true},
%Akd.Hook{ensure: [], ignore_failure: false,
main: [%Akd.Operation{cmd: "cd .\\ntar xzf name-0.1.1.tar.gz\\n",
cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}}], rollback: [], run_ensure: true}]
"""
@spec get_hooks(Akd.Deployment.t(), Keyword.t()) :: list(Akd.Hook.t())
def get_hooks(deployment, opts \\ []) do
opts = uniq_merge(opts, @default_opts)
[
copy_release_hook(deployment, opts),
publish_hook(deployment, opts)
]
end
# This function takes a deployment and options and returns an Akd.Hook.t
# struct using FormHook DSL
defp copy_release_hook(deployment, opts) do
build = DestinationResolver.resolve(:build, deployment)
publish = DestinationResolver.resolve(:publish, deployment)
scp_options = Keyword.get(opts, :scp_options, false)
# Transfer between two remote servers by running the SCP command locally
scp_destination =
if build.host != publish.host do
Akd.Destination.local()
else
build
end
form_hook opts do
main(copy_rel(deployment, scp_options), scp_destination)
ensure(
"rm #{publish.path}/#{deployment.name}-#{deployment.vsn}.tar.gz",
publish
)
end
end
# This function takes a deployment and options and returns an Akd.Hook.t
# struct using FormHook DSL
defp publish_hook(deployment, opts) do
publish = DestinationResolver.resolve(:publish, deployment)
form_hook opts do
main(publish_rel(deployment), publish)
end
end
# This function returns the command to be used to copy the release from
# build to production.
# This assumes that you're running this command from the same server
defp copy_rel(
%Deployment{build_at: %Destination{host: s}, publish_to: %Destination{host: s}} =
deployment,
_scp_options
) do
"""
cp #{path_to_release(deployment.build_at.path, deployment)} #{deployment.publish_to.path}
"""
end
defp copy_rel(
%Deployment{build_at: src, publish_to: dest} = deployment,
_scp_options
) do
"""
scp #{src |> Destination.to_string() |> path_to_release(deployment)} #{
Akd.Destination.local() |> Destination.to_string()
}
scp #{Akd.Destination.local() |> Destination.to_string() |> path_to_local_release(deployment)} #{
dest |> Destination.to_string()
}
rm #{Akd.Destination.local() |> Destination.to_string() |> path_to_local_release(deployment)}
"""
end
# This function returns the command to be used to publish a release, i.e.
# uncompress the tar.gz file associated with the deployment.
defp publish_rel(deployment) do
"""
cd #{deployment.publish_to.path}
tar xzf #{deployment.name}-#{deployment.vsn}.tar.gz
"""
end
# This function returns the path to the release based on deployment name
# and mix environment.
defp path_to_release(base, deployment) do
"#{base}/_build/#{deployment.mix_env}/#{deployment.name}-#{deployment.vsn}.tar.gz"
end
defp path_to_local_release(base, deployment) do
"#{base}/#{deployment.name}-#{deployment.vsn}.tar.gz"
end
# This function takes two keyword lists and merges them keeping the keys
# unique. If there are multiple values for a key, it takes the value from
# the first value of keyword1 corresponding to that key.
defp uniq_merge(keyword1, keyword2) do
keyword2
|> Keyword.merge(keyword1)
|> Keyword.new()
end
end
|
lib/akd/base/publish/release.ex
| 0.860237 | 0.482246 |
release.ex
|
starcoder
|
defmodule Yum.Ingredient do
@moduledoc """
A struct that contains all the data about an ingredient.
"""
defstruct [
ref: nil,
translation: %{},
exclude_diet: [],
exclude_allergen: [],
nutrition: %{}
]
@type t :: %Yum.Ingredient{ ref: String.t, translation: Yum.Data.translation_tree, exclude_diet: [String.t], exclude_allergen: [String.t], nutrition: %{ optional(String.t) => any } }
@doc """
Flatten an ingredient tree into an ingredient list.
Each item in the list can be safely operated on individually, as all of the
data related to that item is inside of its struct (compared to the original
tree where other data is inferred from its parent).
"""
@spec new(Yum.Data.ingredient_tree) :: [t]
def new(data), do: Enum.reduce(data, [], &new(&1, &2, %Yum.Ingredient{}))
defp new({ key, value = %{ __info__: info } }, ingredients, group) do
ingredient = %Yum.Ingredient{
ref: "#{group.ref}/#{key}",
translation: info["translation"] || %{}
}
|> new_exclude_diet(info, group)
|> new_exclude_allergen(info, group)
|> new_nutrition(info)
[ingredient|Enum.reduce(value, ingredients, &new(&1, &2, ingredient))]
end
defp new(_, ingredients, _), do: ingredients
defp new_exclude_diet(ingredient, %{ "exclude-diet" => diets }, parent), do: %{ ingredient | exclude_diet: Enum.uniq(diets ++ parent.exclude_diet) }
defp new_exclude_diet(ingredient, _, parent), do: %{ ingredient | exclude_diet: parent.exclude_diet }
defp new_exclude_allergen(ingredient, %{ "exclude-allergen" => allergens }, parent), do: %{ ingredient | exclude_allergen: Enum.uniq(allergens ++ parent.exclude_allergen) }
defp new_exclude_allergen(ingredient, _, parent), do: %{ ingredient | exclude_allergen: parent.exclude_allergen }
defp new_nutrition(ingredient, %{ "nutrition" => nutrition }), do: %{ ingredient | nutrition: nutrition }
defp new_nutrition(ingredient, _), do: ingredient
end
# source: lib/yum/ingredient.ex
defmodule Multihash do
@moduledoc """
A [multihash](https://github.com/jbenet/multihash) implementation.
"""
defstruct algorithm: :sha512, size: 0, digest: <<>>
@type algorithm :: :sha1 | :sha256 | :sha512 | :sha3 | :blake2b | :blake2s
@type t :: %Multihash{algorithm: algorithm,
size: non_neg_integer,
digest: binary}
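# Multihash function codes, matching the code table in the multihash spec
# linked in the moduledoc.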
defp codes do
%{sha1: 0x11,
sha256: 0x12,
sha512: 0x13,
sha3: 0x14,
blake2b: 0x40,
blake2s: 0x41}
end
@doc ~S"""
Calculate a multihash given an algorithm, digest size and binary.
## Examples
iex> Multihash.hash(:sha1, "Hello")
{:ok, %Multihash{algorithm: :sha1,
size: 20,
digest: <<247, 255, 158, 139, 123, 178, 224,
155, 112, 147, 90, 93, 120, 94, 12,
197, 217, 208, 171, 240>>}}
"""
@spec hash(algorithm, iodata) :: {:ok, t} | {:error, any}
def hash(algorithm, data)
def hash(:sha1, data), do: hash(:sha1, 20, data)
def hash(:sha256, data), do: hash(:sha256, 32, data)
def hash(:sha512, data), do: hash(:sha512, 64, data)
def hash(_, _), do: {:error, "Invalid hash algorithm"}
@spec hash(algorithm, non_neg_integer, iodata) :: {:ok, t} | {:error, any}
defp hash(:sha1, 20, data) do
{:ok, %Multihash{algorithm: :sha1,
size: 20,
digest: :crypto.hash(:sha, data)}}
end
defp hash(:sha1, _, _), do: {:error, "Invalid digest length"}
defp hash(:sha256, 32, data) do
{:ok, %Multihash{algorithm: :sha256,
size: 32,
digest: :crypto.hash(:sha256, data)}}
end
defp hash(:sha256, _, _), do: {:error, "Invalid digest length"}
defp hash(:sha512, 64, data) do
{:ok, %Multihash{algorithm: :sha512,
size: 64,
digest: :crypto.hash(:sha512, data)}}
end
defp hash(:sha512, _, _), do: {:error, "Invalid digest length"}
@type mh_binary :: <<_::16, _::_*8>>
@doc ~S"""
Encodes a Multihash as a binary.
## Examples
iex> Multihash.encode(%Multihash{algorithm: :sha1, size: 20, digest: <<247, 255, 158, 139, 123, 178, 224, 155, 112, 147, 90, 93, 120, 94, 12, 197, 217, 208, 171, 240>>})
{:ok, <<17, 20, 247, 255, 158, 139, 123, 178, 224, 155, 112, 147,
90, 93, 120, 94, 12, 197, 217, 208, 171, 240>>}
"""
@spec encode(t) :: {:ok, mh_binary} | {:error, any}
def encode(%Multihash{algorithm: algorithm, size: size, digest: digest}) do
code = Map.get(codes(), algorithm)
cond do
code == nil -> {:error, "Invalid algorithm"}
size != byte_size(digest) -> {:error, "Invalid digest length"}
true -> {:ok, <<code, size>> <> digest}
end
end
@doc ~S"""
Tries to decode a multihash binary.
## Examples
iex> Multihash.decode(<<17, 20, 247, 255, 158, 139, 123, 178, 224, 155, 112, 147, 90, 93, 120, 94, 12, 197, 217, 208, 171, 240>>)
{:ok, %Multihash{algorithm: :sha1,
size: 20,
digest: <<247, 255, 158, 139, 123, 178, 224,
155, 112, 147, 90, 93, 120, 94, 12,
197, 217, 208, 171, 240>>}}
"""
@spec decode(binary) :: {:ok, t} | {:error, any}
def decode(<<code, size, digest::binary>>) do
algorithm = from_code(code)
cond do
algorithm == nil -> {:error, "Invalid algorithm code"}
size != byte_size(digest) -> {:error, "Invalid digest length"}
true -> {:ok, %Multihash{algorithm: algorithm,
size: size,
digest: digest}}
end
end
def decode(_), do: {:error, "Invalid multihash length (too short)"}
@spec from_code(integer) :: algorithm | nil
defp from_code(code) do
case Enum.find(codes(), fn {_, v} -> v == code end) do
nil -> nil
{algorithm, ^code} -> algorithm
end
end
end
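# Round-trip sketch using only the functions above:
#
#     {:ok, mh} = Multihash.hash(:sha1, "Hello")
#     {:ok, bin} = Multihash.encode(mh)
#     {:ok, ^mh} = Multihash.decode(bin)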
# source: lib/multihash.ex
defmodule Flawless do
@moduledoc """
Flawless is a library meant for validating Elixir data structures. The validation
is done by providing the `validate` function with a value and a schema.
"""
alias Flawless.Error
alias Flawless.Helpers
alias Flawless.Types
alias Flawless.Rule
alias Flawless.Spec
alias Flawless.Union
alias Flawless.Utils.Enum, as: EnumUtils
import Flawless.Utils.Interpolation, only: [sigil_t: 2]
@type spec_type() ::
Flawless.Spec.t()
| Flawless.Union.t()
| map()
| list()
| tuple()
| atom()
| number()
| binary()
| function()
defmodule AnyOtherKey do
@moduledoc """
Struct for representing any non-specified key in a map schema.
"""
defstruct []
@type t() :: %__MODULE__{}
end
defmodule OptionalKey do
@moduledoc """
Struct for representing an optional key in a map schema.
"""
defstruct key: nil
@type t() :: %__MODULE__{
key: any()
}
end
defmodule Context do
@moduledoc """
Struct used internally for validation.
"""
defstruct path: [], is_optional_field: false, stop_early: false
@type t() :: %__MODULE__{
path: list(String.t()),
is_optional_field: boolean(),
stop_early: boolean()
}
@doc false
def add_to_path(context, path_element) do
%Context{context | path: context.path ++ [path_element]}
end
end
@doc """
Validates Elixir data against a schema and returns the list of errors.
## Options
- `check_schema` - (boolean) Whether or not the schema should be checked with
[`validate_schema/1`](#validate_schema/1) before validating the value. This is
useful to avoid potential exceptions or incoherent messages if the schema is
malformed, but it adds an extra processing cost. Consider disabling the option
if you validate the schema separately and have to validate many values against
the same schema. Defaults to `true`.
- `group_errors` - (boolean) If true, error messages associated to the same path
in the value will be grouped into a list of messages in a single `Flawless.Error`.
Defaults to `true`.
- `stop_early` - (boolean) If true, the validation will try to stop at the first
primitive element in error. This can drastically reduce both the number of
reported errors and the processing time for large data structures, when you do
not need *all* the errors at once. Defaults to `false`.
## Examples
iex> import Flawless.Helpers
iex> Flawless.validate("hello", string())
[]
iex> Flawless.validate("hello", number())
[%Flawless.Error{context: [], message: "Expected type: number, got: \\"hello\\"."}]
iex> Flawless.validate(
...> %{"name" => 1234, "age" => "Steve"},
...> %{"name" => string(), "age" => number(), "city" => string()}
...> )
[
%Flawless.Error{context: [], message: "Missing required fields: \\"city\\" (string)."},
%Flawless.Error{context: ["age"], message: "Expected type: number, got: \\"Steve\\"."},
%Flawless.Error{context: ["name"], message: "Expected type: string, got: 1234."}
]
# Stop early
iex> import Flawless.Helpers
iex> Flawless.validate(
...> %{"name" => 1234, "age" => "Steve"},
...> %{"name" => string(), "age" => number(), "city" => string()},
...> stop_early: true
...> )
[
%Flawless.Error{context: [], message: "Missing required fields: \\"city\\" (string)."}
]
"""
@spec validate(any, spec_type(), Keyword.t()) :: list(Error.t())
def validate(value, schema, opts \\ []) do
check_schema = opts |> Keyword.get(:check_schema, true)
group_errors = opts |> Keyword.get(:group_errors, true)
stop_early = opts |> Keyword.get(:stop_early, false)
context = %Context{stop_early: stop_early}
if check_schema do
case validate_schema(schema) do
[] -> do_validate(value, schema, context)
errors -> raise "Invalid schema: #{inspect(errors)}"
end
else
do_validate(value, schema, context)
end
|> Error.evaluate_messages()
|> then(fn errors ->
if group_errors, do: Error.group_by_path(errors), else: errors
end)
end
@doc """
Checks whether a schema is valid, and returns a list of errors.
"""
@spec validate_schema(any) :: list(Error.t())
def validate_schema(schema) do
do_validate(schema, Flawless.SchemaValidator.schema_schema())
|> Error.evaluate_messages()
end
defp do_validate(value, schema, context \\ %Context{}) do
errors =
case check_type_and_cast_if_needed(value, schema, context) do
{:ok, cast_value} ->
dispatch_validation(cast_value, schema, %{context | is_optional_field: false})
{:error, error} ->
[Error.new(error, context)]
end
# Handle nillable elements
errors =
case {value, nil_opt(schema), errors} do
{nil, true, _} -> []
{nil, false, _} -> [Error.new("Value cannot be nil.", context)]
{nil, :default, []} -> []
{nil, :default, _errors} when context.is_optional_field -> []
{nil, :default, errors} -> errors
_ -> errors
end
# Handle the on_error option
case {errors, schema} do
{[], _} ->
[]
{_, %Spec{on_error: on_error_message}} when not is_nil(on_error_message) ->
[Error.new(on_error_message, context)]
_ ->
errors
end
end
defp nil_opt(%Spec{nil: nil_opt}), do: nil_opt
defp nil_opt(_schema), do: :default
defp dispatch_validation(value, schema, context) do
case schema do
%Spec{for: %Spec.Value{}} -> validate_value(value, schema, context)
%Spec{for: %Spec.List{}} -> validate_list(value, schema, context)
%Union{} -> validate_union(value, schema, context)
[item_type] -> validate_list(value, Helpers.list(item_type), context)
[] -> validate_list(value, Helpers.list(Helpers.any()), context)
%Spec{for: %Spec.Tuple{}} -> validate_tuple(value, schema, context)
tuple when is_tuple(tuple) -> validate_tuple(value, Helpers.tuple(tuple), context)
%Spec{for: %Spec.Literal{}} -> validate_literal(value, schema, context)
%Spec{for: %Spec.Struct{}} -> validate_struct(value, schema, context)
%_{} -> validate_struct(value, Helpers.structure(schema), context)
%{} -> validate_map(value, schema, context)
func when is_function(func, 0) -> do_validate(value, func.(), context)
func when is_function(func, 1) -> validate_select(value, func, context)
literal when is_binary(literal) -> validate_literal(value, Helpers.literal(schema), context)
literal when is_atom(literal) -> validate_literal(value, Helpers.literal(schema), context)
literal when is_number(literal) -> validate_literal(value, Helpers.literal(schema), context)
end
end
defp check_type_and_cast_if_needed(
value,
%Spec{type: type, cast_from: cast_from, for: subspec},
_context
) do
possible_casts =
cast_from
|> List.wrap()
|> Enum.filter(fn
{type, with: _converter} -> Types.has_type?(value, type)
type when is_atom(type) -> Types.has_type?(value, type)
end)
exact_type =
case subspec do
%Spec.Struct{module: module} -> inspect(module)
_ -> type
end
cond do
Types.has_type?(value, type) ->
{:ok, value}
possible_casts != [] ->
possible_casts
|> List.first()
|> case do
{_from, with: converter} -> Types.cast_with(value, exact_type, converter)
from when is_atom(from) -> Types.cast(value, from, exact_type)
end
not match?(%Spec.Literal{}, subspec) ->
{:error, "Expected type: #{exact_type}, got: #{inspect(value)}."}
true ->
{:ok, value}
end
end
defp check_type_and_cast_if_needed(value, _schema, _context), do: {:ok, value}
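# Runs a spec's validations in three phases: top-level checks, nested
# validation (get_sub_errors), then late checks. The late-check phase passes
# a hard-coded `true` as the stop-early flag, so late checks are presumably
# only evaluated once the earlier phases produced no errors.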
defp validate_spec(
value,
%Spec{checks: checks, late_checks: late_checks},
context,
get_sub_errors
) do
[]
|> EnumUtils.maybe_add_errors(context.stop_early, fn ->
# Top-level errors
checks |> Enum.map(&Rule.evaluate(&1, value, context))
end)
|> EnumUtils.maybe_add_errors(context.stop_early, get_sub_errors)
|> EnumUtils.maybe_add_errors(true, fn ->
# Late checks
late_checks |> Enum.map(&Rule.evaluate(&1, value, context))
end)
end
defp validate_map(map, %{} = _schema, context) when is_struct(map) do
[Error.new("Expected type: map, got: struct.", context)]
end
defp validate_map(map, %{} = schema, context) when is_map(map) do
[]
|> EnumUtils.maybe_add_errors(context.stop_early, fn ->
unexpected_fields_error(map, schema, context)
end)
|> EnumUtils.maybe_add_errors(context.stop_early, fn ->
missing_fields_error(map, schema, context)
end)
|> EnumUtils.maybe_add_errors(context.stop_early, fn ->
validate_map_fields(map, schema, context)
end)
end
defp validate_map(map, _spec, context) do
[Error.invalid_type_error(:map, map, context)]
end
defp validate_map_fields(map, %{} = schema, context) do
schema
|> EnumUtils.collect_errors(context.stop_early, fn
{%AnyOtherKey{}, field} ->
unexpected_fields(map, schema)
|> EnumUtils.collect_errors(
context.stop_early,
&validate_map_field(map, &1, field, %{context | is_optional_field: true})
)
{%OptionalKey{key: field_name}, field} ->
validate_map_field(map, field_name, field, %{context | is_optional_field: true})
{field_name, field} ->
validate_map_field(map, field_name, field, context)
end)
end
defp validate_map_field(map, field_name, field_schema, context) do
case Map.fetch(map, field_name) do
:error -> []
{:ok, value} -> do_validate(value, field_schema, context |> Context.add_to_path(field_name))
end
end
defp validate_struct(
%value_module{} = struct,
%Spec{for: %Spec.Struct{module: module, schema: schema}} = spec,
context
)
when value_module == module do
validate_spec(struct, spec, context, fn ->
if schema == nil do
[]
else
validate_map(Map.from_struct(struct), Map.from_struct(schema), context)
end
end)
end
defp validate_struct(
%value_module{} = _struct,
%Spec{for: %Spec.Struct{module: module}},
context
)
when value_module != module do
[
Error.new(
{~t"Expected struct of type: %{expected_module}, got struct of type: %{actual_module}.",
expected_module: inspect(module), actual_module: inspect(value_module)},
context
)
]
end
defp validate_struct(struct, _spec, context) do
[Error.invalid_type_error(:struct, struct, context)]
end
defp validate_value(value, spec, context) do
validate_spec(value, spec, context, fn ->
case spec.for.schema do
nil -> []
schema -> validate_map(value, schema, context)
end
end)
end
defp validate_list(list, spec, context) when is_list(list) do
validate_spec(list, spec, context, fn ->
list
|> Enum.with_index()
|> EnumUtils.collect_errors(context.stop_early, fn {value, index} ->
do_validate(value, spec.for.item_type, context |> Context.add_to_path(index))
end)
end)
end
defp validate_list(list, _spec, context) do
[Error.invalid_type_error(:list, list, context)]
end
defp validate_tuple(tuple, spec, context)
when is_tuple(tuple) and tuple_size(tuple) == tuple_size(spec.for.elem_types) do
validate_spec(tuple, spec, context, fn ->
tuple
|> Tuple.to_list()
|> Enum.zip(Tuple.to_list(spec.for.elem_types))
|> Enum.with_index()
|> EnumUtils.collect_errors(
context.stop_early,
fn {{value, elem_type}, index} ->
do_validate(value, elem_type, context |> Context.add_to_path(index))
end
)
end)
end
defp validate_tuple(tuple, spec, context) when is_tuple(tuple) do
expected_size = tuple_size(spec.for.elem_types)
actual_size = tuple_size(tuple)
[
Error.new(
{~t"Invalid tuple size (expected: %{expected_size}, received: %{actual_size}).",
expected_size: expected_size, actual_size: actual_size},
context
)
]
end
defp validate_tuple(value, _spec, context) do
[Error.invalid_type_error(:tuple, value, context)]
end
defp validate_select(value, func_spec, context) do
do_validate(value, func_spec.(value), context)
rescue
_e in FunctionClauseError ->
[Error.new("Value does not match any of the possible schemas.", context)]
end
defp validate_literal(value, spec, context) do
if value == spec.for.value do
[]
else
[
Error.new(
{~t"Expected literal value %{expected_value}, got: %{value}.",
expected_value: inspect(spec.for.value), value: inspect(value)},
context
)
]
end
end
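# Tries each schema of the union in turn; the first one that validates
# cleanly wins. When all of them fail, the errors are narrowed: if exactly
# one schema matched the value's type, that schema's specific errors are
# reported, otherwise a generic union error listing the possible types is
# returned.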
defp validate_union(value, %Union{schemas: schemas}, context) do
schemas
|> Enum.reduce_while([], fn schema, errors ->
new_errors = do_validate(value, schema, context)
if new_errors == [] do
{:halt, []}
else
{:cont, [new_errors | errors]}
end
end)
|> case do
[] ->
[]
errors ->
schemas
|> Enum.map(&check_type_and_cast_if_needed(value, &1, context))
|> Enum.zip(Enum.reverse(errors))
|> Enum.reject(fn result -> match?({{:error, _}, _}, result) end)
|> case do
[{{:ok, _}, specific_errors}] ->
specific_errors
_ ->
schemas_types = schemas |> Enum.map(&type_of_schema/1) |> Enum.uniq()
[
Error.new(
"The value does not match any schema in the union. Possible types: #{inspect(schemas_types)}.",
context
)
]
end
end
end
defp unexpected_fields(map, schema) do
keys_from_schema =
schema
|> Map.keys()
|> Enum.map(fn
%OptionalKey{key: key} -> key
key -> key
end)
Map.keys(map) -- keys_from_schema
end
defp unexpected_fields_error(_map, %{%AnyOtherKey{} => _}, _context), do: []
defp unexpected_fields_error(map, schema, context) do
unexpected_fields = unexpected_fields(map, schema)
if unexpected_fields == [] do
[]
else
[
Error.new(
{~t"Unexpected fields: %{unexpected_fields}.",
unexpected_fields: inspect(unexpected_fields)},
context
)
]
end
end
defp missing_fields(map, schema) do
schema
|> Enum.reject(fn {key, _} ->
match?(%OptionalKey{}, key) or match?(%AnyOtherKey{}, key)
end)
|> Enum.filter(fn {field_name, _field} ->
not (map |> Map.has_key?(field_name))
end)
|> Enum.map(fn {field_name, _} -> field_name end)
end
defp missing_fields_error(map, schema, context) do
missing_fields = missing_fields(map, schema)
if missing_fields == [] do
[]
else
[
Error.new(
{~t"Missing required fields: %{missing_fields}.",
missing_fields: show_fields_with_type(schema, missing_fields)},
context
)
]
end
end
defp show_fields_with_type(schema, fields) do
fields
|> Enum.map(fn field ->
type =
schema
|> Map.get(field)
|> type_of_schema()
if type, do: "#{inspect(field)} (#{type})", else: inspect(field)
end)
|> Enum.join(", ")
end
defp type_of_schema(%Spec{type: type}), do: type
defp type_of_schema(schema) when is_function(schema), do: nil
defp type_of_schema(schema), do: Types.type_of(schema)
end
# source: lib/flawless.ex
defmodule Mix.Releases.Shell do
@moduledoc """
This module provides conveniences for writing output to the shell.
"""
use Mix.Releases.Shell.Macros
@type verbosity :: :silent | :quiet | :normal | :verbose
# The order of these levels is from least important to most important.
# When comparing log levels with `gte`, this ordering determines their total ordering.
deflevel(:debug, prefix: "==> ", color: :cyan)
deflevel(:info, prefix: "==> ", color: [IO.ANSI.bright(), IO.ANSI.cyan()])
deflevel(:notice, color: :yellow)
deflevel(:success, prefix: "==> ", color: [IO.ANSI.bright(), IO.ANSI.green()])
deflevel(:warn, prefix: "==> ", color: :yellow, error: :warnings_as_errors)
deflevel(:error, prefix: "==> ", color: :red)
@doc """
Configure the logging verbosity of the release logger.
Valid verbosity settings are:
* `:silent` - no output except errors
* `:quiet` - no output except warnings/errors
* `:normal` - no debug output (default)
* `:verbose` - all output
"""
@spec configure(verbosity) :: :ok
def configure(verbosity) when is_atom(verbosity) do
Application.put_env(:mix, :release_logger_verbosity, verbosity)
end
@default_answer_pattern ~r/^(y(es)?)?$/i
@doc """
Ask the user to confirm an action using the given message.
The confirmation prompt will default to "[Yn]: ", and the
regex for determining whether the action was confirmed will
default to #{inspect(Regex.source(@default_answer_pattern))}.
Use confirm?/3 to provide your own prompt and answer regex.
"""
@spec confirm?(String.t()) :: boolean
def confirm?(message) do
confirm?(message, "[Yn]: ", @default_answer_pattern)
end
@doc """
Same as confirm?/1, but takes a custom prompt and answer regex pattern.
If the pattern matches the response, the action is considered confirmed.
"""
@spec confirm?(String.t(), String.t(), Regex.t()) :: boolean
def confirm?(message, prompt, answer_pattern) do
IO.puts(IO.ANSI.yellow())
answer = IO.gets("#{message} #{prompt}") |> String.trim_trailing("\n")
IO.puts(IO.ANSI.reset())
answer =~ answer_pattern
end
@doc """
Prints an error message, then terminates the VM with a non-zero status code
"""
def fail!(message) do
error(message)
System.halt(1)
end
@doc "Write the given iodata directly, bypassing the log level"
def write(message),
do: IO.write(message)
@doc "Write the given iodata, wrapped in the given color, but bypassing the log level"
def writef(message, color),
do: write(colorf(message, color))
@doc "Write a debug level message, but with minimal formatting. Default color is same as debug level"
def debugf(message, color \\ :cyan) do
data = verbosityf(:debug, colorf(message, color))
IO.write(data)
end
## Color helpers
# Formats a message with a given color
# Can use shorthand atoms for colors, or pass the ANSI directly
@doc """
Wraps a message in the given color
"""
def colorf(message, color), do: IO.ANSI.format([to_ansi(color), message, IO.ANSI.reset()])
# Map shorthand atoms to ANSI escapes
defp to_ansi(:cyan), do: IO.ANSI.cyan()
defp to_ansi(:green), do: IO.ANSI.green()
defp to_ansi(:yellow), do: IO.ANSI.yellow()
defp to_ansi(:red), do: IO.ANSI.red()
defp to_ansi(:magenta), do: IO.ANSI.magenta()
defp to_ansi(:blue), do: IO.ANSI.blue()
defp to_ansi(:normal), do: IO.ANSI.normal()
defp to_ansi(:white), do: IO.ANSI.white()
# For when we've already mapped the color
defp to_ansi(c) when not is_atom(c), do: c
end
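# Usage sketch (hedged; level loggers such as `debug/1` are generated by the
# `deflevel` macros above):
#
#     Mix.Releases.Shell.configure(:verbose)
#     if Mix.Releases.Shell.confirm?("Proceed with the release?") do
#       Mix.Releases.Shell.write("building...\n")
#     end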
# source: lib/mix/lib/releases/shell.ex
defmodule Map do
@moduledoc """
A set of functions for working with maps.
Maps are the "go to" key-value data structure in Elixir. Maps can be created
with the `%{}` syntax, and key-value pairs can be expressed as `key => value`:
iex> %{}
%{}
iex> %{"one" => :two, 3 => "four"}
%{3 => "four", "one" => :two}
Key-value pairs in a map do not follow any order (that's why the printed map
in the example above has a different order than the map that was created).
Maps do not impose any restriction on the key type: anything can be a key in a
map. As a key-value structure, maps do not allow duplicated keys; keys are
compared using the exact-equality operator (`===`). If colliding keys are defined
in a map literal, the last one prevails.
When the key in a key-value pair is an atom, the `key: value` shorthand syntax
can be used (as in many other special forms), provided key-value pairs are put at
the end:
iex> %{"hello" => "world", a: 1, b: 2}
%{:a => 1, :b => 2, "hello" => "world"}
Keys in maps can be accessed through some of the functions in this module
(such as `Map.get/3` or `Map.fetch/2`) or through the `[]` syntax provided by
the `Access` module:
iex> map = %{a: 1, b: 2}
iex> Map.fetch(map, :a)
{:ok, 1}
iex> map[:b]
2
iex> map["non_existing_key"]
nil
The alternative access syntax `map.key` is provided alongside `[]` when the
map has a `:key` key; note that while `map[key]` will return `nil` if `map`
doesn't contain the key `key`, `map.key` will raise if `map` doesn't contain
the key `:key`.
iex> map = %{foo: "bar", baz: "bong"}
iex> map.foo
"bar"
iex> map.non_existing_key
** (KeyError) key :non_existing_key not found in: %{baz: "bong", foo: "bar"}
Maps can be pattern matched on; when a map is on the left-hand side of a
pattern match, it will match if the map on the right-hand side contains the
keys on the left-hand side and their values match the ones on the left-hand
side. This means that an empty map matches every map.
iex> %{} = %{foo: "bar"}
%{foo: "bar"}
iex> %{a: a} = %{:a => 1, "b" => 2, [:c, :e, :e] => 3}
iex> a
1
iex> %{:c => 3} = %{:a => 1, 2 => :b}
** (MatchError) no match of right hand side value: %{2 => :b, :a => 1}
Variables can be used as map keys both when writing map literals as well as
when matching:
iex> n = 1
1
iex> %{n => :one}
%{1 => :one}
iex> %{^n => :one} = %{1 => :one, 2 => :two, 3 => :three}
%{1 => :one, 2 => :two, 3 => :three}
Maps also support a specific update syntax to update the value stored under
*existing* atom keys:
iex> map = %{one: 1, two: 2}
iex> %{map | one: "one"}
%{one: "one", two: 2}
iex> %{map | three: 3}
** (KeyError) key :three not found
## Modules to work with maps
This module aims to provide functions that perform operations specific to maps
(like accessing keys, updating values, and so on). For traversing maps as
collections, developers should use the `Enum` module that works across a
variety of data types.
The `Kernel` module also provides a few functions to work with maps: for
example, `Kernel.map_size/1` to know the number of key-value pairs in a map or
`Kernel.is_map/1` to know if a term is a map.
"""
@type key :: any
@type value :: any
@compile {:inline, fetch: 2, put: 3, delete: 2, has_key?: 2}
@doc """
Returns all keys from `map`.
## Examples
iex> Map.keys(%{a: 1, b: 2})
[:a, :b]
"""
@spec keys(map) :: [key]
defdelegate keys(map), to: :maps
@doc """
Returns all values from `map`.
## Examples
iex> Map.values(%{a: 1, b: 2})
[1, 2]
"""
@spec values(map) :: [value]
defdelegate values(map), to: :maps
@doc """
Converts `map` to a list.
Each key-value pair in the map is converted to a two-element tuple `{key,
value}` in the resulting list.
## Examples
iex> Map.to_list(%{a: 1})
[a: 1]
iex> Map.to_list(%{1 => 2})
[{1, 2}]
"""
@spec to_list(map) :: [{term, term}]
defdelegate to_list(map), to: :maps
@doc """
Returns a new empty map.
## Examples
iex> Map.new
%{}
"""
@spec new :: map
def new, do: %{}
@doc """
Creates a map from an `enumerable`.
Duplicated keys are removed; the latest one prevails.
## Examples
iex> Map.new([{:b, 1}, {:a, 2}])
%{a: 2, b: 1}
iex> Map.new([a: 1, a: 2, a: 3])
%{a: 3}
"""
@spec new(Enumerable.t) :: map
def new(enumerable)
def new(%{__struct__: _} = struct), do: new_from_enum(struct)
def new(%{} = map), do: map
def new(enum), do: new_from_enum(enum)
defp new_from_enum(enumerable) do
enumerable
|> Enum.to_list
|> :maps.from_list
end
@doc """
Creates a map from an `enumerable` via the given transformation function.
Duplicated keys are removed; the latest one prevails.
## Examples
iex> Map.new([:a, :b], fn x -> {x, x} end)
%{a: :a, b: :b}
"""
@spec new(Enumerable.t, (term -> {key, value})) :: map
def new(enumerable, transform) when is_function(transform, 1) do
enumerable
|> Enum.to_list
|> new_transform(transform, [])
end
defp new_transform([], _fun, acc) do
acc
|> :lists.reverse
|> :maps.from_list
end
defp new_transform([item | rest], fun, acc) do
new_transform(rest, fun, [fun.(item) | acc])
end
@doc """
Returns whether the given `key` exists in the given `map`.
## Examples
iex> Map.has_key?(%{a: 1}, :a)
true
iex> Map.has_key?(%{a: 1}, :b)
false
"""
@spec has_key?(map, key) :: boolean
def has_key?(map, key), do: :maps.is_key(key, map)
@doc """
Fetches the value for a specific `key` in the given `map`.
If `map` contains the given `key` with value `value`, then `{:ok, value}` is
returned. If `map` doesn't contain `key`, `:error` is returned.
## Examples
iex> Map.fetch(%{a: 1}, :a)
{:ok, 1}
iex> Map.fetch(%{a: 1}, :b)
:error
"""
@spec fetch(map, key) :: {:ok, value} | :error
def fetch(map, key), do: :maps.find(key, map)
@doc """
Fetches the value for a specific `key` in the given `map`, erroring out if
`map` doesn't contain `key`.
If `map` contains the given `key`, the corresponding value is returned. If
`map` doesn't contain `key`, a `KeyError` exception is raised.
## Examples
iex> Map.fetch!(%{a: 1}, :a)
1
iex> Map.fetch!(%{a: 1}, :b)
** (KeyError) key :b not found in: %{a: 1}
"""
@spec fetch!(map, key) :: value | no_return
def fetch!(map, key) do
case fetch(map, key) do
{:ok, value} -> value
:error -> raise KeyError, key: key, term: map
end
end
@doc """
Puts the given `value` under `key` unless the entry `key`
already exists in `map`.
## Examples
iex> Map.put_new(%{a: 1}, :b, 2)
%{b: 2, a: 1}
iex> Map.put_new(%{a: 1, b: 2}, :a, 3)
%{a: 1, b: 2}
"""
@spec put_new(map, key, value) :: map
def put_new(map, key, value) do
case has_key?(map, key) do
true -> map
false -> put(map, key, value)
end
end
@doc """
Evaluates `fun` and puts the result under `key`
in `map` unless `key` is already present.
This function is useful in case you want to compute the value to put under
`key` only if `key` is not already present (e.g., the value is expensive to
calculate or generally difficult to setup and teardown again).
## Examples
iex> map = %{a: 1}
iex> fun = fn ->
...> # some expensive operation here
...> 3
...> end
iex> Map.put_new_lazy(map, :a, fun)
%{a: 1}
iex> Map.put_new_lazy(map, :b, fun)
%{a: 1, b: 3}
"""
@spec put_new_lazy(map, key, (() -> value)) :: map
def put_new_lazy(map, key, fun) when is_function(fun, 0) do
case has_key?(map, key) do
true -> map
false -> put(map, key, fun.())
end
end
@doc """
Returns a new map with all the key-value pairs in `map` where the key
is in `keys`.
If `keys` contains keys that are not in `map`, they're simply ignored.
## Examples
iex> Map.take(%{a: 1, b: 2, c: 3}, [:a, :c, :e])
%{a: 1, c: 3}
"""
@spec take(map, Enumerable.t) :: map
def take(map, keys)
def take(map, keys) when is_map(map) do
keys
|> Enum.to_list
|> do_take(map, [])
end
def take(non_map, _keys) do
:erlang.error({:badmap, non_map})
end
defp do_take([], _map, acc), do: :maps.from_list(acc)
defp do_take([key | rest], map, acc) do
acc = case fetch(map, key) do
{:ok, value} -> [{key, value} | acc]
:error -> acc
end
do_take(rest, map, acc)
end
@doc """
Gets the value for a specific `key` in `map`.
If `key` is present in `map` with value `value`, then `value` is
returned. Otherwise, `default` is returned (which is `nil` unless
specified otherwise).
## Examples
iex> Map.get(%{}, :a)
nil
iex> Map.get(%{a: 1}, :a)
1
iex> Map.get(%{a: 1}, :b)
nil
iex> Map.get(%{a: 1}, :b, 3)
3
"""
@spec get(map, key) :: value
@spec get(map, key, value) :: value
def get(map, key, default \\ nil) do
case fetch(map, key) do
{:ok, value} -> value
:error -> default
end
end
@doc """
Gets the value for a specific `key` in `map`.
If `key` is present in `map` with value `value`, then `value` is
returned. Otherwise, `fun` is evaluated and its result is returned.
This is useful if the default value is very expensive to calculate or
generally difficult to setup and teardown again.
## Examples
iex> map = %{a: 1}
iex> fun = fn ->
...> # some expensive operation here
...> 13
...> end
iex> Map.get_lazy(map, :a, fun)
1
iex> Map.get_lazy(map, :b, fun)
13
"""
@spec get_lazy(map, key, (() -> value)) :: value
def get_lazy(map, key, fun) when is_function(fun, 0) do
case fetch(map, key) do
{:ok, value} -> value
:error -> fun.()
end
end
@doc """
Puts the given `value` under `key` in `map`.
## Examples
iex> Map.put(%{a: 1}, :b, 2)
%{a: 1, b: 2}
iex> Map.put(%{a: 1, b: 2}, :a, 3)
%{a: 3, b: 2}
"""
@spec put(map, key, value) :: map
def put(map, key, value) do
:maps.put(key, value, map)
end
@doc """
Deletes the entry in `map` for a specific `key`.
If the `key` does not exist, returns `map` unchanged.
## Examples
iex> Map.delete(%{a: 1, b: 2}, :a)
%{b: 2}
iex> Map.delete(%{b: 2}, :a)
%{b: 2}
"""
@spec delete(map, key) :: map
def delete(map, key), do: :maps.remove(key, map)
@doc """
Merges two maps into one.
All keys in `map2` will be added to `map1`, overriding any existing one
(i.e., the keys in `map2` "have precedence" over the ones in `map1`).
If you have a struct and you would like to merge a set of keys into the
struct, do not use this function, as it would merge all keys on the right
side into the struct, even if the key is not part of the struct. Instead,
use `Kernel.struct/2`.
## Examples
iex> Map.merge(%{a: 1, b: 2}, %{a: 3, d: 4})
%{a: 3, b: 2, d: 4}
"""
@spec merge(map, map) :: map
defdelegate merge(map1, map2), to: :maps
@doc """
Merges two maps into one, resolving conflicts through the given `callback`.
All keys in `map2` will be added to `map1`. The given function will be invoked
when there are duplicate keys; its arguments are `key` (the duplicate key),
`value1` (the value of `key` in `map1`), and `value2` (the value of `key` in
`map2`). The value returned by `callback` is used as the value under `key` in
the resulting map.
## Examples
iex> Map.merge(%{a: 1, b: 2}, %{a: 3, d: 4}, fn _k, v1, v2 ->
...> v1 + v2
...> end)
%{a: 4, b: 2, d: 4}
"""
@spec merge(map, map, (key, value, value -> value)) :: map
def merge(map1, map2, callback) when is_function(callback, 3) do
:maps.fold fn k, v2, acc ->
update(acc, k, v2, fn(v1) -> callback.(k, v1, v2) end)
end, map1, map2
end
@doc """
Updates the `key` in `map` with the given function.
If `key` is present in `map` with value `value`, `fun` is invoked with
argument `value` and its result is used as the new value of `key`. If `key` is
not present in `map`, `initial` is inserted as the value of `key`.
## Examples
iex> Map.update(%{a: 1}, :a, 13, &(&1 * 2))
%{a: 2}
iex> Map.update(%{a: 1}, :b, 11, &(&1 * 2))
%{a: 1, b: 11}
"""
@spec update(map, key, value, (value -> value)) :: map
def update(map, key, initial, fun) when is_function(fun, 1) do
case fetch(map, key) do
{:ok, value} ->
put(map, key, fun.(value))
:error ->
put(map, key, initial)
end
end
@doc """
Returns and removes the value associated with `key` in `map`.
If `key` is present in `map` with value `value`, `{value, new_map}` is
returned where `new_map` is the result of removing `key` from `map`. If `key`
is not present in `map`, `{default, map}` is returned.
## Examples
iex> Map.pop(%{a: 1}, :a)
{1, %{}}
iex> Map.pop(%{a: 1}, :b)
{nil, %{a: 1}}
iex> Map.pop(%{a: 1}, :b, 3)
{3, %{a: 1}}
"""
@spec pop(map, key, value) :: {value, map}
def pop(map, key, default \\ nil) do
case map do
%{^key => value} -> {value, delete(map, key)}
%{} -> {default, map}
end
end
@doc """
Lazily returns and removes the value associated with `key` in `map`.
If `key` is present in `map` with value `value`, `{value, new_map}` is
returned where `new_map` is the result of removing `key` from `map`. If `key`
is not present in `map`, `{fun_result, map}` is returned, where `fun_result`
is the result of applying `fun`.
This is useful if the default value is very expensive to calculate or
generally difficult to setup and teardown again.
## Examples
iex> map = %{a: 1}
iex> fun = fn ->
...> # some expensive operation here
...> 13
...> end
iex> Map.pop_lazy(map, :a, fun)
{1, %{}}
iex> Map.pop_lazy(map, :b, fun)
{13, %{a: 1}}
"""
@spec pop_lazy(map, key, (() -> value)) :: {value, map}
def pop_lazy(map, key, fun) when is_function(fun, 0) do
case fetch(map, key) do
{:ok, value} -> {value, delete(map, key)}
:error -> {fun.(), map}
end
end
@doc """
Drops the given `keys` from `map`.
If `keys` contains keys that are not in `map`, they're simply ignored.
## Examples
iex> Map.drop(%{a: 1, b: 2, c: 3}, [:b, :d])
%{a: 1, c: 3}
"""
@spec drop(map, Enumerable.t) :: map
def drop(map, keys)
def drop(map, keys) when is_map(map) do
keys
|> Enum.to_list
|> drop_list(map)
end
def drop(non_map, _keys) do
:erlang.error({:badmap, non_map})
end
defp drop_list([], acc), do: acc
defp drop_list([key | rest], acc) do
drop_list(rest, delete(acc, key))
end
@doc """
Takes all entries corresponding to the given `keys` in `map` and extracts
them into a separate map.
Returns a tuple with the new map and the old map with removed keys.
Keys for which there are no entries in `map` are ignored.
## Examples
iex> Map.split(%{a: 1, b: 2, c: 3}, [:a, :c, :e])
{%{a: 1, c: 3}, %{b: 2}}
"""
@spec split(map, Enumerable.t) :: {map, map}
def split(map, keys)
def split(map, keys) when is_map(map) do
keys
|> Enum.to_list
|> do_split([], map)
end
def split(non_map, _keys) do
:erlang.error({:badmap, non_map})
end
defp do_split([], inc, exc) do
{:maps.from_list(inc), exc}
end
defp do_split([key | rest], inc, exc) do
case fetch(exc, key) do
{:ok, value} ->
do_split(rest, [{key, value} | inc], delete(exc, key))
:error ->
do_split(rest, inc, exc)
end
end
@doc """
Updates `key` with the given function.
If `key` is present in `map` with value `value`, `fun` is invoked with
argument `value` and its result is used as the new value of `key`. If `key` is
not present in `map`, a `KeyError` exception is raised.
## Examples
iex> Map.update!(%{a: 1}, :a, &(&1 * 2))
%{a: 2}
iex> Map.update!(%{a: 1}, :b, &(&1 * 2))
** (KeyError) key :b not found in: %{a: 1}
"""
@spec update!(map, key, (value -> value)) :: map | no_return
def update!(%{} = map, key, fun) when is_function(fun, 1) do
case fetch(map, key) do
{:ok, value} ->
put(map, key, fun.(value))
:error ->
raise KeyError, term: map, key: key
end
end
def update!(map, _key, _fun), do: :erlang.error({:badmap, map})
@doc """
Gets the value from `key` and updates it, all in one pass.
`fun` is called with the current value under `key` in `map` (or `nil` if `key`
is not present in `map`) and must return a two-element tuple: the "get" value
(the retrieved value, which can be operated on before being returned) and the
new value to be stored under `key` in the resulting new map. `fun` may also
return `:pop`, which means the current value shall be removed from `map` and
returned (making this function behave like `Map.pop(map, key)`).
The returned value is a tuple with the "get" value returned by
`fun` and a new map with the updated value under `key`.
## Examples
iex> Map.get_and_update(%{a: 1}, :a, fn current_value ->
...> {current_value, "new value!"}
...> end)
{1, %{a: "new value!"}}
iex> Map.get_and_update(%{a: 1}, :b, fn current_value ->
...> {current_value, "new value!"}
...> end)
{nil, %{b: "new value!", a: 1}}
iex> Map.get_and_update(%{a: 1}, :a, fn _ -> :pop end)
{1, %{}}
iex> Map.get_and_update(%{a: 1}, :b, fn _ -> :pop end)
{nil, %{a: 1}}
"""
@spec get_and_update(map, key, (value -> {get, value} | :pop)) :: {get, map} when get: term
def get_and_update(%{} = map, key, fun) when is_function(fun, 1) do
current =
case :maps.find(key, map) do
{:ok, value} -> value
:error -> nil
end
case fun.(current) do
{get, update} ->
{get, :maps.put(key, update, map)}
:pop ->
{current, :maps.remove(key, map)}
other ->
raise "the given function must return a two-element tuple or :pop, got: #{inspect(other)}"
end
end
def get_and_update(map, _key, _fun), do: :erlang.error({:badmap, map})
@doc """
Gets the value from `key` and updates it. Raises if there is no `key`.
Behaves exactly like `get_and_update/3`, but raises a `KeyError` exception if
`key` is not present in `map`.
## Examples
iex> Map.get_and_update!(%{a: 1}, :a, fn current_value ->
...> {current_value, "new value!"}
...> end)
{1, %{a: "new value!"}}
iex> Map.get_and_update!(%{a: 1}, :b, fn current_value ->
...> {current_value, "new value!"}
...> end)
** (KeyError) key :b not found in: %{a: 1}
iex> Map.get_and_update!(%{a: 1}, :a, fn _ ->
...> :pop
...> end)
{1, %{}}
"""
@spec get_and_update!(map, key, (value -> {get, value})) :: {get, map} | no_return when get: term
def get_and_update!(%{} = map, key, fun) when is_function(fun, 1) do
case :maps.find(key, map) do
{:ok, value} ->
case fun.(value) do
{get, update} ->
{get, :maps.put(key, update, map)}
:pop ->
{value, :maps.remove(key, map)}
other ->
raise "the given function must return a two-element tuple or :pop, got: #{inspect(other)}"
end
:error ->
raise KeyError, term: map, key: key
end
end
def get_and_update!(map, _key, _fun), do: :erlang.error({:badmap, map})
@doc """
Converts a `struct` to map.
It accepts the struct module or a struct itself and
simply removes the `__struct__` field from the given struct
or from a new struct generated from the given module.
## Example
defmodule User do
defstruct [:name]
end
Map.from_struct(User)
#=> %{name: nil}
Map.from_struct(%User{name: "john"})
#=> %{name: "john"}
"""
@spec from_struct(atom | struct) :: map
def from_struct(struct) when is_atom(struct) do
:maps.remove(:__struct__, struct.__struct__)
end
def from_struct(%{__struct__: _} = struct) do
:maps.remove(:__struct__, struct)
end
@doc """
Checks if two maps are equal.
Two maps are considered to be equal if they contain
the same keys and those keys contain the same values.
## Examples
iex> Map.equal?(%{a: 1, b: 2}, %{b: 2, a: 1})
true
iex> Map.equal?(%{a: 1, b: 2}, %{b: 1, a: 2})
false
"""
@spec equal?(map, map) :: boolean
def equal?(%{} = map1, %{} = map2), do: map1 === map2
@doc false
# TODO: Remove on 2.0
def size(map) do
IO.warn "Map.size/1 is deprecated, please use Kernel.map_size/1"
map_size(map)
end
end
# source: lib/elixir/lib/map.ex
defmodule Plug.Parsers do
defmodule RequestTooLargeError do
@moduledoc """
Error raised when the request is too large.
"""
defexception message: "the request is too large. If you are willing to process " <>
"larger requests, please give a :length to Plug.Parsers",
plug_status: 413
end
defmodule UnsupportedMediaTypeError do
@moduledoc """
Error raised when the request body cannot be parsed.
"""
defexception media_type: nil, plug_status: 415
def message(exception) do
"unsupported media type #{exception.media_type}"
end
end
defmodule BadEncodingError do
@moduledoc """
Raised when the request body contains bad encoding.
"""
defexception message: nil, plug_status: 415
end
defmodule ParseError do
@moduledoc """
Error raised when the request body is malformed.
"""
defexception exception: nil, plug_status: 400
def message(%{exception: exception}) do
"malformed request, a #{inspect exception.__struct__} exception was raised " <>
"with message #{inspect(Exception.message(exception))}"
end
end
@moduledoc """
A plug for parsing the request body.
This module also specifies a behaviour that all the parsers to be used with
Plug should adopt.
## Options
* `:parsers` - a list of modules to be invoked for parsing.
These modules need to implement the behaviour outlined in
this module.
* `:pass` - an optional list of MIME type strings that are allowed
to pass through. Any MIME type not handled by a parser and not explicitly
listed in `:pass` will raise `UnsupportedMediaTypeError`. For example:
* `["*/*"]` - never raises
* `["text/html", "application/*"]` - doesn't raise for those values
* `[]` - always raises (default)
All options supported by `Plug.Conn.read_body/2` are also supported here (for
example the `:length` option which specifies the max body length to read) and
are passed to the underlying call to `Plug.Conn.read_body/2`.
This plug also fetches query params in the connection through
`Plug.Conn.fetch_query_params/2`.
Once a connection goes through this plug, it will have `:body_params` set to
the map of params parsed by one of the parsers listed in `:parsers` and
`:params` set to the result of merging the `:body_params` and `:query_params`.
This plug will raise `Plug.Parsers.UnsupportedMediaTypeError` by default if
the request cannot be parsed by any of the given types and the MIME type has
not been explicitly accepted with the `:pass` option.
`Plug.Parsers.RequestTooLargeError` will be raised if the request goes over
the given limit.
Parsers may raise a `Plug.Parsers.ParseError` if the request has a malformed
body.
This plug only parses the body if the request method is one of the following:
* `POST`
* `PUT`
* `PATCH`
* `DELETE`
For requests with a different request method, this plug will only fetch the
query params.
## Examples
plug Plug.Parsers, parsers: [:urlencoded, :multipart]
plug Plug.Parsers, parsers: [:urlencoded, :json],
pass: ["text/*"],
json_decoder: Poison
## Built-in parsers
Plug ships with the following parsers:
* `Plug.Parsers.URLENCODED` - parses `application/x-www-form-urlencoded`
requests (can be used as `:urlencoded` as well in the `:parsers` option)
* `Plug.Parsers.MULTIPART` - parses `multipart/form-data` and
`multipart/mixed` requests (can be used as `:multipart` as well in the
`:parsers` option)
* `Plug.Parsers.JSON` - parses `application/json` requests with the given
`:json_decoder` (can be used as `:json` as well in the `:parsers` option)
## File handling
If a file is uploaded via any of the parsers, Plug will
stream the uploaded contents to a file in a temporary directory in order to
avoid loading the whole file into memory. For such, the `:plug` application
needs to be started in order for file uploads to work. More details on how the
uploaded file is handled can be found in the documentation for `Plug.Upload`.
When a file is uploaded, the request parameter that identifies that file will
be a `Plug.Upload` struct with information about the uploaded file (e.g.
filename and content type) and about where the file is stored.
The temporary directory where files are streamed to can be customized by
setting the `PLUG_TMPDIR` environment variable on the host system. If
`PLUG_TMPDIR` isn't set, Plug will look at some environment
variables which usually hold the value of the system's temporary directory
(like `TMPDIR` or `TMP`). If no value is found in any of those variables,
`/tmp` is used as a default.
"""
alias Plug.Conn
@doc """
Attempts to parse the connection's request body given the content-type type and
subtype and the headers.
The arguments are:
* the `Plug.Conn` connection
* `type`, the content-type type (e.g., `"x-sample"` for the
`"x-sample/json"` content-type)
* `subtype`, the content-type subtype (e.g., `"json"` for the
`"x-sample/json"` content-type)
* `opts`, the list of options passed to the `Plug.Parsers` plug
This function should return:
* `{:ok, body_params, conn}` if the parser is able to handle the given
content-type; `body_params` should be a map
* `{:next, conn}` if the next parser should be invoked
* `{:error, :too_large, conn}` if the request goes over the given limit
"""
@callback parse(conn :: Conn.t, type :: binary, subtype :: binary,
headers :: Keyword.t, opts :: Keyword.t) ::
{:ok, Conn.params, Conn.t} |
{:error, :too_large, Conn.t} |
{:next, Conn.t}
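# A hedged sketch of a parser implementing this behaviour; the module name
# and the "text/csv" handling are illustrative only:
#
#     defmodule MyApp.Parsers.CSV do
#       @behaviour Plug.Parsers
#
#       def parse(conn, "text", "csv", _headers, opts) do
#         {:ok, body, conn} = Plug.Conn.read_body(conn, opts)
#         {:ok, %{"_csv" => String.split(body, "\n")}, conn}
#       end
#
#       def parse(conn, _type, _subtype, _headers, _opts), do: {:next, conn}
#     end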
@behaviour Plug
@methods ~w(POST PUT PATCH DELETE)
def init(opts) do
parsers = Keyword.get(opts, :parsers) || raise_missing_parsers()
opts
|> Keyword.put(:parsers, convert_parsers(parsers))
|> Keyword.put_new(:length, 8_000_000)
|> Keyword.put_new(:pass, [])
end
defp raise_missing_parsers do
raise ArgumentError, "Plug.Parsers expects a set of parsers to be given in :parsers"
end
defp convert_parsers(parsers) do
for parser <- parsers do
case Atom.to_string(parser) do
"Elixir." <> _ -> parser
reference -> Module.concat(Plug.Parsers, String.upcase(reference))
end
end
end
def call(%{req_headers: req_headers, method: method,
body_params: %Plug.Conn.Unfetched{}} = conn, opts) when method in @methods do
conn = Conn.fetch_query_params(conn)
case List.keyfind(req_headers, "content-type", 0) do
{"content-type", ct} ->
case Conn.Utils.content_type(ct) do
{:ok, type, subtype, headers} ->
reduce(conn, Keyword.fetch!(opts, :parsers), type, subtype, headers, opts)
:error ->
merge_params(conn, %{})
end
nil ->
merge_params(conn, %{})
end
end
def call(%{body_params: body_params} = conn, _opts) do
merge_params(conn, make_empty_if_unfetched(body_params))
end
defp reduce(conn, [h|t], type, subtype, headers, opts) do
case h.parse(conn, type, subtype, headers, opts) do
{:ok, body, conn} ->
merge_params(conn, body)
{:next, conn} ->
reduce(conn, t, type, subtype, headers, opts)
{:error, :too_large, _conn} ->
raise RequestTooLargeError
end
end
defp reduce(conn, [], type, subtype, _headers, opts) do
ensure_accepted_mimes(conn, type, subtype, Keyword.fetch!(opts, :pass))
end
defp ensure_accepted_mimes(conn, _type, _subtype, ["*/*"]), do: conn
defp ensure_accepted_mimes(conn, type, subtype, pass) do
if "#{type}/#{subtype}" in pass || "#{type}/*" in pass do
conn
else
raise UnsupportedMediaTypeError, media_type: "#{type}/#{subtype}"
end
end
defp merge_params(%{params: params, path_params: path_params} = conn, body_params) do
params = make_empty_if_unfetched(params)
query_params = fetch_query_params(conn)
params = query_params |> Map.merge(params) |> Map.merge(body_params) |> Map.merge(path_params)
%{conn | params: params, query_params: query_params, body_params: body_params}
end
defp fetch_query_params(%{query_params: %Plug.Conn.Unfetched{}, query_string: query_string}) do
Plug.Conn.Utils.validate_utf8!(query_string, Plug.Conn.InvalidQueryError, "query string")
Plug.Conn.Query.decode(query_string)
end
defp fetch_query_params(%{query_params: query_params}) do
query_params
end
defp make_empty_if_unfetched(%Plug.Conn.Unfetched{}), do: %{}
defp make_empty_if_unfetched(params), do: params
end
# source: lib/plug/parsers.ex
defmodule Speakeasy.LoadResource do
@moduledoc """
Loads a resource into the speakeasy context:
```
%Absinthe.Resolution{context: %{speakeasy: %Speakeasy.Context{resource: your_resource}}}
```
See the [README](readme.html) for a complete example in a Absinthe Schema.
"""
@behaviour Absinthe.Middleware
defmodule UnexpectedLoadingResponse do
defexception [:message, :ref]
end
@doc """
Handles loading a resource or resources and storing them in the `Speakeasy.Context` for later resolving.
Callback functions must return one of: `any | {:ok, any} | {:error, error} | nil`. This effectively matches the return signatures of Phoenix contexts.
## Examples
Loading a resource with a 1-arity function will receive the Absinthe arguments:
object :post_mutations do
@desc "Create post"
field :create_post, type: :post do
arg(:name, non_null(:string))
middleware(Speakeasy.Authn)
middleware(Speakeasy.LoadResource, fn(attrs) -> MyApp.Posts.create_post(attrs) end)
end
end
Loading a resource with a 2-arity function will receive the Absinthe arguments and the `SpeakEasy` current user:
object :post_mutations do
@desc "Create post"
field :create_post, type: :post do
arg(:name, non_null(:string))
middleware(Speakeasy.Authn)
middleware(Speakeasy.LoadResource, fn(attrs, user) -> MyApp.Posts.create_post(attrs, user) end)
end
end
"""
@impl true
def call(%{state: :unresolved} = res, fun) when is_function(fun), do: call(res, loader: fun)
def call(%{state: :unresolved} = res, opts) when is_list(opts) do
options = Enum.into(opts, %{})
call(res, options)
end
def call(%{state: :unresolved, arguments: args, context: ctx} = res, %{loader: loader}) do
case get_resource(loader, args, ctx[:speakeasy].user, ctx) do
%{} = resource ->
Speakeasy.Context.add_resource(res, resource)
{:ok, resource} ->
Speakeasy.Context.add_resource(res, resource)
{:error, reason} ->
Absinthe.Resolution.put_result(res, {:error, reason})
nil ->
Absinthe.Resolution.put_result(res, {:error, :not_found})
ref ->
raise UnexpectedLoadingResponse,
message:
"Unexpected response from LoadResource function. Expected `{:ok, resource}` | `{:error, reason}`",
ref: ref
end
end
def call(%{state: :unresolved}, %{}), do: raise(ArgumentError, message: "`:loader` is required")
def call(res, _), do: res
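# Dispatch on the loader's arity:
# 3 -> (args, user, context), 2 -> (args, user), 1 -> (args), 0 -> ().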
defp get_resource(fun, args, user, ctx) when is_function(fun, 3), do: fun.(args, user, ctx)
defp get_resource(fun, args, user, _ctx) when is_function(fun, 2), do: fun.(args, user)
defp get_resource(fun, args, _user, _ctx) when is_function(fun, 1), do: fun.(args)
defp get_resource(fun, _args, _user, _ctx) when is_function(fun, 0), do: fun.()
end
# source: lib/speakeasy/load_resource.ex
defmodule ExEntropy do
@doc """
Compute the Shannon entropy of a binary value.
reference:
- <http://stackoverflow.com/questions/990477/how-to-calculate-the-entropy-of-a-file>
- <https://en.wikipedia.org/wiki/Entropy_(information_theory)>
"""
@spec shannon_entropy(binary, integer) :: float
def shannon_entropy(value, exponent) when is_binary(value) do
# convert the binary value into a list with exponent as one of [1, 8]
val_list = gen_val_list(value, exponent)
val_range = round(:math.pow(2, exponent) - 1)
val_accumulator = for x <- 0..val_range, into: %{}, do: {x, 0}
# accumulate occurrence counts
accumulated_occurrences = count_occurrences(val_accumulator, val_list)
# transform the map of occurrence counts into a list
ao_list = Enum.map(accumulated_occurrences, fn {_k, v} -> v end)
# compute Shannon's entropy
shannon_entropy_0(0, length(val_list), length(ao_list), ao_list)
end
def shannon_entropy(value) when is_binary(value) do
# byte blocks by default
shannon_entropy(value, 8)
end
defp shannon_entropy_0(entropy, _block_count, _block_range, []) do
entropy
end
defp shannon_entropy_0(entropy, block_count, block_range, [h | t]) do
case h do
0 ->
shannon_entropy_0(entropy, block_count, block_range, t)
_ ->
p = 1.0 * h / block_count
updated_entropy = entropy - p * (:math.log(p) / :math.log(block_range))
shannon_entropy_0(updated_entropy, block_count, block_range, t)
end
end
defp count_occurrences(accumulator, []) do
  accumulator
end
defp count_occurrences(accumulator, [h | t]) do
  c_0 = Map.get(accumulator, h, 0)
  count_occurrences(Map.put(accumulator, h, c_0 + 1), t)
end
defp gen_val_list(value, exponent) do
case exponent do
# bits
1 ->
for <<x::1 <- value>>, do: x
# bytes
8 ->
for <<x::8 <- value>>, do: x
# 10-bit blocks
10 ->
for <<x::10 <- value>>, do: x
# 16-bit blocks
16 ->
for <<x::16 <- value>>, do: x
# 20-bit blocks
20 ->
for <<x::20 <- value>>, do: x
end
end
end
# source: lib/ex_crypto/ex_entropy.ex
defmodule ResxJSON.Encoder do
@moduledoc """
Encode data resources into strings of JSON.
### Media Types
Only `x.erlang.native` types are valid. This can either be a subtype or suffix.
Valid: `application/x.erlang.native`, `application/geo+x.erlang.native`.
If an error is being returned when attempting to open a data URI due to
`{ :invalid_reference, "invalid media type: \#{type}" }`, the MIME type
will need to be added to the config.
To add additional media types to be encoded, that can be done by configuring
the `:native_types` option.
config :resx_json,
native_types: [
{ "application/x.my-type", &("application/\#{&1}"), &(&1) }
]
The `:native_types` field should contain a list of 3 element tuples with the
format `{ pattern :: String.pattern | Regex.t, (replacement_type :: String.t -> replacement :: String.t), preprocessor :: (Resx.Resource.content -> Resx.Resource.content) }`.
The `pattern` and `replacement` are arguments to `String.replace/3`. While the
preprocessor performs any operations on the content before it is encoded.
The replacement becomes the new media type of the transformed resource. Nested
media types will be preserved. By default the current matches will be replaced
(where the `x.erlang.native` type part is), with the new type (currently `json`),
in order to denote that the content is now a JSON type. If this behaviour is not desired
simply override the match with `:native_types` for the media types that should
not be handled like this.
### Encoding
All literals are encoded using the `Poison` library.
The JSON format (final encoding type) is specified when calling transform,
by providing an atom to the `:format` option. This type is then used to infer
how the content should be encoded, as well as what type will be used for the
media type.
Resx.Resource.transform(resource, ResxJSON.Encoder, format: :json)
The current formats are:
* `:json` - This encodes the data into standard JSON. This is the default encoding format.
### Partial Streams
JSON may be built up from partial data, by using the functions provided in
`ResxJSON.Partial`. Note that this is only applied to content streams.
Any non-partials literals in the stream will be encoded as-is.
A stream with the shape of:
# assumes ResxJSON.Partial was imported
[
object(),
key("f"), key("oo"), key(["ba", "r"], :end), 3,
key("a", :end), value("b", :end),
key("c", :end), array(),
object(),
key("foo", :end), [1, 2, 3],
object(:end),
array(:end),
object(:end)
]
Will result in the following JSON (if `:json` format was used;
whitespace/indentation was added, normally would be packed):
```json
{
"foobar": 3,
"a": "b",
"c": [
{
"foo": [1, 2, 3]
}
]
}
```
#### Codepoints
Values or keys must contain the full codepoint, partial codepoints (when
the bytes that make up a codepoint are split) may result in an error or
incorrect encoding.
e.g. A stream consisting of `[value(["\\xf0\\x9f"]), value(["\\x8d\\x95"], :end)]`
will raise an `UnicodeConversionError`, the intended character (`"🍕"`) must be
included in the same value partial: `value(["\\xf0\\x9f", "\\x8d\\x95"], :end)`.
However if you have two separate codepoints such as `[value(["e"]), value(["́"], :end)]`
then this will correctly produce the intended character (`"é"`).
"""
use Resx.Transformer
alias Resx.Resource.Content
@impl Resx.Transformer
def transform(resource = %{ content: content }, opts) do
case opts[:format] || :json do
:json ->
case validate_type(content.type, "json") do
{ :ok, { type, preprocessor } } ->
content = Content.Stream.new(Callback.call(preprocessor, [content]))
{ :ok, %{ resource | content: %{ content | type: type, data: encode(content) } } }
error -> error
end
_ -> { :error, { :internal, "Unknown encoding format: #{inspect(opts[:format])}" } }
end
end
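# The accumulator threaded through encode/2 is {pending, open?}: `pending`
# is the separator (or prefix) to emit before the next fragment, and `open?`
# tracks whether a partial key/value is still being built across stream
# elements.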
defp encode(%ResxJSON.Partial{ literal: literal, separator: separator, element: true, prefix: prefix, suffix: suffix, end: true }, { previous, false }), do: { previous <> prefix <> literal <> suffix, { separator, false } }
defp encode(%ResxJSON.Partial{ literal: literal, separator: separator, element: true, prefix: prefix }, { previous, false }), do: { previous <> prefix <> literal, { separator, true } }
defp encode(%ResxJSON.Partial{ literal: literal, separator: separator, element: true, suffix: suffix, end: true }, { previous, true }), do: { previous <> literal <> suffix, { separator, false } }
defp encode(%ResxJSON.Partial{ literal: literal, separator: separator, element: true }, { previous, true }), do: { previous <> literal, { separator, true } }
defp encode(%ResxJSON.Partial{ literal: literal, separator: separator, end: true }, _), do: { literal, { separator, false } }
defp encode(%ResxJSON.Partial{ literal: literal, separator: separator }, _), do: { literal, { separator, true } }
defp encode(%ResxJSON.Partial.Sequence{ nodes: nodes }, previous) do
Enum.map_reduce(nodes, previous, &encode/2)
end
defp encode(data, { previous, _ }), do: { previous <> Poison.encode!(data), { ",", false } }
@doc false
def encode(data) do
Stream.transform(data, { "", false }, fn
node, acc ->
case encode(node, acc) do
{ json, acc } when is_list(json) -> { json, acc }
{ json, acc } -> { [json], acc }
end
end)
end
defp validate_type(types, format) do
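# Check the user-configured :native_types matchers first, then fall back
# to the built-in matcher for x.erlang.native media types.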
cond do
new_type = validate_type(types, Application.get_env(:resx_json, :native_types, []), format) -> { :ok, new_type }
new_type = validate_type(types, [{ ~r/\/(x\.erlang\.native(\+x\.erlang\.native)?|(.*?\+)x\.erlang\.native)(;|$)/, &("/\\3#{&1}\\4"), &(&1) }], format) -> { :ok, new_type }
true -> { :error, { :internal, "Invalid resource type" } }
end
end
defp validate_type(_, [], _), do: nil
defp validate_type(type_list = [type|types], [{ match, replacement, preprocessor }|matches], format) do
if type =~ match do
{ [String.replace(type, match, Callback.call(replacement, [format]))|types], preprocessor }
else
validate_type(type_list, matches, format)
end
end
end
|
lib/resx_json/encoder.ex
| 0.895247 | 0.744052 |
encoder.ex
|
starcoder
|
defmodule Base do
import Bitwise
@moduledoc """
This module provides data encoding and decoding functions
according to [RFC 4648](http://tools.ietf.org/html/rfc4648).
This document defines the commonly used base 16, base 32, and base
64 encoding schemes.
## Base 16 alphabet
| Value | Encoding | Value | Encoding | Value | Encoding | Value | Encoding |
|------:|---------:|------:|---------:|------:|---------:|------:|---------:|
| 0| 0| 4| 4| 8| 8| 12| C|
| 1| 1| 5| 5| 9| 9| 13| D|
| 2| 2| 6| 6| 10| A| 14| E|
| 3| 3| 7| 7| 11| B| 15| F|
## Base 32 alphabet
| Value | Encoding | Value | Encoding | Value | Encoding | Value | Encoding |
|------:|---------:|------:|---------:|------:|---------:|------:|---------:|
| 0| A| 9| J| 18| S| 27| 3|
| 1| B| 10| K| 19| T| 28| 4|
| 2| C| 11| L| 20| U| 29| 5|
| 3| D| 12| M| 21| V| 30| 6|
| 4| E| 13| N| 22| W| 31| 7|
| 5| F| 14| O| 23| X| | |
| 6| G| 15| P| 24| Y| (pad)| =|
| 7| H| 16| Q| 25| Z| | |
| 8| I| 17| R| 26| 2| | |
## Base 32 (extended hex) alphabet
| Value | Encoding | Value | Encoding | Value | Encoding | Value | Encoding |
|------:|---------:|------:|---------:|------:|---------:|------:|---------:|
| 0| 0| 9| 9| 18| I| 27| R|
| 1| 1| 10| A| 19| J| 28| S|
| 2| 2| 11| B| 20| K| 29| T|
| 3| 3| 12| C| 21| L| 30| U|
| 4| 4| 13| D| 22| M| 31| V|
| 5| 5| 14| E| 23| N| | |
| 6| 6| 15| F| 24| O| (pad)| =|
| 7| 7| 16| G| 25| P| | |
| 8| 8| 17| H| 26| Q| | |
## Base 64 alphabet
| Value | Encoding | Value | Encoding | Value | Encoding | Value | Encoding |
|------:|---------:|------:|---------:|------:|---------:|------:|---------:|
| 0| A| 17| R| 34| i| 51| z|
| 1| B| 18| S| 35| j| 52| 0|
| 2| C| 19| T| 36| k| 53| 1|
| 3| D| 20| U| 37| l| 54| 2|
| 4| E| 21| V| 38| m| 55| 3|
| 5| F| 22| W| 39| n| 56| 4|
| 6| G| 23| X| 40| o| 57| 5|
| 7| H| 24| Y| 41| p| 58| 6|
| 8| I| 25| Z| 42| q| 59| 7|
| 9| J| 26| a| 43| r| 60| 8|
| 10| K| 27| b| 44| s| 61| 9|
| 11| L| 28| c| 45| t| 62| +|
| 12| M| 29| d| 46| u| 63| /|
| 13| N| 30| e| 47| v| | |
| 14| O| 31| f| 48| w| (pad)| =|
| 15| P| 32| g| 49| x| | |
| 16| Q| 33| h| 50| y| | |
## Base 64 (URL and filename safe) alphabet
| Value | Encoding | Value | Encoding | Value | Encoding | Value | Encoding |
|------:|---------:|------:|---------:|------:|---------:|------:|---------:|
| 0| A| 17| R| 34| i| 51| z|
| 1| B| 18| S| 35| j| 52| 0|
| 2| C| 19| T| 36| k| 53| 1|
| 3| D| 20| U| 37| l| 54| 2|
| 4| E| 21| V| 38| m| 55| 3|
| 5| F| 22| W| 39| n| 56| 4|
| 6| G| 23| X| 40| o| 57| 5|
| 7| H| 24| Y| 41| p| 58| 6|
| 8| I| 25| Z| 42| q| 59| 7|
| 9| J| 26| a| 43| r| 60| 8|
| 10| K| 27| b| 44| s| 61| 9|
| 11| L| 28| c| 45| t| 62| -|
| 12| M| 29| d| 46| u| 63| _|
| 13| N| 30| e| 47| v| | |
| 14| O| 31| f| 48| w| (pad)| =|
| 15| P| 32| g| 49| x| | |
| 16| Q| 33| h| 50| y| | |
"""
b16_alphabet = Enum.with_index '0123456789ABCDEF'
b64_alphabet = Enum.with_index 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
b64url_alphabet = Enum.with_index 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'
b32_alphabet = Enum.with_index 'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
b32hex_alphabet = Enum.with_index '0123456789ABCDEFGHIJKLMNOPQRSTUV'
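# Compile-time generation: for each alphabet, emit one encode and one decode
# function clause per {encoding, value} pair, plus a catch-all decode clause
# that raises on non-alphabet bytes.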
Enum.each [{:enc16, :dec16, b16_alphabet},
{:enc32, :dec32, b32_alphabet},
{:enc64, :dec64, b64_alphabet},
{:enc64url, :dec64url, b64url_alphabet},
{:enc32hex, :dec32hex, b32hex_alphabet}], fn({enc, dec, alphabet}) ->
for {encoding, value} <- alphabet do
defp unquote(enc)(unquote(value)), do: unquote(encoding)
defp unquote(dec)(unquote(encoding)), do: unquote(value)
end
defp unquote(dec)(c) do
raise ArgumentError, "non-alphabet digit found: #{inspect <<c>>, binaries: :as_strings} (byte #{c})"
end
end
@compile {:inline, from_upper: 1, from_lower: 1, from_mixed: 1,
to_lower: 1, to_upper: 1, enc16: 1, dec16: 1,
enc32: 1, dec32: 1, enc32hex: 1, dec32hex: 1,
enc64: 1, dec64: 1, enc64url: 1, dec64url: 1}
defp to_lower(char) when char in ?A..?Z,
do: char + (?a - ?A)
defp to_lower(char),
do: char
defp to_upper(char), do: char
defp from_upper(char), do: char
defp from_lower(char) when char in ?a..?z,
do: char - (?a - ?A)
defp from_lower(char) when not char in ?A..?Z,
do: char
defp from_lower(char),
do: raise(ArgumentError, "non-alphabet digit found: \"#{<<char>>}\" (byte #{char})")
defp from_mixed(char) when char in ?a..?z,
do: char - (?a - ?A)
defp from_mixed(char),
do: char
@doc """
Encodes a binary string into a base 16 encoded string.
Accepts an atom `:upper` (default) for encoding to upper case characters or
`:lower` for lower case characters.
## Examples
iex> Base.encode16("foobar")
"666F6F626172"
iex> Base.encode16("foobar", case: :lower)
"666f6f626172"
"""
@spec encode16(binary) :: binary
@spec encode16(binary, Keyword.t) :: binary
def encode16(data, opts \\ []) when is_binary(data) do
case = Keyword.get(opts, :case, :upper)
do_encode16(case, data)
end
@doc """
Decodes a base 16 encoded string into a binary string.
Accepts an atom `:upper` (default) for decoding from upper case characters or
`:lower` for lower case characters. `:mixed` can be given for mixed case
characters.
## Examples
iex> Base.decode16("666F6F626172")
{:ok, "foobar"}
iex> Base.decode16("666f6f626172", case: :lower)
{:ok, "foobar"}
iex> Base.decode16("666f6F626172", case: :mixed)
{:ok, "foobar"}
"""
@spec decode16(binary) :: {:ok, binary} | :error
@spec decode16(binary, Keyword.t) :: {:ok, binary} | :error
def decode16(string, opts \\ []) do
{:ok, decode16!(string, opts)}
rescue
ArgumentError -> :error
end
@doc """
Decodes a base 16 encoded string into a binary string.
Accepts an atom `:upper` (default) for decoding from upper case characters or
`:lower` for lower case characters. `:mixed` can be given for mixed case
characters.
An `ArgumentError` exception is raised if the padding is incorrect or
a non-alphabet character is present in the string.
## Examples
iex> Base.decode16!("666F6F626172")
"foobar"
iex> Base.decode16!("666f6f626172", case: :lower)
"foobar"
iex> Base.decode16!("666f6F626172", case: :mixed)
"foobar"
"""
@spec decode16!(binary) :: binary
@spec decode16!(binary, Keyword.t) :: binary
def decode16!(string, opts \\ [])
def decode16!(string, opts) when is_binary(string) and rem(byte_size(string), 2) == 0 do
case = Keyword.get(opts, :case, :upper)
do_decode16(case, string)
end
def decode16!(string, _opts) when is_binary(string) do
raise ArgumentError, "odd-length string"
end
@doc """
Encodes a binary string into a base 64 encoded string.
## Examples
iex> Base.encode64("foobar")
"Zm9vYmFy"
"""
@spec encode64(binary) :: binary
def encode64(data) when is_binary(data) do
do_encode64(data)
end
@doc """
Decodes a base 64 encoded string into a binary string.
Accepts `ignore: :whitespace` option which will ignore all the
whitespace characters in the input string.
## Examples
iex> Base.decode64("Zm9vYmFy")
{:ok, "foobar"}
iex> Base.decode64("Zm9vYmFy\\n", ignore: :whitespace)
{:ok, "foobar"}
"""
@spec decode64(binary) :: {:ok, binary} | :error
@spec decode64(binary, Keyword.t) :: {:ok, binary} | :error
def decode64(string, opts \\ []) when is_binary(string) do
{:ok, decode64!(string, opts)}
rescue
ArgumentError -> :error
end
@doc """
Decodes a base 64 encoded string into a binary string.
Accepts `ignore: :whitespace` option which will ignore all the
whitespace characters in the input string.
An `ArgumentError` exception is raised if the padding is incorrect or
a non-alphabet character is present in the string.
## Examples
iex> Base.decode64!("Zm9vYmFy")
"foobar"
iex> Base.decode64!("Zm9vYmFy\\n", ignore: :whitespace)
"foobar"
"""
@spec decode64!(binary) :: binary
@spec decode64!(binary, Keyword.t) :: binary
def decode64!(string, opts \\ []) when is_binary(string) do
string |> filter_ignored(opts[:ignore]) |> do_decode64()
end
@doc """
Encodes a binary string into a base 64 encoded string with URL and filename
safe alphabet.
## Examples
iex> Base.url_encode64(<<255, 127, 254, 252>>)
"_3_-_A=="
"""
@spec url_encode64(binary) :: binary
def url_encode64(data) when is_binary(data) do
do_encode64url(data)
end
@doc """
Decodes a base 64 encoded string with URL and filename safe alphabet
into a binary string.
Accepts `ignore: :whitespace` option which will ignore all the
whitespace characters in the input string.
## Examples
iex> Base.url_decode64("_3_-_A==")
{:ok, <<255, 127, 254, 252>>}
iex> Base.url_decode64("_3_-_A==\\n", ignore: :whitespace)
{:ok, <<255, 127, 254, 252>>}
"""
@spec url_decode64(binary) :: {:ok, binary} | :error
@spec url_decode64(binary, Keyword.t) :: {:ok, binary} | :error
def url_decode64(string, opts \\ []) when is_binary(string) do
{:ok, url_decode64!(string, opts)}
rescue
ArgumentError -> :error
end
@doc """
Decodes a base 64 encoded string with URL and filename safe alphabet
into a binary string.
Accepts `ignore: :whitespace` option which will ignore all the
whitespace characters in the input string.
An `ArgumentError` exception is raised if the padding is incorrect or
a non-alphabet character is present in the string.
## Examples
iex> Base.url_decode64!("_3_-_A==")
<<255, 127, 254, 252>>
iex> Base.url_decode64!("_3_-_A==\\n", ignore: :whitespace)
<<255, 127, 254, 252>>
"""
@spec url_decode64!(binary) :: binary
@spec url_decode64!(binary, Keyword.t) :: binary
def url_decode64!(string, opts \\ []) when is_binary(string) do
string |> filter_ignored(opts[:ignore]) |> do_decode64url()
end
@doc """
Encodes a binary string into a base 32 encoded string.
Accepts an atom `:upper` (default) for encoding to upper case characters or
`:lower` for lower case characters.
## Examples
iex> Base.encode32("foobar")
"MZXW6YTBOI======"
iex> Base.encode32("foobar", case: :lower)
"mzxw6ytboi======"
"""
@spec encode32(binary) :: binary
@spec encode32(binary, Keyword.t) :: binary
def encode32(data, opts \\ []) when is_binary(data) do
case = Keyword.get(opts, :case, :upper)
do_encode32(case, data)
end
@doc """
Decodes a base 32 encoded string into a binary string.
Accepts an atom `:upper` (default) for decoding from upper case characters or
`:lower` for lower case characters. `:mixed` can be given for mixed case
characters.
## Examples
iex> Base.decode32("MZXW6YTBOI======")
{:ok, "foobar"}
iex> Base.decode32("mzxw6ytboi======", case: :lower)
{:ok, "foobar"}
iex> Base.decode32("mzXW6ytBOi======", case: :mixed)
{:ok, "foobar"}
"""
@spec decode32(binary) :: {:ok, binary} | :error
@spec decode32(binary, Keyword.t) :: {:ok, binary} | :error
def decode32(string, opts \\ []) do
{:ok, decode32!(string, opts)}
rescue
ArgumentError -> :error
end
@doc """
Decodes a base 32 encoded string into a binary string.
Accepts an atom `:upper` (default) for decoding from upper case characters or
`:lower` for lower case characters. `:mixed` can be given for mixed case
characters.
An `ArgumentError` exception is raised if the padding is incorrect or
a non-alphabet character is present in the string.
## Examples
iex> Base.decode32!("MZXW6YTBOI======")
"foobar"
iex> Base.decode32!("mzxw6ytboi======", case: :lower)
"foobar"
iex> Base.decode32!("mzXW6ytBOi======", case: :mixed)
"foobar"
"""
@spec decode32!(binary) :: binary
@spec decode32!(binary, Keyword.t) :: binary
def decode32!(string, opts \\ [])
def decode32!(string, opts) when is_binary(string) and rem(byte_size(string), 8) == 0 do
case = Keyword.get(opts, :case, :upper)
do_decode32(case, string)
end
def decode32!(string, _opts) when is_binary(string) do
raise ArgumentError, "incorrect padding"
end
@doc """
Encodes a binary string into a base 32 encoded string with an
extended hexadecimal alphabet.
Accepts an atom `:upper` (default) for encoding to upper case characters or
`:lower` for lower case characters.
## Examples
iex> Base.hex_encode32("foobar")
"CPNMUOJ1E8======"
iex> Base.hex_encode32("foobar", case: :lower)
"cpnmuoj1e8======"
"""
@spec hex_encode32(binary) :: binary
@spec hex_encode32(binary, Keyword.t) :: binary
def hex_encode32(data, opts \\ []) when is_binary(data) do
case = Keyword.get(opts, :case, :upper)
do_hex_encode32(case, data)
end
@doc """
Decodes a base 32 encoded string with extended hexadecimal alphabet
into a binary string.
Accepts an atom `:upper` (default) for decoding from upper case characters or
`:lower` for lower case characters. `:mixed` can be given for mixed case
characters.
## Examples
iex> Base.hex_decode32("CPNMUOJ1E8======")
{:ok, "foobar"}
iex> Base.hex_decode32("cpnmuoj1e8======", case: :lower)
{:ok, "foobar"}
iex> Base.hex_decode32("cpnMuOJ1E8======", case: :mixed)
{:ok, "foobar"}
"""
@spec hex_decode32(binary) :: {:ok, binary} | :error
@spec hex_decode32(binary, Keyword.t) :: {:ok, binary} | :error
def hex_decode32(string, opts \\ []) do
{:ok, hex_decode32!(string, opts)}
rescue
ArgumentError -> :error
end
@doc """
Decodes a base 32 encoded string with extended hexadecimal alphabet
into a binary string.
Accepts an atom `:upper` (default) for decoding from upper case characters or
`:lower` for lower case characters. `:mixed` can be given for mixed case
characters.
An `ArgumentError` exception is raised if the padding is incorrect or
a non-alphabet character is present in the string.
## Examples
iex> Base.hex_decode32!("CPNMUOJ1E8======")
"foobar"
iex> Base.hex_decode32!("cpnmuoj1e8======", case: :lower)
"foobar"
iex> Base.hex_decode32!("cpnMuOJ1E8======", case: :mixed)
"foobar"
"""
@spec hex_decode32!(binary) :: binary
@spec hex_decode32!(binary, Keyword.t) :: binary
def hex_decode32!(string, opts \\ [])
def hex_decode32!(string, opts) when is_binary(string) and rem(byte_size(string), 8) == 0 do
case = Keyword.get(opts, :case, :upper)
do_hex_decode32(case, string)
end
def hex_decode32!(string, _opts) when is_binary(string) do
raise ArgumentError, "incorrect padding"
end
defp filter_ignored(string, nil), do: string
defp filter_ignored(string, :whitespace) do
for <<c::8 <- string>>, not c in '\s\t\r\n', into: <<>>, do: <<c::8>>
end
defp do_encode16(_, <<>>), do: <<>>
defp do_encode16(:upper, data) do
for <<c::4 <- data>>, into: <<>>, do: <<enc16(c)::8>>
end
defp do_encode16(:lower, data) do
for <<c::4 <- data>>, into: <<>>, do: <<to_lower(enc16(c))::8>>
end
defp do_decode16(_, <<>>), do: <<>>
defp do_decode16(:upper, string) when rem(byte_size(string), 2) == 0 do
for <<c1::8, c2::8 <- string>>, into: <<>> do
<<dec16(c1)::4, dec16(c2)::4>>
end
end
defp do_decode16(:lower, string) when rem(byte_size(string), 2) == 0 do
for <<c1::8, c2::8 <- string>>, into: <<>> do
<<dec16(from_lower(c1))::4, dec16(from_lower(c2))::4>>
end
end
defp do_decode16(:mixed, string) when rem(byte_size(string), 2) == 0 do
for <<c1::8, c2::8 <- string>>, into: <<>> do
<<dec16(from_mixed(c1))::4, dec16(from_mixed(c2))::4>>
end
end
defp do_encode64(<<>>), do: <<>>
defp do_encode64(data) do
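# Encode the largest 3-byte-aligned prefix in one pass; the remaining
# 1 or 2 bytes are handled below and padded with "=".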
split = 3 * div(byte_size(data), 3)
<<main::size(split)-binary, rest::binary>> = data
main = for <<c::6 <- main>>, into: <<>>, do: <<enc64(c)::8>>
case rest do
<<c1::6, c2::6, c3::4>> ->
<<main::binary, enc64(c1)::8, enc64(c2)::8, enc64(bsl(c3, 2))::8, ?=>>
<<c1::6, c2::2>> ->
<<main::binary, enc64(c1)::8, enc64(bsl(c2, 4))::8, ?=, ?=>>
<<>> ->
main
end
end
defp do_decode64(<<>>), do: <<>>
defp do_decode64(string) when rem(byte_size(string), 4) == 0 do
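# Decode all but the final 4-byte group in one pass; the last group is
# matched separately since it may carry "=" padding.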
split = byte_size(string) - 4
<<main::size(split)-binary, rest::binary>> = string
main = for <<c::8 <- main>>, into: <<>>, do: <<dec64(c)::6>>
case rest do
<<c1::8, c2::8, ?=, ?=>> ->
<<main::binary, dec64(c1)::6, bsr(dec64(c2), 4)::2>>
<<c1::8, c2::8, c3::8, ?=>> ->
<<main::binary, dec64(c1)::6, dec64(c2)::6, bsr(dec64(c3), 2)::4>>
<<c1::8, c2::8, c3::8, c4::8>> ->
<<main::binary, dec64(c1)::6, dec64(c2)::6, dec64(c3)::6, dec64(c4)::6>>
<<>> ->
main
end
end
defp do_decode64(_) do
raise ArgumentError, "incorrect padding"
end
defp do_encode64url(<<>>), do: <<>>
defp do_encode64url(data) do
split = 3 * div(byte_size(data), 3)
<<main::size(split)-binary, rest::binary>> = data
main = for <<c::6 <- main>>, into: <<>>, do: <<enc64url(c)::8>>
case rest do
<<c1::6, c2::6, c3::4>> ->
<<main::binary, enc64url(c1)::8, enc64url(c2)::8, enc64url(bsl(c3, 2))::8, ?=>>
<<c1::6, c2::2>> ->
<<main::binary, enc64url(c1)::8, enc64url(bsl(c2, 4))::8, ?=, ?=>>
<<>> ->
main
end
end
defp do_decode64url(<<>>), do: <<>>
defp do_decode64url(string) when rem(byte_size(string), 4) == 0 do
split = byte_size(string) - 4
<<main::size(split)-binary, rest::binary>> = string
main = for <<c::8 <- main>>, into: <<>>, do: <<dec64url(c)::6>>
case rest do
<<c1::8, c2::8, ?=, ?=>> ->
<<main::binary, dec64url(c1)::6, bsr(dec64url(c2), 4)::2>>
<<c1::8, c2::8, c3::8, ?=>> ->
<<main::binary, dec64url(c1)::6, dec64url(c2)::6, bsr(dec64url(c3), 2)::4>>
<<c1::8, c2::8, c3::8, c4::8>> ->
<<main::binary, dec64url(c1)::6, dec64url(c2)::6, dec64url(c3)::6, dec64url(c4)::6>>
<<>> ->
main
end
end
defp do_decode64url(_) do
raise ArgumentError, "incorrect padding"
end
defp do_encode32(_, <<>>), do: <<>>
for {case, fun} <- [upper: :to_upper, lower: :to_lower] do
defp do_encode32(unquote(case), data) do
split = 5 * div(byte_size(data), 5)
<<main::size(split)-binary, rest::binary>> = data
main = for <<c::5 <- main>>, into: <<>>, do: <<unquote(fun)(enc32(c))::8>>
case rest do
<<c1::5, c2::5, c3::5, c4::5, c5::5, c6::5, c7::2>> ->
<<main::binary,
unquote(fun)(enc32(c1))::8, unquote(fun)(enc32(c2))::8,
unquote(fun)(enc32(c3))::8, unquote(fun)(enc32(c4))::8,
unquote(fun)(enc32(c5))::8, unquote(fun)(enc32(c6))::8,
unquote(fun)(enc32(bsl(c7, 3)))::8, ?=>>
<<c1::5, c2::5, c3::5, c4::5, c5::4>> ->
<<main::binary,
unquote(fun)(enc32(c1))::8, unquote(fun)(enc32(c2))::8,
unquote(fun)(enc32(c3))::8, unquote(fun)(enc32(c4))::8,
unquote(fun)(enc32(bsl(c5, 1)))::8, ?=, ?=, ?=>>
<<c1::5, c2::5, c3::5, c4::1>> ->
<<main::binary,
unquote(fun)(enc32(c1))::8, unquote(fun)(enc32(c2))::8,
unquote(fun)(enc32(c3))::8, unquote(fun)(enc32(bsl(c4, 4)))::8,
?=, ?=, ?=, ?=>>
<<c1::5, c2::3>> ->
<<main::binary,
unquote(fun)(enc32(c1))::8, unquote(fun)(enc32(bsl(c2, 2)))::8, ?=, ?=,
?=, ?=, ?=, ?=>>
<<>> ->
main
end
end
end
defp do_decode32(_, <<>>), do: <<>>
for {case, fun} <- [upper: :from_upper, lower: :from_lower, mixed: :from_mixed] do
defp do_decode32(unquote(case), string) do
split = byte_size(string) - 8
<<main::size(split)-binary, rest::binary>> = string
main = for <<c::8 <- main>>, into: <<>>, do: <<dec32(unquote(fun)(c))::5>>
case rest do
<<c1::8, c2::8, ?=, ?=, ?=, ?=, ?=, ?=>> ->
<<main::binary, dec32(unquote(fun)(c1))::5,
bsr(dec32(unquote(fun)(c2)), 2)::3>>
<<c1::8, c2::8, c3::8, c4::8, ?=, ?=, ?=, ?=>> ->
<<main::binary,
dec32(unquote(fun)(c1))::5, dec32(unquote(fun)(c2))::5,
dec32(unquote(fun)(c3))::5, bsr(dec32(unquote(fun)(c4)), 4)::1>>
<<c1::8, c2::8, c3::8, c4::8, c5::8, ?=, ?=, ?=>> ->
<<main::binary,
dec32(unquote(fun)(c1))::5, dec32(unquote(fun)(c2))::5,
dec32(unquote(fun)(c3))::5, dec32(unquote(fun)(c4))::5,
bsr(dec32(unquote(fun)(c5)), 1)::4>>
<<c1::8, c2::8, c3::8, c4::8, c5::8, c6::8, c7::8, ?=>> ->
<<main::binary,
dec32(unquote(fun)(c1))::5, dec32(unquote(fun)(c2))::5,
dec32(unquote(fun)(c3))::5, dec32(unquote(fun)(c4))::5,
dec32(unquote(fun)(c5))::5, dec32(unquote(fun)(c6))::5,
bsr(dec32(unquote(fun)(c7)), 3)::2>>
<<c1::8, c2::8, c3::8, c4::8, c5::8, c6::8, c7::8, c8::8>> ->
<<main::binary,
dec32(unquote(fun)(c1))::5, dec32(unquote(fun)(c2))::5,
dec32(unquote(fun)(c3))::5, dec32(unquote(fun)(c4))::5,
dec32(unquote(fun)(c5))::5, dec32(unquote(fun)(c6))::5,
dec32(unquote(fun)(c7))::5, dec32(unquote(fun)(c8))::5>>
<<>> ->
main
end
end
end
defp do_hex_encode32(_, <<>>), do: <<>>
for {case, fun} <- [upper: :to_upper, lower: :to_lower] do
defp do_hex_encode32(unquote(case), data) do
split = 5 * div(byte_size(data), 5)
<<main::size(split)-binary, rest::binary>> = data
main = for <<c::5 <- main>>, into: <<>>, do: <<unquote(fun)(enc32hex(c))::8>>
case rest do
<<c1::5, c2::5, c3::5, c4::5, c5::5, c6::5, c7::2>> ->
<<main::binary,
unquote(fun)(enc32hex(c1))::8, unquote(fun)(enc32hex(c2))::8,
unquote(fun)(enc32hex(c3))::8, unquote(fun)(enc32hex(c4))::8,
unquote(fun)(enc32hex(c5))::8, unquote(fun)(enc32hex(c6))::8,
unquote(fun)(enc32hex(bsl(c7, 3)))::8, ?=>>
<<c1::5, c2::5, c3::5, c4::5, c5::4>> ->
<<main::binary,
unquote(fun)(enc32hex(c1))::8, unquote(fun)(enc32hex(c2))::8,
unquote(fun)(enc32hex(c3))::8, unquote(fun)(enc32hex(c4))::8,
unquote(fun)(enc32hex(bsl(c5, 1)))::8, ?=, ?=, ?=>>
<<c1::5, c2::5, c3::5, c4::1>> ->
<<main::binary,
unquote(fun)(enc32hex(c1))::8, unquote(fun)(enc32hex(c2))::8,
unquote(fun)(enc32hex(c3))::8, unquote(fun)(enc32hex(bsl(c4, 4)))::8,
?=, ?=, ?=, ?=>>
<<c1::5, c2::3>> ->
<<main::binary,
unquote(fun)(enc32hex(c1))::8, unquote(fun)(enc32hex(bsl(c2, 2)))::8, ?=, ?=,
?=, ?=, ?=, ?=>>
<<>> ->
main
end
end
end
defp do_hex_decode32(_, <<>>), do: <<>>
for {case, fun} <- [upper: :from_upper, lower: :from_lower, mixed: :from_mixed] do
defp do_hex_decode32(unquote(case), string) do
split = byte_size(string) - 8
<<main::size(split)-binary, rest::binary>> = string
main = for <<c::8 <- main>>, into: <<>>, do: <<dec32hex(unquote(fun)(c))::5>>
case rest do
<<c1::8, c2::8, ?=, ?=, ?=, ?=, ?=, ?=>> ->
<<main::binary, dec32hex(unquote(fun)(c1))::5,
bsr(dec32hex(unquote(fun)(c2)), 2)::3>>
<<c1::8, c2::8, c3::8, c4::8, ?=, ?=, ?=, ?=>> ->
<<main::binary,
dec32hex(unquote(fun)(c1))::5, dec32hex(unquote(fun)(c2))::5,
dec32hex(unquote(fun)(c3))::5, bsr(dec32hex(unquote(fun)(c4)), 4)::1>>
<<c1::8, c2::8, c3::8, c4::8, c5::8, ?=, ?=, ?=>> ->
<<main::binary,
dec32hex(unquote(fun)(c1))::5, dec32hex(unquote(fun)(c2))::5,
dec32hex(unquote(fun)(c3))::5, dec32hex(unquote(fun)(c4))::5,
bsr(dec32hex(unquote(fun)(c5)), 1)::4>>
<<c1::8, c2::8, c3::8, c4::8, c5::8, c6::8, c7::8, ?=>> ->
<<main::binary,
dec32hex(unquote(fun)(c1))::5, dec32hex(unquote(fun)(c2))::5,
dec32hex(unquote(fun)(c3))::5, dec32hex(unquote(fun)(c4))::5,
dec32hex(unquote(fun)(c5))::5, dec32hex(unquote(fun)(c6))::5,
bsr(dec32hex(unquote(fun)(c7)), 3)::2>>
<<c1::8, c2::8, c3::8, c4::8, c5::8, c6::8, c7::8, c8::8>> ->
<<main::binary,
dec32hex(unquote(fun)(c1))::5, dec32hex(unquote(fun)(c2))::5,
dec32hex(unquote(fun)(c3))::5, dec32hex(unquote(fun)(c4))::5,
dec32hex(unquote(fun)(c5))::5, dec32hex(unquote(fun)(c6))::5,
dec32hex(unquote(fun)(c7))::5, dec32hex(unquote(fun)(c8))::5>>
<<>> ->
main
end
end
end
end
|
lib/elixir/lib/base.ex
| 0.663996 | 0.668366 |
base.ex
|
starcoder
|
defmodule Cldr.Normalize.Territories do
@moduledoc false
alias Cldr.Locale
def normalize(content) do
content
|> normalize_territory_info
end
def normalize_territory_info(content) do
content
|> Cldr.Map.remove_leading_underscores()
|> Cldr.Map.underscore_keys()
|> Cldr.Map.integerize_values()
|> Cldr.Map.floatize_values()
|> Enum.map(&normalize_territory_code/1)
|> Enum.map(&normalize_language_codes/1)
|> Enum.into(%{})
|> add_currency_for_territories
|> add_measurement_system
end
@key "language_population"
def normalize_language_codes({k, v}) do
if language_population = Map.get(v, @key) do
language_population =
language_population
|> Enum.map(fn {k1, v1} -> {Locale.normalize_locale_name(k1), v1} end)
|> Enum.into(%{})
{k, Map.put(v, @key, language_population)}
else
{k, v}
end
end
def add_currency_for_territories(territories) do
currencies = Cldr.Normalize.Currency.get_currency_data()["region"]
territories
|> Enum.map(fn {territory, map} ->
{territory, Map.put(map, "currency", Map.get(currencies, territory))}
end)
|> Enum.into(%{})
end
def add_measurement_system(territories) do
systems = get_measurement_data()
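# Resolve each territory's measurement systems, falling back to the
# world default (territory "001") when no territory-specific value exists.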
territories
|> Enum.map(fn {territory, map} ->
territory = String.to_atom(territory)
measurement_system =
%{}
|> Map.put(
:default,
(get_in(systems, [:measurement_system, territory]) ||
get_in(systems, [:measurement_system, :"001"]))
|> Cldr.Consolidate.canonicalize_measurement_system()
)
|> Map.put(
:paper_size,
(get_in(systems, [:paper_size, territory]) || get_in(systems, [:paper_size, :"001"]))
|> Cldr.Consolidate.canonicalize_measurement_system()
)
|> Map.put(
:temperature,
(get_in(systems, [:measurement_system_category_temperature, territory]) ||
get_in(systems, [:measurement_system, territory]) ||
get_in(systems, [:measurement_system, :"001"]))
|> Cldr.Consolidate.canonicalize_measurement_system()
)
{territory, Map.put(map, :measurement_system, measurement_system)}
end)
|> Map.new()
end
@measurement_path Path.join(Cldr.Config.download_data_dir(), [
"cldr-core",
"/supplemental",
"/measurementData.json"
])
def get_measurement_data do
@measurement_path
|> File.read!()
|> Jason.decode!()
|> get_in(["supplemental", "measurementData"])
|> Enum.map(fn {k, v} -> {Cldr.String.to_underscore(Cldr.String.underscore(k)), v} end)
|> Enum.into(%{})
|> Cldr.Map.atomize_keys()
end
defp normalize_territory_code({code, rest}) do
{normalize_territory_code(code), rest}
end
defp normalize_territory_code(code) do
String.upcase(code)
end
end
|
mix/support/normalize/normalize_territory_info.ex
| 0.69946 | 0.42674 |
normalize_territory_info.ex
|
starcoder
|
defmodule Memcache do
@moduledoc """
This module provides a user friendly API to interact with the
memcached server.
## Example
{:ok, pid} = Memcache.start_link()
{:ok} = Memcache.set(pid, "hello", "world")
{:ok, "world"} = Memcache.get(pid, "hello")
## Coder
`Memcache.Coder` allows you to specify how the value should be encoded before
sending it to the server and how it should be decoded after it is
retrieved. There are four built-in coders, namely `Memcache.Coder.Raw`,
`Memcache.Coder.Erlang`, `Memcache.Coder.JSON`,
`Memcache.Coder.ZIP`. Custom coders can be created by implementing
the `Memcache.Coder` behaviour.
## CAS
The CAS feature allows two commands to be performed atomically on a key: get
the CAS version number associated with a key during the first command and
pass that value during the second command. The second command will fail if
the value has been changed by someone else in the meantime.
{:ok, "hello", cas} = Memcache.get(pid, "key", cas: true)
{:ok} = Memcache.set_cas(pid, "key", "world", cas)
The Memcache module provides a `*_cas` variant for most of the
functions. These variants take an additional argument named `cas` and
return the same value as their counterparts, except in case of a CAS
error. In case of a CAS error the returned value is
`{:error, "Key exists"}`.
## Options
Most of the functions in this module accept an optional `Keyword`
list. The list below specifies the behavior of each option. The
options accepted by a specific function are documented on that
function.
* `:cas` - (boolean) returns the CAS value associated with the
data. This value will be either in second or third position
of the returned tuple depending on the command. Defaults to `false`.
* `:ttl` - (integer) specifies the expiration time in seconds for
the corresponding key. Can be set to `0` to disable
expiration. The Default value can be configured using
`start_link/2`.
"""
@type error :: {:error, binary | atom}
@type result :: {:ok} | {:ok, integer} | {:ok, any} | {:ok, any, integer} | error
@type fetch_result :: {:ok, any} | {:ok, any, integer} | error
@type fetch_integer_result :: {:ok, integer} | {:ok, integer, integer} | error
@type store_result :: {:ok} | {:ok, integer} | error
alias Memcache.Connection
alias Memcache.Registry
@default_opts [
ttl: 0,
namespace: nil,
key_coder: nil,
coder: {Memcache.Coder.Raw, []}
]
@doc """
Creates a connection using `Memcache.Connection.start_link/2`
## Connection Options
This is a superset of the connection options accepted by the
`Memcache.Connection.start_link/2`. The following list specifies the
additional options.
* `:ttl` - (integer) a default expiration time in seconds. This
value will be used if the `:ttl` value is not specified for an
operation. Defaults to `0` (means forever).
* `:namespace` - (string) prepend each key with the given value.
* `:key_coder` - ({module, function}) Used to transform the key completely.
The function needs to accept one argument, the key and return a new key.
* `:coder` - (module | {module, options}) Can be either a module or
a tuple containing the module and options. Defaults to
`{Memcache.Coder.Raw, []}`.
## Options
The second option is passed directly to the underlying
`GenServer.start_link/3`, so it can be used to create named process.
"""
@spec start_link(Keyword.t(), Keyword.t()) :: GenServer.on_start()
def start_link(connection_options \\ [], options \\ []) do
extra_opts = [:ttl, :namespace, :key_coder, :coder]
connection_options =
@default_opts
|> Keyword.merge(connection_options)
|> Keyword.update!(:coder, &normalize_coder/1)
{state, connection_options} = Keyword.split(connection_options, extra_opts)
{:ok, pid} = Connection.start_link(connection_options, options)
state =
state
|> Map.new()
|> Map.put(:connection, pid)
Registry.associate(pid, state)
{:ok, pid}
end
@doc false
def child_spec(args) do
%{
id: __MODULE__,
start: {__MODULE__, :start_link, args},
type: :worker
}
end
@doc """
Closes the connection to the memcached server.
"""
@spec stop(GenServer.server()) :: {:ok}
def stop(server) do
Connection.close(server)
end
@doc """
Gets the value associated with the key. Returns `{:error, "Key not
found"}` if the given key doesn't exist.
Accepted option: `:cas`
"""
@spec get(GenServer.server(), binary, Keyword.t()) :: fetch_result
def get(server, key, opts \\ []) do
execute_k(server, :GET, [key], opts)
end
@doc """
Gets the values associated with the list of keys. Returns a
map. Keys that are not found in the server are filtered from the
result.
Accepted option: `:cas`
"""
@spec multi_get(GenServer.server(), [binary], Keyword.t()) :: {:ok, map} | error
def multi_get(server, keys, opts \\ []) do
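# GETQ is the "quiet" variant of GET: in the pipelined response, misses
# come back as per-key errors (filtered out below) instead of aborting
# the whole batch.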
commands = Enum.map(keys, &{:GETQ, [&1], opts})
with {:ok, values} <- execute_quiet_k(server, commands) do
result =
keys
|> Enum.zip(values)
|> Enum.reduce(%{}, fn
{key, {:ok, value}}, acc -> Map.put(acc, key, value)
{key, {:ok, value, cas}}, acc -> Map.put(acc, key, {value, cas})
{_key, {:error, _}}, acc -> acc
end)
{:ok, result}
end
end
@doc """
Sets the key to value
Accepted options: `:cas`, `:ttl`
"""
@spec set(GenServer.server(), binary, binary, Keyword.t()) :: store_result
def set(server, key, value, opts \\ []) do
set_cas(server, key, value, 0, opts)
end
@doc """
Sets the key to value if the key exists and has CAS value equal to
the provided value
Accepted options: `:cas`, `:ttl`
"""
@spec set_cas(GenServer.server(), binary, binary, integer, Keyword.t()) :: store_result
def set_cas(server, key, value, cas, opts \\ []) do
server_options = get_server_options(server)
execute_kv(
server,
:SET,
[key, value, cas, ttl_or_default(server_options, opts)],
opts,
server_options
)
end
@doc """
Multi version of `set/4`. Accepts a map or a list of `{key, value}`.
Accepted options: `:cas`, `:ttl`
"""
@spec multi_set(GenServer.server(), [{binary, binary}] | map, Keyword.t()) ::
{:ok, [store_result]} | error
def multi_set(server, commands, opts \\ []) do
commands = Enum.map(commands, fn {key, value} -> {key, value, 0} end)
multi_set_cas(server, commands, opts)
end
@doc """
Multi version of `set_cas/4`. Accepts a list of `{key, value, cas}`.
Accepted options: `:cas`, `:ttl`
"""
@spec multi_set_cas(GenServer.server(), [{binary, binary, integer}], Keyword.t()) ::
{:ok, [store_result]} | error
def multi_set_cas(server, commands, opts \\ []) do
op = if Keyword.get(opts, :cas, false), do: :SET, else: :SETQ
server_options = get_server_options(server)
commands =
Enum.map(commands, fn {key, value, cas} ->
{op, [key, value, cas, ttl_or_default(server_options, opts)], opts}
end)
execute_quiet_kv(server, commands, server_options)
end
@cas_error {:error, "Key exists"}
@doc """
Compare and swap value using optimistic locking.
1. Get the existing value for key
2. If it exists, call the update function with the value
3. Set the returned value for key
The third operation will fail if someone else has updated the value
for the same key in the meantime. In that case, by default, this
function will go back to step 1 and try again. Retry behavior can be
disabled by passing the `[retry: false]` option.
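## Example
A minimal sketch (assumes the key already holds an integer encoded as a
string, as with the default `Memcache.Coder.Raw`):
    {:ok, _new} = Memcache.cas(pid, "counter", fn value ->
      value |> String.to_integer() |> Kernel.+(1) |> Integer.to_string()
    end)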
"""
@spec cas(GenServer.server(), binary, (binary -> binary), Keyword.t()) :: {:ok, any} | error
def cas(server, key, update, opts \\ []) do
with {:ok, value, cas} <- get(server, key, cas: true),
new_value = update.(value),
{:ok} <- set_cas(server, key, new_value, cas) do
{:ok, new_value}
else
@cas_error ->
if Keyword.get(opts, :retry, true) do
cas(server, key, update, opts)
else
@cas_error
end
err ->
err
end
end
@doc """
Sets the key to value if the key doesn't exist already. Returns
`{:error, "Key exists"}` if the given key already exists.
Accepted options: `:cas`, `:ttl`
"""
@spec add(GenServer.server(), binary, binary, Keyword.t()) :: store_result
def add(server, key, value, opts \\ []) do
server_options = get_server_options(server)
execute_kv(
server,
:ADD,
[key, value, ttl_or_default(server_options, opts)],
opts,
server_options
)
end
@doc """
Sets the key to value if the key already exists. Returns `{:error,
"Key not found"}` if the given key doesn't exist.
Accepted options: `:cas`, `:ttl`
"""
@spec replace(GenServer.server(), binary, binary, Keyword.t()) :: store_result
def replace(server, key, value, opts \\ []) do
replace_cas(server, key, value, 0, opts)
end
@doc """
Sets the key to value if the key already exists and has CAS value
equal to the provided value.
Accepted options: `:cas`, `:ttl`
"""
@spec replace_cas(GenServer.server(), binary, binary, integer, Keyword.t()) :: store_result
def replace_cas(server, key, value, cas, opts \\ []) do
server_options = get_server_options(server)
execute_kv(
server,
:REPLACE,
[key, value, cas, ttl_or_default(server_options, opts)],
opts,
server_options
)
end
@doc """
Removes the item with the given key value. Returns `{:error, "Key
not found"}` if the given key is not found.
"""
@spec delete(GenServer.server(), binary) :: store_result
def delete(server, key) do
execute_k(server, :DELETE, [key])
end
@doc """
Removes the item with the given key value if the CAS value is equal
to the provided value
"""
@spec delete_cas(GenServer.server(), binary, integer) :: store_result
def delete_cas(server, key, cas) do
execute_k(server, :DELETE, [key, cas])
end
@doc """
Flushes all the items in the server. The `:ttl` option will cause the flush
to be delayed by the specified time.
Accepted options: `:ttl`
"""
@spec flush(GenServer.server(), Keyword.t()) :: store_result
def flush(server, opts \\ []) do
execute(server, :FLUSH, [Keyword.get(opts, :ttl, 0)])
end
@doc """
Appends the value to the end of the current value of the
key. Returns `{:error, "Item not stored"}` if the item is not present
in the server already.
Accepted options: `:cas`
"""
@spec append(GenServer.server(), binary, binary, Keyword.t()) :: store_result
def append(server, key, value, opts \\ []) do
execute_kv(server, :APPEND, [key, value], opts)
end
@doc """
Appends the value to the end of the current value of the
key if the CAS value is equal to the provided value
Accepted options: `:cas`
"""
@spec append_cas(GenServer.server(), binary, binary, integer, Keyword.t()) :: store_result
def append_cas(server, key, value, cas, opts \\ []) do
execute_kv(server, :APPEND, [key, value, cas], opts)
end
@doc """
Prepends the value to the start of the current value of the
key. Returns `{:error, "Item not stored"}` if the item is not present
in the server already.
Accepted options: `:cas`
"""
@spec prepend(GenServer.server(), binary, binary, Keyword.t()) :: store_result
def prepend(server, key, value, opts \\ []) do
execute_kv(server, :PREPEND, [key, value], opts)
end
@doc """
Prepends the value to the start of the current value of the
key if the CAS value is equal to the provided value
Accepted options: `:cas`
"""
@spec prepend_cas(GenServer.server(), binary, binary, integer, Keyword.t()) :: store_result
def prepend_cas(server, key, value, cas, opts \\ []) do
execute_kv(server, :PREPEND, [key, value, cas], opts)
end
@doc """
Increments the current value. Only integer values can be
incremented. Returns `{:error, "Incr/Decr on non-numeric value"}` if
the value stored in the server is not numeric.
## Options
* `:by` - (integer) The amount to add to the existing
value. Defaults to `1`.
* `:default` - (integer) Default value to use in case the key is not
found. Defaults to `0`.
other options: `:cas`, `:ttl`
"""
@spec incr(GenServer.server(), binary, Keyword.t()) :: fetch_integer_result
def incr(server, key, opts \\ []) do
incr_cas(server, key, 0, opts)
end
@doc """
Increments the current value if the CAS value is equal to the
provided value.
## Options
* `:by` - (integer) The amount to add to the existing
value. Defaults to `1`.
* `:default` - (integer) Default value to use in case the key is not
found. Defaults to `0`.
other options: `:cas`, `:ttl`
"""
@spec incr_cas(GenServer.server(), binary, integer, Keyword.t()) :: fetch_integer_result
def incr_cas(server, key, cas, opts \\ []) do
defaults = [by: 1, default: 0]
opts = Keyword.merge(defaults, opts)
server_options = get_server_options(server)
execute_k(
server,
:INCREMENT,
[
key,
Keyword.get(opts, :by),
Keyword.get(opts, :default),
cas,
ttl_or_default(server_options, opts)
],
opts,
server_options
)
end
@doc """
Decrements the current value. Only integer values can be
decremented. Returns `{:error, "Incr/Decr on non-numeric value"}` if
the value stored in the server is not numeric.
## Options
* `:by` - (integer) The amount to subtract from the existing
value. Defaults to `1`.
* `:default` - (integer) Default value to use in case the key is not
found. Defaults to `0`.
other options: `:cas`, `:ttl`
"""
@spec decr(GenServer.server(), binary, Keyword.t()) :: fetch_integer_result
def decr(server, key, opts \\ []) do
decr_cas(server, key, 0, opts)
end
@doc """
Decrements the current value if the CAS value is equal to the
provided value.
## Options
* `:by` - (integer) The amount to subtract from the existing
value. Defaults to `1`.
* `:default` - (integer) Default value to use in case the key is not
found. Defaults to `0`.
other options: `:cas`, `:ttl`
"""
@spec decr_cas(GenServer.server(), binary, integer, Keyword.t()) :: fetch_integer_result
def decr_cas(server, key, cas, opts \\ []) do
defaults = [by: 1, default: 0]
opts = Keyword.merge(defaults, opts)
server_options = get_server_options(server)
execute_k(
server,
:DECREMENT,
[
key,
Keyword.get(opts, :by),
Keyword.get(opts, :default),
cas,
ttl_or_default(server_options, opts)
],
opts,
server_options
)
end
@doc """
Gets the default set of server statistics
"""
@spec stat(GenServer.server()) :: {:ok, map} | error
def stat(server) do
execute(server, :STAT, [])
end
@doc """
Gets the specific set of server statistics
"""
@spec stat(GenServer.server(), String.t()) :: {:ok, map} | error
def stat(server, key) do
execute(server, :STAT, [key])
end
@doc """
Gets the version of the server
"""
@spec version(GenServer.server()) :: String.t() | error
def version(server) do
execute(server, :VERSION, [])
end
@doc """
Sends a noop command
"""
@spec noop(GenServer.server()) :: {:ok} | error
def noop(server) do
execute(server, :NOOP, [])
end
## Private
defp get_server_options(server) do
Registry.lookup(server)
end
defp normalize_coder(spec) when is_tuple(spec), do: spec
defp normalize_coder(module) when is_atom(module), do: {module, []}
defp encode(server_options, value) do
coder = server_options.coder
apply(elem(coder, 0), :encode, [value, elem(coder, 1)])
end
defp decode(server_options, value) do
coder = server_options.coder
apply(elem(coder, 0), :decode, [value, elem(coder, 1)])
end
defp decode_response({:ok, value}, server_options) when is_binary(value) do
{:ok, decode(server_options, value)}
end
defp decode_response({:ok, value, cas}, server_options) when is_binary(value) do
{:ok, decode(server_options, value), cas}
end
defp decode_response(rest, _server_options), do: rest
defp decode_multi_response({:ok, values}, server_options) when is_list(values) do
{:ok, Enum.map(values, &decode_response(&1, server_options))}
end
defp decode_multi_response(rest, _server_options), do: rest
defp ttl_or_default(server_options, opts) do
if Keyword.has_key?(opts, :ttl) do
opts[:ttl]
else
server_options.ttl
end
end
# This takes care of both namespacing and key coding.
defp key_with_namespace(server_options, key) do
key =
case server_options.namespace do
nil -> key
namespace -> "#{namespace}:#{key}"
end
case server_options.key_coder do
{module, function} -> apply(module, function, [key])
_ -> key
end
end
defp execute_k(server, command, args, opts \\ []),
do: execute_k(server, command, args, opts, get_server_options(server))
defp execute_k(server, command, [key | rest], opts, server_options) do
server
|> execute(command, [key_with_namespace(server_options, key) | rest], opts)
|> decode_response(server_options)
end
defp execute_kv(server, command, args, opts),
do: execute_kv(server, command, args, opts, get_server_options(server))
defp execute_kv(server, command, [key | [value | rest]], opts, server_options) do
server
|> execute(
command,
[key_with_namespace(server_options, key) | [encode(server_options, value) | rest]],
opts
)
|> decode_response(server_options)
end
defp execute(server, command, args, opts \\ []) do
Connection.execute(server, command, args, opts)
end
defp execute_quiet_k(server, commands),
do: execute_quiet_k(server, commands, get_server_options(server))
defp execute_quiet_k(server, commands, server_options) do
commands =
Enum.map(commands, fn {command, [key | rest], opts} ->
{command, [key_with_namespace(server_options, key) | rest], opts}
end)
server
|> execute_quiet(commands)
|> decode_multi_response(server_options)
end
defp execute_quiet_kv(server, commands, server_options) do
commands =
Enum.map(commands, fn {command, [key | [value | rest]], opts} ->
{command,
[key_with_namespace(server_options, key) | [encode(server_options, value) | rest]], opts}
end)
server
|> execute_quiet(commands)
|> decode_multi_response(server_options)
end
defp execute_quiet(server, commands) do
Connection.execute_quiet(server, commands)
end
end
|
lib/memcache.ex
| 0.929103 | 0.610279 |
memcache.ex
|
starcoder
|
defmodule ExIntercom.User do
@moduledoc """
Users are the primary way of interacting with the ExIntercom API. If you know a
user's ID you can easily fetch their data.
```elixir
{:ok, %ExIntercom.User{} = user} = ExIntercom.User.get("530370b477ad7120001d")
```
You can also look them up using the `user_id` your system assigned to them
when creating the user record, or alternatively, via their email address.
```elixir
{:ok, %ExIntercom.User{}} = ExIntercom.User.find({:user_id, 25})
{:ok, %ExIntercom.User{}} = ExIntercom.User.find({:email, "<EMAIL>"})
```
If the user cannot be found you will get `{:error, :not_found}`. If your
token doesn't have sufficient permissions to access a resource the return
value will be `{:error, :not_authorised}`.
"""
use Ecto.Schema
import Ecto.Changeset
alias ExIntercom.{Location, Avatar, Company, SocialProfile, Segment,
Tag, Request, HTTP}
alias __MODULE__
@path "/users"
@primary_key false
embedded_schema do
field :type, :string
field :id, :string
field :user_id, :string
field :email, :string
field :phone, :string
field :name, :string
field :updated_at, :integer
field :last_seen_ip, :string
field :unsubscribed_from_emails, :boolean
field :last_request_at, :integer
field :signed_up_at, :integer
field :created_at, :integer
field :session_count, :integer
field :user_agent_data, :string
field :pseudonym, :string
field :anonymous, :boolean
embeds_one :location, Location
embeds_one :avatar, Avatar
embeds_many :companies, Company
embeds_many :social_profiles, SocialProfile
embeds_many :segments, Segment
embeds_many :tags, Tag
end
@type t :: %__MODULE__{}
@type result :: {:ok, User.t} | {:error, :not_found} |
{:error, :not_authorised} | {:error, any}
@doc """
Fetches a user by their ExIntercom ID.
"""
@spec get(String.t) :: result
def get(id) when is_binary(id) do
"#{@path}/#{id}"
|> Request.build
|> HTTP.get
|> case do
{:ok, map} -> parse(map)
{:error, {:http, 404}} -> {:error, :not_found}
{:error, {:http, 401}} -> {:error, :not_authorised}
{:error, _} = err -> err
end
end
@doc """
Look up a user by their assigned user ID or email address.
"""
@spec find({:user_id | :email, String.t}) :: result
def find({:user_id, user_id}) when is_binary(user_id) do
@path
|> Request.build(%{user_id: user_id})
|> HTTP.get
|> case do
{:ok, map} -> parse(map)
{:error, {:http, 404}} -> {:error, :not_found}
{:error, {:http, 401}} -> {:error, :not_authorised}
{:error, _} = err -> err
end
end
def find({:email, email}) do
@path
|> Request.build(%{email: email})
|> HTTP.get
|> case do
{:ok, map} -> parse(map)
{:error, {:http, 404}} -> {:error, :not_found}
{:error, {:http, 401}} -> {:error, :not_authorised}
{:error, _} = err -> err
end
end
@doc false
@spec changeset(User.t, map) :: Ecto.Changeset.t
def changeset(%User{} = user, %{} = changes) do
user
|> cast(changes, __schema__(:fields) -- __schema__(:embeds))
|> cast_embed(:location, with: &Location.changeset/2)
|> cast_embed(:avatar, with: &Avatar.changeset/2)
|> cast_embed(:social_profiles, with: &SocialProfile.changeset/2)
|> cast_embed(:companies, with: &Company.changeset/2)
|> cast_embed(:segments, with: &Segment.changeset/2)
|> cast_embed(:tags, with: &Tag.changeset/2)
end
@doc false
@spec parse(map) :: result
def parse(%{} = user) do
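# The raw API payload wraps embeds (e.g. the :social_profiles key holds
# %{social_profiles: [...]}) and uses :location_data for the location;
# unwrap and re-key everything to match the embedded schema fields
# before casting.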
{loc, user} = Map.pop(user, :location_data)
{%{social_profiles: profs}, user} = Map.pop(user, :social_profiles)
{%{companies: comps}, user} = Map.pop(user, :companies)
{%{segments: segs}, user} = Map.pop(user, :segments)
{%{tags: tags}, user} = Map.pop(user, :tags)
user =
user
|> Map.put(:location, loc)
|> Map.put(:social_profiles, profs)
|> Map.put(:companies, comps)
|> Map.put(:segments, segs)
|> Map.put(:tags, tags)
case changeset(%User{}, user) do
%Ecto.Changeset{valid?: true} = changes ->
{:ok, apply_changes(changes)}
%Ecto.Changeset{valid?: false} = changes ->
{:error, changes}
end
end
end
|
lib/intercom/user.ex
| 0.809012 | 0.737229 |
user.ex
|
starcoder
|
defmodule Tesla.Multipart do
@moduledoc """
Multipart functionality.
## Example
```
mp =
Multipart.new()
|> Multipart.add_content_type_param("charset=utf-8")
|> Multipart.add_field("field1", "foo")
|> Multipart.add_field("field2", "bar",
headers: [{"content-id", "1"}, {"content-type", "text/plain"}]
)
|> Multipart.add_file("test/tesla/multipart_test_file.sh")
|> Multipart.add_file("test/tesla/multipart_test_file.sh", name: "foobar")
|> Multipart.add_file_content("sample file content", "sample.txt")
response = client.post(url, mp)
```
"""
defmodule Part do
@moduledoc false
defstruct body: nil,
dispositions: [],
headers: []
@type t :: %__MODULE__{
body: String.t(),
headers: Tesla.Env.headers(),
dispositions: Keyword.t()
}
end
@type part_stream :: Enum.t()
@type part_value :: iodata | part_stream
defstruct parts: [],
boundary: nil,
content_type_params: []
@type t :: %__MODULE__{
parts: list(Tesla.Multipart.Part.t()),
boundary: String.t(),
content_type_params: [String.t()]
}
@doc """
Create a new Multipart struct to be used for a request body.
"""
@spec new() :: t
def new do
%__MODULE__{boundary: unique_string()}
end
@doc """
Add a parameter to the multipart content-type.
"""
@spec add_content_type_param(t, String.t()) :: t
def add_content_type_param(%__MODULE__{} = mp, param) do
%{mp | content_type_params: mp.content_type_params ++ [param]}
end
@doc """
Add a field part.
"""
@spec add_field(t, String.t(), part_value, Keyword.t()) :: t
def add_field(%__MODULE__{} = mp, name, value, opts \\ []) do
:ok = assert_part_value!(value)
{headers, opts} = Keyword.pop_first(opts, :headers, [])
part = %Part{
body: value,
headers: headers,
dispositions: [{:name, name}] ++ opts
}
%{mp | parts: mp.parts ++ [part]}
end
@doc """
Add a file part. The file will be streamed.
## Options
- `:name` - name of form param
- `:filename` - filename (defaults to path basename)
- `:headers` - additional headers
- `:detect_content_type` - auto-detect file content-type (defaults to false)
"""
@spec add_file(t, String.t(), Keyword.t()) :: t
def add_file(%__MODULE__{} = mp, path, opts \\ []) do
{filename, opts} = Keyword.pop_first(opts, :filename, Path.basename(path))
{headers, opts} = Keyword.pop_first(opts, :headers, [])
{detect_content_type, opts} = Keyword.pop_first(opts, :detect_content_type, false)
# add in detected content-type if necessary
headers =
case detect_content_type do
true -> List.keystore(headers, "content-type", 0, {"content-type", MIME.from_path(path)})
false -> headers
end
data = File.stream!(path, [], 2048)
add_file_content(mp, data, filename, opts ++ [headers: headers])
end
@doc """
Add a file part with value.
Same as `add_file/3`, but the file content is read from the `data` argument.
## Options
- `:name` - name of form param
- `:headers` - additional headers
"""
@spec add_file_content(t, part_value, String.t(), Keyword.t()) :: t
def add_file_content(%__MODULE__{} = mp, data, filename, opts \\ []) do
{name, opts} = Keyword.pop_first(opts, :name, "file")
add_field(mp, name, data, opts ++ [filename: filename])
end
@doc false
@spec headers(t) :: Tesla.Env.headers()
def headers(%__MODULE__{boundary: boundary, content_type_params: params}) do
ct_params = (["boundary=#{boundary}"] ++ params) |> Enum.join("; ")
[{"content-type", "multipart/form-data; #{ct_params}"}]
end
@doc false
@spec body(t) :: part_stream
def body(%__MODULE__{boundary: boundary, parts: parts}) do
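# Lazily concatenate each part's stream, then append the closing
# "--boundary--" terminator.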
part_streams = Enum.map(parts, &part_as_stream(&1, boundary))
Stream.concat(part_streams ++ [["--#{boundary}--\r\n"]])
end
@doc false
@spec part_as_stream(Part.t(), String.t()) :: part_stream
def part_as_stream(
%Part{body: body, dispositions: dispositions, headers: part_headers},
boundary
) do
part_headers = Enum.map(part_headers, fn {k, v} -> "#{k}: #{v}\r\n" end)
part_headers = part_headers ++ [part_headers_for_disposition(dispositions)]
enum_body =
case body do
b when is_binary(b) -> [b]
b -> b
end
Stream.concat([
["--#{boundary}\r\n"],
part_headers,
["\r\n"],
enum_body,
["\r\n"]
])
end
@doc false
@spec part_headers_for_disposition(Keyword.t()) :: [String.t()]
def part_headers_for_disposition([]), do: []
def part_headers_for_disposition(kvs) do
ds =
kvs
|> Enum.map(fn {k, v} -> "#{k}=\"#{v}\"" end)
|> Enum.join("; ")
["content-disposition: form-data; #{ds}\r\n"]
end
@spec unique_string() :: String.t()
defp unique_string() do
16
|> :crypto.strong_rand_bytes()
|> Base.encode16(case: :lower)
end
@spec assert_part_value!(any) :: :ok | no_return
defp assert_part_value!(%maybe_stream{})
when maybe_stream in [IO.Stream, File.Stream, Stream],
do: :ok
defp assert_part_value!(value)
when is_list(value)
when is_binary(value),
do: :ok
defp assert_part_value!(val) do
raise(ArgumentError, "#{inspect(val)} is not a supported multipart value.")
end
end
|
lib/tesla/multipart.ex
| 0.890509 | 0.571079 |
multipart.ex
|
starcoder
|
defmodule Jamdb.Oracle do
@vsn "0.3.7"
@moduledoc """
Adapter module for Oracle. `DBConnection` behaviour implementation.
It uses `jamdb_oracle` for communicating to the database.
"""
use DBConnection
defstruct [:pid, :mode, :cursors]
@doc """
Starts and links to a database connection process.
See [`Ecto.Adapters.Jamdb.Oracle`](Ecto.Adapters.Jamdb.Oracle.html#module-connection-options).
By default the `DBConnection` starts a pool with a single connection.
The size of the pool can be increased with `:pool_size`. The ping interval
to validate an idle connection can be given with the `:idle_interval` option.
"""
@spec start_link(opts :: Keyword.t) ::
{:ok, pid()} | {:error, any()}
def start_link(opts) do
DBConnection.start_link(Jamdb.Oracle, opts)
end
@doc """
Runs the SQL statement.
See `DBConnection.prepare_execute/4`.
In case of success, it must return an `:ok` tuple containing
a map with at least two keys:
* `:num_rows` - the number of rows affected
* `:rows` - the result set as a list
"""
@spec query(conn :: any(), sql :: any(), params :: any()) ::
{:ok, any()} | {:error | :disconnect, any()}
def query(conn, sql, params \\ [])
def query(pid, sql, params) when is_pid(pid), do: query(%{pid: pid}, sql, params)
def query(%{pid: pid}, sql, params) do
case :jamdb_oracle.sql_query(pid, stmt(sql, params)) do
{:ok, [{:result_set, columns, _, rows}]} ->
{:ok, %{num_rows: length(rows), rows: rows, columns: columns}}
{:ok, [{:fetched_rows, _, _, _} = result]} -> {:cont, result}
{:ok, [{:proc_result, 0, rows}]} -> {:ok, %{num_rows: length(rows), rows: rows}}
{:ok, [{:proc_result, _, msg}]} -> {:error, msg}
{:ok, [{:affected_rows, num_rows}]} -> {:ok, %{num_rows: num_rows, rows: nil}}
{:ok, result} -> {:ok, result}
{:error, _, err} -> {:disconnect, err}
end
end
defp stmt({:fetch, sql, params}, _), do: {:fetch, sql, params}
defp stmt({:fetch, cursor, row_format, last_row}, _), do: {:fetch, cursor, row_format, last_row}
defp stmt({:batch, sql, params}, _), do: {:batch, sql, params}
defp stmt(sql, params), do: {sql, params}
@impl true
def connect(opts) do
database = Keyword.fetch!(opts, :database) |> to_charlist
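# A database name beginning with ":" is treated as an Oracle SID;
# otherwise it is used as a service name.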
env = if( hd(database) == ?:, do: [sid: tl(database)], else: [service_name: database] )
|> Keyword.put_new(:host, Keyword.fetch!(opts, :hostname) |> to_charlist)
|> Keyword.put_new(:port, Keyword.fetch!(opts, :port))
|> Keyword.put_new(:user, Keyword.fetch!(opts, :username) |> to_charlist)
|> Keyword.put_new(:password, Keyword.fetch!(opts, :password) |> to_charlist)
|> Keyword.put_new(:timeout, Keyword.fetch!(opts, :timeout))
params = Keyword.get(opts, :parameters, [])
sock_opts = if( Keyword.has_key?(opts, :socket_options),
do: [socket_options: opts[:socket_options]], else: [] )
case :jamdb_oracle.start_link(sock_opts ++ params ++ env) do
{:ok, pid} -> {:ok, %Jamdb.Oracle{pid: pid, mode: :idle}}
{:error, [{:proc_result, _, msg}]} -> {:error, error!(msg)}
{:error, err} -> {:error, error!(err)}
end
end
@impl true
def disconnect(_err, %{pid: pid}) do
:jamdb_oracle.stop(pid)
end
@impl true
def handle_execute(%{batch: true} = query, params, _opts, s) do
%Jamdb.Oracle.Query{statement: statement} = query
case query(s, {:batch, statement |> to_charlist, params}, []) do
{:ok, result} -> {:ok, query, result, s}
{:error, err} -> {:error, error!(err), s}
{:disconnect, err} -> {:disconnect, error!(err), s}
end
end
def handle_execute(query, params, opts, s) do
%Jamdb.Oracle.Query{statement: statement} = query
returning = Enum.map(Keyword.get(opts, :out, []), fn elem -> {:out, elem} end)
case query(s, statement |> to_charlist, Enum.concat(params, returning)) do
{:ok, result} -> {:ok, query, result, s}
{:error, err} -> {:error, error!(err), s}
{:disconnect, err} -> {:disconnect, error!(err), s}
end
end
@impl true
def handle_prepare(query, _opts, s) do
{:ok, query, s}
end
@impl true
def handle_begin(opts, %{mode: mode} = s) do
case Keyword.get(opts, :mode, :transaction) do
:transaction when mode == :idle ->
statement = "SAVEPOINT tran"
handle_transaction(statement, opts, %{s | mode: :transaction})
:savepoint when mode == :transaction ->
statement = "SAVEPOINT " <> Keyword.get(opts, :name, "svpt")
handle_transaction(statement, opts, %{s | mode: :transaction})
status when status in [:transaction, :savepoint] ->
{status, s}
end
end
@impl true
def handle_commit(opts, %{mode: mode} = s) do
case Keyword.get(opts, :mode, :transaction) do
:transaction when mode == :transaction ->
statement = "COMMIT"
handle_transaction(statement, opts, %{s | mode: :idle})
:savepoint when mode == :transaction ->
{:ok, [], %{s | mode: :transaction}}
status when status in [:transaction, :savepoint] ->
{status, s}
end
end
@impl true
def handle_rollback(opts, %{mode: mode} = s) do
case Keyword.get(opts, :mode, :transaction) do
:transaction when mode in [:transaction, :error] ->
statement = "ROLLBACK TO tran"
handle_transaction(statement, opts, %{s | mode: :idle})
:savepoint when mode in [:transaction, :error] ->
statement = "ROLLBACK TO " <> Keyword.get(opts, :name, "svpt")
handle_transaction(statement, opts, %{s | mode: :transaction})
status when status in [:transaction, :savepoint] ->
{status, s}
end
end
defp handle_transaction(statement, _opts, s) do
case query(s, statement |> to_charlist) do
{:ok, result} -> {:ok, result, s}
{:error, err} -> {:error, error!(err), s}
{:disconnect, err} -> {:disconnect, error!(err), s}
end
end
@impl true
def handle_declare(query, params, _opts, s) do
{:ok, query, %{params: params}, s}
end
@impl true
def handle_fetch(query, %{params: params}, _opts, %{cursors: nil} = s) do
%Jamdb.Oracle.Query{statement: statement} = query
case query(s, {:fetch, statement |> to_charlist, params}) do
{:cont, {_, cursor, row_format, rows}} ->
cursors = %{cursor: cursor, row_format: row_format, last_row: List.last(rows)}
{:cont, %{num_rows: length(rows), rows: rows}, %{s | cursors: cursors}}
{:ok, result} ->
{:halt, result, s}
{:error, err} -> {:error, error!(err), s}
{:disconnect, err} -> {:disconnect, error!(err), s}
end
end
def handle_fetch(_query, _cursor, _opts, %{cursors: cursors} = s) do
%{cursor: cursor, row_format: row_format, last_row: last_row} = cursors
case query(s, {:fetch, cursor, row_format, last_row}) do
{:cont, {_, _, _, rows}} ->
rows = tl(rows)
{:cont, %{num_rows: length(rows), rows: rows},
%{s | cursors: %{cursors | last_row: List.last(rows)}}}
{:ok, %{rows: rows} = result} ->
rows = tl(rows)
{:halt, %{result | num_rows: length(rows), rows: rows}, s}
{:error, err} -> {:error, error!(err), s}
{:disconnect, err} -> {:disconnect, error!(err), s}
end
end
@impl true
def handle_deallocate(_query, _cursor, _opts, s) do
{:ok, nil, %{s | cursors: nil}}
end
@impl true
def handle_close(_query, _opts, s) do
{:ok, nil, s}
end
@impl true
def handle_status(_opts, %{mode: mode} = s) do
{mode, s}
end
@impl true
def checkin(s) do
{:ok, s}
end
@impl true
def checkout(s) do
case query(s, 'SESSION') do
{:ok, _} -> {:ok, s}
{:error, err} -> {:disconnect, error!(err), s}
end
end
@impl true
def ping(%{mode: :idle} = s) do
case query(s, 'PING') do
{:ok, _} -> {:ok, s}
{:error, err} -> {:disconnect, error!(err), s}
{:disconnect, err} -> {:disconnect, error!(err), s}
end
end
def ping(%{mode: :transaction} = s) do
{:ok, s}
end
defp error!(msg) do
DBConnection.ConnectionError.exception("#{inspect msg}")
end
@doc """
Returns the configured JSON library.
To customize the JSON library, include the following in your `config/config.exs`:
config :jamdb_oracle, :json_library, SomeJSONModule
Defaults to [`Jason`](https://hexdocs.pm/jason)
"""
@spec json_library() :: module()
def json_library() do
Application.get_env(:jamdb_oracle, :json_library, Jason)
end
end
defimpl DBConnection.Query, for: Jamdb.Oracle.Query do
def parse(query, _), do: query
def describe(query, _), do: query
def decode(_, %{rows: []} = result, _), do: result
def decode(_, %{rows: rows} = result, opts) when rows != nil,
do: %{result | rows: Enum.map(rows, fn row -> decode(row, opts[:decode_mapper]) end)}
def decode(_, result, _), do: result
defp decode(row, nil), do: Enum.map(row, fn elem -> decode(elem) end)
defp decode(row, mapper), do: mapper.(decode(row, nil))
defp decode(:null), do: nil
defp decode({elem}) when is_number(elem), do: elem
defp decode({date, time}) when is_tuple(date), do: to_naive({date, time})
defp decode({date, time, _}) when is_tuple(date), do: to_utc({date, time})
defp decode(elem) when is_list(elem), do: to_binary(elem)
defp decode(elem), do: elem
def encode(_, [], _), do: []
def encode(_, params, opts) do
charset = if( Keyword.has_key?(opts, :charset),
do: String.starts_with?(Atom.to_string(opts[:charset]), ["al16","ja16","ko16","zht16","zhs16"]), else: false )
Enum.map(params, fn elem -> encode(elem, charset) end)
end
defp encode(nil, _), do: :null
defp encode(true, _), do: "1"
defp encode(false, _), do: "0"
defp encode(%Decimal{} = decimal, _), do: Decimal.to_float(decimal)
defp encode(%DateTime{} = datetime, _), do: NaiveDateTime.to_erl(DateTime.to_naive(datetime))
defp encode(%NaiveDateTime{} = naive, _), do: NaiveDateTime.to_erl(naive)
defp encode(%Ecto.Query.Tagged{value: elem}, _), do: elem
defp encode(elem, false) when is_binary(elem), do: elem |> to_charlist
defp encode(elem, charset) when is_map(elem),
do: encode(Jamdb.Oracle.json_library().encode!(elem), charset)
defp encode(elem, _), do: elem
defp expr(list) when is_list(list) do
Enum.map(list, fn
:null -> nil
elem -> elem
end)
end
defp to_binary(list) when is_list(list) do
try do
:binary.list_to_bin(list)
rescue
ArgumentError ->
Enum.map(expr(list), fn
elem when is_list(elem) -> expr(elem)
other -> other
end) |> Enum.join
end
end
defp to_naive({date, {hour, min, sec}}) when is_integer(sec),
do: NaiveDateTime.from_erl!({date, {hour, min, sec}})
defp to_naive({date, {hour, min, sec}}),
do: NaiveDateTime.from_erl!({date, {hour, min, trunc(sec)}}, parse_sec(sec))
defp to_utc({date, time}),
do: DateTime.from_naive!(to_naive({date, time}), "Etc/UTC")
defp parse_sec(sec),
do: {trunc((sec - trunc(sec)) * 1_000_000), 6}
end
|
lib/jamdb_oracle.ex
| 0.799168 | 0.430088 |
jamdb_oracle.ex
|
starcoder
|
defmodule Engine.Ethereum.Event.Listener do
@moduledoc """
GenServer running the listener.
Periodically fetches events made on dynamically changing block range
from the root chain contract and feeds them to a callback.
It is **not** responsible for figuring out which ranges of Ethereum blocks are eligible to scan and when, see
`Coordinator` for that.
The `Coordinator` provides the `SyncGuide` that indicates what's eligible to scan, taking into account:
- finality margin
- mutual ordering and dependencies of various types of Ethereum events to be respected.
It **is** responsible for processing all events from all blocks and processing them only once.
It accomplishes that by keeping a persisted value, both in `OMG.DB` and in its state, that reflects up to which
Ethereum height the events were processed (`synced_height`).
This `synced_height` is updated after every batch of Ethereum events gets successfully consumed by
`callbacks.process_events_callback`, as called in `sync_height/2`, together with all the `OMG.DB` updates this
callback returns, atomically.
The key in `PG` used to persist `synced_height` is defined by the value of `service_name`.
What specific Ethereum events it fetches, and what it does with them is up to predefined `callbacks`.
See `Listener.Core` for the implementation of the business logic for the listener.
"""
use GenServer
alias Engine.DB.ListenerState
alias Engine.Ethereum.Event.Coordinator
alias Engine.Ethereum.Event.Listener.Core
alias Engine.Ethereum.Event.Listener.Storage
alias Engine.Ethereum.RootChain.Event
require Logger
### Client
@spec start_link(keyword()) :: GenServer.on_start()
def start_link(opts) do
name = Keyword.fetch!(opts, :service_name)
GenServer.start_link(__MODULE__, opts, name: name)
end
@doc """
Returns child_specs for the given `Listener` setup, to be included e.g. in Supervisor's children.
See `handle_continue/2` for the required keyword arguments.
"""
@spec prepare_child(keyword()) :: Supervisor.child_spec()
def prepare_child(opts) do
name = Keyword.fetch!(opts, :service_name)
%{id: name, start: {__MODULE__, :start_link, [opts]}, shutdown: :brutal_kill, type: :worker}
end
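# A sketch of wiring a listener into a supervision tree; the service name,
# callbacks and ETS table below are hypothetical placeholders:
#
#     children = [
#       Engine.Ethereum.Event.Listener.prepare_child(
#         service_name: :depositor,
#         contract_deployment_height: 0,
#         get_events_callback: &MyApp.RootChain.get_deposits/2,
#         process_events_callback: &MyApp.Deposits.process/2,
#         metrics_collection_interval: 60_000,
#         ets: :listener_storage
#       )
#     ]
#     Supervisor.start_link(children, strategy: :one_for_one)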
### Server
@doc """
Initializes the GenServer state, most work done in `handle_continue/2`.
"""
def init(opts) do
{:ok, opts, {:continue, :setup}}
end
@doc """
Reads the listening status (up to which Ethereum height the events were processed) from storage and initializes
the logic `Listener.Core` with it. Does an initial `Coordinator.check_in` with the
Ethereum height it last stopped on. Next, it continues to monitor and fetch the events as usual.
"""
def handle_continue(:setup, opts) do
contract_deployment_height = Keyword.fetch!(opts, :contract_deployment_height)
service_name = Keyword.fetch!(opts, :service_name)
get_events_callback = Keyword.fetch!(opts, :get_events_callback)
process_events_callback = Keyword.fetch!(opts, :process_events_callback)
metrics_collection_interval = Keyword.fetch!(opts, :metrics_collection_interval)
ets = Keyword.fetch!(opts, :ets)
_ = Logger.info("Starting #{inspect(__MODULE__)} for #{service_name}.")
# we don't need to ever look at earlier than contract deployment
last_event_block_height =
max_of_three(
Storage.get_local_synced_height(service_name, ets),
contract_deployment_height,
ListenerState.get_height(service_name)
)
request_max_size = 1000
state =
Core.init(
service_name,
last_event_block_height,
request_max_size,
ets
)
callbacks = %{
get_events_callback: get_events_callback,
process_events_callback: process_events_callback
}
:ok = Bus.subscribe({:root_chain, "ethereum_new_height"}, link: true)
:ok = Coordinator.check_in(state.synced_height, service_name)
{:ok, _} = :timer.send_interval(metrics_collection_interval, self(), :send_metrics)
_ = Logger.info("Started #{inspect(__MODULE__)} for #{service_name}, synced_height: #{state.synced_height}")
{:noreply, {state, callbacks}}
end
def handle_info(:send_metrics, {state, callbacks}) do
:ok = :telemetry.execute([:process, __MODULE__], %{}, state)
{:noreply, {state, callbacks}}
end
@doc """
Main worker function, called on a cadence as initialized in `handle_continue/2`.
The cadence is every change of Ethereum height, notified via the Bus.
Does the following:
- asks `Coordinator` about how to sync, with respect to other services listening to Ethereum
- (`sync_height/2`) figures out what is the suitable range of Ethereum blocks to download events for
- (`sync_height/2`) if necessary fetches those events to the in-memory cache in `Listener.Core`
- (`sync_height/2`) executes the related event-consuming callback with events as arguments
- (`sync_height/2`) does `OMG.DB` updates that persist the processed Ethereum height as well as whatever the
callbacks returned to persist
- (`sync_height/2`) `Coordinator.check_in` to tell the rest what Ethereum height was processed.
"""
def handle_info({:internal_event_bus, :ethereum_new_height, _new_height}, {state, callbacks}) do
case Coordinator.get_sync_info() do
:nosync ->
:ok = Coordinator.check_in(state.synced_height, state.service_name)
{:noreply, {state, callbacks}}
sync_info ->
new_state = sync_height(state, callbacks, sync_info)
{:noreply, {new_state, callbacks}}
end
end
def handle_cast(:sync, {state, callbacks}) do
case Coordinator.get_sync_info() do
:nosync ->
:ok = Coordinator.check_in(state.synced_height, state.service_name)
{:noreply, {state, callbacks}}
sync_info ->
new_state = sync_height(state, callbacks, sync_info)
{:noreply, {new_state, callbacks}}
end
end
defp sync_height(state, callbacks, sync_guide) do
{events, new_state} =
state
|> Core.calc_events_range_set_height(sync_guide)
|> get_events(callbacks.get_events_callback)
# process_events_callback takes care of persistence itself
{:ok, _} = callbacks.process_events_callback.(events, state.service_name)
:ok = :telemetry.execute([:process, __MODULE__], %{events: events}, new_state)
:ok = publish_events(events)
:ok = Storage.update_synced_height(new_state.service_name, new_state.synced_height, new_state.ets)
:ok = Coordinator.check_in(new_state.synced_height, state.service_name)
new_state
end
defp get_events({{from, to}, state}, get_events_callback) do
{:ok, new_events} = get_events_callback.(from, to)
{new_events, state}
end
defp get_events({:dont_fetch_events, state}, _callback) do
{[], state}
end
@spec publish_events(list(Event.t())) :: :ok
defp publish_events([%{event_signature: event_signature} | _] = data) do
[event_signature, _] = String.split(event_signature, "(")
{:root_chain, event_signature}
|> Bus.Event.new(:data, data)
|> Bus.local_broadcast()
end
defp publish_events([]), do: :ok
# the guards are here to protect us from number-to-term comparison
defp max_of_three(a, b, c) when is_number(a) and is_number(b) and is_number(c) do
a
|> max(b)
|> max(c)
end
end
|
apps/engine/lib/engine/ethereum/event/listener.ex
| 0.85405 | 0.468365 |
listener.ex
|
starcoder
|
defmodule Zaryn.Mining.TransactionContext do
@moduledoc """
Gathering of the necessary information for the transaction validation:
- previous transaction
- unspent outputs
"""
alias Zaryn.Crypto
alias Zaryn.P2P
alias Zaryn.P2P.Node
alias __MODULE__.DataFetcher
alias __MODULE__.NodeDistribution
alias Zaryn.Replication
alias Zaryn.TransactionChain.Transaction
alias Zaryn.TransactionChain.Transaction.ValidationStamp.LedgerOperations.UnspentOutput
require Logger
@doc """
Request concurrently the context of the transaction including the previous transaction,
the unspent outputs and P2P view for the storage nodes and validation nodes
as long as the involved nodes for the retrieval
"""
@spec get(
previous_tx_address :: binary(),
chain_storage_node_public_keys :: list(Crypto.key()),
beacon_storage_nodes_public_keys :: list(Crypto.key()),
validation_node_public_keys :: list(Crypto.key()),
unspent_outputs_confirmation? :: boolean()
) ::
{Transaction.t(), list(UnspentOutput.t()), list(Node.t()), bitstring(), bitstring(),
bitstring()}
def get(
previous_address,
chain_storage_node_public_keys,
beacon_storage_nodes_public_keys,
validation_node_public_keys,
unspent_outputs_confirmation? \\ true
) do
nodes_distribution = previous_nodes_distribution(previous_address, 5, 3)
context =
wrap_async_queries(
previous_address,
unspent_outputs_confirmation?,
chain_storage_node_public_keys,
beacon_storage_nodes_public_keys,
validation_node_public_keys,
nodes_distribution
)
|> Enum.reduce(%{}, &reduce_tasks/2)
{
Map.get(context, :previous_transaction),
Map.get(context, :unspent_outputs, []),
Map.get(context, :previous_storage_nodes, []),
Map.get(context, :chain_storage_nodes_view, <<>>),
Map.get(context, :beacon_storage_nodes_view, <<>>),
Map.get(context, :validation_nodes_view, <<>>)
}
end
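# Call sketch (the addresses and key lists below are placeholders):
#
#     {prev_tx, utxos, prev_storage_nodes, chain_view, beacon_view, validation_view} =
#       Zaryn.Mining.TransactionContext.get(
#         previous_address,
#         chain_storage_node_public_keys,
#         beacon_storage_nodes_public_keys,
#         validation_node_public_keys
#       )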
defp previous_nodes_distribution(previous_address, nb_sub_lists, sample_size) do
authorized_nodes = P2P.authorized_nodes()
node_list =
if length(authorized_nodes) == 1 do
authorized_nodes
else
Enum.reject(authorized_nodes, &(&1.first_public_key == Crypto.first_node_public_key()))
end
previous_address
|> Replication.chain_storage_nodes(node_list)
|> NodeDistribution.split_storage_nodes(nb_sub_lists, sample_size)
end
defp wrap_async_queries(
previous_address,
unspent_outputs_confirmation?,
chain_storage_node_public_keys,
beacon_storage_nodes_public_keys,
validation_node_public_keys,
_nodes_distribution = [
prev_tx_nodes_split,
unspent_outputs_nodes_split,
chain_storage_nodes_view_split,
beacon_storage_nodes_view_split,
validation_nodes_view_split
]
) do
[
prev_tx: fn ->
DataFetcher.fetch_previous_transaction(previous_address, prev_tx_nodes_split)
end,
utxo: fn ->
DataFetcher.fetch_unspent_outputs(
previous_address,
unspent_outputs_nodes_split,
unspent_outputs_confirmation?
)
end,
chain_nodes_view: fn ->
DataFetcher.fetch_p2p_view(
chain_storage_node_public_keys,
chain_storage_nodes_view_split
)
end,
beacon_nodes_view: fn ->
DataFetcher.fetch_p2p_view(
beacon_storage_nodes_public_keys,
beacon_storage_nodes_view_split
)
end,
validation_nodes_view: fn ->
DataFetcher.fetch_p2p_view(validation_node_public_keys, validation_nodes_view_split)
end
]
|> Task.async_stream(fn {domain, fun} ->
{domain, fun.()}
end)
|> Stream.filter(&match?({:ok, _}, &1))
|> Stream.map(&elem(&1, 1))
end
defp reduce_tasks({_, {:error, _}}, acc), do: acc
defp reduce_tasks(
{:prev_tx, {:ok, prev_tx = %Transaction{}, prev_tx_node = %Node{}}},
acc
) do
acc
|> Map.put(:previous_transaction, prev_tx)
|> Map.update(
:previous_storage_nodes,
[prev_tx_node],
&P2P.distinct_nodes([prev_tx_node | &1])
)
end
defp reduce_tasks({:utxo, {:ok, unspent_outputs, unspent_outputs_nodes}}, acc)
when is_list(unspent_outputs) and is_list(unspent_outputs_nodes) do
acc
|> Map.put(:unspent_outputs, unspent_outputs)
|> Map.update(
:previous_storage_nodes,
unspent_outputs_nodes,
&P2P.distinct_nodes(&1 ++ unspent_outputs_nodes)
)
end
defp reduce_tasks({:chain_nodes_view, {:ok, view, node = %Node{}}}, acc) do
acc
|> Map.put(:chain_storage_nodes_view, view)
|> Map.update(
:previous_storage_nodes,
[node],
&P2P.distinct_nodes([node | &1])
)
end
defp reduce_tasks({:beacon_nodes_view, {:ok, view, node = %Node{}}}, acc) do
acc
|> Map.put(:beacon_storage_nodes_view, view)
|> Map.update(
:previous_storage_nodes,
[node],
&P2P.distinct_nodes([node | &1])
)
end
defp reduce_tasks({:validation_nodes_view, {:ok, view, node = %Node{}}}, acc) do
acc
|> Map.put(:validation_nodes_view, view)
|> Map.update(
:previous_storage_nodes,
[node],
&P2P.distinct_nodes([node | &1])
)
end
end
|
lib/zaryn/mining/transaction_context.ex
| 0.803135 | 0.481515 |
transaction_context.ex
|
starcoder
|
defmodule ExHashRing.HashRing do
@compile :native
@type t :: %__MODULE__{}
@type override_map :: %{atom => [binary]}
use Bitwise
alias ExHashRing.HashRing.Utils
defstruct num_replicas: 0, nodes: [], overrides: %{}, items: {}
@spec new :: t
def new, do: new([])
@spec new([binary], integer, override_map) :: t
def new(nodes, num_replicas \\ 512, overrides \\ %{}) do
rebuild(%__MODULE__{nodes: nodes, overrides: overrides, num_replicas: num_replicas})
end
@spec set_nodes(t, [binary]) :: t
def set_nodes(ring, nodes) do
rebuild(%{ring | nodes: nodes})
end
@spec add_node(t, binary) :: {:ok, t} | :error
def add_node(%{nodes: nodes} = ring, name) do
if name in nodes do
:error
else
{:ok, rebuild(%{ring | nodes: [name | nodes]})}
end
end
@spec remove_node(t, binary) :: {:ok, t} | :error
def remove_node(%{nodes: nodes} = ring, name) do
if name in nodes do
{:ok, rebuild(%{ring | nodes: nodes -- [name]})}
else
:error
end
end
@spec set_overrides(t, override_map) :: {:ok, t}
def set_overrides(ring, overrides) do
overrides =
overrides
|> Enum.filter(fn {_, values} -> length(values) > 0 end)
|> Map.new()
{:ok, rebuild(%{ring | overrides: overrides})}
end
@spec find_node(t, binary | integer) :: binary | nil
def find_node(%{overrides: overrides} = ring, key) when map_size(overrides) > 0 do
find_override(overrides, key) || find_node_inner(ring, key)
end
def find_node(ring, key) do
find_node_inner(ring, key)
end
@spec find_node_inner(t, binary | integer) :: binary | nil
defp find_node_inner(%{items: items}, key) do
with {_, name} <- find_next_highest_item(items, Utils.hash(key)) do
name
end
end
@spec find_nodes(t, binary | integer, integer) :: [binary]
def find_nodes(%{items: items, nodes: nodes, overrides: overrides}, key, num)
when num > 0 and map_size(overrides) > 0 do
{found, found_length} =
case overrides do
%{^key => overrides} ->
{nodes, length} = Utils.take_max(overrides, num)
{Enum.reverse(nodes), length}
_ ->
{[], 0}
end
do_find_nodes(
items,
max(num - found_length, 0),
length(nodes),
Utils.hash(key),
found,
found_length
)
end
def find_nodes(%{items: items, nodes: nodes}, key, num) do
do_find_nodes(items, num, length(nodes), Utils.hash(key), [], 0)
end
## Private
defp do_find_nodes(_items, 0, _num_nodes, _key, found, _found_length) do
Enum.reverse(found)
end
defp do_find_nodes(_items, _remaining, num_nodes, _key, found, num_nodes) do
Enum.reverse(found)
end
defp do_find_nodes(items, remaining, num_nodes, key, found, found_length) do
{number, name} = find_next_highest_item(items, key)
if name in found do
do_find_nodes(items, remaining, num_nodes, number, found, found_length)
else
do_find_nodes(items, remaining - 1, num_nodes, number, [name | found], found_length + 1)
end
end
defp rebuild(%{nodes: nodes} = ring) do
%{ring | items: Utils.gen_items(nodes, ring.num_replicas) |> List.to_tuple()}
end
def find_override(overrides, key) do
case overrides do
%{^key => values} -> hd(values)
_ -> nil
end
end
defp find_next_highest_item(items, key) do
find_next_highest_item(items, tuple_size(items), key)
end
defp find_next_highest_item(_items, 0, _key) do
nil
end
defp find_next_highest_item(items, num_items, key) do
find_next_highest_item(items, num_items, key, 0, num_items - 1)
end
defp find_next_highest_item(items, num_items, key, min, max) do
mid = div(min + max, 2)
{number, _name} = elem(items, mid)
{min, max} =
if number > key do
# Key is in the lower half.
{min, mid - 1}
else
# Key is in the upper half.
{mid + 1, max}
end
cond do
min > max and min == num_items ->
# Past the end of the ring, return the first item.
elem(items, 0)
min > max ->
# Return the next highest item.
elem(items, min)
true ->
find_next_highest_item(items, num_items, key, min, max)
end
end
end
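# Usage sketch (node names and key are illustrative):
#
#     ring = ExHashRing.HashRing.new(["node-a", "node-b", "node-c"])
#     ExHashRing.HashRing.find_node(ring, "some-key")     #=> e.g. "node-b"
#     ExHashRing.HashRing.find_nodes(ring, "some-key", 2) #=> two distinct nodes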
|
lib/hash_ring.ex
| 0.821295 | 0.42471 |
hash_ring.ex
|
starcoder
|
defmodule GSS.Client.Limiter do
@moduledoc """
Model of Limiter request subscribed to Client with partition :write or :read
This process is a ProducerConsumer for this GenStage pipeline.
"""
use GenStage
require Logger
@type state :: %__MODULE__{
max_demand: pos_integer(),
max_interval: timeout(),
producer: GenStage.from(),
scheduled_at: pos_integer() | nil,
taked_events: pos_integer(),
interval: timeout()
}
defstruct [:max_demand, :max_interval, :producer, :scheduled_at, :taked_events, :interval]
@type options :: [
name: atom(),
max_demand: pos_integer() | nil,
max_interval: timeout() | nil,
interval: timeout() | nil,
clients: [{atom(), keyword()} | atom()]
]
@doc """
Starts an limiter manager linked to the current process.
If the event manager is successfully created and initialized, the function
returns {:ok, pid}, where pid is the PID of the server. If a process with the
specified server name already exists, the function returns {:error,
{:already_started, pid}} with the PID of that process.
## Options
* `:name` - used for name registration as described in the "Name
registration" section of the module documentation
* `:interval` - ask new events from producer after `:interval` milliseconds.
* `:max_demand` - count of maximum requests per `:maximum_interval`
* `:max_interval` - maximum time that allowed in `:max_demand` requests
* `:clients` - list of clients with partition options. For example `[{GSS.Client, partition: :read}]`.
"""
@spec start_link(options()) :: GenServer.on_start()
def start_link(options \\ []) do
GenStage.start_link(__MODULE__, options, name: Keyword.get(options, :name))
end
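# Start sketch (option values are illustrative; the defaults in `init/1` apply
# when an option is omitted):
#
#     GSS.Client.Limiter.start_link(
#       name: GSS.Client.WriteLimiter,
#       max_demand: 100,
#       max_interval: 1_000,
#       interval: 100,
#       clients: [{GSS.Client, partition: :write}]
#     )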
## Callbacks
def init(args) do
Logger.debug("init: #{inspect(args)}")
state = %__MODULE__{
max_demand: args[:max_demand] || 100,
max_interval: args[:max_interval] || 1_000,
interval: args[:interval] || 100,
taked_events: 0,
scheduled_at: nil
}
Process.send_after(self(), :ask, 0)
{:producer_consumer, state, subscribe_to: args[:clients]}
end
# Set the subscription to manual to control when to ask for events
def handle_subscribe(:producer, _options, from, state) do
{:manual, Map.put(state, :producer, from)}
end
# Make the subscriptions to auto for consumers
def handle_subscribe(:consumer, _, _, state) do
{:automatic, state}
end
def handle_events(events, _from, state) do
Logger.debug(fn -> "Limiter Handle events: #{inspect(events)}" end)
state =
state
|> Map.update!(:taked_events, &(&1 + length(events)))
|> schedule_counts()
{:noreply, events, state}
end
@doc """
Gives events for the next stage to process when requested
"""
def handle_demand(demand, state) when demand > 0 do
{:noreply, [], state}
end
@doc """
Ask new events if needed
"""
def handle_info(:ask, state) do
{:noreply, [], ask_and_schedule(state)}
end
@doc """
Check to reach limit.
If limit not reached ask again after `:interval` timeout,
otherwise ask after `:max_interval` timeout.
"""
def ask_and_schedule(state) do
cond do
limited_events?(state) ->
Process.send_after(self(), :ask, state.max_interval)
clear_counts(state)
interval_expired?(state) ->
GenStage.ask(state.producer, state.max_demand)
Process.send_after(self(), :ask, state.interval)
clear_counts(state)
true ->
GenStage.ask(state.producer, state.max_demand)
Process.send_after(self(), :ask, state.interval)
schedule_counts(state)
end
end
# taken events reached or exceeded the max demand
defp limited_events?(state) do
state.taked_events >= state.max_demand
end
# check whether the max interval has elapsed
defp interval_expired?(%__MODULE__{scheduled_at: nil}), do: false
defp interval_expired?(%__MODULE__{scheduled_at: scheduled_at, max_interval: max_interval}) do
now = :erlang.timestamp()
:timer.now_diff(now, scheduled_at) >= max_interval * 1000
end
defp clear_counts(state) do
%{state | taked_events: 0, scheduled_at: nil}
end
# set scheduled_at to the current timestamp
defp schedule_counts(%__MODULE__{scheduled_at: nil} = state) do
%{state | scheduled_at: :erlang.timestamp()}
end
defp schedule_counts(state), do: state
end
|
lib/elixir_google_spreadsheets/client/limiter.ex
| 0.918485 | 0.478651 |
limiter.ex
|
starcoder
|
if Code.ensure_loaded?(Plug) do
defmodule Guardian.Plug.VerifyHeader do
@moduledoc """
Looks for and validates a token found in the `Authorization` header.
In the case where:
1. The session is not loaded
2. A token is already found for `:key`
This plug will not do anything.
This, like all other Guardian plugs, requires a Guardian pipeline to be setup.
It requires an implementation module, an error handler and a key.
These can be set either:
1. Upstream on the connection with `plug Guardian.Pipeline`
2. Upstream on the connection with `Guardian.Pipeline.{put_module, put_error_handler, put_key}`
3. Inline with an option of `:module`, `:error_handler`, `:key`
If a token is found but is invalid, the error handler will be called with
`auth_error(conn, {:invalid_token, reason}, opts)`
Once a token has been found it will be decoded, the token and claims will be put onto the connection.
They will be available using `Guardian.Plug.current_claims/2` and `Guardian.Plug.current_token/2`
Options:
* `claims` - The literal claims to check to ensure that a token is valid
* `header_name` - The name of the header to search for a token. Defaults to `authorization`.
* `realm` - The prefix for the token in the header. Defaults to `Bearer`. `:none` will not use a prefix.
* `key` - The location to store the information in the connection. Defaults to: `default`
### Example
```elixir
# setup the upstream pipeline
plug Guardian.Plug.VerifyHeader, claims: %{typ: "access"}
```
This will check the authorization header for a token
`Authorization: Bearer <token>`
This token will be placed into the connection depending on the key and can be accessed with
`Guardian.Plug.current_token` and `Guardian.Plug.current_claims`
OR
`MyApp.ImplementationModule.current_token` and `MyApp.ImplementationModule.current_claims`
"""
alias Guardian.Plug.Pipeline
import Plug.Conn
@behaviour Plug
@impl Plug
@spec init(opts :: Keyword.t()) :: Keyword.t()
def init(opts \\ []) do
realm = Keyword.get(opts, :realm, "Bearer")
case realm do
"" ->
opts
:none ->
opts
_realm ->
{:ok, reg} = Regex.compile("#{realm}\\:?\\s+(.*)$", "i")
Keyword.put(opts, :realm_reg, reg)
end
end
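# Variant sketch: with `realm: :none` no prefix regex is compiled, so a bare
# token in the header is accepted as-is:
#
#     plug Guardian.Plug.VerifyHeader, realm: :none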
@impl Plug
@spec call(Plug.Conn.t(), Keyword.t()) :: Plug.Conn.t()
def call(conn, opts) do
with nil <- Guardian.Plug.current_token(conn, opts),
{:ok, token} <- fetch_token_from_header(conn, opts),
module <- Pipeline.fetch_module!(conn, opts),
claims_to_check <- Keyword.get(opts, :claims, %{}),
key <- storage_key(conn, opts),
{:ok, claims} <- Guardian.decode_and_verify(module, token, claims_to_check, opts) do
conn
|> Guardian.Plug.put_current_token(token, key: key)
|> Guardian.Plug.put_current_claims(claims, key: key)
else
:no_token_found ->
conn
{:error, reason} ->
conn
|> Pipeline.fetch_error_handler!(opts)
|> apply(:auth_error, [conn, {:invalid_token, reason}, opts])
|> halt()
_ ->
conn
end
end
@spec fetch_token_from_header(Plug.Conn.t(), Keyword.t()) ::
:no_token_found
| {:ok, String.t()}
defp fetch_token_from_header(conn, opts) do
header_name = Keyword.get(opts, :header_name, "authorization")
headers = get_req_header(conn, header_name)
fetch_token_from_header(conn, opts, headers)
end
@spec fetch_token_from_header(Plug.Conn.t(), Keyword.t(), [String.t()]) ::
:no_token_found
| {:ok, String.t()}
defp fetch_token_from_header(_, _, []), do: :no_token_found
defp fetch_token_from_header(conn, opts, [token | tail]) do
reg = Keyword.get(opts, :realm_reg, ~r/^(.*)$/)
trimmed_token = String.trim(token)
case Regex.run(reg, trimmed_token) do
[_, match] -> {:ok, String.trim(match)}
_ -> fetch_token_from_header(conn, opts, tail)
end
end
@spec storage_key(Plug.Conn.t(), Keyword.t()) :: String.t()
defp storage_key(conn, opts), do: Pipeline.fetch_key(conn, opts)
end
end
|
lib/guardian/plug/verify_header.ex
| 0.788054 | 0.919859 |
verify_header.ex
|
starcoder
|
defmodule DeadLetter do
@moduledoc """
Structure around errors in the data processing pipeline. `DeadLetter`
objects should be written to the dead-letter-queue through `dlq`.
## Configuration
* `dataset_id` - Required.
* `subset_id` - Required.
* `app_name` - Required. Atom or string name for application that produced the error.
* `original_message` - Original message that caused the error.
* `stacktrace` - Stacktrace for the error.
* `reason` - Reason for the error. This is usually taken from an `{:error, reason}` tuple.
"""
@type t :: %__MODULE__{
version: integer,
dataset_id: String.t(),
subset_id: String.t(),
original_message: term,
app_name: String.Chars.t(),
stacktrace: list,
reason: Exception.t() | String.Chars.t(),
timestamp: DateTime.t()
}
@derive Jason.Encoder
defstruct version: 1,
dataset_id: nil,
subset_id: nil,
original_message: nil,
app_name: nil,
stacktrace: [],
reason: nil,
timestamp: nil
@spec new(keyword) :: t
def new(values) do
reason = Keyword.get(values, :reason, nil)
stacktrace = Keyword.get(values, :stacktrace, [])
struct_values =
values
|> Keyword.update(:app_name, "", &to_string/1)
|> Keyword.update(:original_message, "", &sanitize_message/1)
|> Keyword.update(:reason, "", &format_reason/1)
|> Keyword.put(:stacktrace, format_stacktrace(stacktrace, reason))
|> Keyword.put_new(:timestamp, DateTime.utc_now())
struct(__MODULE__, struct_values)
end
defp format_reason({:failed, reason}) do
reason = Exception.normalize(:error, reason)
Exception.format(:error, reason)
end
defp format_reason({kind, reason, _stacktrace}) do
reason = Exception.normalize(kind, reason)
Exception.format(kind, reason)
end
defp format_reason(reason) when reason != nil do
reason = Exception.normalize(:error, reason)
Exception.format(:error, reason)
end
defp format_reason(nil), do: ""
defp format_stacktrace(stacktrace, _) when stacktrace != nil and stacktrace != [] do
Exception.format_stacktrace(stacktrace)
end
defp format_stacktrace(_, {_kind, _reason, stacktrace}) do
Exception.format_stacktrace(stacktrace)
end
defp format_stacktrace(_, _) do
{:current_stacktrace, trace} = Process.info(self(), :current_stacktrace)
Exception.format_stacktrace(trace)
end
defp sanitize_message(message) do
case Jason.encode(message) do
{:ok, _} -> message
{:error, _} -> inspect(message)
end
end
end
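# Construction sketch (field values are placeholders):
#
#     DeadLetter.new(
#       dataset_id: "ds1",
#       subset_id: "default",
#       app_name: :my_app,
#       original_message: %{"foo" => "bar"},
#       reason: :invalid_payload
#     )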
|
apps/definition_deadletter/lib/dead_letter.ex
| 0.823825 | 0.519887 |
dead_letter.ex
|
starcoder
|
defmodule Sanbase.Clickhouse.Label do
@moduledoc """
Labeling addresses
"""
@type label :: %{
name: String.t(),
metadata: String.t()
}
def list_all(:all = _blockchain) do
query = """
SELECT DISTINCT(label) FROM blockchain_address_labels
"""
Sanbase.ClickhouseRepo.query_transform(query, [], fn [label] -> label end)
end
def list_all(blockchain) do
query = """
SELECT DISTINCT(label) FROM blockchain_address_labels PREWHERE blockchain = ?1
"""
Sanbase.ClickhouseRepo.query_transform(query, [blockchain], fn [label] -> label end)
end
def add_labels(_, []), do: {:ok, []}
def add_labels(slug, maps) when is_list(maps) do
addresses = get_list_of_addresses(maps)
blockchain = slug_to_blockchain(slug)
{query, args} = addresses_labels_query(slug, blockchain, addresses)
Sanbase.ClickhouseRepo.query_reduce(query, args, %{}, fn [address, label, metadata], acc ->
label = %{name: label, metadata: metadata, origin: "santiment"}
Map.update(acc, address, [label], &[label | &1])
end)
|> case do
{:ok, labels_map} ->
{:ok, do_add_labels(maps, labels_map)}
{:error, reason} ->
{:error, reason}
end
end
def get_address_labels(_slug, []), do: {:ok, %{}}
def get_address_labels(slug, addresses) when is_list(addresses) do
blockchain = slug_to_blockchain(slug)
{query, args} = addresses_labels_query(slug, blockchain, addresses)
Sanbase.ClickhouseRepo.query_reduce(query, args, %{}, fn [address, label, metadata], acc ->
label = %{name: label, metadata: metadata, origin: "santiment"}
Map.update(acc, address, [label], &[label | &1])
end)
end
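# Query sketch (slug and address are placeholders; runs against ClickHouse):
#
#     {:ok, labels_by_address} =
#       Sanbase.Clickhouse.Label.get_address_labels("ethereum", ["0x123..."])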
# Private functions
# For backwards compatibility, if the slug is nil treat it as ethereum blockchain
def slug_to_blockchain(nil), do: "ethereum"
def slug_to_blockchain(slug), do: Sanbase.Model.Project.slug_to_blockchain(slug)
defp addresses_labels_query(slug, "ethereum", addresses) do
query = create_addresses_labels_query(slug)
args =
case slug do
nil -> [addresses]
_ -> [addresses, slug]
end
{query, args}
end
defp addresses_labels_query(_slug, blockchain, addresses) do
query = """
SELECT address, label, metadata
FROM blockchain_address_labels FINAL
PREWHERE blockchain = ?1 AND address IN (?2)
HAVING sign = 1
"""
{query, [blockchain, addresses]}
end
defp get_list_of_addresses(maps) do
maps
|> Enum.flat_map(fn map ->
[
Map.get(map, :address) && map.address.address,
Map.get(map, :from_address) && map.from_address.address,
Map.get(map, :to_address) && map.to_address.address
]
end)
|> Enum.uniq()
|> Enum.reject(&is_nil/1)
end
defp do_add_labels(maps, labels_map) do
add_labels = fn
# In this case the address type does not exist, so the result is not used
nil -> nil
map -> Map.put(map, :labels, Map.get(labels_map, map.address, []))
end
maps
|> Enum.map(fn %{} = map ->
map
|> Map.replace(:address, add_labels.(Map.get(map, :address)))
|> Map.replace(:from_address, add_labels.(Map.get(map, :from_address)))
|> Map.replace(:to_address, add_labels.(Map.get(map, :to_address)))
end)
end
defp create_addresses_labels_query(slug) do
"""
SELECT address,
label,
concat('\{', '"owner": "', owner, '"\}') as metadata
FROM (
SELECT address,
arrayJoin(labels_owners_filtered) as label_owner,
label_owner.1 as label_raw,
label_owner.2 as owner,
multiIf(
owner = 'uniswap router', 'Uniswap Router',
label_raw='uniswap_ecosystem', 'Uniswap Ecosystem',
label_raw='cex_dex_trader', 'CEX & DEX Trader',
label_raw='centralized_exchange', 'CEX',
label_raw='decentralized_exchange', 'DEX',
label_raw='withdrawal', 'CEX Trader',
label_raw='dex_trader', 'DEX Trader',
#{whale_filter(slug, position: 2)}
label_raw='deposit', 'CEX Deposit',
label_raw='defi', 'DeFi',
label_raw='deployer', 'Deployer',
label_raw='stablecoin', 'Stablecoin',
label_raw='uniswap_ecosystem', 'Uniswap',
label_raw='makerdao-cdp-owner', 'MakerDAO CDP Owner',
label_raw='makerdao-bite-keeper', 'MakerDAO Bite Keeper',
label_raw='genesis', 'Genesis',
label_raw='proxy', 'Proxy',
label_raw='system', 'System',
label_raw='miner', 'Miner',
label_raw='contract_factory', 'Contract Factory',
label_raw='derivative_token', 'Derivative Token',
label_raw='eth2stakingcontract', 'ETH2 Staking Contract',
label_raw
) as label
FROM (
SELECT address_hash,
address,
asset_id,
splitByChar(',', labels) as label_arr,
splitByChar(',', owners) as owner_arr,
arrayZip(label_arr, owner_arr) as labels_owners,
multiIf(
-- if there is the `system` label for an address, we exclude other labels
has(label_arr, 'system'), arrayFilter(x -> x.1 = 'system', labels_owners),
-- if an address has a `centralized_exchange` label and at least one of the `deposit` and
-- `withdrawal` labels, we exclude the `deposit` and `withdrawal` labels.
has(label_arr, 'centralized_exchange') AND hasAny(label_arr, ['deposit', 'withdrawal']), arrayFilter(x -> x.1 NOT IN ('deposit', 'withdrawal'), labels_owners),
-- if there are the `dex_trader` and `decentralized_exchange` labels for an address, we exclude `dex_trader` label
hasAll(label_arr, ['dex_trader', 'decentralized_exchange']), arrayFilter(x -> x.1 != 'dex_trader', labels_owners),
-- if there are the `deposit` and `withdrawal` labels for an address, we exclude the `withdrawal` label
hasAll(label_arr, ['deposit', 'withdrawal']), arrayFilter(x -> x.1 != 'withdrawal', labels_owners),
-- if there are the `dex_trader` and `withdrawal` labels for an address, we replace these metrics to the `cex_dex_trader` label
hasAll(label_arr, ['dex_trader', 'withdrawal']), arrayPushFront(arrayFilter(x -> x.1 NOT IN ['dex_trader', 'withdrawal'], labels_owners), ('cex_dex_trader', arrayFilter(x -> x.1 == 'withdrawal', labels_owners)[1].2)),
labels_owners
) as labels_owners_filtered
FROM eth_labels_final
ANY INNER JOIN (
SELECT cityHash64(address) as address_hash,
address
FROM (
SELECT lower(arrayJoin([?1])) as address
)
)
USING address_hash
PREWHERE address_hash IN (
SELECT cityHash64(address)
FROM (
SELECT lower(arrayJoin([?1])) as address
)
)
)
ANY LEFT JOIN (
select asset_id, name from asset_metadata
) USING asset_id
)
WHERE label != 'whale_wrong'
"""
end
defp whale_filter(nil, _) do
"""
label_raw='whale', concat('Whale, token:', name),
"""
end
defp whale_filter(slug, opts) when is_binary(slug) do
position = Keyword.fetch!(opts, :position)
"""
label_raw='whale' AND asset_id = (SELECT asset_id FROM asset_metadata FINAL PREWHERE name = ?#{position}), 'Whale',
label_raw='whale' AND asset_id != (SELECT asset_id FROM asset_metadata FINAL PREWHERE name = ?#{position}), 'whale_wrong',
"""
end
end
|
lib/sanbase/clickhouse/labels.ex
| 0.749087 | 0.455562 |
labels.ex
|
starcoder
|
defmodule Plausible.Stats.Query do
defstruct date_range: nil,
interval: nil,
period: nil,
filters: %{},
sample_threshold: 20_000_000,
include_imported: false
@default_sample_threshold 20_000_000
def shift_back(%__MODULE__{period: "year"} = query, site) do
# Querying current year to date
{new_first, new_last} =
if Timex.compare(Timex.now(site.timezone), query.date_range.first, :year) == 0 do
diff =
Timex.diff(
Timex.beginning_of_year(Timex.now(site.timezone)),
Timex.now(site.timezone),
:days
) - 1
{query.date_range.first |> Timex.shift(days: diff),
Timex.now(site.timezone) |> Timex.to_date() |> Timex.shift(days: diff)}
else
diff = Timex.diff(query.date_range.first, query.date_range.last, :days) - 1
{query.date_range.first |> Timex.shift(days: diff),
query.date_range.last |> Timex.shift(days: diff)}
end
Map.put(query, :date_range, Date.range(new_first, new_last))
end
def shift_back(%__MODULE__{period: "month"} = query, site) do
# Querying current month to date
{new_first, new_last} =
if Timex.compare(Timex.now(site.timezone), query.date_range.first, :month) == 0 do
diff =
Timex.diff(
Timex.beginning_of_month(Timex.now(site.timezone)),
Timex.now(site.timezone),
:days
) - 1
{query.date_range.first |> Timex.shift(days: diff),
Timex.now(site.timezone) |> Timex.to_date() |> Timex.shift(days: diff)}
else
diff = Timex.diff(query.date_range.first, query.date_range.last, :days) - 1
{query.date_range.first |> Timex.shift(days: diff),
query.date_range.last |> Timex.shift(days: diff)}
end
Map.put(query, :date_range, Date.range(new_first, new_last))
end
def shift_back(query, _site) do
diff = Timex.diff(query.date_range.first, query.date_range.last, :days) - 1
new_first = query.date_range.first |> Timex.shift(days: diff)
new_last = query.date_range.last |> Timex.shift(days: diff)
Map.put(query, :date_range, Date.range(new_first, new_last))
end
def from(site, %{"period" => "realtime"} = params) do
date = today(site.timezone)
%__MODULE__{
period: "realtime",
interval: "minute",
date_range: Date.range(date, date),
filters: parse_filters(params),
sample_threshold: Map.get(params, "sample_threshold", @default_sample_threshold),
include_imported: false
}
end
def from(site, %{"period" => "day"} = params) do
date = parse_single_date(site.timezone, params)
%__MODULE__{
period: "day",
date_range: Date.range(date, date),
interval: "hour",
filters: parse_filters(params),
sample_threshold: Map.get(params, "sample_threshold", @default_sample_threshold)
}
|> maybe_include_imported(site, params)
end
def from(site, %{"period" => "7d"} = params) do
end_date = parse_single_date(site.timezone, params)
start_date = end_date |> Timex.shift(days: -6)
%__MODULE__{
period: "7d",
date_range: Date.range(start_date, end_date),
interval: "date",
filters: parse_filters(params),
sample_threshold: Map.get(params, "sample_threshold", @default_sample_threshold)
}
|> maybe_include_imported(site, params)
end
def from(site, %{"period" => "30d"} = params) do
end_date = parse_single_date(site.timezone, params)
start_date = end_date |> Timex.shift(days: -30)
%__MODULE__{
period: "30d",
date_range: Date.range(start_date, end_date),
interval: "date",
filters: parse_filters(params),
sample_threshold: Map.get(params, "sample_threshold", @default_sample_threshold)
}
|> maybe_include_imported(site, params)
end
def from(site, %{"period" => "month"} = params) do
date = parse_single_date(site.timezone, params)
start_date = Timex.beginning_of_month(date)
end_date = Timex.end_of_month(date)
%__MODULE__{
period: "month",
date_range: Date.range(start_date, end_date),
interval: "date",
filters: parse_filters(params),
sample_threshold: Map.get(params, "sample_threshold", @default_sample_threshold)
}
|> maybe_include_imported(site, params)
end
def from(site, %{"period" => "6mo"} = params) do
end_date =
parse_single_date(site.timezone, params)
|> Timex.end_of_month()
start_date =
Timex.shift(end_date, months: -5)
|> Timex.beginning_of_month()
%__MODULE__{
period: "6mo",
date_range: Date.range(start_date, end_date),
interval: Map.get(params, "interval", "month"),
filters: parse_filters(params),
sample_threshold: Map.get(params, "sample_threshold", @default_sample_threshold)
}
|> maybe_include_imported(site, params)
end
def from(site, %{"period" => "12mo"} = params) do
end_date =
parse_single_date(site.timezone, params)
|> Timex.end_of_month()
start_date =
Timex.shift(end_date, months: -11)
|> Timex.beginning_of_month()
%__MODULE__{
period: "12mo",
date_range: Date.range(start_date, end_date),
interval: Map.get(params, "interval", "month"),
filters: parse_filters(params),
sample_threshold: Map.get(params, "sample_threshold", @default_sample_threshold)
}
|> maybe_include_imported(site, params)
end
def from(site, %{"period" => "year"} = params) do
end_date =
parse_single_date(site.timezone, params)
|> Timex.end_of_year()
start_date = Timex.beginning_of_year(end_date)
%__MODULE__{
period: "year",
date_range: Date.range(start_date, end_date),
interval: Map.get(params, "interval", "month"),
filters: parse_filters(params),
sample_threshold: Map.get(params, "sample_threshold", @default_sample_threshold)
}
|> maybe_include_imported(site, params)
end
def from(site, %{"period" => "all"} = params) do
start_date =
site.stats_start_date
|> Timex.Timezone.convert("UTC")
|> Timex.Timezone.convert(site.timezone)
|> Timex.to_date()
now = today(site.timezone)
cond do
Timex.diff(now, start_date, :months) > 0 ->
from(
site,
Map.merge(params, %{
"period" => "custom",
"from" => Date.to_iso8601(start_date),
"to" => Date.to_iso8601(now),
"interval" => "month"
})
)
|> Map.put(:period, "all")
Timex.diff(now, start_date, :days) > 0 ->
from(
site,
Map.merge(params, %{
"period" => "custom",
"from" => Date.to_iso8601(start_date),
"to" => Date.to_iso8601(now),
"interval" => "date"
})
)
|> Map.put(:period, "all")
true ->
from(site, Map.merge(params, %{"period" => "day", "date" => "today"}))
|> Map.put(:period, "all")
end
end
def from(site, %{"period" => "custom", "from" => from, "to" => to} = params) do
new_params =
params
|> Map.delete("from")
|> Map.delete("to")
|> Map.put("date", Enum.join([from, to], ","))
from(site, new_params)
end
def from(site, %{"period" => "custom", "date" => date} = params) do
[from, to] = String.split(date, ",")
from_date = Date.from_iso8601!(String.trim(from))
to_date = Date.from_iso8601!(String.trim(to))
%__MODULE__{
period: "custom",
date_range: Date.range(from_date, to_date),
interval: Map.get(params, "interval", "date"),
filters: parse_filters(params),
sample_threshold: Map.get(params, "sample_threshold", @default_sample_threshold)
}
|> maybe_include_imported(site, params)
end
def from(tz, params) do
__MODULE__.from(tz, Map.merge(params, %{"period" => "30d"}))
end
def put_filter(query, key, val) do
%__MODULE__{
query
| filters: Map.put(query.filters, key, val)
}
end
def treat_page_filter_as_entry_page(%__MODULE__{filters: %{"visit:entry_page" => _}} = q), do: q
def treat_page_filter_as_entry_page(%__MODULE__{filters: %{"event:page" => f}} = q) do
q
|> put_filter("visit:entry_page", f)
|> put_filter("event:page", nil)
end
def treat_page_filter_as_entry_page(q), do: q
def treat_prop_filter_as_entry_prop(%__MODULE__{filters: filters} = q) do
prop_filter = get_filter_by_prefix(q, "event:props:")
case {filters["event:goal"], prop_filter} do
{nil, {"event:props:" <> prop, filter_value}} ->
q
|> remove_event_filters([:props])
|> put_filter("visit:entry_props:" <> prop, filter_value)
_ ->
q
end
end
def remove_event_filters(query, opts) do
new_filters =
Enum.filter(query.filters, fn {filter_key, _} ->
cond do
:page in opts && filter_key == "event:page" -> false
:goal in opts && filter_key == "event:goal" -> false
:props in opts && filter_key && String.starts_with?(filter_key, "event:props:") -> false
true -> true
end
end)
|> Enum.into(%{})
%__MODULE__{query | filters: new_filters}
end
def has_event_filters?(query) do
Enum.any?(query.filters, fn
{"event:" <> _, _} -> true
_ -> false
end)
end
def get_filter_by_prefix(query, prefix) do
Enum.find(query.filters, fn {prop, _value} ->
String.starts_with?(prop, prefix)
end)
end
defp today(tz) do
Timex.now(tz) |> Timex.to_date()
end
defp parse_single_date(tz, params) do
case params["date"] do
"today" -> Timex.now(tz) |> Timex.to_date()
date when is_binary(date) -> Date.from_iso8601!(date)
_ -> Timex.now(tz) |> Timex.to_date()
end
end
defp parse_filters(%{"filters" => filters}) when is_binary(filters) do
case Jason.decode(filters) do
{:ok, parsed} -> parsed
{:error, err} -> parse_filter_expression(err.data)
end
end
defp parse_filters(%{"filters" => filters}) when is_map(filters), do: filters
defp parse_filters(_), do: %{}
defp parse_filter_expression(str) do
filters = String.split(str, ";")
Enum.map(filters, &parse_single_filter/1)
|> Enum.into(%{})
end
defp parse_single_filter(str) do
[key, val] =
String.trim(str)
|> String.split(["==", "!="], trim: true)
|> Enum.map(&String.trim/1)
is_negated = String.contains?(str, "!=")
is_list = String.contains?(val, "|")
is_wildcard = String.contains?(val, "*")
cond do
key == "event:goal" -> {key, parse_goal_filter(val)}
is_wildcard && is_negated -> {key, {:does_not_match, val}}
is_wildcard -> {key, {:matches, val}}
is_list -> {key, {:member, String.split(val, "|")}}
is_negated -> {key, {:is_not, val}}
true -> {key, {:is, val}}
end
end
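# Parsing sketch for the filter expression syntax handled above (the filter
# strings are illustrative):
#
#     "visit:country==US|CA" -> {"visit:country", {:member, ["US", "CA"]}}
#     "event:page==/blog/*"  -> {"event:page", {:matches, "/blog/*"}}
#     "event:page!=/home"    -> {"event:page", {:is_not, "/home"}}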
defp parse_goal_filter("Visit " <> page), do: {:is, :page, page}
defp parse_goal_filter(event), do: {:is, :event, event}
defp maybe_include_imported(query, site, params) do
imported_data_requested = params["with_imported"] == "true"
has_imported_data = site.imported_data && site.imported_data.status == "ok"
date_range_overlaps =
has_imported_data && !Timex.after?(query.date_range.first, site.imported_data.end_date)
no_filters_applied = Enum.empty?(query.filters)
include_imported =
imported_data_requested && has_imported_data && date_range_overlaps && no_filters_applied
%{query | include_imported: !!include_imported}
end
end
|
lib/plausible/stats/query.ex
| 0.784402 | 0.573798 |
query.ex
|
starcoder
|
defmodule Identicon do
alias IO.ANSI, as: Ansi
@moduledoc """
Identicon generator written in Elixir
Resources:
* https://en.wikipedia.org/wiki/Identicon
* https://github.com/mauricius/identicon
"""
@doc """
Exports a string into an Identicon PNG image.
## Parameters
- input: The input string.
"""
def export(input) do
input
|> hash_input
|> pick_color
|> build_grid
|> filter_odd_squares
|> build_pixel_map
|> draw_image
|> save_image(input)
end
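# Example: the same input always yields the same identicon, since the grid is
# derived from the MD5 hash of the input:
#
#     Identicon.export("elixir")   # writes ./elixir.png (250x250)
#     Identicon.console("elixir")  # prints the identicon using ANSI colors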
@doc """
Outputs the Identicon directly to console.
## Parameters
- input: The input string.
"""
def console(input) do
input
|> hash_input
|> pick_color
|> build_grid
|> output_image
end
@doc """
Outputs the Identicon.Image to console.
## Parameters
- image: The Identicon.Image struct.
"""
def output_image(%Identicon.Image{color: color, grid: grid} = _image) do
color = Color.rgb_to_ansi(color)
Enum.each(grid, fn {code, index} ->
if rem(index, 5) == 0 do
IO.write("\n")
end
if rem(code, 2) == 0 do
IO.write(Ansi.color_background(color) <> " " <> Ansi.reset())
else
IO.write(Ansi.white_background() <> " " <> Ansi.reset())
end
end)
IO.write("\n")
end
@doc """
Draws the Identicon.Image as binary image
http://erlang.org/documentation/doc-6.1/lib/percept-0.8.9/doc/html/egd.html
## Parameters
- image: The Identicon.Image struct.
"""
def draw_image(%Identicon.Image{color: color, pixel_map: pixel_map}) do
image = :egd.create(250, 250)
fill = :egd.color(color)
Enum.each(pixel_map, fn {start, stop} ->
:egd.filledRectangle(image, start, stop, fill)
end)
:egd.render(image)
end
@doc """
Saves the image as PNG.
## Parameters
- image: The binary Image.
- input: the input string
"""
def save_image(image, input) do
File.write("#{input}.png", image)
end
@doc """
Generates the pixel map from the Image grid
## Parameters
- image: The Identicon.Image struct
"""
def build_pixel_map(%Identicon.Image{grid: grid} = image) do
pixel_map =
Enum.map(grid, fn {_, index} ->
horizontal = rem(index, 5) * 50
vertical = div(index, 5) * 50
top_left = {horizontal, vertical}
bottom_right = {horizontal + 50, vertical + 50}
{top_left, bottom_right}
end)
%Identicon.Image{image | pixel_map: pixel_map}
end
def filter_odd_squares(%Identicon.Image{grid: grid} = image) do
grid =
Enum.filter(grid, fn {code, _index} ->
rem(code, 2) == 0
end)
%Identicon.Image{image | grid: grid}
end
@doc """
Builds the Identicon grid
## Parameters
- image: The Identicon.Image struct
"""
def build_grid(%Identicon.Image{hex: hex} = image) do
grid =
hex
# Enum.chunk has been deprecated
|> Enum.chunk_every(3, 3, :discard)
|> Enum.map(&mirror_row/1)
|> List.flatten()
|> Enum.with_index()
%Identicon.Image{image | grid: grid}
end
@doc """
Mirrors an enumerable with 3 elements
## Parameters
- row: An enumerable
## Examples
iex> Identicon.mirror_row([1,2,3])
[1,2,3,2,1]
"""
def mirror_row(row) do
[first, second | _tail] = row
row ++ [second, first]
end
@doc """
Picks the first three elements as the RGB color for the identicon
## Parameters
- image: The Identicon.Image struct
"""
def pick_color(%Identicon.Image{hex: [r, g, b | _tail]} = image) do
%Identicon.Image{image | color: {r, g, b}}
end
@doc """
Hashes the input and converts it into a list of bytes().
## Parameters
- input: The input string
"""
def hash_input(input) do
hex =
:crypto.hash(:md5, input)
|> :binary.bin_to_list()
%Identicon.Image{hex: hex}
end
end
|
lib/identicon.ex
| 0.801548 | 0.405979 |
identicon.ex
|
starcoder
|
defmodule Stripe.Request do
@moduledoc """
A module for working with requests to the Stripe API.
Requests are composed in a functional manner. The request does not happen
until it is configured and passed to `make_request/1`.
Currently encompasses only requests to the normal Stripe API. The OAuth
endpoint is not yet supported.
Generally intended to be used internally, but can also be used by end-users
to work around missing endpoints (if any).
At a minimum, a request must have the endpoint and method specified to be
valid.
"""
alias Stripe.{API, Converter, Request}
@type t :: %__MODULE__{
cast_to_id: MapSet.t(),
endpoint: String.t() | nil,
headers: map | nil,
method: Stripe.API.method() | nil,
opts: Keyword.t() | nil,
params: map
}
@type error_code ::
:endpoint_fun_invalid_result
| :invalid_endpoint
defstruct opts: [],
endpoint: nil,
headers: nil,
method: nil,
params: %{},
cast_to_id: MapSet.new()
@doc """
Creates a new request.
Optionally accepts options for the request, such as using a specific API key.
See `t:Stripe.options` for details.
"""
@spec new_request(Stripe.options(), map) :: t
def new_request(opts \\ [], headers \\ %{}) do
%Request{opts: opts, headers: headers}
end
@doc """
Specifies an endpoint for the request.
The endpoint should not include the `v1` prefix or an initial slash, for
example `put_endpoint(request, "charges")`.
The endpoint can be a binary or a function which takes the parameters of the
query and returns an endpoint. The function is not evaluated until just
before the request is made so the actual parameters can be specified after
the endpoint.
"""
@spec put_endpoint(t, String.t()) :: t
def put_endpoint(%Request{} = request, endpoint) do
%{request | endpoint: endpoint}
end
@doc """
Specifies a method to use for the request.
Accepts any of the standard HTTP methods as atoms, that is `:get`, `:post`,
`:put`, `:patch` or `:delete`.
"""
@spec put_method(t, Stripe.API.method()) :: t
def put_method(%Request{} = request, method)
when method in [:get, :post, :put, :patch, :delete] do
%{request | method: method}
end
@doc """
Specifies the parameters to be used for the request.
If the request is a POST request, these are encoded in the request body.
Otherwise, they are encoded in the URL.
Calling this function multiple times will merge, not replace, the params
currently specified.
"""
@spec put_params(t, map) :: t
def put_params(%Request{params: params} = request, new_params) do
%{request | params: Map.merge(params, new_params)}
end
@doc """
Specify a single param to be included in the request.
"""
@spec put_param(t, atom, any) :: t
def put_param(%Request{params: params} = request, key, value) do
%{request | params: Map.put(params, key, value)}
end
@doc """
Specify that a given set of parameters should be cast to a simple ID.
Sometimes, it may be convenient to allow end-users to pass in structs (say,
the card to charge) but the API requires only the ID of the object. This
function will ensure that before the request is made, the parameters
specified here will be cast to IDs – if the value of a parameter is a
struct with an `:id` field, the value of that field will replace the struct
in the parameter list.
If the function is called multiple times, the set of parameters to cast to
ID is merged between the multiple calls.
"""
@spec cast_to_id(t, [atom]) :: t
def cast_to_id(%Request{cast_to_id: cast_to_id} = request, new_cast_to_id) do
%{request | cast_to_id: MapSet.union(cast_to_id, MapSet.new(new_cast_to_id))}
end
@doc """
Specify that a given path in the parameters should be cast to a simple ID.
Acts similar to `cast_to_id/2` but specifies only one parameter to be cast,
by specifying its path (as in the `Access` protocol). Used to cast nested
objects to their IDs.
"""
@spec cast_path_to_id(t, [atom]) :: t
def cast_path_to_id(%Request{cast_to_id: cast_to_id} = request, new_cast_to_id) do
%{request | cast_to_id: MapSet.put(cast_to_id, new_cast_to_id)}
end
@doc ~S"""
Normalise the argument to a simple Stripe ID.
Actively extracts the ID, given a struct with an `:id` field, or returns the
binary if one is passed in.
Useful for eagerly getting the ID of an object passed in, for example when
computing the endpoint to use:
```
def capture(id, params, opts) do
new_request(opts)
|> put_endpoint(@plural_endpoint <> "/#{get_id!(id)}/capture")
...
```
"""
@spec get_id!(Stripe.id() | struct) :: Stripe.id()
def get_id!(id) when is_binary(id), do: id
def get_id!(%{id: id}) when is_binary(id), do: id
def get_id!(_), do: raise("You must provide an ID or a struct with an ID to this operation.")
@doc ~S"""
Prefixes all `:expand` values provided in `opts` with the given prefix.
When using object expansion on a `list` function for a resource, the values must
be prefixed with `data.`. This is required because the Stripe API nests the
returned objects within `data: {}`.
For all `create`, `update`, `cancel` and `retrieve` functions this is not required.
```
opts = [expand: ["balance_transaction"]]
request = prefix_expansions(%Request{opts: opts})
request.opts == [expand: ["data.balance_transaction"]]
```
"""
@spec prefix_expansions(t) :: t
def prefix_expansions(%Request{opts: opts} = request) do
case Keyword.get(opts, :expand) do
nil ->
request
expansions ->
mapped_expansions = Enum.map(expansions, &"data.#{&1}")
opts = Keyword.replace!(opts, :expand, mapped_expansions)
%{request | opts: opts}
end
end
@doc """
Executes the request and returns the response.
"""
@spec make_request(t) :: {:ok, struct} | {:error, Stripe.Error.t()}
def make_request(
%Request{params: params, endpoint: endpoint, method: method, headers: headers, opts: opts} =
request
) do
with {:ok, params} <- do_cast_to_id(params, request.cast_to_id),
{:ok, endpoint} <- consolidate_endpoint(endpoint, params),
{:ok, result} <- API.request(params, method, endpoint, headers, opts) do
{:ok, Converter.convert_result(result)}
end
end
@doc """
Executes the request and returns the response for file uploads.
"""
@spec make_file_upload_request(t) :: {:ok, struct} | {:error, Stripe.Error.t()}
def make_file_upload_request(
%Request{params: params, endpoint: endpoint, method: method, opts: opts} = request
) do
with {:ok, params} <- do_cast_to_id(params, request.cast_to_id),
{:ok, endpoint} <- consolidate_endpoint(endpoint, params),
{:ok, result} <- API.request_file_upload(params, method, endpoint, %{}, opts) do
{:ok, Converter.convert_result(result)}
end
end
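# Replaces every param registered via cast_to_id/2 or cast_path_to_id/2 whose
# value is a struct with an :id field by that ID.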
defp do_cast_to_id(params, cast_to_id) do
to_cast = MapSet.to_list(cast_to_id)
params =
Enum.reduce(to_cast, params, fn key, params ->
case params[key] do
%{__struct__: _, id: id} -> put_in(params[key], id)
_ -> params
end
end)
{:ok, params}
end
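# Resolves the endpoint just before the request: binaries pass through,
# functions are invoked with the params.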
defp consolidate_endpoint(endpoint, _) when is_binary(endpoint), do: {:ok, endpoint}
defp consolidate_endpoint(endpoint_fun, params) when is_function(endpoint_fun, 1) do
case endpoint_fun.(params) do
result when is_binary(result) ->
{:ok, result}
invalid ->
{
:error,
Stripe.Error.new(
source: :internal,
code: :endpoint_fun_invalid_result,
message:
"calling the endpoint function produced an invalid result of #{inspect(invalid)} "
)
}
end
end
defp consolidate_endpoint(_, _) do
{
:error,
Stripe.Error.new(
source: :internal,
code: :invalid_endpoint,
message: "endpoint must be a string or a function from params to a string"
)
}
end
end
# source: lib/stripe/request.ex
defmodule Nabo.Parser do
@moduledoc """
The behaviour to implement a parser in Nabo. It requires a `parse/2` callback to be implemented.
There are three kinds of parsers, one for each of the three components of a post: a front parser for metadata,
an excerpt parser and finally a post body parser.
By default Nabo uses JSON for post metadata and Markdown for the excerpt and post body, but with these three parsers
implemented, you can write these components in whatever format you prefer.
## Example
Given a raw post like this.
```
title,slug,published_at,draft
Nabo,nabo,"Monday, 15-Aug-2005 15:52:01 UTC",false
---
This is a post body
---
[heading]Heading 1[/heading]
[b]bold[b]
[i]italics[i]
[url=https://www.wikipedia.org/]Wikipedia[/url]
```
### Implement a front Parser
The `parse/2` function of a front parser has to return `{:ok, %Nabo.Metadata{}}`.
```
defmodule MyFrontParser do
@behaviour Nabo.Parser
def parse(binary, options) do
data = MyCSVParser.parse(binary, options)
metadata = %Nabo.Metadata{
title: data["title"],
slug: data["slug"],
published_at: data["published_at"],
draft?: data["draft"]
}
{:ok, metadata}
end
end
```
### Implement an excerpt parser
```
defmodule MyExcerptParser do
  @behaviour Nabo.Parser

  def parse(binary, _options) do
{:ok, binary}
end
end
```
### Implement a body parser
```
defmodule MyBodyParser do
  @behaviour Nabo.Parser

  def parse(binary, options) do
{:ok, MyBBCodeParser.parse(binary, options)}
end
end
```
Then everything is ready to be configured in the repo.
```
defmodule MyRepo do
use Nabo.Repo,
root: "/path/to/posts",
compiler: [
front_parser: {MyFrontParser, []},
excerpt_parser: {MyExcerptParser, []},
body_parser: {MyBodyParser, []},
]
end
```
"""
@doc """
Parses an input binary into the desired format.
"""
@callback parse(data :: binary, options :: any) ::
{:ok, parsed :: any} | {:error, reason :: any}
end
# source: lib/nabo/parser.ex
defmodule Contex.GanttChart do
@moduledoc """
Generates a Gantt Chart.
Bars are drawn for each task covering its start and end time. In addition, tasks can be grouped
into categories, each with a differently coloured background - this is useful for showing projects that are
in major phases.
The time interval columns must be of a date time type (either `NaiveDateTime` or `DateTime`).
Labels can optionally be drawn for each task (use `show_task_labels/2`), and a description for each task, including
the time interval, is generated and added as a '<title>' element attached to the bar. Most browsers provide
tooltip functionality to display the title when the mouse hovers over the containing element.
By default, the first four columns of the supplied dataset are used for the category, task, start time and end time.
"""
import Contex.SVG
alias __MODULE__
alias Contex.{Scale, OrdinalScale, TimeScale, CategoryColourScale}
alias Contex.{Dataset, Mapping}
alias Contex.Axis
alias Contex.Utils
defstruct [
:dataset,
:mapping,
:options,
:time_scale,
:task_scale,
:category_scale
]
@required_mappings [
category_col: :exactly_one,
task_col: :exactly_one,
start_col: :exactly_one,
finish_col: :exactly_one,
id_col: :zero_or_one
]
@default_options [
width: 100,
height: 100,
show_task_labels: true,
padding: 2,
colour_palette: :default,
phx_event_handler: nil,
phx_event_target: nil
]
@type t() :: %__MODULE__{}
@doc ~S"""
Creates a new Gantt chart from a dataset and sets defaults.
Options may be passed to control the settings for the chart. Options available are:
- `:padding` : integer (default 2) - the padding between the task bars, specified relative to the plot size.
- `:show_task_labels` : `true` (default) or false - display labels for each task
- `:colour_palette` : `:default` (default) or colour palette - see `colours/2`
Overrides the default colours.
Colours can either be a named palette defined in `Contex.CategoryColourScale` or a list of strings representing hex code
of the colour as per CSS colour hex codes, but without the #. For example:
```
gantt = GanttChart.new(
dataset,
mapping: %{category_col: :category, task_col: :task_name, start_col: :start_time, finish_col: :end_time, id_col: :task_id},
colour_palette: ["fbb4ae", "b3cde3", "ccebc5"]
)
```
The colours will be applied to the categories in the order in which the unique category values appear in the dataset.
- `:phx_event_handler` : `nil` (default) or string representing `phx-click` event handler
- `:phx_event_target` : `nil` (default) or string representing `phx-target` for handler
Optionally specify a LiveView event handler. This attaches a `phx-click` attribute to each bar element.
You can specify the event_target for LiveComponents - a `phx-target` attribute will also be attached.
Note that it may not work with some browsers (e.g. Safari on iOS).
- `:mapping` : Maps the attributes required to generate the Gantt chart to columns in the dataset.
If the data in the dataset is stored as a map, the `:mapping` option is required. If the dataset
is not stored as a map, `:mapping` may be left out, in which case the columns will be assigned
in order to category, task, start time, finish time, task id.
If a mapping is explicit (recommended) the value must be a map of the plot's
`:category_col`, `:task_col`, `:start_col`, `:finish_col` and `:id_col` to keys in the map.
For example:
`mapping: %{category_col: :category, task_col: :task_name, start_col: :start_time, finish_col: :end_time, id_col: :task_id}`
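
A minimal sketch (the dataset, column order and values here are illustrative):

```
dataset = Dataset.new([
  ["Phase 1", "Design", ~N[2020-01-01 00:00:00], ~N[2020-01-10 00:00:00]],
  ["Phase 1", "Build", ~N[2020-01-10 00:00:00], ~N[2020-02-01 00:00:00]]
])

# without a :mapping option, columns are taken in order: category, task, start, finish
gantt = GanttChart.new(dataset)
```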
"""
@spec new(Contex.Dataset.t(), keyword()) :: Contex.GanttChart.t()
def new(%Dataset{} = dataset, options \\ []) do
options = Keyword.merge(@default_options, options)
mapping = Mapping.new(@required_mappings, Keyword.get(options, :mapping), dataset)
%GanttChart{dataset: dataset, mapping: mapping, options: options}
end
@doc """
Sets the default scales for the plot based on its column mapping.
"""
@deprecated "Default scales are now silently applied"
@spec set_default_scales(Contex.GanttChart.t()) :: Contex.GanttChart.t()
def set_default_scales(%GanttChart{mapping: %{column_map: column_map}} = plot) do
set_category_task_cols(plot, column_map.category_col, column_map.task_col)
|> set_task_interval_cols({column_map.start_col, column_map.finish_col})
end
@doc """
Show or hide labels on the bar for each task
"""
@deprecated "Set in new/2 options"
@spec show_task_labels(Contex.GanttChart.t(), boolean()) :: Contex.GanttChart.t()
def show_task_labels(%GanttChart{} = plot, show_task_labels) do
set_option(plot, :show_task_labels, show_task_labels)
end
@doc false
def set_size(%GanttChart{} = plot, width, height) do
plot
|> set_option(:width, width)
|> set_option(:height, height)
end
@doc """
Specify the columns used for category and task
"""
@deprecated "Use `:mapping` option in `new/2`"
@spec set_category_task_cols(
Contex.GanttChart.t(),
Contex.Dataset.column_name(),
Contex.Dataset.column_name()
) ::
Contex.GanttChart.t()
def set_category_task_cols(%GanttChart{mapping: mapping} = plot, cat_col_name, task_col_name) do
mapping = Mapping.update(mapping, %{category_col: cat_col_name, task_col: task_col_name})
%{plot | mapping: mapping}
end
@doc """
Specify the columns used for start and end time of each task.
"""
@deprecated "Use `:mapping` option in `new/2`"
@spec set_task_interval_cols(
Contex.GanttChart.t(),
{Contex.Dataset.column_name(), Contex.Dataset.column_name()}
) ::
Contex.GanttChart.t()
def set_task_interval_cols(
%GanttChart{mapping: mapping} = plot,
{start_col_name, finish_col_name}
) do
mapping = Mapping.update(mapping, %{start_col: start_col_name, finish_col: finish_col_name})
%{plot | mapping: mapping}
end
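# Builds the three scales used for layout: a time scale (x axis), an ordinal
# task scale (y axis) and a category colour scale.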
defp prepare_scales(%GanttChart{} = plot) do
plot
|> prepare_time_scale()
|> prepare_task_scale()
|> prepare_category_scale()
end
defp prepare_task_scale(%GanttChart{dataset: dataset, mapping: mapping} = plot) do
task_col_name = mapping.column_map[:task_col]
height = get_option(plot, :height)
padding = get_option(plot, :padding)
tasks = Dataset.unique_values(dataset, task_col_name)
task_scale =
OrdinalScale.new(tasks)
|> Scale.set_range(0, height)
|> OrdinalScale.padding(padding)
%{plot | task_scale: task_scale}
end
defp prepare_category_scale(%GanttChart{dataset: dataset, mapping: mapping} = plot) do
cat_col_name = mapping.column_map[:category_col]
categories = Dataset.unique_values(dataset, cat_col_name)
cat_scale = CategoryColourScale.new(categories)
%{plot | category_scale: cat_scale}
end
defp prepare_time_scale(%GanttChart{dataset: dataset, mapping: mapping} = plot) do
start_col_name = mapping.column_map[:start_col]
finish_col_name = mapping.column_map[:finish_col]
width = get_option(plot, :width)
{min, _} = Dataset.column_extents(dataset, start_col_name)
{_, max} = Dataset.column_extents(dataset, finish_col_name)
time_scale =
TimeScale.new()
|> TimeScale.domain(min, max)
|> Scale.set_range(0, width)
%{plot | time_scale: time_scale}
end
@doc """
Optionally specify a LiveView event handler. This attaches a `phx-click` attribute to each bar element.
You can specify the event_target for LiveComponents - a `phx-target` attribute will also be attached.
Note that it may not work with some browsers (e.g. Safari on iOS).
"""
@deprecated "Set in new/2 options"
def event_handler(%GanttChart{} = plot, event_handler, event_target \\ nil) do
plot
|> set_option(:phx_event_handler, event_handler)
|> set_option(:phx_event_target, event_target)
end
@doc """
If id_col is set, it is used as the value sent by the phx_event_handler.
Otherwise, the category and task are used.
"""
@deprecated "Use `:mapping` option in `new/2`"
@spec set_id_col(Contex.GanttChart.t(), Contex.Dataset.column_name()) :: Contex.GanttChart.t()
def set_id_col(%GanttChart{mapping: mapping} = plot, id_col_name) do
%{plot | mapping: Mapping.update(mapping, %{id_col: id_col_name})}
end
defp set_option(%GanttChart{options: options} = plot, key, value) do
options = Keyword.put(options, key, value)
%{plot | options: options}
end
defp get_option(%GanttChart{options: options}, key) do
Keyword.get(options, key)
end
@doc false
def to_svg(%GanttChart{} = plot, _options) do
plot = prepare_scales(plot)
time_scale = plot.time_scale
height = get_option(plot, :height)
time_axis = Axis.new_bottom_axis(time_scale) |> Axis.set_offset(height)
toptime_axis = Axis.new_top_axis(time_scale) |> Axis.set_offset(height)
toptime_axis = %{toptime_axis | tick_size_inner: 3, tick_padding: 1}
[
get_category_rects_svg(plot),
Axis.to_svg(toptime_axis),
Axis.to_svg(time_axis),
Axis.gridlines_to_svg(time_axis),
"<g>",
get_svg_bars(plot),
"</g>"
]
end
defp get_category_rects_svg(
%GanttChart{mapping: mapping, dataset: dataset, category_scale: cat_scale} = plot
) do
categories = Dataset.unique_values(dataset, mapping.column_map.category_col)
Enum.map(categories, fn cat ->
fill = CategoryColourScale.colour_for_value(cat_scale, cat)
band = get_category_band(plot, cat) |> adjust_category_band()
x_extents = {0, get_option(plot, :width)}
# TODO: When we have a colour manipulation library we can fade the colour. Until then, we'll draw a transparent white box on top
[
rect(x_extents, band, "", fill: fill, opacity: "0.2"),
rect(x_extents, band, "", fill: "FFFFFF", opacity: "0.3"),
get_category_tick_svg(cat, band)
]
end)
end
# Adjust band to fill gap
defp adjust_category_band({y1, y2}), do: {y1 - 1, y2 + 1}
defp get_category_tick_svg(text, {_min_y, max_y} = _band) do
# y = midpoint(band)
y = max_y
[
~s|<g class="exc-tick" font-size="10" text-anchor="start" transform="translate(0, #{y})">|,
text(text, x: "2", dy: "-0.32em", alignment_baseline: "baseline"),
"</g>"
]
end
defp get_svg_bars(%GanttChart{dataset: dataset} = plot) do
dataset.data
|> Enum.map(fn row -> get_svg_bar(row, plot) end)
end
defp get_svg_bar(
row,
%GanttChart{
mapping: mapping,
task_scale: task_scale,
time_scale: time_scale,
category_scale: cat_scale
} = plot
) do
task_data = mapping.accessors.task_col.(row)
cat_data = mapping.accessors.category_col.(row)
start_time = mapping.accessors.start_col.(row)
end_time = mapping.accessors.finish_col.(row)
title = ~s|#{task_data}: #{start_time} -> #{end_time}|
task_band = OrdinalScale.get_band(task_scale, task_data)
fill = CategoryColourScale.colour_for_value(cat_scale, cat_data)
start_x = Scale.domain_to_range(time_scale, start_time)
end_x = Scale.domain_to_range(time_scale, end_time)
opts = get_bar_event_handler_opts(row, plot, cat_data, task_data) ++ [fill: fill]
[
rect({start_x, end_x}, task_band, title(title), opts),
get_svg_bar_label(plot, {start_x, end_x}, task_data, task_band)
]
end
defp get_svg_bar_label(plot, {bar_start, bar_end} = bar, label, band) do
case get_option(plot, :show_task_labels) do
true ->
text_y = midpoint(band)
width = width(bar)
{text_x, class, anchor} =
case width < 50 do
true -> {bar_end + 2, "exc-barlabel-out", "start"}
_ -> {bar_start + 5, "exc-barlabel-in", "start"}
end
text(text_x, text_y, label, anchor: anchor, dominant_baseline: "central", class: class)
_ ->
""
end
end
defp get_bar_event_handler_opts(row, %GanttChart{} = plot, category, task) do
handler = get_option(plot, :phx_event_handler)
target = get_option(plot, :phx_event_target)
base_opts =
case target do
nil -> [phx_click: handler]
"" -> [phx_click: handler]
_ -> [phx_click: handler, phx_target: target]
end
id_opts = get_bar_click_id(row, plot, category, task)
case handler do
nil -> []
"" -> []
_ -> Keyword.merge(base_opts, id_opts)
end
end
defp get_bar_click_id(
_row,
%GanttChart{
mapping: %{column_map: %{id_col: nil}}
},
category,
task
) do
[category: "#{category}", task: task]
end
defp get_bar_click_id(
row,
%GanttChart{mapping: mapping},
_category,
_task
) do
id = mapping.accessors.id_col.(row)
[id: "#{id}"]
end
defp get_category_band(
%GanttChart{mapping: mapping, task_scale: task_scale, dataset: dataset},
category
) do
Enum.reduce(dataset.data, {nil, nil}, fn row, {min, max} = acc ->
task = mapping.accessors.task_col.(row)
cat = mapping.accessors.category_col.(row)
case cat == category do
false ->
{min, max}
_ ->
task_band = OrdinalScale.get_band(task_scale, task)
max_band(acc, task_band)
end
end)
end
defp midpoint({a, b}), do: (a + b) / 2.0
defp width({a, b}), do: abs(a - b)
defp max_band({a1, b1}, {a2, b2}), do: {Utils.safe_min(a1, a2), Utils.safe_max(b1, b2)}
end
# source: lib/chart/gantt.ex
defmodule Numerix.Statistics do
@moduledoc """
Common statistical functions.
"""
use Numerix.Tensor
alias Numerix.Common
@doc """
The average of a list of numbers.
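## Example

    iex> Statistics.mean([1, 2, 3])
    2.0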
"""
@spec mean(Common.vector()) :: Common.maybe_float()
def mean(%Tensor{items: []}), do: nil
def mean(x = %Tensor{}) do
sum(x) / Enum.count(x.items)
end
def mean(xs) do
x = Tensor.new(xs)
mean(x)
end
@doc """
The middle value in a list of numbers.
Note that for an even-length list this returns the lower of the two middle values rather than their mean.
"""
@spec median(Common.vector()) :: Common.maybe_float()
def median(%Tensor{items: []}), do: nil
def median(x = %Tensor{}) do
middle_index = round(length(x.items) / 2) - 1
x.items |> Enum.sort() |> Enum.at(middle_index)
end
def median(xs) do
x = Tensor.new(xs)
median(x)
end
@doc """
The most frequent value(s) in a list.
"""
@spec mode(Common.vector()) :: Common.maybe_vector()
def mode(%Tensor{items: []}), do: nil
def mode(x = %Tensor{}) do
counts =
Enum.reduce(x.items, %{}, fn i, acc ->
acc |> Map.update(i, 1, fn count -> count + 1 end)
end)
{_, max_count} = counts |> Enum.max_by(fn {_x, count} -> count end)
case max_count do
1 ->
nil
_ ->
counts
|> Stream.filter(fn {_x, count} -> count == max_count end)
|> Enum.map(fn {i, _count} -> i end)
end
end
def mode(xs) do
x = Tensor.new(xs)
mode(x)
end
@doc """
The difference between the largest and smallest values in a list.
"""
@spec range(Common.vector()) :: Common.maybe_float()
def range(%Tensor{items: []}), do: nil
def range(x = %Tensor{}) do
{minimum, maximum} = Enum.min_max(x.items)
maximum - minimum
end
def range(xs) do
x = Tensor.new(xs)
range(x)
end
@doc """
The unbiased population variance from a sample.
It measures how far the vector is spread out from the mean.
"""
@spec variance(Common.vector()) :: Common.maybe_float()
def variance(%Tensor{items: []}), do: nil
def variance(%Tensor{items: [_x]}), do: nil
def variance(x = %Tensor{}) do
sum_powered_deviations(x, 2) / (Enum.count(x.items) - 1)
end
def variance(xs) do
x = Tensor.new(xs)
variance(x)
end
@doc """
The variance for a full population.
It measures how far the vector is spread out from the mean.
"""
@spec population_variance(Common.vector()) :: Common.maybe_float()
def population_variance(%Tensor{items: []}), do: nil
def population_variance(x = %Tensor{}), do: moment(x, 2)
def population_variance(xs) do
x = Tensor.new(xs)
population_variance(x)
end
@doc """
The unbiased standard deviation from a sample.
It measures the amount of variation of the vector.
"""
@spec std_dev(Common.vector()) :: Common.maybe_float()
def std_dev(%Tensor{items: []}), do: nil
def std_dev(%Tensor{items: [_x]}), do: nil
def std_dev(x = %Tensor{}), do: :math.sqrt(variance(x))
def std_dev(xs) do
x = Tensor.new(xs)
std_dev(x)
end
@doc """
The standard deviation for a full population.
It measures the amount of variation of the vector.
"""
@spec population_std_dev(Common.vector()) :: Common.maybe_float()
def population_std_dev(%Tensor{items: []}), do: nil
def population_std_dev(x = %Tensor{}), do: :math.sqrt(population_variance(x))
def population_std_dev(xs) do
x = Tensor.new(xs)
population_std_dev(x)
end
@doc """
The nth moment about the mean for a sample.
Used to calculate skewness and kurtosis.
"""
@spec moment(Common.vector(), pos_integer) :: Common.maybe_float()
def moment(%Tensor{items: []}, _), do: nil
def moment(_, 1), do: 0.0
def moment(x = %Tensor{}, n), do: sum_powered_deviations(x, n) / Enum.count(x.items)
def moment(xs, n) do
x = Tensor.new(xs)
moment(x, n)
end
@doc """
The sharpness of the peak of a frequency-distribution curve.
It defines the extent to which a distribution differs from a normal distribution.
Like skewness, it describes the shape of a probability distribution.
"""
@spec kurtosis(Common.vector()) :: Common.maybe_float()
def kurtosis(%Tensor{items: []}), do: nil
def kurtosis(x = %Tensor{}), do: moment(x, 4) / :math.pow(population_variance(x), 2) - 3
def kurtosis(xs) do
x = Tensor.new(xs)
kurtosis(x)
end
@doc """
The skewness of a frequency-distribution curve.
It defines the extent to which a distribution differs from a normal distribution.
Like kurtosis, it describes the shape of a probability distribution.
"""
@spec skewness(Common.vector()) :: Common.maybe_float()
def skewness(%Tensor{items: []}), do: nil
def skewness(x = %Tensor{}), do: moment(x, 3) / :math.pow(population_variance(x), 1.5)
def skewness(xs) do
x = Tensor.new(xs)
skewness(x)
end
@doc """
Calculates the unbiased covariance from two sample vectors.
It is a measure of how much the two vectors change together.
"""
@spec covariance(Common.vector(), Common.vector()) :: Common.maybe_float()
def covariance(%Tensor{items: []}, _), do: nil
def covariance(_, %Tensor{items: []}), do: nil
def covariance(%Tensor{items: [_x]}, _), do: nil
def covariance(_, %Tensor{items: [_y]}), do: nil
def covariance(%Tensor{items: x}, %Tensor{items: y}) when length(x) != length(y), do: nil
def covariance(x = %Tensor{}, y = %Tensor{}) do
divisor = Enum.count(x.items) - 1
do_covariance(x, y, divisor)
end
def covariance(xs, ys) do
x = Tensor.new(xs)
y = Tensor.new(ys)
covariance(x, y)
end
@doc """
Calculates the population covariance from two full population vectors.
It is a measure of how much the two vectors change together.
"""
@spec population_covariance(Common.vector(), Common.vector()) :: Common.maybe_float()
def population_covariance(%Tensor{items: []}, _), do: nil
def population_covariance(_, %Tensor{items: []}), do: nil
def population_covariance(%Tensor{items: x}, %Tensor{items: y}) when length(x) != length(y),
do: nil
def population_covariance(x = %Tensor{}, y = %Tensor{}) do
divisor = Enum.count(x.items)
do_covariance(x, y, divisor)
end
def population_covariance(xs, ys) do
x = Tensor.new(xs)
y = Tensor.new(ys)
population_covariance(x, y)
end
@doc """
Estimates the tau-th quantile from the vector.
Approximately median-unbiased irrespective of the sample distribution.
This implements the R-8 type of https://en.wikipedia.org/wiki/Quantile.
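## Example

    iex> Statistics.quantile([1, 2, 3], 0.5)
    2.0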
"""
@spec quantile(Common.vector(), number) :: Common.maybe_float()
def quantile(%Tensor{items: []}, _tau), do: nil
def quantile(_xs, tau) when tau < 0 or tau > 1, do: nil
def quantile(x = %Tensor{}, tau) do
sorted_x = Enum.sort(x.items)
h = (length(sorted_x) + 1 / 3) * tau + 1 / 3
hf = h |> Float.floor() |> round
do_quantile(sorted_x, h, hf)
end
def quantile(xs, tau) do
x = Tensor.new(xs)
quantile(x, tau)
end
@doc """
Estimates the p-Percentile value from the vector.
Approximately median-unbiased irrespective of the sample distribution.
This implements the R-8 type of https://en.wikipedia.org/wiki/Quantile.
"""
@spec percentile(Common.vector(), integer) :: Common.maybe_float()
def percentile(%Tensor{items: []}, _p), do: nil
def percentile(_xs, p) when p < 0 or p > 100, do: nil
def percentile(x = %Tensor{}, p), do: quantile(x, p / 100)
def percentile(xs, p) do
x = Tensor.new(xs)
percentile(x, p)
end
@doc """
Calculates the weighted measure of how much two vectors change together.
"""
@spec weighted_covariance(Common.vector(), Common.vector(), Common.vector()) ::
Common.maybe_float()
def weighted_covariance(%Tensor{items: []}, _, _), do: nil
def weighted_covariance(_, %Tensor{items: []}, _), do: nil
def weighted_covariance(_, _, %Tensor{items: []}), do: nil
def weighted_covariance(%Tensor{items: x}, %Tensor{items: y}, %Tensor{items: w})
when length(x) != length(y) or length(x) != length(w),
do: nil
def weighted_covariance(x = %Tensor{}, y = %Tensor{}, w = %Tensor{}) do
weighted_mean1 = weighted_mean(x, w)
weighted_mean2 = weighted_mean(y, w)
sum(w * (x - weighted_mean1) * (y - weighted_mean2)) / sum(w)
end
def weighted_covariance(xs, ys, weights) do
x = Tensor.new(xs)
y = Tensor.new(ys)
w = Tensor.new(weights)
weighted_covariance(x, y, w)
end
@doc """
Calculates the weighted average of a list of numbers.
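## Example

    iex> Statistics.weighted_mean([1, 2, 3], [1, 1, 2])
    2.25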
"""
@spec weighted_mean(Common.vector(), Common.vector()) :: Common.maybe_float()
def weighted_mean(%Tensor{items: []}, _), do: nil
def weighted_mean(_, %Tensor{items: []}), do: nil
def weighted_mean(%Tensor{items: x}, %Tensor{items: w}) when length(x) != length(w), do: nil
def weighted_mean(x = %Tensor{}, w = %Tensor{}), do: sum(x * w) / sum(w)
def weighted_mean(xs, weights) do
x = Tensor.new(xs)
w = Tensor.new(weights)
weighted_mean(x, w)
end
defp sum_powered_deviations(x, n) do
x_mean = mean(x)
sum(pow(x - x_mean, n))
end
defp do_covariance(x, y, divisor) do
mean_x = mean(x)
mean_y = mean(y)
sum((x - mean_x) * (y - mean_y)) / divisor
end
defp do_quantile([head | _], _h, hf) when hf < 1, do: head
defp do_quantile(xs, _h, hf) when hf >= length(xs), do: List.last(xs)
defp do_quantile(xs, h, hf) do
Enum.at(xs, hf - 1) + (h - hf) * (Enum.at(xs, hf) - Enum.at(xs, hf - 1))
end
@doc """
Calculates the rolling mean, prepending zeros for the leading positions that do not have
enough elements to fill a complete window.
## Examples
iex> Statistics.rolling_mean([2.8440000000000003, 3.096, 3.672, 4.572, 4.284], 2)
[0.0, 2.97, 3.3840000000000003, 4.122, 4.428]
"""
def rolling_mean(collection, count) when is_list(collection) and is_integer(count) do
values_to_append = Enum.map(1..(count - 1), fn _ -> 0.0 end)
real_values =
rolling(collection, count)
|> Enum.map(&mean/1)
Enum.concat(values_to_append, real_values)
end
defp rolling(collection, count) do
Stream.chunk_every(collection, count, 1, :discard)
end
@doc """
Cumulatively sums the elements of a list, returning the same number of elements as provided.
## Example
iex> Statistics.cumulative_sum([10, 20, 30, 50])
[10, 30, 60, 110]
"""
def cumulative_sum(values) when is_list(values) do
Enum.reduce(values, [0 | []], fn value, [hd | _tl] = acc ->
[value + hd | acc]
end)
|> Enum.reverse()
|> Enum.drop(1)
end
@doc """
Applies the hypotenuse formula element-wise to two lists, with optional rounding.
## Example
iex> Statistics.hypotenuse([1, 2, 3], [1, 2, 3], 8)
[1.41421356, 2.82842712, 4.24264069]
"""
def hypotenuse(list_1, list_2, rounding_unit \\ 0)
when is_list(list_1) and is_list(list_2) and is_integer(rounding_unit) do
Enum.zip(list_1, list_2)
|> Enum.map(fn {x1, x2} ->
:math.sqrt(:math.pow(x1, 2) + :math.pow(x2, 2))
|> Float.round(rounding_unit)
end)
end
end
# source: lib/statistics.ex
defmodule Nerves.NetworkInterface do
@moduledoc """
This module exposes a simplified view of Linux network configuration to
applications.
## Overview
This module should be added to a supervision tree or started via the
`start_link/0` call. Once running, the module provides functions to
list network interfaces, modify their state (up or down), get statistics
and set IP networking parameters. Network events, such as when an Ethernet
cable is connected, are reported via the `Nerves.NetworkInterface` Registry.
## Privilege
The functions that return information don't require that the `Nerves.NetworkInterface`'s
associated port process has privileged access to the system. If you
need to change any parameters or bring up or down an interface, you should
ensure that the port process is running as a privileged user.
"""
@doc """
Return the list of network interfaces on this machine.
"""
defdelegate interfaces, to: Nerves.NetworkInterface.Worker
@doc """
Return link-level status on the specified interface.
For example, `Nerves.NetworkInterface.status "eth0"` could return:

    {:ok,
     %{ifname: "eth0", index: 2, is_broadcast: true, is_lower_up: true,
       is_multicast: true, is_running: true, is_up: true,
       mac_address: <<224, 219, 85, 231, 139, 93>>,
       mac_broadcast: <<255, 255, 255, 255, 255, 255>>, mtu: 1500, operstate: :up,
       stats: %{collisions: 0, multicast: 427, rx_bytes: 358417207, rx_dropped: 0,
         rx_errors: 0, rx_packets: 301021, tx_bytes: 22813761, tx_dropped: 0,
         tx_errors: 0, tx_packets: 212480}, type: :ethernet}}
If the interface doesn't exist, `{:error, :enodev}` is returned.
"""
defdelegate status(ifname), to: Nerves.NetworkInterface.Worker
@doc """
Bring the specified interface up.
Returns `:ok` on success or `{:error, reason}` if an error occurs.
"""
defdelegate ifup(ifname), to: Nerves.NetworkInterface.Worker
@doc """
Bring the specified interface down.
Returns `:ok` on success or `{:error, reason}` if an error occurs.
"""
defdelegate ifdown(ifname), to: Nerves.NetworkInterface.Worker
@doc """
Return the IP configuration for the specified interface as a map. See
`setup/3` for options.
Returns `{:ok, config}` on success or `{:error, reason}` if an error occurs.
"""
defdelegate settings(ifname), to: Nerves.NetworkInterface.Worker
@doc """
Set IP settings for the specified interface. The following options are
available:
* `:ipv4_address` - the IPv4 address of the interface
* `:ipv4_broadcast` - the IPv4 broadcast address for the interface
* `:ipv4_subnet_mask` - the IPv4 subnet mask
* `:ipv4_gateway` - the default gateway
Options can be specified either as a keyword list or as a map.
Returns `:ok` on success or `{:error, reason}` if an error occurs.
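
For example (illustrative addresses):

    Nerves.NetworkInterface.setup("eth0",
      ipv4_address: "192.168.1.5",
      ipv4_subnet_mask: "255.255.255.0",
      ipv4_gateway: "192.168.1.1")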
"""
defdelegate setup(ifname, options), to: Nerves.NetworkInterface.Worker
end
# source: lib/nerves_network_interface.ex
defmodule Plymio.Vekil.Forom.List do
@moduledoc ~S"""
The module implements the `Plymio.Vekil.Forom` protocol and manages a list of other *forom*
See `Plymio.Vekil.Forom` for the definitions of the protocol functions.
See `Plymio.Vekil` for an explanation of the test environment.
## Module State
See `Plymio.Vekil.Forom` for the common fields.
The default `:produce_default` is an empty list.
The default `:realise_default` is an empty list.
The module's state is held in a `struct` with the following field(s):
| Field | Aliases | Purpose |
| :--- | :--- | :--- |
| `:forom` | | *holds the list of child forom* |
"""
require Plymio.Fontais.Guard
require Plymio.Fontais.Option
require Plymio.Fontais.Vekil.ProxyForomDict, as: PROXYFOROMDICT
use Plymio.Fontais.Attribute
use Plymio.Vekil.Attribute
@type t :: %__MODULE__{}
@type opts :: Plymio.Fontais.opts()
@type error :: Plymio.Fontais.error()
@type kv :: Plymio.Fontais.kv()
@type product :: Plymio.Vekil.product()
import Plymio.Fontais.Error,
only: [
new_error_result: 1
],
warn: false
import Plymio.Fontais.Guard,
only: [
# is_value_unset_or_nil: 1,
is_value_set: 1
]
import Plymio.Fontais.Option,
only: [
opts_create_aliases_dict: 1,
opts_canonical_keys: 2
]
@plymio_vekil_forom_list_kvs_aliases [
# struct
@plymio_vekil_field_alias_forom,
@plymio_vekil_field_alias_produce_default,
@plymio_vekil_field_alias_realise_default,
@plymio_fontais_field_alias_protocol_name,
@plymio_fontais_field_alias_protocol_impl,
# virtual
@plymio_vekil_field_alias_vekil,
@plymio_vekil_field_alias_proxy,
@plymio_vekil_field_alias_seen
]
@plymio_vekil_forom_list_dict_aliases @plymio_vekil_forom_list_kvs_aliases
|> opts_create_aliases_dict
@doc false
def update_canonical_opts(opts, dict \\ @plymio_vekil_forom_list_dict_aliases) do
opts |> opts_canonical_keys(dict)
end
@plymio_vekil_defstruct [
{@plymio_vekil_field_forom, @plymio_fontais_the_unset_value},
{@plymio_vekil_field_produce_default, []},
{@plymio_vekil_field_realise_default, []},
{@plymio_fontais_field_protocol_name, Plymio.Vekil.Forom},
{@plymio_fontais_field_protocol_impl, __MODULE__}
]
defstruct @plymio_vekil_defstruct
@doc_new ~S"""
`new/1` takes an optional *opts* and creates a new *forom* returning `{:ok, forom}`.
## Examples
iex> {:ok, forom} = new()
...> match?(%FOROMLIST{}, forom)
true
`Plymio.Vekil.Utility.forom?/1` returns `true` if the value implements `Plymio.Vekil.Forom`
iex> {:ok, forom} = new()
...> forom |> Plymio.Vekil.Utility.forom?
true
The list is passed using the `:forom` key:
iex> {:ok, forom1} = FOROMFORM.new(forom: quote(do: x = x + 1))
...> {:ok, forom} = new(forom: forom1)
...> forom |> Plymio.Vekil.Utility.forom?
true
iex> {:ok, forom} = new(
...> forom: [
...> FOROMTERM.new!(forom: 42),
...> FOROMFORM.new!(forom: quote(do: x = x * x * x)),
...> FOROMPROXY.new!(forom: :x_sub_1),
...> ])
...> forom |> Plymio.Vekil.Utility.forom?
true
"""
@doc_update ~S"""
`update/2` implements `Plymio.Vekil.Forom.update/2`.
## Examples
iex> {:ok, forom} = new(forom: FOROMTERM.new!(forom: 7))
...> {:ok, forom} = forom |> FOROMPROT.update(
...> forom: [FOROMTERM.new!(forom: 33), FOROMTERM.new!(forom: 2)])
...> {:ok, {values, %FOROMLIST{}}} = forom |> FOROMPROT.realise
...> values |> Enum.sum
35
"""
@doc_normalise ~S"""
`normalise/1` creates a new *forom* from its argument unless the argument is already one.
The function tries to make it as convenient as possible to
create a new *forom*, making some assumptions, and may *not* return a **list** *forom*.
If the argument is a list, each element is normalised as below and the resulting *foroms* are used to create a **list** *forom*.
If an atom is found, a `Plymio.Vekil.Forom.Proxy` is created.
If the argument is a valid *form* (`Macro.validate/1`) a new `Plymio.Vekil.Forom.Form` is created.
Any other argument creates a `Plymio.Vekil.Forom.Term`.
## Examples
Here a *form* is recognised:
iex> {:ok, %FOROMFORM{} = forom} = quote(do: x = x + 1) |> normalise
...> {:ok, {form, _}} = forom |> FOROMPROT.realise
...> form |> harnais_helper_test_forms!(binding: [x: 6])
{7, ["x = x + 1"]}
Here the argument is an atom and a *proxy forom* is created. Note a
*vekil* is needed to resolve the *proxy*.
iex> {:ok, %FOROMPROXY{} = forom} = :x_mul_x |> normalise
...> realise_opts = [vekil: vekil_helper_form_vekil_example1()]
...> {:ok, {form, _}} = forom |> FOROMPROT.realise(realise_opts)
...> form |> harnais_helper_test_forms!(binding: [x: 3])
{9, ["x = x * x"]}
A list of atoms works. Note the returned *forom* is a **list** *forom*.
iex> {:ok, %FOROMLIST{} = forom} = [:x_add_1, :x_mul_x, :x_sub_1] |> normalise
...> realise_opts = [vekil: vekil_helper_form_vekil_example1()]
...> {:ok, {form, _}} = forom |> FOROMPROT.realise(realise_opts)
...> form |> harnais_helper_test_forms!(binding: [x: 7])
{63, ["x = x + 1", "x = x * x", "x = x - 1"]}
A mixture works:
iex> {:ok, %FOROMLIST{} = forom} = [
...> 42,
...> quote(do: x = x * x * x),
...> :x_sub_1
...> ] |> normalise
...> realise_opts = [vekil: vekil_helper_form_vekil_example1()]
...> {:ok, {values, _}} = forom |> FOROMPROT.realise(realise_opts)
...> values |> harnais_helper_show_forms
{:ok, ["42", "x = x * x * x", "x = x - 1"]}
The other examples don't really highlight that the children of a
**list** *forom* can themselves be **list** *foroms*.
iex> {:ok, forom1} = [:x_sub_1, :x_add_1, :x_mul_x] |> normalise
...> {:ok, forom2} = [
...> quote(do: x = x + 9),
...> quote(do: x = x - 5),
...> quote(do: x = x * x * x),
...> ] |> normalise
...> {:ok, forom3} = :x_funs |> normalise
...> {:ok, forom} = [forom1, forom2, forom3] |> normalise
...> realise_opts = [vekil: vekil_helper_form_vekil_example1()]
...> {:ok, {forms, _}} = forom |> FOROMPROT.realise(realise_opts)
...> forms |> harnais_helper_test_forms!(binding: [x: 3])
{4831203, ["x = x - 1", "x = x + 1", "x = x * x",
"x = x + 9", "x = x - 5", "x = x * x * x",
"x = x + 1", "x = x * x", "x = x - 1"]}
Anything else creates a **term** *forom*:
iex> {:ok, %FOROMTERM{} = forom} = %{a: 1} |> normalise
...> {:ok, {value, _}} = forom |> FOROMPROT.realise
...> value
%{a: 1}
"""
@doc_produce ~S"""
`produce/2` takes a *forom* and an optional *opts*.
It calls `produce/2` on each of the *forom's* children and merges
their products into a single `Keyword`, returning `{:ok, {product, forom}}`.
## Examples
Here the list contains integers:
iex> {:ok, forom} = new(
...> forom: [
...> FOROMTERM.new!(forom: 7),
...> FOROMTERM.new!(forom: 33),
...> FOROMTERM.new!(forom: 2),
...> ])
...> {:ok, {product, %FOROMLIST{}}} = forom |> FOROMPROT.produce
...> product |> Keyword.get_values(:forom) |> Enum.sum
42
Here the list contains code snippets:
iex> {:ok, forom} = [forom: [
...> FOROMFORM.new!(forom: quote(do: x = x + 1)),
...> FOROMFORM.new!(forom: quote(do: x = x * x)),
...> FOROMFORM.new!(forom: quote(do: x = x - 1))
...> ]] |> FOROMLIST.new
...> {:ok, {product, _}} = forom |> FOROMPROT.produce
...> product |> Keyword.get_values(:forom)
...> |> harnais_helper_test_forms!(binding: [x: 3])
{15, ["x = x + 1", "x = x * x", "x = x - 1"]}
As an aside, and using the same example, `normalise/1` helps reduce the boilerplate:
iex> {:ok, forom} = [
...> quote(do: x = x + 1),
...> quote(do: x = x * x),
...> quote(do: x = x - 1)
...> ] |> normalise
...> {:ok, {product, _}} = forom |> FOROMPROT.produce
...> product |> Keyword.get_values(:forom)
...> |> harnais_helper_test_forms!(binding: [x: 3])
{15, ["x = x + 1", "x = x * x", "x = x - 1"]}
A similar example to the one above but the list contains *proxy
foroms* which are recursively produced. Note the *proxy foroms* need
a *vekil* to resolve the proxies.
iex> {:ok, forom} = [forom: [
...> FOROMPROXY.new!(forom: :x_add_1),
...> FOROMPROXY.new!(forom: :x_mul_x),
...> FOROMPROXY.new!(forom: :x_sub_1)
...> ]] |> FOROMLIST.new
...> produce_opts = [vekil: vekil_helper_form_vekil_example1()]
...> {:ok, {product, _}} = forom |> FOROMPROT.produce(produce_opts)
...> product |> Keyword.get_values(:forom)
...> |> harnais_helper_test_forms!(binding: [x: 3])
{15, ["x = x + 1", "x = x * x", "x = x - 1"]}
An empty *forom* does not produce any `:forom` keys so the `:produce_default` value is returned. Here the default `:produce_default` is an empty list.
iex> {:ok, forom} = new()
...> {:ok, {product, _}} = forom |> FOROMPROT.produce
...> product |> Keyword.get_values(:forom)
[]
Same example but the `:produce_default` value is set:
iex> {:ok, forom} = new()
...> {:ok, {product, _}} = forom
...> |> FOROMPROT.produce(produce_default: [forom: 1, forom: :due, forom: "tre"])
...> product |> Keyword.get_values(:forom)
[1, :due, "tre"]
"""
@doc_realise ~S"""
`realise/2` takes a *forom* and an optional *opts*, calls
`produce/2`, and then gets (`Keyword.get_values/2`) the `:forom` key values.
The example are essentially the same as `produce/2`
## Examples
iex> {:ok, forom} = new(
...> forom: [
...> FOROMTERM.new!(forom: 7),
...> FOROMTERM.new!(forom: 33),
...> FOROMTERM.new!(forom: 2),
...> ])
...> {:ok, {values, %FOROMLIST{}}} = forom |> FOROMPROT.realise
...> values |> Enum.sum
42
iex> {:ok, forom} = [forom: [
...> FOROMFORM.new!(forom: quote(do: x = x + 1)),
...> FOROMFORM.new!(forom: quote(do: x = x * x)),
...> FOROMFORM.new!(forom: quote(do: x = x - 1))
...> ]] |> FOROMLIST.new
...> {:ok, {forms, _}} = forom |> FOROMPROT.realise
...> forms |> harnais_helper_test_forms!(binding: [x: 3])
{15, ["x = x + 1", "x = x * x", "x = x - 1"]}
iex> {:ok, forom} = [forom: [
...> FOROMPROXY.new!(forom: :x_add_1),
...> FOROMPROXY.new!(forom: :x_mul_x),
...> FOROMPROXY.new!(forom: :x_sub_1)
...> ]] |> FOROMLIST.new
...> realise_opts = [vekil: vekil_helper_form_vekil_example1()]
...> {:ok, {forms, _}} = forom |> FOROMPROT.realise(realise_opts)
...> forms |> harnais_helper_test_forms!(binding: [x: 3])
{15, ["x = x + 1", "x = x * x", "x = x - 1"]}
iex> {:ok, forom} = [:x_add_1, :x_mul_x, :x_sub_1] |> normalise
...> realise_opts = [vekil: vekil_helper_form_vekil_example1()]
...> {:ok, {forms, _}} = forom |> FOROMPROT.realise(realise_opts)
...> forms |> harnais_helper_test_forms!(binding: [x: 3])
{15, ["x = x + 1", "x = x * x", "x = x - 1"]}
iex> {:ok, forom} = new()
...> {:ok, {values, _forom}} = forom |> FOROMPROT.realise
...> values
[]
"""
@vekil [
Plymio.Vekil.Codi.Dict.__vekil__(),
# overrides to the defaults
%{
doc_false: quote(do: @doc(false)),
state_def_new_doc: quote(do: @doc(unquote(@doc_new))),
state_def_update_doc: quote(do: @doc(unquote(@doc_update))),
vekil_forom_def_normalise_doc: quote(do: @doc(unquote(@doc_normalise))),
vekil_forom_def_produce_doc: quote(do: @doc(unquote(@doc_produce))),
vekil_forom_def_realise_doc: quote(do: @doc(unquote(@doc_realise)))
}
]
|> PROXYFOROMDICT.create_proxy_forom_dict!()
@vekil
|> Enum.sort_by(fn {k, _v} -> k end)
@vekil_proxies [
:state_base_package,
:state_defp_update_field_header,
:state_vekil_forom_list_defp_update_field_forom_normalise_forom_list,
:state_vekil_defp_update_field_produce_default_passthru,
:state_vekil_defp_update_field_realise_default_passthru,
:state_vekil_forom_list_defp_update_field_other_propagate,
:state_defp_update_field_unknown,
:vekil_forom_list_def_produce,
:vekil_forom_list_def_realise,
:vekil_forom_list_defp_realise_product,
:vekil_forom_def_normalise,
# forom_value_normalise - other clauses below
:vekil_forom_defp_forom_value_normalise_header,
:vekil_forom_defp_forom_value_normalise_clause_match_forom,
:vekil_forom_defp_forom_value_normalise_clause_l0_new,
:vekil_forom_defp_forom_value_normalise_clause_l_gt_0,
:vekil_forom_defp_forom_value_normalise_clause_match_atom_new_proxy
]
@codi_opts [
{@plymio_fontais_key_dict, @vekil}
]
@vekil_proxies
|> PROXYFOROMDICT.reify_proxies(@codi_opts)
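# Fallback clauses for forom_value_normalise/2: a valid form (Macro.validate/1)
# becomes a form forom; any other value becomes a term forom.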
defp forom_value_normalise(value, pvo) do
value
|> Macro.validate()
|> case do
:ok ->
with {:ok, pvo} <- pvo |> Plymio.Vekil.Forom.Form.update_canonical_opts(),
{:ok, pvo} <- pvo |> Plymio.Vekil.PVO.pvo_put_forom(value),
{:ok, %Plymio.Vekil.Forom.Form{}} = result <- pvo |> Plymio.Vekil.Forom.Form.new() do
result
else
{:error, %{__exception__: true}} = result -> result
end
_ ->
with {:ok, pvo} <- pvo |> Plymio.Vekil.Forom.Term.update_canonical_opts(),
{:ok, pvo} <- pvo |> Plymio.Vekil.PVO.pvo_put_forom(value),
{:ok, %Plymio.Vekil.Forom.Term{}} = result <- pvo |> Plymio.Vekil.Forom.Term.new() do
result
else
{:error, %{__exception__: true}} = result -> result
end
end
end
end
defimpl Plymio.Vekil.Forom, for: Plymio.Vekil.Forom.List do
@funs :functions
|> @protocol.__info__
|> Keyword.drop([:__protocol__, :impl_for, :impl_for!])
for {fun, arity} <- @funs do
defdelegate unquote(fun)(unquote_splicing(Macro.generate_arguments(arity, nil))), to: @for
end
end
defimpl Inspect, for: Plymio.Vekil.Forom.List do
use Plymio.Vekil.Attribute
import Plymio.Fontais.Guard,
only: [
is_value_unset_or_nil: 1
]
def inspect(
%Plymio.Vekil.Forom.List{
@plymio_vekil_field_forom => forom
},
_opts
) do
forom_telltale =
forom
|> case do
x when is_value_unset_or_nil(x) ->
nil
x when is_list(x) and length(x) < 4 ->
x
|> Enum.map(&inspect/1)
|> (fn texts ->
texts |> Enum.join(",")
end).()
x when is_list(x) ->
"#{length(x)}"
_x ->
"?"
end
forom_telltale =
[
forom_telltale
]
|> List.flatten()
|> Enum.reject(&is_nil/1)
|> Enum.join("; ")
"FOROMList(#{forom_telltale})"
end
end
# source: lib/vekil/concrete/forom/list.ex
defmodule AWS.Glacier do
@moduledoc """
Amazon S3 Glacier (Glacier) is a storage solution for "cold data."
Glacier is an extremely low-cost storage service that provides secure, durable,
and easy-to-use storage for data backup and archival.
With Glacier, customers can store their data cost effectively for months, years,
or decades. Glacier also enables customers to offload the administrative burdens
of operating and scaling storage to AWS, so they don't have to worry about
capacity planning, hardware provisioning, data replication, hardware failure and
recovery, or time-consuming hardware migrations.
Glacier is a great storage choice when low storage cost is paramount and your
data is rarely retrieved. If your application requires fast or frequent access
to your data, consider using Amazon S3. For more information, see [Amazon Simple Storage Service (Amazon S3)](http://aws.amazon.com/s3/).
You can store any kind of data in any format. There is no maximum limit on the
total amount of data you can store in Glacier.
If you are a first-time user of Glacier, we recommend that you begin by reading
the following sections in the *Amazon S3 Glacier Developer Guide*:
* [What is Amazon S3 Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/introduction.html)
- This section of the Developer Guide describes the underlying data model, the
operations it supports, and the AWS SDKs that you can use to interact with the
service.
* [Getting Started with Amazon S3 Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/amazon-glacier-getting-started.html)
- The Getting Started section walks you through the process of creating a vault,
uploading archives, creating jobs to download archives, retrieving the job
output, and deleting archives.
"""
@doc """
This operation aborts a multipart upload identified by the upload ID.
After the Abort Multipart Upload request succeeds, you cannot upload any more
parts to the multipart upload or complete the multipart upload. Aborting a
completed upload fails. However, aborting an already-aborted upload will
succeed, for a short time. For more information about uploading a part and
completing a multipart upload, see `UploadMultipartPart` and
`CompleteMultipartUpload`.
This operation is idempotent.
An AWS account has full permission to perform all operations (actions). However,
AWS Identity and Access Management (IAM) users don't have any permissions by
default. You must grant them explicit permission to perform specific actions.
For more information, see [Access Control Using AWS Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Working with Archives in Amazon S3
Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html)
and [Abort Multipart Upload](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-abort-upload.html)
in the *Amazon Glacier Developer Guide*.
"""
def abort_multipart_upload(client, account_id, upload_id, vault_name, input, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/multipart-uploads/#{URI.encode(upload_id)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
This operation aborts the vault locking process if the vault lock is not in the
`Locked` state.
If the vault lock is in the `Locked` state when this operation is requested, the
operation returns an `AccessDeniedException` error. Aborting the vault locking
process removes the vault lock policy from the specified vault.
A vault lock is put into the `InProgress` state by calling `InitiateVaultLock`.
A vault lock is put into the `Locked` state by calling `CompleteVaultLock`. You
can get the state of a vault lock by calling `GetVaultLock`. For more
information about the vault locking process, see [Amazon Glacier Vault Lock](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html). For
more information about vault lock policies, see [Amazon Glacier Access Control with Vault Lock
Policies](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html).
This operation is idempotent. You can successfully invoke this operation
multiple times, if the vault lock is in the `InProgress` state or if there is no
policy associated with the vault.
"""
def abort_vault_lock(client, account_id, vault_name, input, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/lock-policy"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
This operation adds the specified tags to a vault.
Each tag is composed of a key and a value. Each vault can have up to 10 tags. If
your request would cause the tag limit for the vault to be exceeded, the
operation throws the `LimitExceededException` error. If a tag already exists on
the vault under a specified key, the existing key value will be overwritten. For
more information about tags, see [Tagging Amazon S3 Glacier Resources](https://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html).
"""
def add_tags_to_vault(client, account_id, vault_name, input, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/tags?operation=add"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, 204)
end
@doc """
You call this operation to inform Amazon S3 Glacier (Glacier) that all the
archive parts have been uploaded and that Glacier can now assemble the archive
from the uploaded parts.
After assembling and saving the archive to the vault, Glacier returns the URI
path of the newly created archive resource. Using the URI path, you can then
access the archive. After you upload an archive, you should save the archive ID
returned to retrieve the archive at a later point. You can also get the vault
inventory to obtain a list of archive IDs in a vault. For more information, see
`InitiateJob`.
In the request, you must include the computed SHA256 tree hash of the entire
archive you have uploaded. For information about computing a SHA256 tree hash,
see [Computing Checksums](https://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html).
On the server side, Glacier also constructs the SHA256 tree hash of the
assembled archive. If the values match, Glacier saves the archive to the vault;
otherwise, it returns an error, and the operation fails. The `ListParts`
operation returns a list of parts uploaded for a specific multipart upload. It
includes checksum information for each uploaded part that can be used to debug a
bad checksum issue.
Additionally, Glacier also checks for any missing content ranges when assembling
the archive; if missing content ranges are found, Glacier returns an error and
the operation fails.
Complete Multipart Upload is an idempotent operation. After your first
successful complete multipart upload, if you call the operation again within a
short period, the operation will succeed and return the same archive ID. This is
useful in the event you experience a network issue that causes an aborted
connection or receive a 500 server error, in which case you can repeat your
Complete Multipart Upload request and get the same archive ID without creating
duplicate archives. Note, however, that after the multipart upload completes,
you cannot call the List Parts operation and the multipart upload will not
appear in List Multipart Uploads response, even if idempotent complete is
possible.
An AWS account has full permission to perform all operations (actions). However,
AWS Identity and Access Management (IAM) users don't have any permissions by
default. You must grant them explicit permission to perform specific actions.
For more information, see [Access Control Using AWS Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Uploading Large Archives in Parts (Multipart
Upload)](https://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html)
and [Complete Multipart Upload](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-complete-upload.html)
in the *Amazon Glacier Developer Guide*.
"""
def complete_multipart_upload(client, account_id, upload_id, vault_name, input, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/multipart-uploads/#{URI.encode(upload_id)}"
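# Lift the archive size and SHA256 tree hash from the input into the x-amz-* request headers.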
{headers, input} =
[
{"archiveSize", "x-amz-archive-size"},
{"checksum", "x-amz-sha256-tree-hash"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"x-amz-archive-id", "archiveId"},
{"x-amz-sha256-tree-hash", "checksum"},
{"Location", "location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
This operation completes the vault locking process by transitioning the vault
lock from the `InProgress` state to the `Locked` state, which causes the vault
lock policy to become unchangeable.
A vault lock is put into the `InProgress` state by calling `InitiateVaultLock`.
You can obtain the state of the vault lock by calling `GetVaultLock`. For more
information about the vault locking process, [Amazon Glacier Vault Lock](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html).
This operation is idempotent. This request is always successful if the vault
lock is in the `Locked` state and the provided lock ID matches the lock ID
originally used to lock the vault.
If an invalid lock ID is passed in the request when the vault lock is in the
`Locked` state, the operation returns an `AccessDeniedException` error. If an
invalid lock ID is passed in the request when the vault lock is in the
`InProgress` state, the operation throws an `InvalidParameter` error.
"""
def complete_vault_lock(client, account_id, lock_id, vault_name, input, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/lock-policy/#{URI.encode(lock_id)}"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, 204)
end
@doc """
This operation creates a new vault with the specified name.
The name of the vault must be unique within a region for an AWS account. You can
create up to 1,000 vaults per account. If you need to create more vaults,
contact Amazon S3 Glacier.
You must use the following guidelines when naming a vault.
* Names can be between 1 and 255 characters long.
* Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-'
(hyphen), and '.' (period).
This operation is idempotent.
An AWS account has full permission to perform all operations (actions). However,
AWS Identity and Access Management (IAM) users don't have any permissions by
default. You must grant them explicit permission to perform specific actions.
For more information, see [Access Control Using AWS Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Creating a Vault in Amazon
Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/creating-vaults.html)
and [Create Vault
](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-put.html) in
the *Amazon Glacier Developer Guide*.
"""
def create_vault(client, account_id, vault_name, input, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}"
headers = []
query_ = []
case request(client, :put, path_, query_, headers, input, options, 201) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"Location", "location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
This operation deletes an archive from a vault.
Subsequent requests to initiate a retrieval of this archive will fail. Archive
retrievals that are in progress for this archive ID may or may not succeed
according to the following scenarios:
* If the archive retrieval job is actively preparing the data for
download when Amazon S3 Glacier receives the delete archive request, the
archival retrieval operation might fail.
* If the archive retrieval job has successfully prepared the archive
for download when Amazon S3 Glacier receives the delete archive request, you
will be able to download the output.
This operation is idempotent. Attempting to delete an already-deleted archive
does not result in an error.
An AWS account has full permission to perform all operations (actions). However,
AWS Identity and Access Management (IAM) users don't have any permissions by
default. You must grant them explicit permission to perform specific actions.
For more information, see [Access Control Using AWS Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Deleting an Archive in Amazon
Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/deleting-an-archive.html)
and [Delete Archive](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-delete.html)
in the *Amazon Glacier Developer Guide*.
"""
def delete_archive(client, account_id, archive_id, vault_name, input, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/archives/#{URI.encode(archive_id)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
This operation deletes a vault.
Amazon S3 Glacier will delete a vault only if there are no archives in the vault
as of the last inventory and there have been no writes to the vault since the
last inventory. If either of these conditions is not satisfied, the vault
deletion fails (that is, the vault is not removed) and Amazon S3 Glacier returns
an error. You can use `DescribeVault` to return the number of archives in a
vault, and you can use [Initiate a Job (POST jobs)](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html)
to initiate a new inventory retrieval for a vault. The inventory contains the
archive IDs you use to delete archives using [Delete Archive (DELETE archive)](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-delete.html).
This operation is idempotent.
An AWS account has full permission to perform all operations (actions). However,
AWS Identity and Access Management (IAM) users don't have any permissions by
default. You must grant them explicit permission to perform specific actions.
For more information, see [Access Control Using AWS Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Deleting a Vault in Amazon
Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/deleting-vaults.html)
and [Delete Vault
](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-delete.html) in
the *Amazon S3 Glacier Developer Guide*.
"""
def delete_vault(client, account_id, vault_name, input, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
This operation deletes the access policy associated with the specified vault.
The operation is eventually consistent; that is, it might take some time for
Amazon S3 Glacier to completely remove the access policy, and you might still
see the effect of the policy for a short time after you send the delete request.
This operation is idempotent. You can invoke delete multiple times, even if
there is no policy associated with the vault. For more information about vault
access policies, see [Amazon Glacier Access Control with Vault Access Policies](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html).
"""
def delete_vault_access_policy(client, account_id, vault_name, input, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/access-policy"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
This operation deletes the notification configuration set for a vault.
The operation is eventually consistent; that is, it might take some time for
Amazon S3 Glacier to completely disable the notifications and you might still
receive some notifications for a short time after you send the delete request.
An AWS account has full permission to perform all operations (actions). However,
AWS Identity and Access Management (IAM) users don't have any permissions by
default. You must grant them explicit permission to perform specific actions.
For more information, see [Access Control Using AWS Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Configuring Vault Notifications in Amazon S3
Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html)
and [Delete Vault Notification Configuration
](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-delete.html)
in the *Amazon S3 Glacier Developer Guide*.
"""
def delete_vault_notifications(client, account_id, vault_name, input, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/notification-configuration"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
This operation returns information about a job you previously initiated,
including the job initiation date, the user who initiated the job, the job
status code/message and the Amazon SNS topic to notify after Amazon S3 Glacier
(Glacier) completes the job.
For more information about initiating a job, see `InitiateJob`.
This operation enables you to check the status of your job. However, it is
strongly recommended that you set up an Amazon SNS topic and specify it in your
initiate job request so that Glacier can notify the topic after it completes the
job.
A job ID will not expire for at least 24 hours after Glacier completes the job.
An AWS account has full permission to perform all operations (actions). However,
AWS Identity and Access Management (IAM) users don't have any permissions by
default. You must grant them explicit permission to perform specific actions.
For more information, see [Access Control Using AWS Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For more information about using this operation, see the documentation for the
underlying REST API [Describe Job](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-describe-job-get.html)
in the *Amazon Glacier Developer Guide*.
"""
def describe_job(client, account_id, job_id, vault_name, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/jobs/#{URI.encode(job_id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
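# A minimal usage sketch (job ID and vault name are hypothetical; the
# "StatusCode" key is assumed from the Describe Job response shape):
#
#     {:ok, job, _resp} = describe_job(client, "-", job_id, "my-vault")
#     job["StatusCode"] # "InProgress", "Succeeded", or "Failed"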
@doc """
This operation returns information about a vault, including the vault's Amazon
Resource Name (ARN), the date the vault was created, the number of archives it
contains, and the total size of all the archives in the vault.
The number of archives and their total size are as of the last inventory
generation. This means that if you add or remove an archive from a vault, and
then immediately use Describe Vault, the change in contents will not be
immediately reflected. If you want to retrieve the latest inventory of the
vault, use `InitiateJob`. Amazon S3 Glacier generates vault inventories
approximately daily. For more information, see [Downloading a Vault Inventory in Amazon S3
Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html).
An AWS account has full permission to perform all operations (actions). However,
AWS Identity and Access Management (IAM) users don't have any permissions by
default. You must grant them explicit permission to perform specific actions.
For more information, see [Access Control Using AWS Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Retrieving Vault Metadata in Amazon S3
Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/retrieving-vault-info.html)
and [Describe Vault
](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-get.html) in
the *Amazon Glacier Developer Guide*.
"""
def describe_vault(client, account_id, vault_name, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
This operation returns the current data retrieval policy for the account and
region specified in the GET request.
For more information about data retrieval policies, see [Amazon Glacier Data Retrieval
Policies](https://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html).
"""
def get_data_retrieval_policy(client, account_id, options \\ []) do
path_ = "/#{URI.encode(account_id)}/policies/data-retrieval"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
This operation downloads the output of the job you initiated using
`InitiateJob`.
Depending on the job type you specified when you initiated the job, the output
will be either the content of an archive or a vault inventory.
You can download all the job output or download a portion of the output by
specifying a byte range. In the case of an archive retrieval job, depending on
the byte range you specify, Amazon S3 Glacier (Glacier) returns the checksum for
the portion of the data. To ensure the portion you downloaded is the correct
data, compute the checksum on the client and verify that the values match.
For both archive and inventory retrieval jobs, you should verify the downloaded
size against the size returned in the headers from the **Get Job Output**
response.
For archive retrieval jobs, you should also verify that the size is what you
expected. If you download a portion of the output, the expected size is based on
the range of bytes you specified. For example, if you specify a range of
`bytes=0-1048575`, you should verify your download size is 1,048,576 bytes. If
you download an entire archive, the expected size is the size of the archive
when you uploaded it to Amazon S3 Glacier. The expected size is also returned in
the headers from the **Get Job Output** response.
A job ID does not expire for at least 24 hours after Glacier completes the job.
That is, you can download the job output within the 24-hour period after Glacier
completes the job.
An AWS account has full permission to perform all operations (actions). However,
AWS Identity and Access Management (IAM) users don't have any permissions by
default. You must grant them explicit permission to perform specific actions.
For more information, see [Access Control Using AWS Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and the underlying REST API, see [Downloading a Vault Inventory](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html),
[Downloading an Archive](https://docs.aws.amazon.com/amazonglacier/latest/dev/downloading-an-archive.html),
and [Get Job Output
](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-job-output-get.html)
"""
def get_job_output(client, account_id, job_id, vault_name, range \\ nil, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/jobs/#{URI.encode(job_id)}/output"
headers = []
headers = if !is_nil(range) do
[{"Range", range} | headers]
else
headers
end
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"Accept-Ranges", "acceptRanges"},
{"x-amz-archive-description", "archiveDescription"},
{"x-amz-sha256-tree-hash", "checksum"},
{"Content-Range", "contentRange"},
{"Content-Type", "contentType"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
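# A sketch of a ranged download with the size check described above (job ID
# and vault name are hypothetical; the "body" key is assumed to carry the raw
# output in this client):
#
#     range = "bytes=0-1048575"
#     {:ok, output, _resp} =
#       get_job_output(client, "-", job_id, "my-vault", range)
#     # For the range above, expect exactly 1,048,576 bytes.
#     byte_size(output["body"]) == 1_048_576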
@doc """
This operation retrieves the `access-policy` subresource set on the vault; for
more information on setting this subresource, see [Set Vault Access Policy (PUT access-policy)](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-SetVaultAccessPolicy.html).
If there is no access policy set on the vault, the operation returns a `404 Not
found` error. For more information about vault access policies, see [Amazon Glacier Access Control with Vault Access
Policies](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html).
"""
def get_vault_access_policy(client, account_id, vault_name, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/access-policy"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
This operation retrieves the following attributes from the `lock-policy`
subresource set on the specified vault:
* The vault lock policy set on the vault.
* The state of the vault lock, which is either `InProgress` or
`Locked`.
* When the lock ID expires. The lock ID is used to complete the
vault locking process.
* When the vault lock was initiated and put into the `InProgress`
state.
A vault lock is put into the `InProgress` state by calling `InitiateVaultLock`.
A vault lock is put into the `Locked` state by calling `CompleteVaultLock`. You
can abort the vault locking process by calling `AbortVaultLock`. For more
information about the vault locking process, see [Amazon Glacier Vault Lock](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html).
If there is no vault lock policy set on the vault, the operation returns a `404
Not found` error. For more information about vault lock policies, see [Amazon Glacier Access Control with Vault Lock
Policies](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html).
"""
def get_vault_lock(client, account_id, vault_name, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/lock-policy"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
This operation retrieves the `notification-configuration` subresource of the
specified vault.
For information about setting a notification configuration on a vault, see
`SetVaultNotifications`. If a notification configuration for a vault is not set,
the operation returns a `404 Not Found` error. For more information about vault
notifications, see [Configuring Vault Notifications in Amazon S3 Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html).
An AWS account has full permission to perform all operations (actions). However,
AWS Identity and Access Management (IAM) users don't have any permissions by
default. You must grant them explicit permission to perform specific actions.
For more information, see [Access Control Using AWS Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Configuring Vault Notifications in Amazon S3
Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html)
and [Get Vault Notification Configuration
](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-get.html)
in the *Amazon Glacier Developer Guide*.
"""
def get_vault_notifications(client, account_id, vault_name, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/notification-configuration"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
This operation initiates a job of the specified type, which can be a select, an
archival retrieval, or a vault retrieval.
For more information about using this operation, see the documentation for the
underlying REST API [Initiate a Job](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html).
"""
def initiate_job(client, account_id, vault_name, input, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/jobs"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 202) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"x-amz-job-id", "jobId"},
{"x-amz-job-output-path", "jobOutputPath"},
{"Location", "location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
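# A sketch initiating an inventory-retrieval job (field names follow the
# Glacier JobParameters shape as an assumption; the SNS topic ARN is
# hypothetical). The "jobId" key is merged from the x-amz-job-id header above:
#
#     input = %{
#       "jobParameters" => %{
#         "Type" => "inventory-retrieval",
#         "SNSTopic" => "arn:aws:sns:us-east-1:012345678901:glacier-jobs"
#       }
#     }
#     {:ok, body, _resp} = initiate_job(client, "-", "my-vault", input)
#     body["jobId"]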
@doc """
This operation initiates a multipart upload.
Amazon S3 Glacier creates a multipart upload resource and returns its ID in the
response. The multipart upload ID is used in subsequent requests to upload parts
of an archive (see `UploadMultipartPart`).
When you initiate a multipart upload, you specify the part size in number of
bytes. The part size must be a megabyte (1024 KB) multiplied by a power of 2; for
example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so
on. The minimum allowable part size is 1 MB, and the maximum is 4 GB.
Every part you upload to this resource (see `UploadMultipartPart`), except the
last one, must have the same size. The last one can be the same size or smaller.
For example, suppose you want to upload a 16.2 MB file. If you initiate the
multipart upload with a part size of 4 MB, you will upload four parts of 4 MB
each and one part of 0.2 MB.
You don't need to know the size of the archive when you start a multipart upload
because Amazon S3 Glacier does not require you to specify the overall archive
size.
After you complete the multipart upload, Amazon S3 Glacier (Glacier) removes the
multipart upload resource referenced by the ID. Glacier also removes the
multipart upload resource if you cancel the multipart upload; the resource may
also be removed if there is no activity for a period of 24 hours.
An AWS account has full permission to perform all operations (actions). However,
AWS Identity and Access Management (IAM) users don't have any permissions by
default. You must grant them explicit permission to perform specific actions.
For more information, see [Access Control Using AWS Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Uploading Large Archives in Parts (Multipart
Upload)](https://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html)
and [Initiate Multipart Upload](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-initiate-upload.html)
in the *Amazon Glacier Developer Guide*.
"""
def initiate_multipart_upload(client, account_id, vault_name, input, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/multipart-uploads"
{headers, input} =
[
{"archiveDescription", "x-amz-archive-description"},
{"partSize", "x-amz-part-size"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"Location", "location"},
{"x-amz-multipart-upload-id", "uploadId"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
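# A sketch choosing a valid part size (a power-of-two number of megabytes,
# here 4 MB) and starting the upload. The "partSize" and "archiveDescription"
# fields are lifted into request headers by build_params above; the vault
# name is hypothetical:
#
#     part_size = 4 * 1024 * 1024
#     input = %{"partSize" => Integer.to_string(part_size),
#               "archiveDescription" => "nightly backup"}
#     {:ok, body, _resp} =
#       initiate_multipart_upload(client, "-", "my-vault", input)
#     upload_id = body["uploadId"]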
@doc """
This operation initiates the vault locking process by doing the following:
* Installing a vault lock policy on the specified vault.
* Setting the lock state of vault lock to `InProgress`.
* Returning a lock ID, which is used to complete the vault locking
process.
You can set one vault lock policy for each vault and this policy can be up to 20
KB in size. For more information about vault lock policies, see [Amazon Glacier Access Control with Vault Lock
Policies](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html).
You must complete the vault locking process within 24 hours after the vault lock
enters the `InProgress` state. After the 24 hour window ends, the lock ID
expires, the vault automatically exits the `InProgress` state, and the vault
lock policy is removed from the vault. You call `CompleteVaultLock` to complete
the vault locking process by setting the state of the vault lock to `Locked`.
After a vault lock is in the `Locked` state, you cannot initiate a new vault
lock for the vault.
You can abort the vault locking process by calling `AbortVaultLock`. You can get
the state of the vault lock by calling `GetVaultLock`. For more information
about the vault locking process, see [Amazon Glacier Vault Lock](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html).
If this operation is called when the vault lock is in the `InProgress` state,
the operation returns an `AccessDeniedException` error. When the vault lock is
in the `InProgress` state you must call `AbortVaultLock` before you can initiate
a new vault lock policy.
"""
def initiate_vault_lock(client, account_id, vault_name, input, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/lock-policy"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"x-amz-lock-id", "lockId"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
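# A sketch of the full locking flow described above (vault name is
# hypothetical; `policy_json` is your vault lock policy document as a JSON
# string, and the "policy"/"Policy" nesting is assumed from the
# VaultLockPolicy shape). The "lockId" key is merged from the x-amz-lock-id
# header above:
#
#     input = %{"policy" => %{"Policy" => policy_json}}
#     {:ok, %{"lockId" => lock_id}, _resp} =
#       initiate_vault_lock(client, "-", "my-vault", input)
#     # Within 24 hours, either finalize or abort the lock:
#     complete_vault_lock(client, "-", lock_id, "my-vault", %{})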
@doc """
This operation lists jobs for a vault, including jobs that are in progress and
jobs that have recently finished.
The List Jobs operation returns a list of these jobs sorted by job initiation
time.
Amazon Glacier retains recently completed jobs for a period before eventually
deleting them. The output of completed jobs can be retrieved while they are
retained. Retaining completed jobs for a period of time after they
have completed enables you to get a job output in the event you miss the job
completion notification or your first attempt to download it fails. For example,
suppose you start an archive retrieval job to download an archive. After the job
completes, you start to download the archive but encounter a network error. In
this scenario, you can retry and download the archive while the job exists.
The List Jobs operation supports pagination. You should always check the
response `Marker` field. If there are no more jobs to list, the `Marker` field
is set to `null`. If there are more jobs to list, the `Marker` field is set to a
non-null value, which you can use to continue the pagination of the list. To
return a list of jobs that begins at a specific job, set the marker request
parameter to the `Marker` value for that job that you obtained from a previous
List Jobs request.
You can set a maximum limit for the number of jobs returned in the response by
specifying the `limit` parameter in the request. The default limit is 50. The
number of jobs returned might be fewer than the limit, but the number of
returned jobs never exceeds the limit.
Additionally, you can filter the jobs list returned by specifying the optional
`statuscode` parameter or `completed` parameter, or both. Using the `statuscode`
parameter, you can specify to return only jobs that match either the
`InProgress`, `Succeeded`, or `Failed` status. Using the `completed` parameter,
you can specify to return only jobs that were completed (`true`) or jobs that
were not completed (`false`).
For more information about using this operation, see the documentation for the
underlying REST API [List Jobs](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-jobs-get.html).
"""
def list_jobs(client, account_id, vault_name, completed \\ nil, limit \\ nil, marker \\ nil, statuscode \\ nil, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/jobs"
headers = []
query_ = []
query_ = if !is_nil(statuscode) do
[{"statuscode", statuscode} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"marker", marker} | query_]
else
query_
end
query_ = if !is_nil(limit) do
[{"limit", limit} | query_]
else
query_
end
query_ = if !is_nil(completed) do
[{"completed", completed} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
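# A sketch of the pagination pattern described above, following the response
# `Marker` until it is nil ("JobList" and "Marker" keys are assumed from the
# List Jobs response shape; the vault name is hypothetical):
#
#     collect = fn collect, marker, acc ->
#       {:ok, body, _} = list_jobs(client, "-", "my-vault", nil, nil, marker)
#       acc = acc ++ (body["JobList"] || [])
#       case body["Marker"] do
#         nil -> acc
#         next -> collect.(collect, next, acc)
#       end
#     end
#     jobs = collect.(collect, nil, [])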
@doc """
This operation lists in-progress multipart uploads for the specified vault.
An in-progress multipart upload is a multipart upload that has been initiated by
an `InitiateMultipartUpload` request, but has not yet been completed or aborted.
The list returned in the List Multipart Upload response has no guaranteed order.
The List Multipart Uploads operation supports pagination. By default, this
operation returns up to 50 multipart uploads in the response. You should always
check the response for a `marker` at which to continue the list; if there are no
more items the `marker` is `null`. To return a list of multipart uploads that
begins at a specific upload, set the `marker` request parameter to the value you
obtained from a previous List Multipart Upload request. You can also limit the
number of uploads returned in the response by specifying the `limit` parameter
in the request.
Note the difference between this operation and listing parts (`ListParts`). The
List Multipart Uploads operation lists all multipart uploads for a vault and
does not require a multipart upload ID. The List Parts operation requires a
multipart upload ID since parts are associated with a single upload.
An AWS account has full permission to perform all operations (actions). However,
AWS Identity and Access Management (IAM) users don't have any permissions by
default. You must grant them explicit permission to perform specific actions.
For more information, see [Access Control Using AWS Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and the underlying REST API, see [Working with Archives in Amazon S3
Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html)
and [List Multipart Uploads
](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-list-uploads.html)
in the *Amazon Glacier Developer Guide*.
"""
def list_multipart_uploads(client, account_id, vault_name, limit \\ nil, marker \\ nil, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/multipart-uploads"
headers = []
query_ = []
query_ = if !is_nil(marker) do
[{"marker", marker} | query_]
else
query_
end
query_ = if !is_nil(limit) do
[{"limit", limit} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
This operation lists the parts of an archive that have been uploaded in a
specific multipart upload.
You can make this request at any time during an in-progress multipart upload
before you complete the upload (see `CompleteMultipartUpload`). List Parts
returns an error for completed uploads. The list returned in the List Parts
response is sorted by part range.
The List Parts operation supports pagination. By default, this operation returns
up to 50 uploaded parts in the response. You should always check the response
for a `marker` at which to continue the list; if there are no more items the
`marker` is `null`. To return a list of parts that begins at a specific part,
set the `marker` request parameter to the value you obtained from a previous
List Parts request. You can also limit the number of parts returned in the
response by specifying the `limit` parameter in the request.
An AWS account has full permission to perform all operations (actions). However,
AWS Identity and Access Management (IAM) users don't have any permissions by
default. You must grant them explicit permission to perform specific actions.
For more information, see [Access Control Using AWS Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and the underlying REST API, see [Working with Archives in Amazon S3
Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html)
and [List Parts](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-list-parts.html)
in the *Amazon Glacier Developer Guide*.
"""
def list_parts(client, account_id, upload_id, vault_name, limit \\ nil, marker \\ nil, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/multipart-uploads/#{URI.encode(upload_id)}"
headers = []
query_ = []
query_ = if !is_nil(marker) do
[{"marker", marker} | query_]
else
query_
end
query_ = if !is_nil(limit) do
[{"limit", limit} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
This operation lists the provisioned capacity units for the specified AWS
account.
"""
def list_provisioned_capacity(client, account_id, options \\ []) do
path_ = "/#{URI.encode(account_id)}/provisioned-capacity"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
This operation lists all the tags attached to a vault.
The operation returns an empty map if there are no tags. For more information
about tags, see [Tagging Amazon S3 Glacier Resources](https://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html).
"""
def list_tags_for_vault(client, account_id, vault_name, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/tags"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
This operation lists all vaults owned by the calling user's account.
The list returned in the response is ASCII-sorted by vault name.
By default, this operation returns up to 10 items. If there are more vaults to
list, the response `marker` field contains the vault Amazon Resource Name (ARN)
at which to continue the list with a new List Vaults request; otherwise, the
`marker` field is `null`. To return a list of vaults that begins at a specific
vault, set the `marker` request parameter to the vault ARN you obtained from a
previous List Vaults request. You can also limit the number of vaults returned
in the response by specifying the `limit` parameter in the request.
An AWS account has full permission to perform all operations (actions). However,
AWS Identity and Access Management (IAM) users don't have any permissions by
default. You must grant them explicit permission to perform specific actions.
For more information, see [Access Control Using AWS Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Retrieving Vault Metadata in Amazon S3
Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/retrieving-vault-info.html)
and [List Vaults
](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vaults-get.html) in
the *Amazon Glacier Developer Guide*.
"""
def list_vaults(client, account_id, limit \\ nil, marker \\ nil, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults"
headers = []
query_ = []
query_ = if !is_nil(marker) do
[{"marker", marker} | query_]
else
query_
end
query_ = if !is_nil(limit) do
[{"limit", limit} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
This operation purchases a provisioned capacity unit for an AWS account.
"""
def purchase_provisioned_capacity(client, account_id, input, options \\ []) do
path_ = "/#{URI.encode(account_id)}/provisioned-capacity"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"x-amz-capacity-id", "capacityId"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
This operation removes one or more tags from the set of tags attached to a
vault.
For more information about tags, see [Tagging Amazon S3 Glacier Resources](https://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html).
This operation is idempotent. The operation will be successful, even if there
are no tags attached to the vault.
"""
def remove_tags_from_vault(client, account_id, vault_name, input, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/tags?operation=remove"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, 204)
end
@doc """
This operation sets and then enacts a data retrieval policy in the region
specified in the PUT request.
You can set one policy per region for an AWS account. The policy is enacted
within a few minutes of a successful PUT operation.
The set policy operation does not affect retrieval jobs that were in progress
before the policy was enacted. For more information about data retrieval
policies, see [Amazon Glacier Data Retrieval Policies](https://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html).
"""
def set_data_retrieval_policy(client, account_id, input, options \\ []) do
path_ = "/#{URI.encode(account_id)}/policies/data-retrieval"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, 204)
end
@doc """
This operation configures an access policy for a vault and will overwrite an
existing policy.
To configure a vault access policy, send a PUT request to the `access-policy`
subresource of the vault. An access policy is specific to a vault and is also
called a vault subresource. You can set one access policy per vault and the
policy can be up to 20 KB in size. For more information about vault access
policies, see [Amazon Glacier Access Control with Vault Access Policies](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html).
"""
def set_vault_access_policy(client, account_id, vault_name, input, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/access-policy"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, 204)
end
@doc """
This operation configures notifications that will be sent when specific events
happen to a vault.
By default, you don't get any notifications.
To configure vault notifications, send a PUT request to the
`notification-configuration` subresource of the vault. The request should
include a JSON document that provides an Amazon SNS topic and specific events
for which you want Amazon S3 Glacier to send notifications to the topic.
Amazon SNS topics must grant permission to the vault to be allowed to publish
notifications to the topic. You can configure a vault to publish a notification
for the following vault events:
* **ArchiveRetrievalCompleted** This event occurs when a job that
was initiated for an archive retrieval is completed (`InitiateJob`). The status
of the completed job can be "Succeeded" or "Failed". The notification sent to
the SNS topic is the same output as returned from `DescribeJob`.
* **InventoryRetrievalCompleted** This event occurs when a job that
was initiated for an inventory retrieval is completed (`InitiateJob`). The
status of the completed job can be "Succeeded" or "Failed". The notification
sent to the SNS topic is the same output as returned from `DescribeJob`.
An AWS account has full permission to perform all operations (actions). However,
AWS Identity and Access Management (IAM) users don't have any permissions by
default. You must grant them explicit permission to perform specific actions.
For more information, see [Access Control Using AWS Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Configuring Vault Notifications in Amazon S3
Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html)
and [Set Vault Notification Configuration
](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-put.html)
in the *Amazon Glacier Developer Guide*.
"""
def set_vault_notifications(client, account_id, vault_name, input, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/notification-configuration"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, 204)
end
@doc """
This operation adds an archive to a vault.
This is a synchronous operation, and for a successful upload, your data is
durably persisted. Amazon S3 Glacier returns the archive ID in the
`x-amz-archive-id` header of the response.
You must use the archive ID to access your data in Amazon S3 Glacier. After you
upload an archive, you should save the archive ID returned so that you can
retrieve or delete the archive later. Besides saving the archive ID, you can
also index it and give it a friendly name to allow for better searching. You can
also use the optional archive description field to specify how the archive is
referred to in an external index of archives, such as you might create in Amazon
DynamoDB. You can also get the vault inventory to obtain a list of archive IDs
in a vault. For more information, see `InitiateJob`.
You must provide a SHA256 tree hash of the data you are uploading. For
information about computing a SHA256 tree hash, see [Computing Checksums](https://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html).
You can optionally specify an archive description of up to 1,024 printable ASCII
characters. You can get the archive description when you either retrieve the
archive or get the vault inventory. For more information, see `InitiateJob`.
Amazon Glacier does not interpret the description in any way. An archive
description does not need to be unique. You cannot use the description to
retrieve or sort the archive list.
Archives are immutable. After you upload an archive, you cannot edit the archive
or its description.
An AWS account has full permission to perform all operations (actions). However,
AWS Identity and Access Management (IAM) users don't have any permissions by
default. You must grant them explicit permission to perform specific actions.
For more information, see [Access Control Using AWS Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Uploading an Archive in Amazon
Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-an-archive.html)
and [Upload Archive](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-post.html)
in the *Amazon Glacier Developer Guide*.
"""
def upload_archive(client, account_id, vault_name, input, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/archives"
{headers, input} =
[
{"archiveDescription", "x-amz-archive-description"},
{"checksum", "x-amz-sha256-tree-hash"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"x-amz-archive-id", "archiveId"},
{"x-amz-sha256-tree-hash", "checksum"},
{"Location", "location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
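# A minimal sketch (the "checksum" and "archiveDescription" fields are lifted
# into headers by build_params above; computing the SHA256 tree hash of
# `data` is left out, per the linked checksum documentation, and the "body"
# key carrying the payload is an assumption about this client):
#
#     input = %{"archiveDescription" => "2021 photos",
#               "checksum" => tree_hash,
#               "body" => data}
#     {:ok, %{"archiveId" => archive_id}, _resp} =
#       upload_archive(client, "-", "my-vault", input)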
@doc """
This operation uploads a part of an archive.
You can upload archive parts in any order. You can also upload them in parallel.
You can upload up to 10,000 parts for a multipart upload.
Amazon Glacier rejects your upload part request if any of the following
conditions is true:
* **SHA256 tree hash does not match** To ensure that part data is not
corrupted in transmission, you compute a SHA256 tree hash of the part and
include it in your request. Upon receiving the part data, Amazon S3 Glacier also
computes a SHA256 tree hash. If these hash values don't match, the operation
fails. For information about computing a SHA256 tree hash, see [Computing Checksums](https://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html).
* **Part size does not match** The size of each part except the last
must match the size specified in the corresponding `InitiateMultipartUpload`
request. The size of the last part must be the same size as, or smaller than,
the specified size.
If you upload a part whose size is smaller than the part size you specified in
your initiate multipart upload request and that part is not the last part, then
the upload part request will succeed. However, the subsequent Complete Multipart
Upload request will fail.
* **Range does not align** The byte range value in the request does
not align with the part size specified in the corresponding initiate request.
For example, if you specify a part size of 4194304 bytes (4 MB), then 0 to
4194303 bytes (4 MB - 1) and 4194304 (4 MB) to 8388607 (8 MB - 1) are valid part
ranges. However, if you set a range value of 2 MB to 6 MB, the range does not
align with the part size and the upload will fail.
This operation is idempotent. If you upload the same part multiple times, the
data included in the most recent request overwrites the previously uploaded
data.
An AWS account has full permission to perform all operations (actions). However,
AWS Identity and Access Management (IAM) users don't have any permissions by
default. You must grant them explicit permission to perform specific actions.
For more information, see [Access Control Using AWS Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Uploading Large Archives in Parts (Multipart
Upload)](https://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html)
and [Upload Part
](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-upload-part.html) in
the *Amazon Glacier Developer Guide*.
"""
def upload_multipart_part(client, account_id, upload_id, vault_name, input, options \\ []) do
path_ = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/multipart-uploads/#{URI.encode(upload_id)}"
{headers, input} =
[
{"checksum", "x-amz-sha256-tree-hash"},
{"range", "Content-Range"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, 204) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"x-amz-sha256-tree-hash", "checksum"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
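# A sketch uploading one part whose Content-Range aligns with the part size
# chosen at initiation. The "range" and "checksum" fields are lifted into
# headers above; the "body" key carrying the part bytes is an assumption
# about this client:
#
#     part_size = 4 * 1024 * 1024
#     first = part_index * part_size
#     last = first + byte_size(part_bytes) - 1
#     input = %{"range" => "bytes #{first}-#{last}/*",
#               "checksum" => part_tree_hash,
#               "body" => part_bytes}
#     {:ok, _, _} =
#       upload_multipart_part(client, "-", upload_id, "my-vault", input)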
@spec request(AWS.Client.t(), atom(), binary(), list(), list(), map() | nil, list(), pos_integer() | nil) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
client = %{client | service: "glacier"}
host = build_host("glacier", client)
url = host
|> build_url(path, client)
|> add_query(query, client)
additional_headers = [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}]
headers = AWS.Request.add_headers(additional_headers, headers)
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, method, url, headers, payload)
perform_request(client, method, url, payload, headers, options, success_status_code)
end
defp perform_request(client, method, url, payload, headers, options, success_status_code) do
case AWS.Client.request(client, method, url, payload, headers, options) do
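# Stacked `when` clauses on the match below act as a logical OR: accept the
# default success codes when no explicit code was given, or the exact
# expected code otherwise.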
{:ok, %{status_code: status_code, body: body} = response}
when is_nil(success_status_code) and status_code in [200, 202, 204]
when status_code == success_status_code ->
body = if(body != "", do: decode!(client, body))
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, path, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}#{path}"
end
defp add_query(url, [], _client) do
url
end
defp add_query(url, query, client) do
querystring = encode!(client, query, :query)
"#{url}?#{querystring}"
end
defp encode!(client, payload, format \\ :json) do
AWS.Client.encode!(client, payload, format)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/glacier.ex
| 0.871898 | 0.623119 |
glacier.ex
|
starcoder
|
defmodule Scenic.Primitive.Triangle do
use Scenic.Primitive
alias Scenic.Math
# alias Scenic.Primitive
# alias Scenic.Primitive.Style
@styles [:hidden, :fill, :stroke]
# ===========================================================================
# data verification and serialization
# --------------------------------------------------------
def info(data),
do: """
#{IO.ANSI.red()}#{__MODULE__} data must be three points: {{x0,y0}, {x1,y1}, {x2,y2}}
#{IO.ANSI.yellow()}Received: #{inspect(data)}
#{IO.ANSI.default_color()}
"""
def verify({{x0, y0}, {x1, y1}, {x2, y2}} = data)
when is_number(x0) and is_number(y0) and is_number(x1) and is_number(y1) and is_number(x2) and
is_number(y2),
do: {:ok, data}
def verify(_), do: :invalid_data
# ============================================================================
def valid_styles(), do: @styles
# --------------------------------------------------------
def default_pin(data), do: centroid(data)
# --------------------------------------------------------
def centroid(data)
def centroid({{x0, y0}, {x1, y1}, {x2, y2}}) do
{
(x0 + x1 + x2) / 3,
(y0 + y1 + y2) / 3
}
end
# http://blackpawn.com/texts/pointinpoly/
# --------------------------------------------------------
@degenerate 0.0001
def contains_point?({{x0, y0} = p0, {x1, y1} = p1, {x2, y2} = p2}, px) do
# make sure the points are not collinear, if so the abs(area) will be very small
area = abs(x0 * (y1 - y2) + x1 * (y2 - y0) + x2 * (y0 - y1))
if area < @degenerate do
false
else
# compute vectors
v0 = Math.Vector2.sub(p2, p0)
v1 = Math.Vector2.sub(p1, p0)
v2 = Math.Vector2.sub(px, p0)
# compute dot products
dot00 = Math.Vector2.dot(v0, v0)
dot01 = Math.Vector2.dot(v0, v1)
dot02 = Math.Vector2.dot(v0, v2)
dot11 = Math.Vector2.dot(v1, v1)
dot12 = Math.Vector2.dot(v1, v2)
# Compute barycentric coordinates
inv_denom = 1.0 / (dot00 * dot11 - dot01 * dot01)
u = (dot11 * dot02 - dot01 * dot12) * inv_denom
v = (dot00 * dot12 - dot01 * dot02) * inv_denom
# Check if point is in triangle
u >= 0 && v >= 0 && u + v < 1
end
end
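# A quick usage sketch for the barycentric test above (values worked out by
# hand for the right triangle {0,0}, {10,0}, {0,10}):
#
#     contains_point?({{0, 0}, {10, 0}, {0, 10}}, {2, 2}) # => true
#     contains_point?({{0, 0}, {10, 0}, {0, 10}}, {8, 8}) # => false (u + v = 1.6)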
end
|
lib/scenic/primitive/triangle.ex
| 0.695441 | 0.48054 |
triangle.ex
|
starcoder
|
defmodule Day08.Redo do
def part1(file_name \\ "test1.txt") do
file_name
|> parse()
|> count_1_4_7_8()
end
def part2(file_name \\ "test1.txt") do
file_name
|> parse()
|> sum_output()
end
def sum_output(lines) do
Enum.reduce(lines, 0, fn %{input: input, output: output}, total ->
output_number = input |> decoder() |> decode(output)
total + output_number
end)
end
def decode(decoder, output) do
output
|> Enum.map_join("", fn number -> decoder[number] end)
|> String.to_integer()
end
def decoder(input) do
input
|> by_segment_length()
|> init_numbers()
|> handle_lengths_of_six()
|> handle_lengths_of_five()
|> invert()
end
def init_numbers(groups) do
Map.new()
|> Map.put("1", groups[2])
|> Map.put("4", groups[4])
|> Map.put("7", groups[3])
|> Map.put("8", groups[7])
|> Map.put(:lengths_of_six, groups[6])
|> Map.put(:lengths_of_five, groups[5])
end
def handle_lengths_of_six(numbers) do
one = numbers["1"]
four = numbers["4"]
lengths_of_six = numbers[:lengths_of_six]
{zero, six, nine} = identify_lengths_of_six(lengths_of_six, four, one)
numbers
|> Map.delete(:lengths_of_six)
|> Map.put("0", zero)
|> Map.put("6", six)
|> Map.put("9", nine)
end
def handle_lengths_of_five(numbers) do
one = numbers["1"]
six = numbers["6"]
lengths_of_five = numbers[:lengths_of_five]
{two, three, five} = identify_lengths_of_five(lengths_of_five, six, one)
numbers
|> Map.delete(:lengths_of_five)
|> Map.put("2", two)
|> Map.put("3", three)
|> Map.put("5", five)
end
def identify_lengths_of_six(lengths_of_six, four, one) do
{nine, zero_or_six} = identify_by_subset(lengths_of_six, four)
{zero, six} = identify_by_subset(zero_or_six, one)
{hd(zero), hd(six), hd(nine)}
end
def identify_lengths_of_five(lengths_of_five, six, one) do
{five, two_or_three} = identify_by_superset(lengths_of_five, six)
{three, two} = identify_by_subset(two_or_three, one)
{hd(two), hd(three), hd(five)}
end
def identify_by(signals, fun) do
%{0 => match, 1 => not_match} = Enum.group_by(signals, fun)
{match, not_match}
end
def identify_by_superset(signals, superset) do
fun = fn maybe -> length(maybe -- superset) end
identify_by(signals, fun)
end
def identify_by_subset(signals, subset) do
fun = fn maybe -> length(subset -- maybe) end
identify_by(signals, fun)
end
def invert(numbers) do
numbers
|> Enum.map(fn {key, value} -> {value, key} end)
|> Map.new()
end
def by_segment_length(input) do
input
|> group_by_segment_length()
|> unwrap_single_length()
end
def group_by_segment_length(input) do
Enum.reduce(input, %{}, fn line, acc ->
length = length(line)
Map.update(acc, length, [line], & [line | &1])
end)
end
def unwrap_single_length(groups) do
# Elixir's Map module has no map/2; Map.new/2 rebuilds the map while
# unwrapping groups that contain a single entry.
Map.new(groups, fn
{key, [single]} -> {key, single}
{key, value} -> {key, value}
end)
end
def count_1_4_7_8(lines) do
Enum.reduce(lines, 0, fn %{output: output}, total ->
total + count_output(output)
end)
end
def count_output(output) do
Enum.reduce(output, 0, fn number, acc ->
if length(number) in [2,3,4,7], do: acc + 1, else: acc
end)
end
def parse(file_name) do
"priv/" <> file_name
|> File.stream!()
|> Stream.map(fn line ->
line
|> String.trim_trailing()
|> String.split(" | ")
|> Enum.map(fn line ->
line
|> String.split(" ")
|> Enum.map(fn text -> text |> String.graphemes() |> Enum.sort() end)
end)
|> then(fn [input, output] -> %{input: input, output: output} end)
end)
|> Enum.to_list()
end
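# For reference, each parsed line becomes a map shaped like the following
# (each signal is its graphemes, sorted, per the pipeline above):
#
#     %{input: [["a", "b"], ["a", "b", "d"], ...], output: [["a", "b"], ...]}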
end
|
jpcarver+elixir/day08/lib/day08.redo.ex
| 0.536556 | 0.443841 |
day08.redo.ex
|
starcoder
|
defmodule RMap.Ruby do
@moduledoc """
Summarized all of Ruby's Hash functions.
Functions corresponding to the following patterns are not implemented
- When a function with the same name already exists in Elixir.
- When a method name includes `!`.
- <, <=, ==, >, >=, [], []=, default_*
"""
@spec __using__(any) :: list
defmacro __using__(_opts) do
RUtils.define_all_functions!(__MODULE__)
end
import RMap.Support
# https://ruby-doc.org/core-3.1.0/Hash.html
# [:any?, :assoc, :clear, :compact, :compact!, :compare_by_identity, :compare_by_identity?, :deconstruct_keys, :delete, :delete_if, :dig, :each, :each_key, :each_pair, :each_value, :empty?, :eql?, :except, :fetch, :fetch_values, :filter, :filter!, :flatten, :has_key?, :has_value?, :hash, :include?, :initialize_copy, :inspect, :invert, :keep_if, :key, :key?, :keys, :length, :member?, :merge, :merge!, :rassoc, :rehash, :reject, :reject!, :replace, :select, :select!, :shift, :size, :slice, :store, :to_a, :to_h, :to_hash, :to_proc, :to_s, :transform_keys, :transform_keys!, :transform_values, :transform_values!, :update, :value?, :values, :values_at]
# |> RUtils.required_functions([Map, REnum])
# ✔ assoc
# ✔ clear
# × compare_by_identity
# × compare_by_identity?
# × deconstruct_keys
# ✔ delete_if
# ✔ dig
# ✔ each_key
# ✔ each_pair
# ✔ each_value
# ✔ eql?
# ✔ except
# ✔ fetch_values
# ✔ flatten
# ✔ has_value?
# hash TODO: Low priority
# × initialize_copy
# ✔ inspect
# ✔ invert
# ✔ keep_if
# ✔ key
# ✔ key?
# ✔ length
# ✔ rassoc
# × rehash
# ✔ shift
# ✔ store
# ✔ to_hash
# × to_proc
# ✔ to_s
# ✔ transform_keys
# ✔ transform_values
# ✔ value?
# ✔ values_at
@doc """
Returns a map whose entries are those for which the function returns a truthy value.
## Examples
iex> RMap.filter(%{a: 1, b: 2, c: 3}, fn {_, v} -> v > 1 end)
%{b: 2, c: 3}
"""
@spec filter(map(), function()) :: map()
def filter(map, func) do
Enum.filter(map, func)
|> Map.new()
end
@doc """
Returns a map whose entries are all those for which the function returns false or nil.
## Examples
iex> RMap.reject(%{a: 1, b: 2, c: 3}, fn {_, v} -> v > 1 end)
%{a: 1}
"""
@spec reject(map(), function()) :: map()
def reject(map, func) do
Enum.reject(map, func)
|> Map.new()
end
@doc """
Returns %{}.
## Examples
iex> RMap.clear(%{a: 1, b: 2, c: 3})
%{}
"""
@spec clear(map()) :: %{}
def clear(_) do
%{}
end
@doc """
Calls the function with each value; returns :ok.
## Examples
iex> RMap.each_value(%{a: 1, b: 2, c: 3}, &IO.inspect(&1))
# 1
# 2
# 3
:ok
"""
@spec each_value(map(), function()) :: :ok
def each_value(map, func) do
Enum.each(map, fn {_, value} ->
func.(value)
end)
end
@doc """
Calls the function with each key; returns :ok.
## Examples
iex> RMap.each_key(%{a: 1, b: 2, c: 3}, &IO.inspect(&1))
# :a
# :b
# :c
:ok
"""
@spec each_key(map(), function()) :: :ok
def each_key(map, func) do
Enum.each(map, fn {key, _} ->
func.(key)
end)
end
@doc """
Returns true if value is a value in the map, otherwise false.
## Examples
iex> RMap.value?(%{a: 1, b: 2, c: 3}, 3)
true
iex> RMap.value?(%{a: 1, b: 2, c: 3}, 4)
false
"""
@spec value?(map(), any) :: boolean()
def value?(map, value) do
Enum.any?(map, fn {_, v} ->
v == value
end)
end
@doc """
Returns a list containing values for the given keys.
## Examples
iex> RMap.values_at(%{a: 1, b: 2, c: 3}, [:a, :b, :d])
[1, 2, nil]
"""
@spec values_at(map(), list()) :: list()
def values_at(map, keys) do
Enum.map(keys, &Map.get(map, &1))
end
@doc """
Returns given map.
## Examples
iex> RMap.to_hash(%{a: 1, b: 2, c: 3})
%{a: 1, b: 2, c: 3}
"""
@spec to_hash(map()) :: map()
def to_hash(map) do
map
end
@doc """
Returns the object in nested map that is specified by a given key and additional arguments.
## Examples
iex> RMap.dig(%{a: %{b: %{c: 1}}}, [:a, :b, :c])
1
iex> RMap.dig(%{a: %{b: %{c: 1}}}, [:a, :c, :b])
nil
"""
@spec dig(map(), list()) :: any()
def dig(nil, _), do: nil
def dig(result, []), do: result
def dig(map, keys) do
[key | tail_keys] = keys
result = Map.get(map, key)
dig(result, tail_keys)
end
@doc """
Returns a 2-element tuple containing a given key and its value.
## Examples
iex> RMap.assoc(%{a: 1, b: 2, c: 3}, :a)
{:a, 1}
iex> RMap.assoc(%{a: 1, b: 2, c: 3}, :d)
nil
iex> RMap.assoc(%{a: %{b: %{c: 1}}}, :a)
{:a, %{b: %{c: 1}}}
"""
@spec assoc(map(), any()) :: any()
def assoc(map, key) do
if(value = Map.get(map, key)) do
{key, value}
else
nil
end
end
@doc """
Returns a 2-element tuple consisting of the key and value of the first-found entry having a given value.
## Examples
iex> RMap.rassoc(%{a: 1, b: 2, c: 3}, 1)
{:a, 1}
iex> RMap.rassoc(%{a: 1, b: 2, c: 3}, 4)
nil
iex> RMap.rassoc(%{a: %{b: %{c: 1}}}, %{b: %{c: 1}})
{:a, %{b: %{c: 1}}}
"""
@spec rassoc(map(), any()) :: any()
def rassoc(map, value) do
Enum.find_value(map, fn {k, v} ->
if v == value, do: {k, v}
end)
end
@doc """
Returns a map with modified keys.
## Examples
iex> RMap.transform_keys(%{a: 1, b: 2, c: 3}, &to_string(&1))
%{"a" => 1, "b" => 2, "c" => 3}
iex> RMap.transform_keys(%{a: %{b: %{c: 1}}}, &to_string(&1))
%{"a" => %{b: %{c: 1}}}
"""
@spec transform_keys(map(), function()) :: map()
def transform_keys(map, func) do
Enum.map(map, fn {key, value} ->
{func.(key), value}
end)
|> Map.new()
end
@doc """
Returns a map with modified values.
## Examples
iex> RMap.transform_values(%{a: 1, b: 2, c: 3}, &inspect(&1))
%{a: "1", b: "2", c: "3"}
iex> RMap.transform_values(%{a: %{b: %{c: 1}}}, &inspect(&1))
%{a: "%{b: %{c: 1}}"}
"""
@spec transform_values(map(), function()) :: map()
def transform_values(map, func) do
Enum.map(map, fn {key, value} ->
{key, func.(value)}
end)
|> Map.new()
end
@doc """
Returns a map excluding entries for the given keys.
## Examples
iex> RMap.except(%{a: 1, b: 2, c: 3}, [:a, :b])
%{c: 3}
"""
@spec except(map(), list()) :: map()
def except(map, keys) do
delete_if(map, fn {key, _} ->
key in keys
end)
end
@doc """
Returns a list containing the values associated with the given keys.
## Examples
iex> RMap.fetch_values(%{ "cat" => "feline", "dog" => "canine", "cow" => "bovine" }, ["cow", "cat"])
["bovine", "feline"]
iex> RMap.fetch_values(%{ "cat" => "feline", "dog" => "canine", "cow" => "bovine" }, ["cow", "bird"])
** (MapKeyError) key not found: bird
"""
@spec fetch_values(map(), list()) :: list()
def fetch_values(map, keys) do
Enum.map(keys, fn key ->
# Map.fetch/2 distinguishes a missing key from a stored nil/false value
case Map.fetch(map, key) do
{:ok, value} -> value
:error -> raise MapKeyError, "key not found: #{key}"
end
end)
end
@doc """
When a function is given, calls the function with each missing key and uses its return value as the value for that key.
## Examples
iex> RMap.fetch_values(%{ "cat" => "feline", "dog" => "canine", "cow" => "bovine" }, ["cow", "bird"], &(String.upcase(&1)))
["bovine", "BIRD"]
"""
@spec fetch_values(map(), list(), function()) :: list()
def fetch_values(map, keys, func) do
Enum.map(keys, fn key ->
case Map.fetch(map, key) do
{:ok, value} -> value
:error -> func.(key)
end
end)
end
@doc """
Returns a flattened list of keys and values, descending into nested maps and flattening list values.
## Examples
iex> RMap.flatten(%{1=> "one", 2 => [2,"two"], 3 => "three"})
[1, "one", 2, 2, "two", 3, "three"]
iex> RMap.flatten(%{1 => "one", 2 => %{a: 1, b: %{c: 3}}})
[1, "one", 2, :a, 1, :b, :c, 3]
"""
@spec flatten(map()) :: list()
def flatten(map) do
deep_to_list(map) |> List.flatten()
end
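# `deep_to_list/1` is a private helper defined elsewhere in this module; a
# minimal sketch of it, assuming only maps are nested, could look like:
#
#   defp deep_to_list(map) do
#     Enum.map(map, fn
#       {k, v} when is_map(v) -> [k, deep_to_list(v)]
#       {k, v} -> [k, v]
#     end)
#   end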
@doc """
Returns a map with each key-value pair inverted. Duplicate values are collapsed, keeping the last entry processed.
## Examples
iex> RMap.invert(%{"a" => 0, "b" => 100, "c" => 200, "d" => 300, "e" => 300})
%{0 => "a", 100 => "b", 200 => "c", 300 => "e"}
iex> RMap.invert(%{a: 1, b: 1, c: %{d: 2}})
%{1 => :b, %{d: 2} => :c}
"""
@spec invert(map()) :: map()
def invert(map) do
map
|> Enum.map(fn {k, v} ->
{v, k}
end)
|> Map.new()
end
@doc """
Removes the first map entry and returns a 2-element tuple:
the first element is the removed {key, value} pair (or nil if the map was empty),
the second element is the remaining map.
## Examples
iex> RMap.shift(%{a: 1, b: 2, c: 3})
{{:a, 1}, %{b: 2, c: 3}}
iex> RMap.shift(%{})
{nil, %{}}
"""
@spec shift(map()) :: {tuple() | nil, map()}
def shift(map) do
{result, list} = map |> Enum.split(1)
{List.last(result), Map.new(list)}
end
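# Ruby-style aliases, delegated to the equivalent functions above or to the
# standard library.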
defdelegate delete_if(map, func), to: __MODULE__, as: :reject
defdelegate keep_if(map, func), to: __MODULE__, as: :filter
defdelegate select(map, func), to: __MODULE__, as: :filter
defdelegate length(map), to: Enum, as: :count
defdelegate size(map), to: Enum, as: :count
defdelegate to_s(map), to: Kernel, as: :inspect
defdelegate inspect(map), to: Kernel, as: :inspect
defdelegate each_pair(map, func), to: Enum, as: :each
defdelegate key(map, key, default \\ nil), to: Map, as: :get
defdelegate key?(map, key), to: Map, as: :has_key?
defdelegate has_value?(map, value), to: __MODULE__, as: :value?
defdelegate store(map, key, value), to: Map, as: :put
defdelegate eql?(map1, map2), to: Map, as: :equal?
end
defmodule MapKeyError do
defexception [:message]
end
|
lib/r_map/ruby.ex
| 0.704465 | 0.57687 |
ruby.ex
|
starcoder
|
defmodule Aoc2019.Day12 do
@behaviour DaySolution
def solve_part1() do
positions = get_moons()
velocities = List.duplicate({0, 0, 0}, length(positions))
{positions, velocities} |> iterate(1000) |> total_energy()
end
def solve_part2() do
positions = get_moons()
velocities = List.duplicate({0, 0, 0}, length(positions))
{positions, velocities} |> get_period
end
def iterate(moons, n_steps) when n_steps < 1, do: moons
def iterate(moons, n_steps), do: iterate(step(moons), n_steps - 1)
def total_energy({positions, velocities}),
do: positions |> Enum.zip(velocities) |> Enum.map(&energy/1) |> Enum.sum()
# Find period separately for each dimension, then find lowest common multiple
def get_period({positions, velocities}),
do:
[:x, :y, :z]
|> Enum.map(fn dimension -> {positions, velocities} |> get_period_dim(dimension) end)
|> Utils.lowest_common_multiple()
defp get_period_dim({positions_init, velocities_init}, dimension),
do:
Stream.iterate(1, fn step -> step + 1 end)
|> Enum.reduce_while({positions_init, velocities_init}, fn step, {positions, velocities} ->
{positions, velocities} = step({positions, velocities})
if {positions, velocities} |> extract_dimension(dimension) ==
{positions_init, velocities_init} |> extract_dimension(dimension),
do: {:halt, step},
else: {:cont, {positions, velocities}}
end)
defp extract_dimension({positions, velocities}, dimension) do
[positions, velocities]
|> Enum.map(fn posvels ->
posvels
|> Enum.map(fn posvel ->
posvel
|> Tuple.to_list()
|> Enum.at(
case dimension do
:x -> 0
:y -> 1
:z -> 2
end
)
end)
end)
end
defp get_moons(),
do:
File.read!("inputs/input_day12")
|> String.replace("x=", "")
|> String.replace("y=", "")
|> String.replace("z=", "")
|> String.replace("<", "")
|> String.replace(">", "")
|> String.split("\n")
|> List.delete_at(-1)
|> Enum.map(fn line ->
line
|> String.split(",")
|> Enum.map(fn s -> s |> String.trim() |> Integer.parse() end)
|> Enum.map(fn {x, _} -> x end)
end)
|> Enum.map(&List.to_tuple/1)
defp energy({position, velocity}),
do:
[position, velocity]
|> Enum.map(fn tuple ->
tuple |> Tuple.to_list() |> Enum.map(fn x -> abs(x) end) |> Enum.sum()
end)
|> Enum.reduce(1, fn e, total -> total * e end)
defp step({positions, velocities}) do
n = length(positions)
velocities =
0..(n - 2)
|> Enum.flat_map(fn i -> (i + 1)..(n - 1) |> Enum.map(fn j -> {i, j} end) end)
|> Enum.reduce(velocities, fn {i, j}, velocities ->
{dx, dy, dz} = velocity_delta(positions |> Enum.at(i), positions |> Enum.at(j))
velocities
|> List.update_at(i, fn {x, y, z} -> {x + dx, y + dy, z + dz} end)
|> List.update_at(j, fn {x, y, z} -> {x - dx, y - dy, z - dz} end)
end)
positions =
positions
|> Enum.zip(velocities)
|> Enum.map(fn {{x, y, z}, {vx, vy, vz}} -> {x + vx, y + vy, z + vz} end)
{positions, velocities}
end
defp velocity_delta(position1, position2),
do:
Tuple.to_list(position1)
|> Enum.zip(Tuple.to_list(position2))
|> Enum.map(fn {x1, x2} -> if x1 == x2, do: 0, else: round((x2 - x1) / abs(x2 - x1)) end)
|> List.to_tuple()
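# `Utils.lowest_common_multiple/1` is assumed above but not shown here; a minimal
# sketch of it folds Integer.gcd/2 over the list, using lcm(a, b) = div(a * b, gcd(a, b)):
#
#   def lowest_common_multiple(numbers),
#     do: Enum.reduce(numbers, fn a, b -> div(a * b, Integer.gcd(a, b)) end)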
end
|
lib/aoc2019/day12.ex
| 0.773388 | 0.585397 |
day12.ex
|
starcoder
|
defmodule Rubber.Index do
@moduledoc """
The indices APIs are used to manage individual indices, index settings, aliases, mappings, and index templates.
[Elastic documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices.html)
"""
import Rubber.HTTP, only: [prepare_url: 2]
alias Rubber.{HTTP, JSON}
@doc """
Creates a new index.
## Examples
iex> Rubber.Index.create("http://localhost:9200", "twitter", %{})
{:ok, %HTTPoison.Response{...}}
"""
@spec create(elastic_url :: String.t(), name :: String.t(), data :: map) :: HTTP.resp()
def create(elastic_url, name, data) do
prepare_url(elastic_url, name)
|> HTTP.put(JSON.encode!(data))
end
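# A fuller (hypothetical) payload for create/3 with index settings and mappings:
#
#   Rubber.Index.create("http://localhost:9200", "twitter", %{
#     settings: %{number_of_shards: 1},
#     mappings: %{properties: %{user: %{type: "keyword"}}}
#   })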
@doc """
Deletes an existing index.
## Examples
iex> Rubber.Index.delete("http://localhost:9200", "twitter")
{:ok, %HTTPoison.Response{...}}
"""
@spec delete(elastic_url :: String.t(), name :: String.t()) :: HTTP.resp()
def delete(elastic_url, name) do
prepare_url(elastic_url, name)
|> HTTP.delete()
end
@doc """
Fetches info about an existing index.
## Examples
iex> Rubber.Index.get("http://localhost:9200", "twitter")
{:ok, %HTTPoison.Response{...}}
"""
@spec get(elastic_url :: String.t(), name :: String.t()) :: HTTP.resp()
def get(elastic_url, name) do
prepare_url(elastic_url, name)
|> HTTP.get()
end
@doc """
Returns `true` if the specified index exists, `false` otherwise.
## Examples
iex> Rubber.Index.exists?("http://localhost:9200", "twitter")
{:ok, false}
iex> Rubber.Index.create("http://localhost:9200", "twitter", %{})
{:ok, %HTTPoison.Response{...}}
iex> Rubber.Index.exists?("http://localhost:9200", "twitter")
{:ok, true}
"""
@spec exists?(elastic_url :: String.t(), name :: String.t()) :: {:ok, boolean()} | {:error, any()}
def exists?(elastic_url, name) do
case prepare_url(elastic_url, name) |> HTTP.head() do
{:ok, response} ->
case response.status_code do
200 -> {:ok, true}
404 -> {:ok, false}
end
err ->
err
end
end
@doc """
Forces the [refresh](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html)
of the specified index.
## Examples
iex> Rubber.Index.refresh("http://localhost:9200", "twitter")
{:ok, %HTTPoison.Response{...}}
"""
@spec refresh(elastic_url :: String.t(), name :: String.t()) :: HTTP.resp()
def refresh(elastic_url, name) do
prepare_url(elastic_url, [name, "_refresh"])
|> HTTP.post("")
end
@doc """
[Opens](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html)
the specified index.
## Examples
iex> Rubber.Index.open("http://localhost:9200", "twitter")
{:ok, %HTTPoison.Response{...}}
"""
@spec open(elastic_url :: String.t(), name :: String.t()) :: HTTP.resp()
def open(elastic_url, name) do
prepare_url(elastic_url, [name, "_open"])
|> HTTP.post("")
end
@doc """
[Closes](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html)
the specified index.
## Examples
iex> Rubber.Index.close("http://localhost:9200", "twitter")
{:ok, %HTTPoison.Response{...}}
"""
@spec close(elastic_url :: String.t(), name :: String.t()) :: HTTP.resp()
def close(elastic_url, name) do
prepare_url(elastic_url, [name, "_close"])
|> HTTP.post("")
end
end
|
lib/rubber/index.ex
| 0.863291 | 0.402891 |
index.ex
|
starcoder
|
defmodule Divo.Compose do
@moduledoc """
Implements the basic docker-compose commands for running from
your mix tasks. Run, stop, and kill container services.
These operations only apply to services managed by Divo, i.e. defined in
your Mix config for the current environment under the `:myapp, :divo` key.
"""
require Logger
alias Divo.{File, Helper, Validate}
@doc """
Builds and/or validates the compose file and executes the `docker-compose up`
call to start the entirety of the defined stack or a subset of the services
defined in the stack based on supplying an optional list of service keys.
"""
@spec run(keyword()) :: [any()]
def run(opts \\ []) do
services = get_services(opts)
(["up", "--detach"] ++ services)
|> execute()
await()
end
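# Example (hypothetical service names) from a Mix task or test helper:
#
#   Divo.Compose.run(services: [:redis, :kafka])
#   # ... exercise the running services ...
#   Divo.Compose.kill()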
@doc """
Builds and/or validates the compose file and executes the `docker-compose stop`
call to stop the containerized services without removing the resources created
by the compose file.
"""
@spec stop() :: :ok | {:error, any()}
def stop() do
execute("stop")
end
@doc """
Builds and/or validates the compose file and executes the `docker-compose down`
call to stop the containerized services and removes all resources created by
the compose file such as containers, networks, and volumes.
"""
@spec kill() :: :ok | {:error, any()}
def kill() do
execute("down")
end
defp execute(action) do
file =
Helper.fetch_config()
|> File.ensure_file()
args =
(["--file", file] ++ [action])
|> List.flatten()
Validate.validate(file)
System.cmd("docker-compose", args, stderr_to_stdout: true)
|> log_compose()
end
defp log_compose({message, 0}), do: Logger.info(message)
defp log_compose({message, code}) do
Logger.error("Docker Compose exited with code: #{code}. #{message}")
raise "Docker Compose exited with code: #{code}. #{message}"
end
defp get_services(opts) do
case Keyword.get(opts, :services) do
nil -> []
defined -> Enum.map(defined, &to_string(&1))
end
end
defp await() do
Logger.info("Please wait for containers to register as 'healthy'")
fetch_containers()
|> Enum.filter(&health_defined?/1)
|> Enum.map(&await_healthy/1)
end
defp await_healthy(container) do
wait_config =
Helper.fetch_name()
|> Application.get_env(:divo_wait, dwell: 500, max_tries: 10)
dwell = Keyword.get(wait_config, :dwell)
tries = Keyword.get(wait_config, :max_tries)
Patiently.wait_for!(
check_health(container),
dwell: dwell,
max_tries: tries
)
end
defp check_health(container) do
fn ->
Logger.debug("Checking #{container} is healthy...")
container
|> health_status()
|> case do
"healthy" ->
Logger.info("Service #{container} ready!")
true
_ ->
false
end
end
end
defp fetch_containers() do
{containers, _} = System.cmd("docker", ["ps", "--quiet"])
String.split(containers, "\n", trim: true)
end
defp health_defined?(container) do
{health, _} = System.cmd("docker", ["inspect", "--format", "{{json .State.Health}}", container])
health
|> Jason.decode!()
|> case do
nil -> false
_ -> true
end
end
defp health_status(container) do
{status, _} = System.cmd("docker", ["inspect", "--format", "{{json .State.Health.Status}}", container])
Jason.decode!(status)
end
end
|
lib/divo/compose.ex
| 0.689515 | 0.440048 |
compose.ex
|
starcoder
|
defmodule Processes do
use Koans
@intro "Processes"
koan "You are a process" do
assert Process.alive?(self()) == true
end
koan "You can ask a process to introduce itself" do
information = Process.info(self())
assert information[:status] == :running
end
koan "Processes are referenced by their process ID (pid)" do
assert is_pid(self()) == true
end
koan "New processes are spawned functions" do
value =
spawn(fn ->
receive do
end
end)
assert is_pid(value) == true
end
koan "Processes die when their function exits" do
fast_process = spawn(fn -> :timer.sleep(10) end)
slow_process = spawn(fn -> :timer.sleep(1000) end)
# All spawned functions are executed concurrently with the current process.
# You check back on slow_process and fast_process 50ms later. Let's
# see if they are still alive!
:timer.sleep(50)
assert Process.alive?(fast_process) == false
assert Process.alive?(slow_process) == true
end
koan "Processes can send and receive messages" do
send(self(), "hola!")
receive do
msg -> assert msg == "hola!"
end
end
koan "A process will wait forever for a message" do
wait_forever = fn ->
receive do
end
end
pid = spawn(wait_forever)
assert Process.alive?(pid) == true
end
koan "Received messages are queued, first in first out" do
send(self(), "hola!")
send(self(), "como se llama?")
assert_receive "hola!"
assert_receive "como se llama?"
end
koan "A common pattern is to include the sender in the message, so that it can reply" do
greeter = fn ->
receive do
{:hello, sender} -> send(sender, :how_are_you?)
end
end
pid = spawn(greeter)
send(pid, {:hello, self()})
assert_receive :how_are_you?
end
def yelling_echo_loop do
receive do
{caller, value} ->
send(caller, String.upcase(value))
yelling_echo_loop()
end
end
koan "Use tail recursion to receive multiple messages" do
# tail recursion: the last statement is the recursive call
# - recall that it can often be more efficient in languages
# that perform tail-call optimization
# - this is because the computation does not have to wait for the recursive
# call to return: the work is done first and the result is passed along
pid = spawn_link(&yelling_echo_loop/0)
send(pid, {self(), "o"})
assert_receive "O"
send(pid, {self(), "hai"})
assert_receive "HAI"
end
def state(value) do
receive do
{caller, :get} ->
send(caller, value)
state(value)
{caller, :set, new_value} ->
state(new_value)
end
end
koan "Processes can be used to hold state" do
initial_state = "foo"
pid =
spawn(fn ->
state(initial_state)
end)
send(pid, {self(), :get})
assert_receive "foo"
send(pid, {self(), :set, "bar"})
send(pid, {self(), :get})
assert_receive "bar"
end
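# The hand-rolled state/1 loop above is what GenServer abstracts away; a
# minimal sketch (hypothetical, not part of the koans) of the same idea:
#
#   defmodule Holder do
#     use GenServer
#     def init(value), do: {:ok, value}
#     def handle_call(:get, _from, value), do: {:reply, value, value}
#     def handle_cast({:set, new_value}, _value), do: {:noreply, new_value}
#   end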
koan "Waiting for a message can get boring" do
parent = self()
# You can specify numbers like 1000000 as 1_000_000 for readability
# Also, :after is another param in a keyword list!
# I have some mixed feelings about these macros; they're nice for sure
# once you learn them, but at first they look like fns that violate the
# language rules.
# There are a lot of rules to remember as a result, e.g. where to use ->
# I wonder if this :after could be implemented with a case statement;
# in fact, it is implemented natively as part of receive.
spawn(fn ->
receive do
after
5 -> send(parent, {:waited_too_long, "I am impatient"})
end
end)
assert_receive {:waited_too_long, "I am impatient"}
end
koan "Trapping will allow you to react to someone terminating the process" do
# https://hexdocs.pm/elixir/Process.html#exit/2
parent = self()
pid =
spawn(fn ->
Process.flag(:trap_exit, true)
send(parent, :ready)
receive do
{:EXIT, _pid, reason} -> send(parent, {:exited, reason})
end
end)
receive do
:ready -> true
end
# why does this not work w/o the child sending and the parent receiving first?
# because receive blocks! without blocking on receive, we'd send the exit signal
# before the child gets a chance to set its trap. Concurrency is hard!
# :timer.sleep(50)
# :erlang.process_info(self(), :messages) |> IO.inspect
Process.exit(pid, :random_reason)
assert_receive {:exited, :random_reason}
end
koan "Parent processes can trap exits for children they are linked to" do
# https://elixir-lang.org/getting-started/processes.html#links
# Curious, how does the koans restart the process?
Process.flag(:trap_exit, true)
spawn_link(fn -> Process.exit(self(), :normal) end)
assert_receive {:EXIT, _pid, :normal}
end
koan "If you monitor your children, you'll be automatically informed of their departure" do
# https://hexdocs.pm/elixir/Process.html#monitor/1
spawn_monitor(fn -> Process.exit(self(), :normal) end)
assert_receive {:DOWN, _ref, :process, _pid, :normal}
end
end
|
lib/koans/15_processes.ex
| 0.613005 | 0.591163 |
15_processes.ex
|
starcoder
|
defmodule CensysEx.Hosts do
@moduledoc """
CensysEx wrapper for the search.censys.io v2 API for the "hosts" resource
"""
alias CensysEx.{Paginate, Search, Util}
@typedoc """
Values that determine how to query Virtual Hosts. `:exclude` will ignore any virtual hosts
entries, `:include` virtual hosts will be present in the returned list of hits, `:only` will
return only virtual hosts
"""
@type v_hosts :: :exclude | :include | :only
@index "hosts"
@doc """
Hits the Censys Hosts search API. Returns a stream of results for your query
- API docs: https://search.censys.io/api#/hosts/searchHosts
- Syntax: https://search.censys.io/search/language?resource=hosts
## Examples
```
CensysEx.Hosts.search("same_service(service_name: SSH and not port: 22)")
|> Stream.take(25)
|> Stream.map(&Map.get(&1, "ip"))
|> Enum.to_list()
["10.0.0.6", "10.2.0.1", ...]
```
"""
@spec search(String.t(), integer(), v_hosts()) :: CensysEx.result_stream(map())
def search(query \\ "", per_page \\ 100, virtual_hosts \\ :exclude),
do: Search.search(@index, query, per_page, virtual_hosts: vhost_to_string(virtual_hosts))
@doc """
Hits the Censys Hosts view API. Returning full
information about an IP at a given time
- API docs: https://search.censys.io/api#/hosts/viewHost
## Examples
```
CensysEx.Hosts.view("127.0.0.1")
# View "127.0.0.1" at a certain time
CensysEx.Hosts.view("127.0.0.1", ~U[2021-06-07 12:53:27.450073Z])
```
"""
@spec view(String.t(), DateTime.t() | nil) :: CensysEx.result()
def view(ip, at_time \\ nil),
do: Util.get_client().view(@index, ip, at_time)
@doc """
Hits the Censys Hosts view names API. Returning a stream of names for that IP.
- API docs: https://search.censys.io/api#/hosts/viewHostNames
## Examples
```
CensysEx.Hosts.names("127.0.0.1")
```
"""
@spec names(String.t()) :: CensysEx.result_stream(String.t())
def names(ip) do
next = fn params -> Util.get_client().get(@index, ip <> "/names", [], params) end
extractor = fn client = %Paginate{} -> get_in(client.results, ["result", "names"]) end
Paginate.stream(next, extractor)
end
@doc """
Hits the Censys Hosts diff API.
- API docs: https://search.censys.io/api#/hosts/viewHostDiff
## Examples
```
# diff the current host with itself 🤷
CensysEx.Hosts.diff("8.8.8.8")
# diff two hosts
CensysEx.Hosts.diff("8.8.8.8", "1.1.1.1")
# diff a host with itself at a time in the past
CensysEx.Hosts.diff("8.8.8.8", nil, ~U[2021-06-07 12:53:27.450073Z])
# diff two hosts in the past
CensysEx.Hosts.diff("8.8.8.8", "8.8.4.4" ~U[2021-06-07 12:53:27.450073Z], ~U[2021-06-07 12:53:27.450073Z])
```
"""
@spec diff(String.t(), String.t() | nil, DateTime.t() | nil, DateTime.t() | nil) ::
CensysEx.result()
def diff(ip, ip_b \\ nil, at_time \\ nil, at_time_b \\ nil),
do: Util.get_client().get(@index, ip <> "/diff", [], params: Util.build_diff_params(ip_b, at_time, at_time_b))
@doc """
Hits the Censys Hosts aggregate API. Optionally control number of buckets returned
- API docs: https://search.censys.io/api#/hosts/aggregateHosts
## Examples
```
CensysEx.Hosts.aggregate("location.country_code", "services.service_name: MEMCACHED")
CensysEx.Hosts.aggregate("location.country_code", "services.service_name: MEMCACHED", 1000)
```
"""
@spec aggregate(String.t(), String.t() | nil, integer(), v_hosts()) :: CensysEx.result()
def aggregate(field, query \\ nil, num_buckets \\ 50, virtual_hosts \\ :exclude),
do: Util.get_client().aggregate(@index, field, query, num_buckets, virtual_hosts: vhost_to_string(virtual_hosts))
@spec vhost_to_string(v_hosts) :: String.t()
defp vhost_to_string(v_host) do
case v_host do
:include -> "INCLUDE"
:only -> "ONLY"
_ -> "EXCLUDE"
end
end
end
|
lib/collections/hosts.ex
| 0.915318 | 0.754892 |
hosts.ex
|
starcoder
|
defmodule LetterLinesElixir.BoardWord do
@moduledoc """
Module for working with a BoardWord. Each BoardWord is aware of the x/y coordinates of the start of its word,
whether the word is presented horizontally or vertically, the string value of the word, whether it has been revealed,
and the length of the word.
"""
alias LetterLinesElixir.BoardWord
defstruct [:x, :y, :direction, :word, :revealed?, :size]
@type t :: %BoardWord{
x: integer(),
y: integer(),
direction: :h | :v,
word: String.t(),
revealed?: boolean(),
size: integer()
}
def new(x, y, direction, word) do
%BoardWord{x: x, y: y, direction: direction, word: word, revealed?: false, size: String.length(word)}
end
@doc """
Given a list of words, return the maximum x and y values for the size of the Board beginning from (0, 0)
"""
def get_max_size(words) when is_list(words) do
Enum.reduce(words, {0, 0}, fn word, {current_max_x, current_max_y} ->
{max_x, max_y} = get_max_size(word)
{max(current_max_x, max_x), max(current_max_y, max_y)}
end)
end
@doc """
Return the x/y coordinates for the last letter in a word
"""
def get_max_size(%BoardWord{x: x, y: y, direction: :h, word: word}), do: {x + String.length(word) - 1, y}
def get_max_size(%BoardWord{x: x, y: y, direction: :v, word: word}), do: {x, y + String.length(word) - 1}
@doc """
Given a BoardWord, determine if a letter exists at a given coordinate. If a letter is present, it is returned.
Otherwise `:none` is returned.
"""
def get_letter_at(%BoardWord{direction: :h, y: y1}, _x, y2) when y1 != y2, do: :none
def get_letter_at(%BoardWord{direction: :v, x: x1}, x2, _y) when x1 != x2, do: :none
def get_letter_at(%BoardWord{direction: :h, word: word, x: x1, size: size}, x2, _y) when x2 >= x1 and x2 < size + x1,
do: String.at(word, x2 - x1)
def get_letter_at(%BoardWord{direction: :v, word: word, y: y1, size: size}, _x, y2) when y2 >= y1 and y2 < size + y1,
do: String.at(word, y2 - y1)
def get_letter_at(_, _, _), do: :none
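# For example (hypothetical word placed horizontally at the origin):
#
#   word = BoardWord.new(0, 0, :h, "cat")
#   BoardWord.get_letter_at(word, 1, 0) #=> "a"
#   BoardWord.get_letter_at(word, 1, 1) #=> :none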
def get_word(%BoardWord{word: word}) do
word
end
end
|
lib/letter_lines_elixir/board_word.ex
| 0.833968 | 0.619975 |
board_word.ex
|
starcoder
|
defmodule HTTPEventServer.Endpoint do
@moduledoc """
Forward requests to this router by `forward "/message", to: Messenger.Router`. This will capture
POST requests on the `/message/:task` route calling the task specified. In your config, you will need
to add the following options:
```
config :http_event_server,
api_token: System.get_env("API_TOKEN"),
task_module: YourTaskModule
```
Optionally, you can configure the response when a task is not found with the `fail_on_no_event_found`
config option. Setting it to `true` makes the router return an error response instead of a plain message.
You will need to define a task module that has a `handle(message, data)` function. This function
needs to return either {:ok, %{}} or {:error, %{}}. If not, an error response is returned automatically.
You can send messages to this router by sending a `POST` request with a `JSON` body and an
`Authorization Bearer token` header.
"""
use Plug.Router
require Logger
plug Plug.Parsers, parsers: [:json],
pass: ["text/*"],
json_decoder: Poison
plug(:set_event_module)
plug(:match)
plug(:dispatch)
defp set_event_module(%{private: %{otp_app: otp_app}} = conn, _opts) do
conn
|> put_private(:event_module, Application.get_env(otp_app, :http_event_module))
end
defp set_event_module(conn, _opts), do: conn
match "/test/:task" do
run_tasks(conn, task)
end
match "/:task" do
case HTTPEventServer.Authorize.authorize(conn) do
%Plug.Conn{state: :sent} = conn -> conn
conn -> run_tasks(conn, task)
end
end
match "" do
send_event_response({:error, error_response("nil")}, conn, "nil")
end
defp run_tasks(conn, task) do
Logger.debug "Running event", [event: task]
send_event_response(attempt_to_send_task(conn.private, task, conn.body_params), conn, task)
end
defp send_unless(%{state: :sent} = conn, _code, _message), do: conn
defp send_unless(conn, code, message) when is_binary(message) do
conn
|> put_resp_content_type("application/json")
|> send_resp(code, message)
end
defp send_unless(conn, code, message) do
conn
|> put_resp_content_type("application/json")
|> send_resp(code, Poison.encode!(message))
end
defp attempt_to_send_task(opts, task, %{"_json" => data}), do: attempt_to_send_task(opts, task, data)
defp attempt_to_send_task(%{event_module: event_module}, task, data) do
event_module.handle(task, data)
end
defp attempt_to_send_task(opts, task, data) do
case Application.get_env(:http_event_server, :event_module) do
nil -> {:http_event_server_error, "No event module defined"}
module -> attempt_to_send_task(%{event_module: module}, task, data)
end
end
defp send_event_response(:not_defined, %{params: %{"task" => event}} = conn, _) do
send_unless(conn, 400, "Event '#{event}' not captured")
end
defp send_event_response({:error, resp}, conn, _) do
send_unless(conn, 400, resp)
end
defp send_event_response({:ok, resp}, conn, _) do
send_unless(conn, 200, resp)
end
defp send_event_response({:http_event_server_error, resp}, conn, task) do
send_unless(conn, 400, %{error: "Invalid return value from task", task: task, response: resp, method: conn.method})
end
defp send_event_response(resp, conn, _) do
send_unless(conn, 200, resp)
end
defp error_response(event) do
if Application.get_env(:http_event_server, :fail_on_no_event_found) do
{:http_event_server_error, "Event \"#{inspect event}\" not captured"}
else
"Event '#{event}' not captured"
end
end
end
|
lib/endpoint.ex
| 0.714927 | 0.623663 |
endpoint.ex
|
starcoder
|
defmodule Astarte.Flow.Blocks.Container do
@moduledoc """
This is a producer_consumer block that sends messages to a Docker container.
Messages are sent and received via AMQP.
The block will manage the creation of the Container in a Kubernetes cluster using
the Astarte Kubernetes Operator.
"""
use GenStage
require Logger
alias Astarte.Flow.Blocks.Container.RabbitMQClient
alias Astarte.Flow.Message
alias Astarte.Flow.K8s.ContainerBlock
@retry_timeout_ms 10_000
defmodule State do
@moduledoc false
defstruct [
:id,
:amqp_client,
:channel,
:amqp_config,
:config,
:channel_ref,
:conn_ref,
:image,
:type,
:inbound_routing_key,
:outbound_routing_key,
outbound_queues: [],
inbound_queues: []
]
end
@doc """
Starts the `Container` block.
## Options
* `:id` (required) - The id of the block, it has to be unique between all container blocks.
* `:image` (required) - The tag of the docker image that will be used by the block.
* `:config` - The Flow configuration that will be passed to the container.
* `:connection` - A keyword list containing the options that will be passed to
`AMQP.Connection.open/1`. Defaults to `[]`.
* `:amqp_client` - A module that implements the
`Astarte.Flow.Blocks.Container.AMQPClient` behaviour and that will
be used to connect to AMQP. Defaults to
`Astarte.Flow.Blocks.Container.RabbitMQClient`
"""
@spec start_link(options) :: GenServer.on_start()
when options: [option],
option:
{:id, String.t()}
| {:image, String.t()}
| {:type, :producer | :consumer | :producer_consumer}
| {:config, map()}
| {:connection, keyword()}
| {:amqp_client, module()}
def start_link(opts) do
GenStage.start_link(__MODULE__, opts)
end
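# Example (hypothetical values) of starting a producer_consumer block:
#
#   {:ok, pid} =
#     Container.start_link(
#       id: "my-block-1",
#       image: "example/flow-block:latest",
#       type: :producer_consumer,
#       config: %{"key" => "value"}
#     )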
def get_container_block(pid) do
# We use a long timeout since the block can be busy connecting to RabbitMQ
GenStage.call(pid, :get_container_block, 30_000)
end
@impl true
def init(opts) do
Process.flag(:trap_exit, true)
id = Keyword.fetch!(opts, :id)
image = Keyword.fetch!(opts, :image)
type = Keyword.fetch!(opts, :type)
amqp_client = Keyword.get(opts, :amqp_client, RabbitMQClient)
config = Keyword.get(opts, :config) || %{}
amqp_opts = Keyword.put(opts, :queue_prefix, id)
with {:ok, amqp_config} <- amqp_client.generate_config(amqp_opts) do
state = %State{
id: id,
type: type,
amqp_client: amqp_client,
channel: nil,
amqp_config: amqp_config,
config: config,
channel_ref: nil,
conn_ref: nil,
image: image
}
send(self(), :connect)
case type do
:producer ->
{:producer, state, dispatcher: GenStage.BroadcastDispatcher}
:producer_consumer ->
{:producer_consumer, state, dispatcher: GenStage.BroadcastDispatcher}
:consumer ->
{:consumer, state}
end
else
{:error, reason} ->
{:stop, reason}
_ ->
{:stop, :init_error}
end
end
@impl true
def handle_events(events, _from, state) do
%State{
amqp_client: amqp_client,
channel: channel,
outbound_routing_key: routing_key
} = state
# TODO: this should check if the channel is currently up and accumulate
# the events to publish them later otherwise
for %Message{} = event <- events do
payload =
Message.to_map(event)
|> Jason.encode!()
amqp_client.publish(channel, "", routing_key, payload)
end
{:noreply, [], state}
end
@impl true
def handle_info(:connect, state) do
{:noreply, [], connect(%{state | channel: nil})}
end
def handle_info({:DOWN, ref, :process, _pid, _reason}, %{conn_ref: ref} = state) do
{:noreply, [], connect(%{state | channel: nil})}
end
def handle_info({:DOWN, ref, :process, _pid, _reason}, %{channel_ref: ref} = state) do
{:noreply, [], connect(%{state | channel: nil})}
end
def handle_info({:basic_consume_ok, %{consumer_tag: _tag}}, state) do
{:noreply, [], state}
end
def handle_info({:basic_cancel, _}, state) do
{:noreply, [], connect(%{state | channel: nil})}
end
def handle_info({:basic_cancel_ok, _}, state) do
# State does not define a :consumer_tag field, so updating it would raise;
# there is nothing to clear here.
{:noreply, [], state}
end
def handle_info({:basic_deliver, payload, meta}, state) do
%State{amqp_client: amqp_client, channel: channel} = state
with {:ok, decoded} <- Jason.decode(payload),
{:ok, message} <- Message.from_map(decoded) do
amqp_client.ack(channel, meta.delivery_tag)
{:noreply, [message], state}
else
{:error, reason} ->
Logger.warn("Invalid message received: #{inspect(reason)}",
tag: "container_invalid_message"
)
amqp_client.reject(channel, meta.delivery_tag, requeue: false)
{:noreply, [], state}
end
end
@impl true
def handle_call(:get_container_block, _from, %State{channel: nil} = state) do
# We're currently disconnected
{:reply, {:error, :not_connected}, [], state}
end
def handle_call(:get_container_block, _from, state) do
%State{
id: block_id,
image: image,
config: config,
inbound_routing_key: exchange_routing_key,
outbound_queues: [queue]
} = state
container_block = %ContainerBlock{
block_id: block_id,
image: image,
config: config,
exchange_routing_key: exchange_routing_key,
queue: queue,
# TODO: these are random values since we are currently forced to provide them to the struct
cpu_limit: "1",
memory_limit: "2048M",
cpu_requests: "0",
memory_requests: "256M"
}
{:reply, {:ok, container_block}, [], state}
end
defp connect(%State{amqp_client: amqp_client} = state) do
case amqp_client.setup(state.amqp_config, state.type) do
{:ok, result} ->
%{
channel: channel,
outbound_routing_key: outbound_routing_key,
outbound_queues: outbound_queues,
inbound_routing_key: inbound_routing_key,
inbound_queues: inbound_queues
} = result
conn_ref = Process.monitor(channel.conn.pid)
channel_ref = Process.monitor(channel.pid)
for queue <- inbound_queues do
amqp_client.consume(channel, queue)
end
%{
state
| channel: channel,
outbound_routing_key: outbound_routing_key,
outbound_queues: outbound_queues,
inbound_routing_key: inbound_routing_key,
inbound_queues: inbound_queues,
conn_ref: conn_ref,
channel_ref: channel_ref
}
{:error, reason} ->
Logger.warn(
"Cannot connect to RabbitMQ: #{inspect(reason)}. Retrying in #{@retry_timeout_ms} ms"
)
Process.send_after(self(), :connect, @retry_timeout_ms)
state
end
end
@impl true
def terminate(_reason, %State{channel: channel, amqp_client: amqp_client} = state) do
if channel do
amqp_client.close_connection(channel.conn)
end
{:noreply, state}
end
end
|
lib/astarte_flow/blocks/container.ex
| 0.744285 | 0.433142 |
container.ex
|
starcoder
|
defmodule Liquor.Transformer do
@moduledoc """
Transformer takes a list of search items and transforms each value according to its
type spec, returning `{op, key, value}` triples with properly cast values.
"""
@type spec_item ::
{:apply, module, atom, list} |
{:mod, module} |
{:type, atom} |
atom |
((atom, atom, term) -> {:ok, {atom, atom, term}} | :error)
@type type_spec :: %{ atom => spec_item }
@spec transform_value(term, Liquor.op, atom, spec_item) :: {:ok, {atom, atom, term}} | :error
defp transform_value(value, op, key, {:mod, module}) do
case module.cast(value) do
{:ok, new_value} -> {:ok, {op, key, new_value}}
:error -> :error
{:error, _} = err -> err
end
end
defp transform_value(value, op, key, {:apply, m, f, a}) when is_atom(m) and is_atom(f) do
:erlang.apply(m, f, [op, key, value | a])
end
defp transform_value(value, op, key, f) when is_function(f) do
f.(op, key, value)
end
defp transform_value(value, op, key, :boolean) do
case Liquor.Transformers.Boolean.transform(value) do
{:ok, new_value} -> {:ok, {op, key, new_value}}
:error -> :error
end
end
defp transform_value(value, op, key, {:type, :date}) do
case Liquor.Transformers.Date.transform(value) do
{:ok, new_value} -> {:ok, {op, key, new_value}}
:error -> :error
end
end
defp transform_value(value, op, key, {:type, :naive_datetime}) do
case Liquor.Transformers.NaiveDateTime.transform(value) do
{:ok, new_value} -> {:ok, {op, key, new_value}}
:error -> :error
end
end
defp transform_value(value, op, key, {:type, :time}) do
case Liquor.Transformers.Time.transform(value) do
{:ok, new_value} -> {:ok, {op, key, new_value}}
:error -> :error
end
end
defp transform_value(value, op, key, {:type, type}) when is_atom(type) do
case Ecto.Type.cast(type, value) do
{:ok, new_value} -> {:ok, {op, key, new_value}}
:error -> :error
end
end
@doc """
Transforms the given keywords or pairs into their search specs
It is expected that the values provided have already been whitelisted or renamed as needed
"""
@spec transform(list | map, type_spec) :: list
def transform(values, spec) do
Enum.reduce(values, [], fn
{op, key, value}, acc ->
if Map.has_key?(spec, key) do
{:ok, new_value} = transform_value(value, op, key, spec[key])
[new_value | acc]
else
acc
end
{key, value}, acc ->
if Map.has_key?(spec, key) do
{:ok, new_value} = transform_value(value, :match, key, spec[key])
[new_value | acc]
else
acc
end
value, acc ->
if spec.keyword do
{:ok, new_value} = transform_value(value, :match, nil, spec._)
[new_value | acc]
else
acc
end
end)
|> Enum.reverse()
end
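# Example (hypothetical spec; assumes Liquor.Transformers.Boolean casts "true"):
#
#   spec = %{age: {:type, :integer}, active: :boolean}
#   Liquor.Transformer.transform([{:match, :age, "42"}, {:active, "true"}], spec)
#   #=> [{:match, :age, 42}, {:match, :active, true}]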
end
|
lib/liquor/transformer.ex
| 0.786049 | 0.682686 |
transformer.ex
|
starcoder
|
defmodule D08.Challenge do
@moduledoc """
Solution sketch:
As described in part 1 we can identify 1, 4, 7 and 8 by counting their string length. We now have a map of
1 => cf
4 => bcdf
7 => acf
8 => abcdefg
(these are ideal values - they can change but have the same meaning!)
We can use the values of one and four to find the other numbers by looking at the shared segments. For example, if
the segments of four are ALL included in a tested number with segment length 6, we can deduce it is a 9. 0 is missing
the middle segment, and 6 is missing the upper-right segment that is shared with four.
The same strategy is applied to find the other missing values and is described in the functions themselves.
"""
require Logger
def run(1) do
signals = Utils.read_input(8, &map_input/1)
result =
signals
|> Stream.map(&decode_output/1)
|> Stream.map(&Integer.to_string/1)
|> Stream.flat_map(&String.codepoints/1)
|> Stream.filter(fn char -> char in ["1", "4", "7", "8"] end)
|> Enum.count()
Logger.info("The numbers 1, 4, 7, 8 appeared #{result} times")
end
def run(2) do
signals = Utils.read_input(8, &map_input/1)
result =
signals
|> Stream.map(&decode_output/1)
|> Enum.sum()
Logger.info("The sum of decoded numbers is #{result}")
end
defp map_input(line) do
[pattern, output] = String.split(line, " | ") |> Enum.map(&String.split(&1, " "))
%{
pattern: pattern,
output: output
}
end
defp decode_output(signal) do
pattern_by_length = Enum.group_by(signal.pattern, &String.length/1)
output =
signal.output
|> Enum.map(fn digit -> String.codepoints(digit) |> Enum.sort() |> Enum.join() end)
0..9
|> Enum.map(fn val -> {val, nil} end)
|> Map.new()
|> assign_unique(pattern_by_length)
|> assign_five_segments(pattern_by_length[5])
|> assign_six_segments(pattern_by_length[6])
|> invert_mapping()
|> decode_output(output)
end
defp invert_mapping(map) do
map
|> Stream.map(fn {key, val} -> {String.codepoints(val) |> Enum.sort() |> Enum.join(), key} end)
|> Map.new()
end
defp decode_output(decoded_map, output) do
output
|> Enum.map(&Map.get(decoded_map, &1))
|> Enum.join()
|> String.to_integer()
end
defp assign_unique(decoded_map, pattern) do
decoded_map
|> Map.put(1, Map.get(pattern, 2) |> hd)
|> Map.put(4, Map.get(pattern, 4) |> hd)
|> Map.put(7, Map.get(pattern, 3) |> hd)
|> Map.put(8, Map.get(pattern, 7) |> hd)
end
defp assign_five_segments(decoded_map, pattern) do
# 3 = 1 is completely inside it
# 5 = (4 - 1) is completely in it
# 2 otherwise
four = decoded_map[4] |> String.codepoints()
one = decoded_map[1] |> String.codepoints()
four_minus_one = four -- one
three = Enum.find(pattern, &digit_in?(&1, one))
pattern = List.delete(pattern, three)
five = Enum.find(pattern, &digit_in?(&1, four_minus_one))
pattern = List.delete(pattern, five)
decoded_map
|> Map.put(2, pattern |> hd)
|> Map.put(3, three)
|> Map.put(5, five)
end
defp assign_six_segments(decoded_map, pattern) do
# 9 = 4 is completely inside it
# 0 = is not 9, but has 1 in it
# 6 otherwise
four = decoded_map[4] |> String.codepoints()
one = decoded_map[1] |> String.codepoints()
nine = Enum.find(pattern, &digit_in?(&1, four))
pattern = List.delete(pattern, nine)
zero = Enum.find(pattern, &digit_in?(&1, one))
pattern = List.delete(pattern, zero)
decoded_map
|> Map.put(0, zero)
|> Map.put(6, pattern |> hd)
|> Map.put(9, nine)
end
defp digit_in?(single_pattern, digit) do
single_pattern = String.codepoints(single_pattern)
digit
|> Enum.all?(fn char -> char in single_pattern end)
end
end
|
lib/d08/challenge.ex
| 0.6508 | 0.607489 |
challenge.ex
|
starcoder
|