defmodule Adventofcode.Day18Duet.RecoveredFrequency do
defstruct registers: %{}, instructions: [], current_index: 0, last_played: nil, recovered: false
def recovered_frequency_value(input) do
%__MODULE__{instructions: parse(input)}
|> process_instructions()
|> result()
end
defp result(%{last_played: nil} = state), do: state
defp result(%{last_played: last_played}), do: last_played
defp parse(input) do
input
|> String.split("\n", trim: true)
|> Enum.map(&parse_line/1)
end
defp parse_line(line) do
[name | args] = String.split(line, " ")
[name | Enum.map(args, &parse_arg/1)]
end
defp parse_arg(arg) do
case Integer.parse(arg) do
{num, ""} -> num
_ -> arg
end
end
defp process_instructions(state) do
cond do
state.recovered -> state
state.current_index < 0 -> state
state.current_index >= length(state.instructions) -> state
true -> do_process_instruction(state)
end
end
defp do_process_instruction(state) do
instruction = Enum.at(state.instructions, state.current_index)
{index_add, updates} = process(instruction, state)
updates
|> Enum.reduce(state, fn {k, v}, acc -> Map.put(acc, k, v) end)
|> Map.update(:current_index, index_add, &(&1 + index_add))
|> process_instructions()
end
defp process([name, register, value], state) when is_binary(value) do
process([name, register, Map.get(state.registers, value, 0)], state)
end
defp process(["set", register, value], state) do
registers = Map.put(state.registers, register, value)
{1, registers: registers}
end
defp process(["add", register, value], state) do
registers = Map.update(state.registers, register, value, &(&1 + value))
{1, registers: registers}
end
defp process(["mul", register, value], state) do
registers = Map.update(state.registers, register, 0, &(&1 * value))
{1, registers: registers}
end
defp process(["mod", register, value], state) do
registers = Map.update(state.registers, register, value, &rem(&1, value))
{1, registers: registers}
end
defp process(["jgz", register, value], state) do
if Map.get(state.registers, register) <= 0 do
{1, []}
else
{value, []}
end
end
defp process(["snd", register], state) do
value = Map.get(state.registers, register)
{1, last_played: value}
end
defp process(["rcv", register], state) do
if Map.get(state.registers, register) == 0 do
{1, []}
else
{0, recovered: true}
end
end
end
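# Usage sketch (the example duet program from Advent of Code 2017, day 18;
# `recovered_frequency_value/1` takes the raw puzzle text and returns the last
# played frequency at the time of the first successful `rcv`):
#
#     input = """
#     set a 1
#     add a 2
#     mul a a
#     mod a 5
#     snd a
#     set a 0
#     rcv a
#     jgz a -1
#     set a 1
#     jgz a -2
#     """
#
#     Adventofcode.Day18Duet.RecoveredFrequency.recovered_frequency_value(input)
#     #=> 4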
lib/day_18_duet_recovered_frequency.ex
defmodule Spidey.Filter.DefaultFilter do
@moduledoc """
An implementation of the `Spidey.Filter` behaviour which:
1. Transforms relative URLs into absolute URLs.
2. Strips the query parameters of all URLs, to simplify uniqueness checks.
3. Strips the trailing slashes of all URLs.
4. Rejects all URLs from a different domain than the seed's.
5. Rejects invalid URLs.
6. Rejects static resources based on different criteria, such as WordPress paths and file types.
This behaviour requires the option `:seed`.
"""
@behaviour Spidey.Filter
@impl true
@spec filter_urls(list(String.t()), Spidey.Filter.filter_options()) :: Enumerable.t()
def filter_urls(urls, opts) do
seed = get_seed_option(opts)
urls
|> process_relative_urls(seed)
|> strip_query_params()
|> strip_trailing_slashes()
|> reject_non_domain_urls(seed)
|> reject_invalid_urls()
|> reject_static_resources()
end
@spec strip_query_params(Enumerable.t()) :: Enumerable.t()
def strip_query_params(urls) do
Stream.map(urls, fn s -> String.split(s, "?") |> List.first() end)
end
@spec strip_trailing_slashes(Enumerable.t()) :: Enumerable.t()
def strip_trailing_slashes(urls) do
Stream.map(urls, fn s -> String.replace_trailing(s, "/", "") end)
end
@spec reject_invalid_urls(Enumerable.t()) :: Enumerable.t()
def reject_invalid_urls(urls) do
urls
|> Stream.reject(&is_nil/1)
|> Stream.reject(&(&1 == ""))
end
@spec reject_static_resources(Enumerable.t()) :: Enumerable.t()
def reject_static_resources(urls) do
urls
# Wordpress links
|> Stream.reject(&String.contains?(&1, "wp-content"))
|> Stream.reject(&String.contains?(&1, "wp-json"))
|> Stream.reject(&String.contains?(&1, "wprm_print"))
# images & other assets
|> Stream.reject(&String.ends_with?(&1, ".jpg"))
|> Stream.reject(&String.ends_with?(&1, ".jpeg"))
|> Stream.reject(&String.ends_with?(&1, ".png"))
|> Stream.reject(&String.ends_with?(&1, ".gif"))
|> Stream.reject(&String.ends_with?(&1, ".pdf"))
|> Stream.reject(&String.ends_with?(&1, ".xml"))
|> Stream.reject(&String.ends_with?(&1, ".php"))
|> Stream.reject(&String.ends_with?(&1, ".js"))
|> Stream.reject(&String.ends_with?(&1, ".css"))
# amp.dev
|> Stream.reject(&String.ends_with?(&1, "amp/"))
|> Stream.reject(&String.ends_with?(&1, "amp"))
# RSS
|> Stream.reject(&String.ends_with?(&1, "feed/"))
|> Stream.reject(&String.ends_with?(&1, "feed"))
end
@spec reject_non_domain_urls(Enumerable.t(), String.t()) :: Enumerable.t()
def reject_non_domain_urls(urls, seed) do
%URI{host: seed_host} = URI.parse(seed)
Stream.reject(urls, fn url -> URI.parse(url).host != seed_host end)
end
@spec process_relative_urls(Enumerable.t(), String.t()) :: Enumerable.t()
def process_relative_urls(urls, seed) do
urls
|> Stream.map(fn url -> to_absolute_url(url, seed) end)
|> Stream.reject(&(&1 == ""))
end
## Private.
defp to_absolute_url(url, seed) do
with %URI{scheme: s, host: h, path: p} <- URI.parse(url),
%URI{scheme: seed_scheme, host: seed_host} <- URI.parse(seed),
scheme <- scheme(s, seed_scheme),
{:ok, host} <- host(h, seed_host),
path <- path(p) do
scheme <> host <> path
else
_ -> ""
end
end
defp scheme("https", _), do: "https://"
defp scheme(_, s) when s != nil and s != "", do: s <> "://"
defp scheme(_, _), do: "http://"
defp host(h, _) when h != nil and h != "", do: {:ok, h}
defp host(_, s) when s != nil and s != "", do: {:ok, s}
defp host(_, _), do: {:error, "nil or empty host"}
defp path(nil), do: "/"
defp path(""), do: "/"
defp path(p), do: p
defp get_seed_option(opts) do
case Keyword.get(opts, :seed) do
seed when is_binary(seed) ->
seed
_ ->
raise """
The `:seed` option is compulsary for the Spidey.Filter.DefaultFilter
to work. Make sure that it's passed with the options and that it is a
string.
"""
end
end
end
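# Usage sketch (hypothetical URLs; `:seed` is required and filtering is lazy,
# so the stream is materialised with `Enum.to_list/1`):
#
#     ["/about", "https://example.com/post?page=2", "https://other.com/"]
#     |> Spidey.Filter.DefaultFilter.filter_urls(seed: "https://example.com")
#     |> Enum.to_list()
#     #=> ["https://example.com/about", "https://example.com/post"]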
lib/spidey/filter/default_filter.ex
defmodule Univrse.Envelope do
@moduledoc """
An Envelope is a structure for encoding any arbitrary data payload for data
interchange and/or storage.
An Envelope consists of a set of headers and a data payload. Optionally one or
more `t:Univrse.Signature.t/0` structures may be used to protect data integrity with
digital signature and MAC algorithms. And optionally, one or more `t:Univrse.Recipient.t/0`
structures may be used to ensure confidentiality of the data payload using
encryption algorithms.
A Univrse Envelope can be serialised into several formats appropriate for data
interchange or storage.
* CBOR encoding
* String encoding
* Bitcoin script
"""
alias Univrse.{Header, Key, Signature, Recipient}
import Univrse.Util, only: [tag_binary: 1, untag: 1]
defdelegate decrypt(env, key, opts \\ []), to: Recipient
defdelegate encrypt(env, key, headers, opts \\ []), to: Recipient
defdelegate sign(env, key, headers \\ %{}), to: Signature
defdelegate verify(env, key), to: Signature
defstruct header: %Header{},
payload: nil,
signature: nil,
recipient: nil
@typedoc "Envelope struct"
@type t :: %__MODULE__{
header: Header.t,
payload: any,
signature: Signature.t | list(Signature.t) | nil,
recipient: Recipient.t | list(Recipient.t) | nil
}
@typedoc "Envelope encoding"
@type encoding :: :cbor | :base64
@base64regex ~r/^([a-zA-Z0-9_-]+\.?){2,4}$/
@univrse_prefix "UNIV"
@doc """
Decodes the given binary into an Envelope structure.
Automatically detects the correct encoding from the binary, assuming it is
a supported `t:encoding()`.
"""
@spec decode(binary) :: {:ok, t} | {:error, any}
def decode(binary) do
case Regex.match?(@base64regex, binary) do
true -> decode(binary, :base64)
false -> decode(binary, :cbor)
end
end
@doc """
Decodes the given binary into an Envelope structure, using the specified
`t:encoding()`.
"""
@spec decode(binary, encoding) :: {:ok, t} | {:error, any}
def decode(data, :cbor) when is_binary(data) do
with {:ok, parts, _rest} <- CBOR.decode(data) do
env = parts
|> untag()
|> from_list()
{:ok, env}
end
end
def decode(data, :base64) when is_binary(data) do
parts = String.split(data, ".")
with {:ok, parts} <- b64_decode_all(parts),
{:ok, parts} <- cbor_decode_all(parts)
do
env = parts
|> untag()
|> from_list()
{:ok, env}
end
end
@doc """
Decodes the given CBOR encoded payload and puts it in the envelope struct.
"""
@spec decode_payload(t, binary) :: {:ok, t} | {:error, any}
def decode_payload(%__MODULE__{} = env, payload) do
with {:ok, payload, _rest} <- CBOR.decode(payload) do
{:ok, Map.put(env, :payload, payload)}
end
end
@doc """
Decrypts the envelope payload by first decrypting the content key for the
recipient at the specified index with the given key.
The envelope must contain multiple recipients.
"""
@spec decrypt_at(t, integer, Key.t, keyword) :: {:ok, t} | {:error, any}
def decrypt_at(env, idx, key, opts \\ [])
def decrypt_at(%__MODULE__{recipient: recipients} = env, idx, %Key{} = key, opts)
when is_list(recipients)
and is_integer(idx)
and idx < length(recipients)
do
recipient = Enum.at(recipients, idx)
with {:ok, %Recipient{key: key}} <- decrypt(recipient, key, opts) do
decrypt(env, key, opts)
end
end
def decrypt_at(%__MODULE__{}, _idx, %Key{}, _opts),
do: {:error, "Invalid recipient index"}
@doc """
Decodes the Envelope into a binary using the specified `t:encoding()`.
Default encoding is `:cbor`.
"""
@spec encode(t, encoding) :: binary | String.t
def encode(env, encoding \\ :cbor)
def encode(%__MODULE__{} = env, :cbor) do
env
|> Map.update!(:payload, &tag_binary/1)
|> to_list()
|> CBOR.encode()
end
def encode(%__MODULE__{} = env, :base64) do
env
|> Map.update!(:payload, &tag_binary/1)
|> to_list
|> Enum.map(&CBOR.encode/1)
|> Enum.map(& Base.url_encode64(&1, padding: false))
|> Enum.join(".")
end
@doc """
CBOR encodes the Envelope payload and returns the encoded binary.
"""
@spec encode_payload(t) :: binary
def encode_payload(%__MODULE__{payload: payload}) do
payload
|> tag_binary()
|> CBOR.encode()
end
@doc """
Parses the given Bitcoin Script and returns an Envelope structure.
"""
@spec parse_script(BSV.Script.t) :: {:ok, t} | {:error, any}
def parse_script(%BSV.Script{chunks: chunks}) do
with [_ | _] = parts <- slice_univrse_op_return(chunks),
{:ok, parts} <- cbor_decode_all(parts)
do
env = parts
|> untag()
|> from_list()
{:ok, env}
else
_ ->
{:error, "Invalid Univrse script"}
end
end
@doc """
Pushes the given `t:Signature.t` or `t:Recipient.t` into the Envelope.
"""
@spec push(t, Signature.t | Recipient.t) :: t
def push(%__MODULE__{} = env, %Signature{} = signature) do
case env.signature do
nil ->
Map.put(env, :signature, signature)
%Signature{} ->
update_in(env.signature, & [&1, signature])
sigs when is_list(sigs) ->
update_in(env.signature, & &1 ++ [signature])
end
end
def push(%__MODULE__{} = env, %Recipient{} = recipient) do
case env.recipient do
nil ->
Map.put(env, :recipient, recipient)
%Recipient{} ->
update_in(env.recipient, & [&1, recipient])
recipients when is_list(recipients) ->
update_in(env.recipient, & &1 ++ [recipient])
end
end
@doc """
Encodes the envelope into a valid Univrse OP_RETURN script and returns the
script.
"""
@spec to_script(t, boolean) :: BSV.Script.t
def to_script(env, false_return \\ true)
def to_script(%__MODULE__{} = env, true) do
env
|> to_script(false)
|> Map.update!(:chunks, & [:OP_FALSE | &1])
end
def to_script(%__MODULE__{} = env, false) do
chunks = env
|> Map.update!(:payload, &tag_binary/1)
|> to_list()
|> Enum.map(&CBOR.encode/1)
%BSV.Script{chunks: [:OP_RETURN, @univrse_prefix | chunks]}
end
@doc """
Wraps the given payload and headers in a new Envelope structure.
"""
@spec wrap(any, map | Header.t) :: t
def wrap(payload, headers \\ %{})
def wrap(payload, %Header{} = header),
do: %__MODULE__{header: header, payload: payload}
def wrap(payload, %{} = headers),
do: %__MODULE__{header: Header.wrap(headers), payload: payload}
# Converts the given list of elements to a Envelope struct.
defp from_list([header, payload]),
do: %__MODULE__{header: Header.wrap(header), payload: payload}
defp from_list([header, payload, signature]),
do: from_list([header, payload]) |> Map.put(:signature, decode_signature(signature))
defp from_list([header, payload, signature, recipient]),
do: from_list([header, payload, signature]) |> Map.put(:recipient, decode_recipient(recipient))
# Decodes the signatures
defp decode_signature([headers, sig]) when is_map(headers) and is_binary(sig),
do: Signature.wrap(sig, headers)
defp decode_signature(signatures) when is_list(signatures),
do: Enum.map(signatures, &decode_signature/1)
defp decode_signature(nil), do: nil
# Decodes the recipients
defp decode_recipient([headers, cek]) when is_map(headers),
do: Recipient.wrap(cek, headers)
defp decode_recipient(signatures) when is_list(signatures),
do: Enum.map(signatures, &decode_recipient/1)
defp decode_recipient(nil), do: nil
# Converts the envelope to a list of elements prior to encoding.
defp to_list(%__MODULE__{signature: nil, recipient: nil} = env),
do: [env.header, env.payload]
defp to_list(%__MODULE__{signature: signature, recipient: nil} = env)
when not is_nil(signature),
do: [env.header, env.payload, signature]
defp to_list(%__MODULE__{signature: signature, recipient: recipient} = env)
when not is_nil(recipient),
do: [env.header, env.payload, signature, recipient]
# Base64 decodes all parts
defp b64_decode_all(parts, result \\ [])
defp b64_decode_all([head | tail], result) do
with {:ok, data} <- Base.url_decode64(head, padding: false) do
b64_decode_all(tail, [data | result])
end
end
defp b64_decode_all([], result), do: {:ok, Enum.reverse(result)}
# CBOR decodes all parts
defp cbor_decode_all(parts, result \\ [])
defp cbor_decode_all([head | tail], result) do
with {:ok, data, ""} <- CBOR.decode(head) do
cbor_decode_all(tail, [data | result])
end
end
defp cbor_decode_all([], result), do: {:ok, Enum.reverse(result)}
# Slices the script chunks to return the envelope elements prior to decoding
defp slice_univrse_op_return([]), do: []
defp slice_univrse_op_return([:OP_RETURN, @univrse_prefix | chunks]), do: chunks
defp slice_univrse_op_return([_ | chunks]), do: slice_univrse_op_return(chunks)
end
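# Usage sketch (hypothetical payload and header; the exact wire format depends
# on the CBOR and `Univrse.Header` implementations this module relies on):
#
#     env = Univrse.Envelope.wrap("hello world", %{proto: "univrse.test"})
#     encoded = Univrse.Envelope.encode(env, :base64)
#     {:ok, decoded} = Univrse.Envelope.decode(encoded)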
lib/univrse/envelope.ex
defmodule Serum.Build.Pass1 do
@moduledoc """
This module takes care of the first pass of site building process.
In pass 1, the following modules are run sequentially or in parallel. See the
docs for each module for more information.
* `Serum.Build.Pass1.PageBuilder`
* `Serum.Build.Pass1.PostBuilder`
After executing these two modules, the following tasks are performed:
1. Generates a tag map, whose keys are all tags existing in the project and
whose values are lists of `Serum.PostInfo` objects that have the key as one of
their tags.
2. Generates a site context, which is a list of variable bindings used when
rendering templates.
"""
alias Serum.Build
alias Serum.Build.Pass1.PageBuilder
alias Serum.Build.Pass1.PostBuilder
alias Serum.Error
alias Serum.PageInfo
alias Serum.PostInfo
@doc "Starts the first pass of the building process in given build mode."
@spec run(Build.mode, Build.state) :: Error.result(Build.state)
def run(build_mode, state)
def run(:parallel, state) do
IO.puts "\u26a1 \x1b[1mStarting parallel build...\x1b[0m"
t1 = Task.async fn -> PageBuilder.run :parallel, state end
t2 = Task.async fn -> PostBuilder.run :parallel, state end
with {:ok, pages} <- Task.await(t1),
{:ok, posts} <- Task.await(t2)
do
{:ok, update_state(pages, posts, state)}
else
{:error, _} = error -> error
end
end
def run(:sequential, state) do
IO.puts "\u231b \x1b[1mStarting sequential build...\x1b[0m"
with {:ok, pages} <- PageBuilder.run(:sequential, state),
{:ok, posts} <- PostBuilder.run(:sequential, state)
do
{:ok, update_state(pages, posts, state)}
else
{:error, _} = error -> error
end
end
@spec update_state([PageInfo.t], [PostInfo.t], Build.state) :: Build.state
defp update_state(pages, posts, state) do
pages = Enum.sort pages, & &1.order < &2.order
posts = Enum.sort posts, & &1.raw_date > &2.raw_date
tag_map = get_tag_map posts
tags = Enum.map tag_map, fn {k, v} -> {k, Enum.count(v)} end
proj = state.project_info
site_ctx = [
site_name: proj.site_name, site_description: proj.site_description,
author: proj.author, author_email: proj.author_email,
pages: pages, posts: posts, tags: tags
]
state
|> Map.put(:site_ctx, site_ctx)
|> Map.put(:tag_map, tag_map)
end
@spec get_tag_map([PostInfo.t]) :: map
defp get_tag_map(all_posts) do
all_tags =
Enum.reduce all_posts, MapSet.new(), fn info, acc ->
MapSet.union acc, MapSet.new(info.tags)
end
for tag <- all_tags, into: %{} do
posts = Enum.filter all_posts, &(tag in &1.tags)
{tag, posts}
end
end
end
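# Shape sketch (hypothetical posts; illustrates the tag map and site context
# described in the moduledoc):
#
#     # given post_a tagged ["elixir"] and post_b tagged ["elixir", "web"],
#     # the generated tag map is:
#     %{"elixir" => [post_a, post_b], "web" => [post_b]}
#
#     # and the :tags binding in the site context becomes {tag, count} pairs:
#     [{"elixir", 2}, {"web", 1}]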
lib/serum/build/pass_1.ex
defmodule Estructura.Void do
@moduledoc false
use Estructura, access: false, enumerable: false, collectable: false
defstruct foo: 42, bar: "", baz: %{inner_baz: 42}, zzz: nil
end
defmodule Estructura.LazyInst do
@moduledoc false
use Estructura, access: :lazy
def parse_int(bin), do: with {int, _} <- Integer.parse(bin), do: {:ok, int}
def current_time("42"), do: {:ok, DateTime.utc_now()}
defstruct __lazy_data__: "42",
foo: Estructura.Lazy.new(&Estructura.LazyInst.parse_int/1),
bar: Estructura.Lazy.new(&Estructura.LazyInst.current_time/1, 100)
end
defmodule Estructura.Full do
@moduledoc "Full Example"
@foo_range 0..1_000
use Estructura, access: true, coercion: [:foo], validation: true, enumerable: true, collectable: :bar,
generator: [
foo: {StreamData, :integer, [@foo_range]},
bar: {StreamData, :string, [:alphanumeric]},
baz: {StreamData, :fixed_map,
[[key1: {StreamData, :integer}, key2: {StreamData, :integer}]]},
zzz: &Estructura.Full.zzz_generator/0
]
defstruct foo: 42, bar: "", baz: %{inner_baz: 42}, zzz: nil
require Integer
@doc false
def zzz_generator do
StreamData.filter(StreamData.integer(), &Integer.is_even/1)
end
@impl Estructura.Full.Coercible
def coerce_foo(value) when is_integer(value), do: {:ok, value}
def coerce_foo(value) when is_float(value), do: {:ok, round(value)}
def coerce_foo(value) when is_binary(value) do
case Integer.parse(value) do
{value, ""} -> {:ok, value}
_ -> {:error, "#{value} is not a valid integer value"}
end
end
def coerce_foo(value), do: {:error, "Cannot coerce value given for `foo` field (#{inspect(value)})"}
@impl Estructura.Full.Validatable
def validate_foo(value) when value >= 0, do: {:ok, value}
def validate_foo(_), do: {:error, ":foo must be positive"}
@impl Estructura.Full.Validatable
def validate_bar(value), do: {:ok, value}
@impl Estructura.Full.Validatable
def validate_baz(value), do: {:ok, value}
@impl Estructura.Full.Validatable
def validate_zzz(value), do: {:ok, value}
end
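# Usage sketch (exercises only the coercion and validation callbacks defined
# above; the rest of the struct behaviour is generated by `use Estructura`):
#
#     Estructura.Full.coerce_foo("42")
#     #=> {:ok, 42}
#
#     Estructura.Full.coerce_foo(:nope)
#     #=> {:error, "Cannot coerce value given for `foo` field (:nope)"}
#
#     Estructura.Full.validate_foo(-1)
#     #=> {:error, ":foo must be positive"}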
defmodule Estructura.Collectable.List do
@moduledoc false
use Estructura, collectable: :into
defstruct into: []
end
defmodule Estructura.Collectable.Map do
@moduledoc false
use Estructura, collectable: :into
defstruct into: %{}
end
defmodule Estructura.Collectable.MapSet do
@moduledoc false
use Estructura, collectable: :into
defstruct into: MapSet.new()
end
defmodule Estructura.Collectable.Bitstring do
@moduledoc false
use Estructura, collectable: :into
defstruct into: ""
end
test/support/structs.ex
defmodule Tw.V1_1.TwitterAPIError do
@moduledoc """
`Exception` which wraps an error response from Twitter API.
See [the Twitter API documentation](https://developer.twitter.com/docs/basics/response-codes) for details.
"""
defexception [:message, :errors, :response]
alias Tw.HTTP.Response
@type t :: %__MODULE__{
__exception__: true,
message: binary(),
errors: list(%{message: binary(), code: pos_integer()}),
response: Response.t()
}
@spec from_response(Response.t(), iodata() | nil) :: t
@doc false
def from_response(response, decoded_body)
def from_response(response, %{errors: errors}) when errors != [] do
[%{message: message} | _] = errors
exception(message: message, errors: errors, response: response)
end
def from_response(response, _), do: exception(message: "Unknown Twitter API Error", errors: [], response: response)
@spec rate_limit_exceeded?(t()) :: boolean
def rate_limit_exceeded?(%__MODULE__{} = error) do
error.response.status == 429 && Enum.any?(error.errors, &(&1.code == 88))
end
for {name, code} <- [
no_user_matched?: 17,
resource_not_found?: 34,
user_not_found?: 50,
member_not_found?: 108,
subscriber_not_found?: 109
] do
@spec unquote(name)(t()) :: boolean
def unquote(name)(%__MODULE__{} = error) do
Enum.any?(error.errors, &(&1.code == unquote(code)))
end
end
@doc """
Return `DateTime` when the rate limit is reset.
If the given error is not related to rate limiting, return `nil`.
"""
@spec rate_limit_reset_at(t()) :: DateTime.t() | nil
def rate_limit_reset_at(%__MODULE__{} = error) do
with [v] <- Response.get_header(error.response, "x-rate-limit-reset"),
{unix, ""} <- Integer.parse(v),
{:ok, dt} <- DateTime.from_unix(unix, :second) do
dt
else
_ -> nil
end
end
@doc """
Return time until rate limit is reset in milliseconds.
If the given error is not related to rate limiting, return `nil`.
## Examples
TwitterAPIError.rate_limit_reset_in(error)
|> Process.sleep()
"""
@spec rate_limit_reset_in(t()) :: non_neg_integer() | nil
def rate_limit_reset_in(%__MODULE__{} = error, base_fn \\ fn -> DateTime.utc_now() |> DateTime.to_unix(:second) end) do
with [v] <- Response.get_header(error.response, "x-rate-limit-reset"),
{target, ""} <- Integer.parse(v) do
:timer.seconds(target - base_fn.())
else
_ -> nil
end
end
end
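# Usage sketch (hypothetical `error` built by `from_response/2`; only functions
# defined above plus `Process.sleep/1` are used):
#
#     if Tw.V1_1.TwitterAPIError.rate_limit_exceeded?(error) do
#       error
#       |> Tw.V1_1.TwitterAPIError.rate_limit_reset_in()
#       |> Process.sleep()
#     end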
lib/tw/v1_1/twitter_api_error.ex
defmodule PlugDeviseSession.Rememberable do
@moduledoc """
Helps issuing and reading Devise's remember user cookie.
Important module assumptions:
* All `Plug.Conn` structures should have a valid `secret_key_base` set.
* User authorization info is a three element tuple of the form: `{id, auth_key, timestamp}`.
* Remember timestamps are required to be in the `Etc/UTC` time zone.
"""
@type id :: integer
@type auth_key :: String.t()
@type timestamp :: DateTime.t()
@type scope :: atom | String.t()
@type user_auth_info :: {id, auth_key, timestamp}
alias Plug.Conn
alias Plug.Crypto.KeyGenerator
alias PlugRailsCookieSessionStore.MessageVerifier
@cookie_attributes [:domain, :max_age, :path, :secure, :http_only, :extra]
@default_opts [
key_digest: :sha,
key_iterations: 1000,
key_length: 64,
serializer: ExMarshal,
signing_salt: "signed cookie"
]
@doc """
Removes the remember user cookie.
## Options
* `:domain` - domain the remember user cookie was issued in.
"""
@spec forget_user(Plug.Conn.t(), scope, domain: String.t()) :: Plug.Conn.t()
def forget_user(conn, scope \\ :user, opts \\ []) do
cookie_opts =
[http_only: true]
|> Keyword.merge(opts)
|> Keyword.take(@cookie_attributes)
Conn.delete_resp_cookie(conn, "remember_#{scope}_token", cookie_opts)
end
@doc """
Sets a signed remember user cookie on the connection.
## Options
* `:domain` - domain to issue the remember user cookie in.
* `:extra` - lets you specify arbitrary options that are added to the cookie.
* `:key_digest` - digest algorithm to use for deriving the signing key. Accepts any value supported by `Plug.Crypto.KeyGenerator.generate/3`, defaults to `:sha`.
* `:key_iterations` - number of iterations for signing key derivation. Accepts any value supported by `Plug.Crypto.KeyGenerator.generate/3`, defaults to 1000.
* `:key_length` - desired length of derived signing key. Accepts any value supported by `Plug.Crypto.KeyGenerator.generate/3`, defaults to 64.
* `:max_age` - desired validity of remember user cookie in seconds, defaults to 2 weeks.
* `:path` - send cookie only on matching URL path.
* `:secure` - a secure cookie is only sent to the server over the HTTPS protocol.
* `:serializer` - module used for cookie data serialization, defaults to `PlugDeviseSession.Marshal` which in turn uses `ExMarshal` (a Rails-compatible marshal module).
* `:signing_salt` - salt used for signing key derivation. Should be set to the value used by Rails, defaults to "signed cookie".
"""
@spec remember_user(
Plug.Conn.t(),
user_auth_info,
scope,
domain: String.t(),
key_digest: atom,
key_iterations: integer,
key_length: integer,
max_age: integer,
path: String.t(),
secure: boolean,
serializer: module,
signing_salt: binary
) :: Plug.Conn.t()
def remember_user(conn, {id, auth_key, timestamp}, scope \\ :user, opts \\ []) do
options = Keyword.merge(@default_opts, opts)
serializer = Keyword.fetch!(options, :serializer)
signing_key = generate_key(conn, options)
cookie_value =
[[id], auth_key, encode_timestamp(timestamp)]
|> serializer.encode()
|> MessageVerifier.sign(signing_key)
|> URI.encode_www_form()
cookie_opts =
[http_only: true, max_age: 1_209_600]
|> Keyword.merge(options)
|> Keyword.take(@cookie_attributes)
Conn.put_resp_cookie(conn, "remember_#{scope}_token", cookie_value, cookie_opts)
end
defp encode_timestamp(%DateTime{time_zone: "Etc/UTC", utc_offset: 0} = timestamp) do
microseconds = DateTime.to_unix(timestamp, :microsecond)
Float.to_string(microseconds / 1_000_000.0)
end
@doc """
Recovers user authentication info from remember cookie.
## Options
* `:key_digest` - digest algorithm to use for deriving the signing key. Accepts any value supported by `Plug.Crypto.KeyGenerator.generate/3`, defaults to `:sha`.
* `:key_iterations` - number of iterations for signing key derivation. Accepts any value supported by `Plug.Crypto.KeyGenerator.generate/3`, defaults to 1000.
* `:key_length` - desired length of derived signing key. Accepts any value supported by `Plug.Crypto.KeyGenerator.generate/3`, defaults to 64.
* `:serializer` - module used for cookie data serialization, defaults to `PlugDeviseSession.Marshal` which in turn uses `ExMarshal` (a Rails-compatible marshal module).
* `:signing_salt` - salt used for signing key derivation. Should be set to the value used by Rails, defaults to "signed cookie".
"""
@spec recover_user(
Plug.Conn.t(),
scope,
key_digest: atom,
key_iterations: integer,
key_length: integer,
serializer: module,
signing_salt: binary
) :: {:ok, user_auth_info} | {:error, :unauthorized}
def recover_user(conn, scope \\ :user, opts \\ []) do
options = Keyword.merge(@default_opts, opts)
serializer = Keyword.fetch!(options, :serializer)
verification_key = generate_key(conn, options)
with cookie_value when is_binary(cookie_value) <- conn.cookies["remember_#{scope}_token"],
decoded_cookie_value <- URI.decode_www_form(cookie_value),
{:ok, contents} <- MessageVerifier.verify(decoded_cookie_value, verification_key) do
[[id], auth_key, timestamp] = serializer.decode(contents)
{:ok, {id, auth_key, decode_timestamp(timestamp)}}
else
_ -> {:error, :unauthorized}
end
end
defp decode_timestamp(timestamp) when is_binary(timestamp) do
{seconds, ""} = Float.parse(timestamp)
microseconds = seconds * 1_000_000
microseconds
|> trunc()
|> DateTime.from_unix!(:microsecond)
end
defp generate_key(%Plug.Conn{secret_key_base: secret_key_base}, opts) do
signing_salt = Keyword.fetch!(opts, :signing_salt)
key_options = [
digest: Keyword.fetch!(opts, :key_digest),
iterations: Keyword.fetch!(opts, :key_iterations),
length: Keyword.fetch!(opts, :key_length)
]
KeyGenerator.generate(secret_key_base, signing_salt, key_options)
end
end
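# Usage sketch (hypothetical user id and auth key; `conn` must carry a valid
# `secret_key_base`, and the cookie is read back on a later request):
#
#     # issuing the cookie in a response
#     conn = PlugDeviseSession.Rememberable.remember_user(conn, {42, "auth-key", DateTime.utc_now()})
#
#     # recovering the user on a subsequent request
#     case PlugDeviseSession.Rememberable.recover_user(conn) do
#       {:ok, {id, auth_key, remembered_at}} -> {id, auth_key, remembered_at}
#       {:error, :unauthorized} -> nil
#     end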
lib/plug_devise_session/rememberable.ex
defmodule SendGrid.Marketing.Contacts do
@moduledoc """
Module to interact with contacts.
See SendGrid's [Contact API Docs](https://sendgrid.api-docs.io/v3.0/contacts)
for more detail.
"""
@base_api_url "/v3/marketing/contacts"
@doc """
Adds one or multiple contacts to one or multiple lists available in Marketing Campaigns.
When adding a contact, an email address must be provided at a minimum.
The process is asynchronous and SendGrid will return a Job ID to check the status.
## Options
* `:api_key` - API key to use with the request.
## Examples
{:ok, job_id} = add(["111-222-333"], [%{email: "<EMAIL>", first_name: "Test"}])
"""
@spec add(list(String.t()), list(), [SendGrid.api_key()]) ::
{:ok, String.t()} | {:error, [String.t(), ...]}
def add(list_ids, contacts, opts \\ []) when is_list(opts) do
data = %{list_ids: list_ids, contacts: contacts}
with {:ok, response} <- SendGrid.put(@base_api_url, data, opts) do
handle_result(response)
end
end
@doc """
Deletes a contact.
The process is asynchronous and SendGrid will return a Job ID to check the status.
## Options
* `:api_key` - API key to use with the request.
"""
@spec delete(list(), [SendGrid.api_key()]) :: {:ok, String.t()} | {:error, any()}
def delete(contact_ids, opts \\ []) when is_list(opts) do
ids = Enum.join(contact_ids, ",")
url = "#{@base_api_url}?ids=#{ids}"
with {:ok, response} <- SendGrid.delete(url, opts) do
handle_result(response)
end
end
@doc """
Deletes all contacts.
The process is asynchronous and SendGrid will return a Job ID to check the status.
## Options
* `:api_key` - API key to use with the request.
"""
@spec delete_all([SendGrid.api_key()]) :: {:ok, String.t()} | {:error, any()}
def delete_all(opts \\ []) when is_list(opts) do
url = "#{@base_api_url}?delete_all_contacts=true"
with {:ok, response} <- SendGrid.delete(url, opts) do
handle_result(response)
end
end
# Handles the result when it's valid.
defp handle_result(%{body: %{"job_id" => job_id}}) do
{:ok, job_id}
end
# Handles the result when errors are present.
defp handle_result(%{body: %{"error_count" => count} = body}) when count > 0 do
errors = Enum.map(body["errors"], & &1["message"])
{:error, errors}
end
defp handle_result(data), do: {:error, data}
end
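# Usage sketch (hypothetical list id, contact and API key environment variable):
#
#     {:ok, job_id} =
#       SendGrid.Marketing.Contacts.add(
#         ["list-id-123"],
#         [%{email: "user@example.com", first_name: "Test"}],
#         api_key: System.fetch_env!("SENDGRID_API_KEY")
#       )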
lib/sendgrid/marketing_campaigns/contacts.ex
require Utils
require Program
defmodule D5 do
@moduledoc """
--- Day 5: Sunny with a Chance of Asteroids ---
You're starting to sweat as the ship makes its way toward Mercury. The Elves suggest that you get the air conditioner working by upgrading your ship computer to support the Thermal Environment Supervision Terminal.
The Thermal Environment Supervision Terminal (TEST) starts by running a diagnostic program (your puzzle input). The TEST diagnostic program will run on your existing Intcode computer after a few modifications:
First, you'll need to add two new instructions:
Opcode 3 takes a single integer as input and saves it to the position given by its only parameter. For example, the instruction 3,50 would take an input value and store it at address 50.
Opcode 4 outputs the value of its only parameter. For example, the instruction 4,50 would output the value at address 50.
Programs that use these instructions will come with documentation that explains what should be connected to the input and output. The program 3,0,4,0,99 outputs whatever it gets as input, then halts.
Second, you'll need to add support for parameter modes:
Each parameter of an instruction is handled based on its parameter mode. Right now, your ship computer already understands parameter mode 0, position mode, which causes the parameter to be interpreted as a position - if the parameter is 50, its value is the value stored at address 50 in memory. Until now, all parameters have been in position mode.
Now, your ship computer will also need to handle parameters in mode 1, immediate mode. In immediate mode, a parameter is interpreted as a value - if the parameter is 50, its value is simply 50.
Parameter modes are stored in the same value as the instruction's opcode. The opcode is a two-digit number based only on the ones and tens digit of the value, that is, the opcode is the rightmost two digits of the first value in an instruction. Parameter modes are single digits, one per parameter, read right-to-left from the opcode: the first parameter's mode is in the hundreds digit, the second parameter's mode is in the thousands digit, the third parameter's mode is in the ten-thousands digit, and so on. Any missing modes are 0.
Parameters that an instruction writes to will never be in immediate mode.
Finally, some notes:
It is important to remember that the instruction pointer should increase by the number of values in the instruction after the instruction finishes. Because of the new instructions, this amount is no longer always 4.
Integers can be negative: 1101,100,-1,4,0 is a valid program (find 100 + -1, store the result in position 4).
The TEST diagnostic program will start by requesting from the user the ID of the system to test by running an input instruction - provide it 1, the ID for the ship's air conditioner unit.
It will then perform a series of diagnostic tests confirming that various parts of the Intcode computer, like parameter modes, function correctly. For each test, it will run an output instruction indicating how far the result of the test was from the expected value, where 0 means the test was successful. Non-zero outputs mean that a function is not working correctly; check the instructions that were run before the output instruction to see which one failed.
Finally, the program will output a diagnostic code and immediately halt. This final output isn't an error; an output followed immediately by a halt means the program finished. If all outputs were zero except the diagnostic code, the diagnostic program ran successfully.
After providing 1 to the only input instruction and passing all the tests, what diagnostic code does the program produce?
--- Part Two ---
The air conditioner comes online! Its cold air feels good for a while, but then the TEST alarms start to go off. Since the air conditioner can't vent its heat anywhere but back into the spacecraft, it's actually making the air inside the ship warmer.
Instead, you'll need to use the TEST to extend the thermal radiators. Fortunately, the diagnostic program (your puzzle input) is already equipped for this. Unfortunately, your Intcode computer is not.
Your computer is only missing a few opcodes:
Opcode 5 is jump-if-true: if the first parameter is non-zero, it sets the instruction pointer to the value from the second parameter. Otherwise, it does nothing.
Opcode 6 is jump-if-false: if the first parameter is zero, it sets the instruction pointer to the value from the second parameter. Otherwise, it does nothing.
Opcode 7 is less than: if the first parameter is less than the second parameter, it stores 1 in the position given by the third parameter. Otherwise, it stores 0.
Opcode 8 is equals: if the first parameter is equal to the second parameter, it stores 1 in the position given by the third parameter. Otherwise, it stores 0.
Like all instructions, these instructions need to support parameter modes as described above.
Normally, after an instruction is finished, the instruction pointer increases by the number of values in that instruction. However, if the instruction modifies the instruction pointer, that value is used and the instruction pointer is not automatically increased.
This time, when the TEST diagnostic program runs its input instruction to get the ID of the system to test, provide it 5, the ID for the ship's thermal radiator controller. This diagnostic test suite only outputs one number, the diagnostic code.
What is the diagnostic code for system ID 5?
"""
@behaviour Day
def solve(input) do
input = input |> Utils.to_ints()
%Program{output: [part_1 | _]} = Program.run(Program.new(input, 1))
%Program{output: [part_2]} = Program.run(Program.new(input, 5))
{
part_1,
part_2
}
end
end
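# Usage sketch (the echo program from the puzzle text; assumes `Program.new/2`
# takes the intcode list plus the input value and `Program.run/1` returns the
# struct with its `output` list, as used in `solve/1` above):
#
#     %Program{output: [diagnostic | _]} = Program.run(Program.new([3, 0, 4, 0, 99], 1))
#     diagnostic
#     #=> 1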
lib/days/05.ex
defmodule FileSize.Ecto.ByteWithUnit do
@moduledoc """
An Ecto type that represents a file size in bytes, supporting storage of
different units. The value is stored as a map in the database (i.e. jsonb when
using PostgreSQL).
## Example
defmodule MySchema do
use Ecto.Schema
schema "my_table" do
field :file_size, FileSize.Ecto.ByteWithUnit
end
end
"""
use Ecto.Type
alias FileSize.Byte
alias FileSize.Ecto.Byte, as: ByteType
alias FileSize.Ecto.Utils
@impl true
def type, do: :map
@impl true
def cast(term)
def cast(%Byte{} = size) do
{:ok, size}
end
def cast(%{"bytes" => bytes, "unit" => unit}) do
cast(%{bytes: bytes, unit: unit})
end
def cast(%{"value" => value, "unit" => unit}) do
cast(%{value: value, unit: unit})
end
def cast(%{bytes: bytes, unit: unit}) when is_integer(bytes) do
with {:ok, unit} <- parse_unit(unit) do
{:ok, FileSize.from_bytes(bytes, unit)}
end
end
def cast(%{value: value, unit: unit}) do
with {:ok, value} <- Utils.assert_value(value),
{:ok, unit} <- parse_unit(unit) do
{:ok, FileSize.new(value, unit)}
end
end
def cast(str) when is_binary(str) do
case FileSize.parse(str) do
{:ok, %Byte{} = size} -> {:ok, size}
_ -> :error
end
end
def cast(term) do
ByteType.cast(term)
end
@impl true
def dump(term)
def dump(%Byte{} = size) do
{:ok,
%{
"bytes" => FileSize.to_integer(size),
"unit" => Utils.serialize_unit(size.unit)
}}
end
def dump(_term), do: :error
@impl true
def embed_as(_format), do: :dump
@impl true
defdelegate equal?(size, other_size), to: Utils
@impl true
def load(term)
def load(%{"bytes" => bytes, "unit" => unit_str})
when is_integer(bytes) and is_binary(unit_str) do
with {:ok, unit} <- parse_unit(unit_str) do
{:ok, FileSize.from_bytes(bytes, unit)}
end
end
def load(_term), do: :error
defp parse_unit(unit) do
Utils.parse_unit_for_type(unit, Byte)
end
end
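# Usage sketch (cast/dump round trip; assumes the "kb" unit is recognised by
# the underlying file_size library, so 2 kB dumps as 2000 bytes):
#
#     {:ok, size} = FileSize.Ecto.ByteWithUnit.cast(%{value: 2, unit: "kb"})
#     FileSize.Ecto.ByteWithUnit.dump(size)
#     #=> {:ok, %{"bytes" => 2000, "unit" => "kb"}}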
lib/file_size/ecto/byte_with_unit.ex
defmodule Scholar.Metrics.Distance do
@moduledoc """
Distance metrics between 1-D tensors.
"""
import Nx.Defn
import Scholar.Shared
@doc """
Standard euclidean distance.
$$
D(x, y) = \\sqrt{\\sum_i (x_i - y_i)^2}
$$
## Examples
iex> x = Nx.tensor([1, 2])
iex> y = Nx.tensor([3, 2])
iex> Scholar.Metrics.Distance.euclidean(x, y)
#Nx.Tensor<
f32
2.0
>
iex> x = Nx.tensor([1, 2])
iex> y = Nx.tensor([1, 2])
iex> Scholar.Metrics.Distance.euclidean(x, y)
#Nx.Tensor<
f32
0.0
>
iex> x = Nx.tensor([1, 2])
iex> y = Nx.tensor([1, 2, 3])
iex> Scholar.Metrics.Distance.euclidean(x, y)
** (ArgumentError) expected input shapes to be equal, got {2} != {3}
"""
@spec euclidean(Nx.t(), Nx.t()) :: Nx.t()
defn euclidean(x, y) do
assert_same_shape!(x, y)
diff = x - y
if Nx.all(diff == 0) do
0.0
else
Nx.LinAlg.norm(diff)
end
end
@doc """
Squared euclidean distance.
$$
D(x, y) = \\sum_i (x_i - y_i)^2
$$
## Examples
iex> x = Nx.tensor([1, 2])
iex> y = Nx.tensor([3, 2])
iex> Scholar.Metrics.Distance.squared_euclidean(x, y)
#Nx.Tensor<
f32
4.0
>
iex> x = Nx.tensor([1, 2])
iex> y = Nx.tensor([1.0, 2.0])
iex> Scholar.Metrics.Distance.squared_euclidean(x, y)
#Nx.Tensor<
f32
0.0
>
iex> x = Nx.tensor([1, 2])
iex> y = Nx.tensor([1, 2, 3])
iex> Scholar.Metrics.Distance.squared_euclidean(x, y)
** (ArgumentError) expected input shapes to be equal, got {2} != {3}
"""
@spec squared_euclidean(Nx.t(), Nx.t()) :: Nx.t()
defn squared_euclidean(x, y) do
assert_same_shape!(x, y)
x
|> Nx.subtract(y)
|> Nx.power(2)
|> Nx.sum()
|> as_float()
end
@doc """
Manhattan, taxicab, or l1 distance.
$$
D(x, y) = \\sum_i |x_i - y_i|
$$
## Examples
iex> x = Nx.tensor([1, 2])
iex> y = Nx.tensor([3, 2])
iex> Scholar.Metrics.Distance.manhattan(x, y)
#Nx.Tensor<
f32
2.0
>
iex> x = Nx.tensor([1.0, 2.0])
iex> y = Nx.tensor([1, 2])
iex> Scholar.Metrics.Distance.manhattan(x, y)
#Nx.Tensor<
f32
0.0
>
iex> x = Nx.tensor([1, 2])
iex> y = Nx.tensor([1, 2, 3])
iex> Scholar.Metrics.Distance.manhattan(x, y)
** (ArgumentError) expected input shapes to be equal, got {2} != {3}
"""
@spec manhattan(Nx.t(), Nx.t()) :: Nx.t()
defn manhattan(x, y) do
assert_same_shape!(x, y)
x
|> Nx.subtract(y)
|> Nx.abs()
|> Nx.sum()
|> as_float()
end
@doc """
Chebyshev or l-infinity distance.
$$
D(x, y) = \\max_i |x_i - y_i|
$$
## Examples
iex> x = Nx.tensor([1, 2])
iex> y = Nx.tensor([3, 2])
iex> Scholar.Metrics.Distance.chebyshev(x, y)
#Nx.Tensor<
f32
2.0
>
iex> x = Nx.tensor([1, 2])
iex> y = Nx.tensor([1, 2])
iex> Scholar.Metrics.Distance.chebyshev(x, y)
#Nx.Tensor<
f32
0.0
>
iex> x = Nx.tensor([1, 2])
iex> y = Nx.tensor([1, 2, 3])
iex> Scholar.Metrics.Distance.chebyshev(x, y)
** (ArgumentError) expected input shapes to be equal, got {2} != {3}
"""
@spec chebyshev(Nx.t(), Nx.t()) :: Nx.t()
defn chebyshev(x, y) do
assert_same_shape!(x, y)
x
|> Nx.subtract(y)
|> Nx.LinAlg.norm(ord: :inf)
end
@doc """
Minkowski distance.
$$
D(x, y) = \\left(\\sum_i |x_i - y_i|^p\\right)^{\\frac{1}{p}}
$$
## Examples
iex> x = Nx.tensor([1, 2])
iex> y = Nx.tensor([5, 2])
iex> Scholar.Metrics.Distance.minkowski(x, y)
#Nx.Tensor<
f32
4.0
>
iex> x = Nx.tensor([1, 2])
iex> y = Nx.tensor([1, 2])
iex> Scholar.Metrics.Distance.minkowski(x, y)
#Nx.Tensor<
f32
0.0
>
iex> x = Nx.tensor([1, 2])
iex> y = Nx.tensor([1, 2, 3])
iex> Scholar.Metrics.Distance.minkowski(x, y)
** (ArgumentError) expected input shapes to be equal, got {2} != {3}
"""
@spec minkowski(Nx.t(), Nx.t(), integer()) :: Nx.t()
defn minkowski(x, y, p \\ 2) do
assert_same_shape!(x, y)
x
|> Nx.subtract(y)
|> Nx.abs()
|> Nx.power(p)
|> Nx.sum()
|> Nx.power(1.0 / p)
end
@doc """
Cosine distance.
$$
1 - \\frac{u \\cdot v}{\\|u\\|_2 \\|v\\|_2}
$$
## Examples
iex> x = Nx.tensor([1, 2])
iex> y = Nx.tensor([5, 2])
iex> Scholar.Metrics.Distance.cosine(x, y)
#Nx.Tensor<
f32
0.2525906562805176
>
iex> x = Nx.tensor([1, 2])
iex> y = Nx.tensor([1, 2])
iex> Scholar.Metrics.Distance.cosine(x, y)
#Nx.Tensor<
f32
0.0
>
iex> x = Nx.tensor([1, 2])
iex> y = Nx.tensor([1, 2, 3])
iex> Scholar.Metrics.Distance.cosine(x, y)
** (ArgumentError) expected input shapes to be equal, got {2} != {3}
"""
@spec cosine(Nx.t(), Nx.t()) :: Nx.t()
defn cosine(x, y) do
assert_same_shape!(x, y)
norm_x = Nx.LinAlg.norm(x)
norm_y = Nx.LinAlg.norm(y)
cond do
norm_x == 0.0 and norm_y == 0.0 ->
0.0
norm_x == 0.0 or norm_y == 0.0 ->
1.0
true ->
numerator = Nx.dot(x, y)
denominator = norm_x * norm_y
1.0 - numerator / denominator
end
end
defnp as_float(x) do
transform(x, fn x ->
x_f = Nx.Type.to_floating(x.type)
Nx.as_type(x, x_f)
end)
end
end
lib/scholar/metrics/distance.ex
defmodule Resourceful.Type do
@moduledoc """
`Resourceful.Type` is a struct and set of functions for representing and
mapping internal data structures to data structures more appropriate for edge
clients (e.g. API clients). As a result, field names are _always_ strings and
not atoms.
In addition to mapping data field names, it validates that client
representations conform to various constraints set by the type. These include
transversing field graphs, limiting which fields can be queried, and how deep
down the graph queries can go.
The naming conventions and some of the design philosophy are geared heavily toward
APIs over HTTP and the [JSON:API specification](https://jsonapi.org/). However,
there is nothing JSON-API specific about types.
## Fields
A "field" refers to an attribute or a relationship on a given type. These
share a common namespace and in some respects can be treated interchangeably.
There is a distinction between "local" fields and "query" fields.
Local fields are those which are directly on the current type. For example, a
type of album may have local attributes such as a title and release date and
local relationships such as artist and songs.
Query fields are a combination of local fields and fields anywhere in the
resource graph. So, in the above example, query fields would include something
like an album's title and the related artist's name.
### Relationships and Registries
In order to use relationships, a type must be included in a
`Resourceful.Registry` and, in general, types are meant to be used in
conjunction with a registry. In most functions dealing with relationships and
related types, a type's `name` (just a string) is used rather than passing
a type struct. The struct itself will be looked up from the registry.
### Queries
The term "query" is used to refer to filtering and sorting collections of
resources. Since queries ultimately work on attributes, fields eligible to be
queried must be attributes. You could sort songs by an album's title but you
wouldn't reasonably sort them by an album resource.
Fields given for a query can be represented as a list of strings or as a dot
separated string. So, when looking at a song, the artist's name could be
accessed through `"album.artist.name"` or `["album", "artist", "name"]`. As
with many things related to types, string input from API sources is going to
be the most common form of input.
## "Root" Types
Resource graphs are put together from the perspective of a "root" type. Any
type can be a root type. In the example of an API, if you were looking at an
album, it would be the root with its songs and artist further down the graph.
## Building Types
In addition to functions that actually do something with types, there are a
number of functions used for transforming types such as `max_depth/2`. As
types are designed with registries in mind, types can be built at compile-time
using transformation functions in a manner that may be easier to read than
`new/2` with options.
## Ecto Schemas
There is some overlap with `Ecto.Schema`. In fact, attribute types use the
same type system. While schemas can be used for edge data, primarily when
coupled with change sets, types are more specifically tailored to the task.
Types, combined with `Resourceful.Collection`, can be used to construct a
queryable API with concerns that are specific to working with the edge.
The query format is specifically limited for this purpose.
"""
defmodule FieldError do
defexception message: "field could not be found"
end
defmodule InvalidName do
defexception message: "names cannot contain periods (\".\")"
end
defmodule InvalidMapTo do
defexception message: "only atoms and strings may be used for mapping"
end
import Map, only: [put: 3]
alias __MODULE__
alias __MODULE__.{Attribute, GraphedField, Relationship}
alias Resourceful.Error
alias Resourceful.Collection.{Filter, Sort}
@typedoc """
A field is an attribute or a relationship. They share the same namespace
within a type.
"""
@type field() :: %Attribute{} | %Relationship{}
@type field_graph() :: %{String.t() => %GraphedField{}}
@type field_name() :: String.t() | [String.t()]
@type queryable() :: %Attribute{} | %GraphedField{field: %Attribute{}}
@enforce_keys [
:cache,
:fields,
:id,
:max_filters,
:max_sorters,
:meta,
:name,
:max_depth
]
defstruct @enforce_keys ++ [:registry]
@doc """
Creates a new `Resourceful.Type` with valid attributes.
See functions of the same name for more information on key functionality.
For fields, see `Resourceful.Type.Attribute` and
`Resourceful.Type.Relationship`.
"""
@spec new(String.t(), keyword()) :: %Type{}
def new(name, opts \\ []) do
fields = opt_fields(Keyword.get(opts, :fields, []))
%Type{
cache: %{},
fields: fields,
id: opt_id(Keyword.get(opts, :id, default_id(fields))),
meta: opt_meta(Keyword.get(opts, :meta, %{})),
max_depth: opt_max(Keyword.get(opts, :max_depth, 1)),
max_filters: opt_max_or_nil(Keyword.get(opts, :max_filters, 4)),
max_sorters: opt_max_or_nil(Keyword.get(opts, :max_sorters, 2)),
name: validate_name!(name)
}
end
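# Building sketch (the type name and meta value are hypothetical; only
# transformation functions defined in this module are used):
#
#     "albums"
#     |> Resourceful.Type.new(max_filters: 10)
#     |> Resourceful.Type.max_depth(2)
#     |> Resourceful.Type.meta(:description, "Albums exposed at the edge")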
defp default_id(%{"id" => %Attribute{}}), do: "id"
defp default_id(_), do: nil
defp opt_field(%mod{} = field)
when mod in [Attribute, Relationship],
do: field
defp opt_fields(%{} = fields) do
fields
|> Map.values()
|> opt_fields()
end
defp opt_fields(fields) do
fields
|> Enum.map(&opt_field/1)
|> Enum.reduce(%{}, fn field, map -> put(map, field.name, field) end)
end
defp opt_id(nil), do: nil
defp opt_id([id_attribute | []]), do: opt_id(id_attribute)
defp opt_id(id_attribute) when is_atom(id_attribute), do: to_string(id_attribute)
defp opt_id(id_attribute) when is_binary(id_attribute), do: id_attribute
defp opt_max(int) when is_integer(int) and int >= 0, do: int
defp opt_max_or_nil(nil), do: nil
defp opt_max_or_nil(int), do: opt_max(int)
defp opt_meta(%{} = map), do: map
@doc """
Sets a key in the `cache` map. Because types are generally intended to be static
at compile time, it can make sense to cache certain values and have functions
look for cached values in the cache map.
For instance, `finalize/1` creates a `MapSet` for `related_types` which
`related_types/1` will use instead of computing the `MapSet`.
Caches are not meant to be memoized, rather set on a type once it is
considered complete.
"""
@spec cache(%Type{}, atom(), any()) :: %Type{}
def cache(type, key, value) when is_atom(key) do
put_in_struct(type, :cache, key, value)
end
@doc """
Fetches a local attribute or, if a registry is set, a graphed attribute.
"""
@spec fetch_attribute(%Type{}, field_name()) ::
{:ok, %Attribute{} | %GraphedField{field: %Attribute{}}} | Error.t()
def fetch_attribute(type, name) do
fetch_field(type, name, error_type: :attribute_not_found, field_type: [Attribute])
end
@doc """
Fetches a local field or, if a registry is set, a graphed field.
"""
@spec fetch_field(%Type{}, field_name(), keyword()) ::
{:ok, field() | %GraphedField{}} | Error.t()
def fetch_field(type, name, opts \\ [])
def fetch_field(%{registry: nil} = type, name, opts) do
fetch_local_field(type, name, opts)
end
def fetch_field(type, name, opts), do: fetch_graphed_field(type, name, opts)
@doc """
Same as `fetch_field/2` but raises `FieldError` if the field isn't present.
"""
@spec fetch_field!(%Type{}, field_name()) :: field() | %GraphedField{}
def fetch_field!(type, name), do: fetch!(name, fetch_field(type, name))
@doc """
Fetches a field with related graph data using the resource's field graphs.
Unless you have a specific reason for fetching only graphed fields, use
`fetch_field/3` instead.
"""
@spec fetch_graphed_field(%Type{}, field_name(), keyword()) ::
{:ok, %GraphedField{}} | Error.t()
def fetch_graphed_field(type, name, opts \\ [])
def fetch_graphed_field(type, name, opts) when is_list(name) do
fetch_graphed_field(type, string_name(name), opts)
end
def fetch_graphed_field(type, name, opts) do
with {:ok, field_graph} <- field_graph(type),
{:ok, graphed_field} = ok <- Map.fetch(field_graph, name),
true <- field_is?(graphed_field.field, opts) do
ok
else
_ -> not_found_error(type, name, opts)
end
end
@doc """
Same as `fetch_graphed_field/2` but raises `FieldError` if the graphed field
isn't present.
Unless you have a specific reason for fetching only graphed fields, use
`fetch_field!/3` instead.
"""
@spec fetch_graphed_field!(%Type{}, field_name()) :: %GraphedField{}
def fetch_graphed_field!(type, name), do: fetch!(name, fetch_graphed_field(type, name))
@doc """
Fetches a local field by name.
Unless you have a specific reason for fetching local fields, use
`fetch_field/3` instead.
"""
@spec fetch_local_field(%Type{}, String.t(), keyword()) :: {:ok, field()} | Error.t()
def fetch_local_field(type, name, opts \\ []) do
with {:ok, field} = ok <- Map.fetch(type.fields, name),
true <- field_is?(field, opts) do
ok
else
_ -> not_found_error(type, name, opts)
end
end
@doc """
Same as `fetch_local_field/2` but raises `FieldError` if the local field isn't
present.
Unless you have a specific reason for fetching local fields, use
`fetch_field/3` instead.
"""
@spec fetch_local_field!(%Type{}, field_name()) :: %GraphedField{}
def fetch_local_field!(type, name, opts \\ []) do
fetch!(name, fetch_local_field(type, name, opts))
end
@doc """
Fetches another type by name from a type's registry.
"""
@spec fetch_related_type(%Type{}, String.t()) :: {:ok, %Type{}} | Error.t()
def fetch_related_type(%Type{name: name} = type, type_name)
when type_name == name,
do: {:ok, type}
def fetch_related_type(%Type{} = type, type_name) do
with {:ok, registry} <- fetch_registry(type), do: registry.fetch(type_name)
end
@doc """
Fetches a local relationship or, if a registry is set, a graphed relationship.
"""
@spec fetch_relationship(%Type{}, field_name()) ::
{:ok, %Relationship{} | %GraphedField{field: %Relationship{}}} | Error.t()
def fetch_relationship(type, name) do
fetch_field(type, name, error_type: :relationship_not_found, field_type: [Relationship])
end
@doc """
Fetches the field graph for a given type if the type exists and has a
registry.
"""
@spec field_graph(%Type{}) :: field_graph()
def field_graph(type) do
with {:ok, registry} <- fetch_registry(type),
do: registry.fetch_field_graph(type.name)
end
@doc """
Checks if a type has a local field.
"""
@spec has_local_field?(%Type{}, String.t()) :: boolean()
def has_local_field?(%Type{} = type, name), do: Map.has_key?(type.fields, name)
@doc """
Sets the attribute to be used as the ID attribute for a given type. The ID
field has slightly special usage in that extensions will use it for both
identification and equality. There are also conveniences for working directly
with IDs such as `get_id/2`.
A limitation of types is that currently composite ID fields are not supported.
"""
@spec id(%Type{}, String.t()) :: %Type{}
def id(type, id_attribute), do: put(type, :id, opt_id(id_attribute))
@doc """
Validates a field name and returns the mapped key or key path (`map_to`) for that field.
"""
@spec map_field(%Type{}, field_name()) ::
{:ok, [atom() | String.t()]} | Error.t()
def map_field(type, name) do
with {:ok, field_or_graph} <- fetch_field(type, name),
do: {:ok, field_or_graph.map_to}
end
@doc """
Maps the ID value for a given resource. This is just shorthand for using
`map_value/3` on whatever field is designated as the ID.
"""
@spec map_id(any(), %Type{}) :: any()
def map_id(resource, type), do: map_value(resource, type, type.id)
@doc """
Maps a value for a given field name for a resource.
"""
@spec map_value(map(), %Type{}, field_name()) :: any()
def map_value(resource, %Type{} = type, name) do
case map_field(type, name) do
{:ok, path} when is_list(path) -> get_with_path(resource, path)
{:ok, key} -> Map.get(resource, key)
_ -> nil
end
end
defp get_with_path(resource, []), do: resource
defp get_with_path(%{} = resource, [key | path]) do
resource
|> Map.get(key)
|> get_with_path(path)
end
defp get_with_path(_, _), do: nil
@doc """
Takes a mappable resource, a type, and a list of fields. Returns a list of
tuples with the field name and the mapped value. This is returned instead
of a map to preserve the order of the input list. If order is irrelevant, use
`to_map/2` instead.
"""
@spec map_values(map(), %Type{}, [field_name()]) :: [{any(), any()}]
def map_values(resource, type, fields \\ [])
def map_values(resource, type, []) do
map_values(resource, type, Map.keys(type.fields))
end
def map_values(resource, type, fields) when is_list(fields) do
Enum.map(fields, &{&1, map_value(resource, type, &1)})
end
@doc """
Sets `max_depth` on a type.
`max_depth` is specifically a reference to the depth of relationships that
will be transversed. This means the default `max_depth` of `1` would expose
all immediate relationships and their attributes.
For example, a song type with a `max_depth` of `1` would be able to graph
through `album` and query against `album.title` but would not be able to
access `album.artist` or any of its attributes. Increasing the `max_depth` to
`2` would expose `album.artist.name`.
"""
@spec max_depth(%Type{}, integer()) :: %Type{}
def max_depth(type, max_depth), do: put(type, :max_depth, opt_max(max_depth))
@doc """
Sets `max_filters` on a type. This is the total number of filters allowed in
a single query.
"""
@spec max_filters(%Type{}, integer()) :: %Type{}
def max_filters(type, max_filters) do
put(type, :max_filters, opt_max_or_nil(max_filters))
end
@doc """
Sets `max_sorters` on a type. This is the total number of sorts allowed in a
single query.
"""
@spec max_sorters(%Type{}, integer()) :: %Type{}
def max_sorters(type, max_sorters) do
put(type, :max_sorters, opt_max_or_nil(max_sorters))
end
@doc """
Adds a value to the `meta` map. Meta information is not used by types directly
in this module. It is intended to add more information that can be used by
extensions and other implementations. For example, JSON:API resources provide
linkage and describing that linkage is an appropriate use of the meta map.
Cached values should _not_ be put in the meta map. Though both `cache` and
`meta` could essentially be used for the same thing, caches are expected to be
set specially when registering a type in `Resourceful.Registry` because
`without_cache/1` is called before finalizing a type.
"""
@spec meta(%Type{}, atom(), any()) :: %Type{}
def meta(type, key, value) when is_atom(key), do: put_in_struct(type, :meta, key, value)
@doc """
Sets `name` on a type. Name must be strings and _cannot_ contain periods.
Atoms will be automatically converted to strings.
"""
@spec name(%Type{}, String.t()) :: %Type{}
def name(type, name), do: put(type, :name, validate_name!(name))
@doc """
Puts a new field in the `fields` map using the field's name as the
key. This will replace a field of the same name if present.
"""
@spec put_field(%Type{}, field()) :: %Type{}
def put_field(%Type{} = type, %module{} = field)
when module in [Attribute, Relationship],
do: put_in_struct(type, :fields, field.name, field)
@doc """
Sets the `registry` module for a type. In general, this function will be
called by a `Resourceful.Registry` and not directly.
"""
@spec register(%Type{}, module()) :: %Type{}
def register(type, module) when is_atom(module), do: put(type, :registry, module)
@doc """
Returns a name as a dot-separated string.
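For example:

    string_name("title")            #=> "title"
    string_name(["album", "title"]) #=> "album.title"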
"""
@spec string_name(field_name()) :: String.t()
def string_name(name) when is_binary(name), do: name
def string_name(name), do: Enum.join(name, ".")
@doc """
Like `map_values/3`, only it returns a map keyed by field name, with the mapped
values as the map's values.
"""
@spec to_map(any(), %Type{}, list()) :: map()
def to_map(resource, type, field_names \\ []) do
type
|> map_values(resource, field_names)
|> Map.new()
end
@doc """
Validates a single filter on an attribute.
"""
@spec validate_filter(%Type{}, any()) :: {:ok, Filter.t()} | Error.t()
def validate_filter(type, filter) do
with {:ok, {field_name, op, val}} <- Filter.cast(filter),
{:ok, attr_or_graph} <- fetch_attribute(type, field_name),
{:ok, _} = ok <- Attribute.validate_filter(attr_or_graph, op, val),
do: ok
end
@doc """
Returns a valid mapping name for a field. Any atom or string is valid and
should map to whatever the underlying resources will look like.
"""
@spec validate_map_to!(atom() | String.t()) :: atom() | String.t()
def validate_map_to!(map_to)
when is_atom(map_to) or is_binary(map_to),
do: map_to
def validate_map_to!(_), do: raise(InvalidMapTo)
@doc """
Validates that the max number of filters hasn't been exceeded.
"""
@spec validate_max_filters(list(), %Type{}, map()) :: list()
def validate_max_filters(list, type, context \\ %{}) do
check_max(list, type.max_filters, :max_filters_exceeded, context)
end
@doc """
Validates that the max number of sorters hasn't been exceeded.
"""
@spec validate_max_sorters(list(), %Type{}, map()) :: list()
def validate_max_sorters(list, type, context \\ %{}) do
check_max(list, type.max_sorters, :max_sorters_exceeded, context)
end
@doc """
Returns a valid string name for a type or field. Technically any string
without a period is valid, but like most names, don't go nuts with URL
characters, whitespace, etc.
"""
@spec validate_name!(atom() | String.t()) :: String.t()
def validate_name!(name) when is_atom(name) do
name
|> to_string()
|> validate_name!()
end
def validate_name!(name) when is_binary(name) do
if String.match?(name, ~r/\./), do: raise(InvalidName)
name
end
@doc """
Validates a single sorter on an attribute.
"""
@spec validate_sorter(%Type{}, any()) :: {:ok, Sort.t()} | Error.t()
def validate_sorter(type, sorter) do
with {:ok, {order, field_name}} <- Sort.cast(sorter),
{:ok, attr_or_graph} <- fetch_attribute(type, field_name),
{:ok, _} = ok <- Attribute.validate_sorter(attr_or_graph, order),
do: ok
end
@doc """
Returns an existing type with an empty `cache` key.
"""
@spec without_cache(%Type{}) :: %Type{}
def without_cache(%Type{} = type), do: put(type, :cache, %{})
defp check_max(list, max, error_type, context) when length(list) > max do
[Error.with_context(error_type, put(context, :max_allowed, max)) | list]
end
defp check_max(list, _, _, _), do: list
defp fetch!(_, {:ok, ok}), do: ok
defp fetch!(name, {:error, _}) do
raise FieldError, message: "field #{inspect(name)} not found"
end
defp fetch_registry(%{registry: nil} = type) do
type_error(:no_type_registry, type)
end
defp fetch_registry(%{registry: registry}), do: {:ok, registry}
defp field_is?(%module{}, opts) do
module in Keyword.get(opts, :field_type, [Attribute, Relationship])
end
defp field_error(error, type, name, context \\ %{}) do
type_error(error, type, Map.put(context, :key, string_name(name)))
end
defp not_found_error(type, name, opts) do
opts
|> Keyword.get(:error_type, :field_not_found)
|> field_error(type, name)
end
defp put_in_struct(type, struct_key, map_key, value) do
put_in(type, [Access.key(struct_key), map_key], value)
end
defp type_error(error, type, context \\ %{}) do
Error.with_context(error, Map.put(context, :resource_type, type.name))
end
end
|
lib/resourceful/type.ex
| 0.90713 | 0.77586 |
type.ex
|
starcoder
|
defmodule Expostal do
@moduledoc """
Address parsing and expansion module for Openvenue's Libpostal, which parses addresses.
"""
@compile {:autoload, false}
@on_load {:init, 0}
app = Mix.Project.config()[:app]
def init do
path = :filename.join(:code.priv_dir(unquote(app)), 'expostal')
:ok = :erlang.load_nif(path, 0)
end
@doc """
Loads the large dataset from disk for libpostal and prepares it for future calls.
If you do not run this explicitly, then it will be run by `parse_address/1` or `expand_address/1`
on their first run. This is a very slow process (it can take tens of seconds), so if you value
the responsiveness of your application, you can spawn a secondary thread to run this bootstrap
in the background on your application startup:
```
spawn(fn -> Expostal.bootstrap end)
```
This will prevent your IEx shell from hanging during startup and will make the library much more
likely to be ready for immediate usage for your first call.
"""
def bootstrap do
case :erlang.phash2(1, 1) do
0 -> raise "Nif not loaded"
1 -> :ok
2 -> :error
end
end
@doc """
Parse given address into a map of address components
## Examples
iex> Expostal.parse_address("615 Rene Levesque Ouest, Montreal, QC, Canada")
%{city: "montreal", country: "canada", house_number: "615",
road: "rene levesque ouest", state: "qc"}
"""
@spec parse_address(address :: String.t()) :: map
def parse_address(address), do: _parse_address(address)
def _parse_address(_) do
case :erlang.phash2(1, 1) do
0 -> raise "Nif not loaded"
1 -> %{}
end
end
@doc """
Expand given address into a list of expansions
## Examples
iex> Expostal.expand_address("781 Franklin Ave Crown Hts Brooklyn NY")
["781 franklin avenue crown heights brooklyn ny",
"781 franklin avenue crown heights brooklyn new york"]
"""
@spec expand_address(address :: String.t()) :: [String.t()]
def expand_address(address), do: _expand_address(address)
def _expand_address(address) do
case :erlang.phash2(1, 1) do
0 -> raise "Nif not loaded"
1 -> [address]
end
end
@doc """
Returns a tuple with probability of the most probable language
for a given address and a language list
## Examples
iex> Expostal.classify_language("agricola pl.")
{0.508300861587544, ["en", "fr", "es", "de"]}
"""
@spec classify_language(address :: String.t()) :: {float, [String.t()]}
def classify_language(address) do
try do
_classify_language(address)
rescue
_ ->
{:error, :argument_error}
end
end
def _classify_language(address) do
case :erlang.phash2(1, 1) do
0 -> raise "Nif not loaded"
1 -> [address]
end
end
end
|
lib/expostal.ex
| 0.845289 | 0.629718 |
expostal.ex
|
starcoder
|
defmodule Markex.Element do
@moduledoc """
Creating and working with elements for 2D markup
"""
alias __MODULE__
@typedoc """
List of strings, where all string the same length
"""
@type t :: list(String.t())
@doc """
Creates new element using regular string or list of strings
In the latter case, automatically makes all strings the same length.
## Examples
iex> Markex.Element.new("Hello, world!")
["Hello, world!"]
iex> Markex.Element.new(["Hello, world!", "or something idk", "."])
[
"Hello, world! ",
"or something idk",
". "
]
"""
@doc since: "1.0.0"
@spec new(String.t()) :: Element.t()
def new(content) when is_binary(content) do
[content]
end
@doc since: "1.0.0"
@spec new(list(String.t())) :: Element.t()
def new(content) when is_list(content) do
width = Enum.reduce(content, 0, &max(String.length(&1), &2))
Enum.map(content, &String.pad_trailing(&1, width))
end
@doc """
Fills an element of length `w` and height `h` with char `ch`
## Examples
iex> Markex.Element.new("#", 5, 5)
[
"#####",
"#####",
"#####",
"#####",
"#####"
]
"""
@doc since: "1.0.0"
@spec new(String.t(), non_neg_integer(), non_neg_integer()) :: Element.t()
def new(_ch, 0, _h) do
[]
end
def new(ch, w, h) do
ch
|> String.duplicate(w)
|> List.duplicate(h)
end
@doc """
Return string representation of `element`
## Examples
iex> Markex.Element.new("#", 2, 2) |> Markex.Element.to_string()
"##\\n##"
"""
@doc since: "1.0.0"
@spec to_string(Element.t()) :: String.t()
def to_string(element) do
Enum.join(element, "\n")
end
@doc """
The correct way to get the height of an `element`
Also see `Markex.Element.size/1`
"""
@doc since: "1.0.0"
@spec height(Element.t()) :: non_neg_integer()
def height(element) do
length(element)
end
@doc """
The correct and safe way to get the width of an `element`
Also see `Markex.Element.size/1`
"""
@doc since: "1.0.0"
@spec width(Element.t()) :: non_neg_integer()
def width(element) do
if height(element) != 0 do
String.length(List.first(element))
else
0
end
end
@doc """
The correct and safe way to get the width and height of an `element`
See also `Markex.Element.width/1` and `Markex.Element.height/1`.
## Examples
iex> Markex.Element.new("#", 4, 5) |> Markex.Element.size()
{4, 5}
"""
@doc since: "1.0.0"
@spec size(Element.t()) :: {non_neg_integer(), non_neg_integer()}
def size(element) do
{width(element), height(element)}
end
@doc """
Adds space to the sides of the `element` so that the width matches `n`
## Examples
iex> Markex.Element.new("#") |> Markex.Element.wider(6)
[
" # "
]
"""
@doc since: "1.1.0"
@spec wider(Element.t(), pos_integer(), :center | :left | :right) :: Element.t()
def wider(element, n, align \\ :center)
def wider(element, n, :center) do
add_space = n - width(element)
left = Integer.floor_div(add_space, 2)
element
|> Enum.map(&String.pad_leading(&1, width(element) + left))
|> Enum.map(&String.pad_trailing(&1, n))
end
def wider(element, n, :left) do
element
|> Enum.map(&String.pad_trailing(&1, n))
end
def wider(element, n, :right) do
element
|> Enum.map(&String.pad_leading(&1, n))
end
@doc """
Adds space to the top and bottom of the `element` so that the height matches `n`
## Examples
iex> Markex.Element.new("#") |> Markex.Element.higher(6)
[
" ",
" ",
"#",
" ",
" ",
" "
]
"""
@doc since: "1.1.0"
@spec higher(Element.t(), pos_integer(), :center | :top | :bottom) :: Element.t()
def higher(element, n, align \\ :center)
def higher(element, n, :center) do
add_space = n - height(element)
up = Integer.floor_div(add_space, 2)
down = add_space - up
blank = String.duplicate(" ", width(element))
List.duplicate(blank, up)
|> Enum.concat(element)
|> Enum.concat(List.duplicate(blank, down))
end
def higher(element, n, :top) do
add_space = n - height(element)
blank = String.duplicate(" ", width(element))
element
|> Enum.concat(List.duplicate(blank, add_space))
end
def higher(element, n, :bottom) do
add_space = n - height(element)
blank = String.duplicate(" ", width(element))
List.duplicate(blank, add_space)
|> Enum.concat(element)
end
@doc """
Positions `this` on top of `that`, makes the elements wider as needed
The `align` argument can be used to position the elements correctly relative
to each other and can be `:center`, `:left`, or `:right`.
See also `Markex.Element.Operators.<~>/2` and `Markex.Element.wider/2`.
## Examples
iex> Markex.Element.over(Markex.Element.new("#", 1, 2), Markex.Element.new("$", 3, 2))
[
" # ",
" # ",
"$$$",
"$$$"
]
"""
@doc since: "1.1.0"
@spec over(Element.t(), Element.t(), :center | :left | :right) :: Element.t()
def over(this, that, align \\ :center) do
w = max(width(this), width(that))
this = wider(this, w, align)
that = wider(that, w, align)
Enum.concat(this, that)
end
@doc """
Positions `this` to the left of `that`, makes the elements higher if needed
The `align` argument can be used to position the elements correctly relative
to each other and can be `:center`, `:top`, or `:bottom`.
See also `Markex.Element.Operators.<|>/2` and `Markex.Element.higher/2`.
## Examples
iex> Markex.Element.beside(Markex.Element.new("#"), Markex.Element.new("$", 2, 3))
[
" $$",
"#$$",
" $$"
]
"""
@doc since: "1.1.0"
@spec beside(Element.t(), Element.t(), :center | :top | :bottom) :: Element.t()
def beside(this, that, align \\ :center) do
h = max(height(this), height(that))
this = higher(this, h, align)
that = higher(that, h, align)
Enum.zip(this, that)
|> Enum.map(fn {l1, l2} -> l1 <> l2 end)
end
end
|
lib/markex/element.ex
| 0.914565 | 0.647478 |
element.ex
|
starcoder
|
defmodule RDF.Graph.Builder do
alias RDF.{Description, Graph, Dataset, PrefixMap, IRI}
defmodule Error do
defexception [:message]
end
defmodule Helper do
defdelegate a(), to: RDF.NS.RDF, as: :type
defdelegate a(s, o), to: RDF.NS.RDF, as: :type
defdelegate a(s, o1, o2), to: RDF.NS.RDF, as: :type
defdelegate a(s, o1, o2, o3), to: RDF.NS.RDF, as: :type
defdelegate a(s, o1, o2, o3, o4), to: RDF.NS.RDF, as: :type
defdelegate a(s, o1, o2, o3, o4, o5), to: RDF.NS.RDF, as: :type
def exclude(_), do: nil
end
def build({:__block__, _, block}, opts) do
{declarations, data} = Enum.split_with(block, &declaration?/1)
{base, declarations} = extract_base(declarations)
base_string = base_string(base)
data = resolve_relative_iris(data, base_string)
declarations = resolve_relative_iris(declarations, base_string)
{prefixes, declarations} = extract_prefixes(declarations)
quote do
alias RDF.XSD
alias RDF.NS.{RDFS, OWL}
import RDF.Sigils
import Helper
unquote(declarations)
RDF.Graph.Builder.do_build(
unquote(data),
unquote(opts),
unquote(prefixes),
unquote(base_string)
)
end
end
def build(single, opts) do
build({:__block__, [], List.wrap(single)}, opts)
end
@doc false
def do_build(data, opts, prefixes, base) do
RDF.graph(graph_opts(opts, prefixes, base))
|> Graph.add(Enum.filter(data, &rdf?/1))
end
defp graph_opts(opts, prefixes, base) do
opts
|> set_base_opt(base)
|> set_prefix_opt(prefixes)
end
defp set_base_opt(opts, nil), do: opts
defp set_base_opt(opts, base), do: Keyword.put(opts, :base_iri, base)
defp set_prefix_opt(opts, []), do: opts
defp set_prefix_opt(opts, prefixes) do
Keyword.update(opts, :prefixes, RDF.default_prefixes(prefixes), fn opt_prefixes ->
PrefixMap.new(prefixes)
|> PrefixMap.merge!(opt_prefixes, :ignore)
end)
end
defp base_string(nil), do: nil
defp base_string(base) when is_binary(base), do: base
defp base_string(base) when is_atom(base), do: apply(base, :__base_iri__, [])
defp base_string({:sigil_I, _, [{_, _, [base]}, _]}), do: base
defp base_string(_) do
raise Error,
message: "invalid @base expression; only literal values are allowed as @base value"
end
defp resolve_relative_iris(ast, base) do
Macro.prewalk(ast, fn
{:sigil_I, meta_outer, [{:<<>>, meta_inner, [iri]}, list]} = sigil ->
if IRI.absolute?(iri) do
sigil
else
absolute = iri |> IRI.absolute(base) |> IRI.to_string()
{:sigil_I, meta_outer, [{:<<>>, meta_inner, [absolute]}, list]}
end
other ->
other
end)
end
defp extract_base(declarations) do
{base, declarations} =
Enum.reduce(declarations, {nil, []}, fn
{:@, line, [{:base, _, [{:__aliases__, _, ns}] = aliases}]}, {_, declarations} ->
{Module.concat(ns), [{:alias, line, aliases} | declarations]}
{:@, _, [{:base, _, [base]}]}, {_, declarations} ->
{base, declarations}
declaration, {base, declarations} ->
{base, [declaration | declarations]}
end)
{base, Enum.reverse(declarations)}
end
defp extract_prefixes(declarations) do
{prefixes, declarations} =
Enum.reduce(declarations, {[], []}, fn
{:@, line, [{:prefix, _, [[{prefix, {:__aliases__, _, ns} = aliases}]]}]},
{prefixes, declarations} ->
{[prefix(prefix, ns) | prefixes], [{:alias, line, [aliases]} | declarations]}
{:@, line, [{:prefix, _, [{:__aliases__, _, ns}] = aliases}]}, {prefixes, declarations} ->
{[prefix(ns) | prefixes], [{:alias, line, aliases} | declarations]}
declaration, {prefixes, declarations} ->
{prefixes, [declaration | declarations]}
end)
{prefixes, Enum.reverse(declarations)}
end
defp prefix(namespace) do
namespace
|> Enum.reverse()
|> hd()
|> to_string()
|> Macro.underscore()
|> String.to_atom()
|> prefix(namespace)
end
defp prefix(prefix, namespace), do: {prefix, Module.concat(namespace)}
defp declaration?({:=, _, _}), do: true
defp declaration?({:@, _, [{:prefix, _, _}]}), do: true
defp declaration?({:@, _, [{:base, _, _}]}), do: true
defp declaration?({:alias, _, _}), do: true
defp declaration?({:import, _, _}), do: true
defp declaration?({:require, _, _}), do: true
defp declaration?({:use, _, _}), do: true
defp declaration?(_), do: false
defp rdf?(nil), do: false
defp rdf?(:ok), do: false
defp rdf?(%Description{}), do: true
defp rdf?(%Graph{}), do: true
defp rdf?(%Dataset{}), do: true
defp rdf?(statements) when is_map(statements), do: true
defp rdf?(statements) when is_tuple(statements), do: true
defp rdf?(list) when is_list(list), do: true
defp rdf?(invalid) do
raise Error, message: "invalid RDF data: #{inspect(invalid)}"
end
end
|
lib/rdf/graph_builder.ex
| 0.524882 | 0.467271 |
graph_builder.ex
|
starcoder
|
defmodule EEx.Engine do
@moduledoc ~S"""
Basic EEx engine that ships with Elixir.
An engine needs to implement six functions:
* `init(opts)` - called at the beginning of every text
and it must return the initial state.
* `handle_body(state)` - receives the state of the document
and it must return a quoted expression.
* `handle_text(state, text)` - it receives the state,
the text and must return a new quoted expression.
* `handle_expr(state, marker, expr)` - it receives the state,
the marker, the expr and must return a new state.
* `handle_begin(state)` - called every time a new state
is needed with an empty buffer. Typically called for do/end
blocks, case expressions, anonymous functions, etc
* `handle_end(state)` - opposite of `handle_begin(state)` and
it must return quoted expression
The marker is what follows exactly after `<%`. For example,
`<% foo %>` has an empty marker, but `<%= foo %>` has `"="`
as marker. The allowed markers so far are:
* `""`
* `"="`
* `"/"`
* `"|"`
Markers `"/"` and `"|"` are only for use in custom EEx engines
and are not implemented by default. Using them without the
implementation raises `EEx.SyntaxError`.
If your engine does not implement all markers, please ensure that
`handle_expr/3` falls back to `EEx.Engine.handle_expr/3`
to raise the proper error message.
Read `handle_expr/3` below for more information about the markers
implemented by default by this engine.
`EEx.Engine` can be used directly if one desires to use the
default implementations for the functions above.
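As a rough sketch, a custom engine that upcases every `<%= %>` result could
build on these defaults as follows (the module name is arbitrary):

    defmodule MyUpcaseEngine do
      use EEx.Engine

      def handle_expr(buffer, "=", expr) do
        quote do
          unquote(buffer) <> String.upcase(String.Chars.to_string(unquote(expr)))
        end
      end

      def handle_expr(buffer, marker, expr) do
        EEx.Engine.handle_expr(buffer, marker, expr)
      end
    end

    EEx.eval_string("<%= :hello %>", [], engine: MyUpcaseEngine)
    #=> "HELLO"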
"""
@type state :: term
@callback init(opts :: keyword) :: state
@callback handle_body(state) :: Macro.t()
@callback handle_text(state, text :: String.t()) :: state
@callback handle_expr(state, marker :: String.t(), expr :: Macro.t()) :: state
@callback handle_begin(state) :: state
@callback handle_end(state) :: Macro.t()
@doc false
defmacro __using__(_) do
quote do
@behaviour EEx.Engine
def init(opts) do
EEx.Engine.init(opts)
end
def handle_body(quoted) do
EEx.Engine.handle_body(quoted)
end
def handle_begin(quoted) do
EEx.Engine.handle_begin(quoted)
end
def handle_end(quoted) do
EEx.Engine.handle_end(quoted)
end
def handle_text(buffer, text) do
EEx.Engine.handle_text(buffer, text)
end
def handle_expr(buffer, marker, expr) do
EEx.Engine.handle_expr(buffer, marker, expr)
end
defoverridable EEx.Engine
end
end
@doc """
Handles assigns in quoted expressions.
A warning will be printed on missing assigns.
Future versions will raise.
This can be added to any custom engine by invoking
`handle_assign/1` with `Macro.prewalk/2`:
def handle_expr(buffer, token, expr) do
expr = Macro.prewalk(expr, &EEx.Engine.handle_assign/1)
EEx.Engine.handle_expr(buffer, token, expr)
end
"""
@spec handle_assign(Macro.t()) :: Macro.t()
def handle_assign({:@, meta, [{name, _, atom}]}) when is_atom(name) and is_atom(atom) do
line = meta[:line] || 0
quote(line: line, do: EEx.Engine.fetch_assign!(var!(assigns), unquote(name)))
end
def handle_assign(arg) do
arg
end
@doc false
# TODO: Raise on 2.0
@spec fetch_assign!(Access.t(), Access.key()) :: term | nil
def fetch_assign!(assigns, key) do
case Access.fetch(assigns, key) do
{:ok, val} ->
val
:error ->
keys = Enum.map(assigns, &elem(&1, 0))
IO.warn(
"assign @#{key} not available in EEx template. " <>
"Please ensure all assigns are given as options. " <>
"Available assigns: #{inspect(keys)}"
)
nil
end
end
@doc """
Returns an empty string as initial buffer.
"""
def init(_opts) do
""
end
@doc """
Returns an empty string as the new buffer.
"""
def handle_begin(_previous) do
""
end
@doc """
End of the new buffer.
"""
def handle_end(quoted) do
quoted
end
@doc """
The default implementation simply returns the given expression.
"""
def handle_body(quoted) do
quoted
end
@doc """
The default implementation simply concatenates text to the buffer.
"""
def handle_text(buffer, text) do
quote(do: unquote(buffer) <> unquote(text))
end
@doc """
Implements expressions according to the markers.
<% Elixir expression - inline with output %>
<%= Elixir expression - replace with result %>
<%/ Elixir expression - raise EEx.SyntaxError, to be implemented by custom engines %>
<%| Elixir expression - raise EEx.SyntaxError, to be implemented by custom engines %>
All other markers are not implemented by this engine.
"""
def handle_expr(buffer, "=", expr) do
quote do
tmp1 = unquote(buffer)
tmp1 <> String.Chars.to_string(unquote(expr))
end
end
def handle_expr(buffer, "", expr) do
quote do
tmp2 = unquote(buffer)
unquote(expr)
tmp2
end
end
def handle_expr(_buffer, marker, _expr) when marker in ["/", "|"] do
raise EEx.SyntaxError,
"unsupported EEx syntax <%#{marker} %> (the syntax is valid but not supported by the current EEx engine)"
end
end
|
lib/eex/lib/eex/engine.ex
| 0.745861 | 0.662733 |
engine.ex
|
starcoder
|
defmodule Supervisor do
@moduledoc """
A behaviour module for implementing supervision functionality.
A supervisor is a process which supervises other processes called
child processes. Supervisors are used to build a hierarchical process
structure called a supervision tree, a nice way to structure fault-tolerant
applications.
A supervisor implemented using this module will have a standard set
of interface functions and include functionality for tracing and error
reporting. It will also fit into a supervision tree.
## Example
In order to define a supervisor, we need to first define a child process
that is going to be supervised. In order to do so, we will define a GenServer
that represents a stack:
defmodule Stack do
use GenServer
def start_link(state, opts \\ []) do
GenServer.start_link(__MODULE__, state, opts)
end
def handle_call(:pop, _from, [h|t]) do
{:reply, h, t}
end
def handle_cast({:push, h}, t) do
{:noreply, [h|t]}
end
end
We can now define our supervisor and start it as follows:
# Import helpers for defining supervisors
import Supervisor.Spec
# We are going to supervise the Stack server which
# will be started with a single argument [:hello]
# and the default name of :sup_stack.
children = [
worker(Stack, [[:hello], [name: :sup_stack]])
]
# Start the supervisor with our one child
{:ok, pid} = Supervisor.start_link(children, strategy: :one_for_one)
Notice that when starting the GenServer, we are registering it
with name `:sup_stack`, which allows us to call it directly and
get what is on the stack:
GenServer.call(:sup_stack, :pop)
#=> :hello
GenServer.cast(:sup_stack, {:push, :world})
#=> :ok
GenServer.call(:sup_stack, :pop)
#=> :world
However, there is a bug in our stack server. If we call `:pop` and
the stack is empty, it is going to crash because no clause matches.
Let's try it:
GenServer.call(:sup_stack, :pop)
** (exit) exited in: GenServer.call(:sup_stack, :pop, 5000)
Luckily, since the server is being supervised by a supervisor, the
supervisor will automatically start a new one, with the default stack
of `[:hello]` like before:
GenServer.call(:sup_stack, :pop) == :hello
Supervisors support different strategies; in the example above, we
have chosen `:one_for_one`. Furthermore, each supervisor can have many
workers and supervisors as children, each of them with their specific
configuration, shutdown values, and restart strategies.
Continue reading this moduledoc to learn more about supervision strategies
and then follow to the `Supervisor.Spec` module documentation to learn
about the specification for workers and supervisors.
## Module-based supervisors
In the example above, a supervisor was dynamically created by passing
the supervision structure to `start_link/2`. However, supervisors
can also be created by explicitly defining a supervision module:
defmodule MyApp.Supervisor do
use Supervisor
def start_link do
Supervisor.start_link(__MODULE__, [])
end
def init([]) do
children = [
worker(Stack, [[:hello]])
]
supervise(children, strategy: :one_for_one)
end
end
You may want to use a module-based supervisor if:
* You need to do some particular action on supervisor
initialization, like setting up an ETS table.
* You want to perform partial hot-code swapping of the
tree. For example, if you add or remove children,
the module-based supervision will add and remove the
new children directly, while the dynamic supervision
requires the whole tree to be restarted in order to
perform such swaps.
## Strategies
* `:one_for_one` - if a child process terminates, only that
process is restarted.
* `:one_for_all` - if a child process terminates, all other child
processes are terminated and then all child processes (including
the terminated one) are restarted.
* `:rest_for_one` - if a child process terminates, the "rest" of
the child processes, i.e. the child processes after the terminated
one in start order, are terminated. Then the terminated child
process and the rest of the child processes are restarted.
* `:simple_one_for_one` - similar to `:one_for_one` but suits better
when dynamically attaching children. This strategy requires the
supervisor specification to contain only one child. Many functions
in this module behave slightly differently when this strategy is
used.
## Simple one for one
The simple one for one supervisor is useful when you want to dynamically
start and stop supervisor children. For example, imagine you want to
dynamically create multiple stacks. We can do so by defining a simple one
for one supervisor:
# Import helpers for defining supervisors
import Supervisor.Spec
# This time, we don't pass any argument because
# the argument will be given when we start the child
children = [
worker(Stack, [], restart: :transient)
]
# Start the supervisor with our one child
{:ok, sup_pid} = Supervisor.start_link(children, strategy: :simple_one_for_one)
There are a couple of differences here:
* The simple one for one specification can define only one child which
works as a template for when we call `start_child/2`
* We have defined the child to have restart strategy of transient. This
means that, if the child process exits due to a `:normal`, `:shutdown`
or `{:shutdown, term}` reason, it won't be restarted. This is useful
as it allows our workers to politely shutdown and be removed from the
simple one for one supervisor, without being restarted. You can find
more information about restart strategies on `Supervisor.Spec`
With the supervisor defined, let's dynamically start stacks:
{:ok, pid} = Supervisor.start_child(sup_pid, [[:hello, :world], []])
GenServer.call(pid, :pop) #=> :hello
GenServer.call(pid, :pop) #=> :world
{:ok, pid} = Supervisor.start_child(sup_pid, [[:something, :else], []])
GenServer.call(pid, :pop) #=> :something
GenServer.call(pid, :pop) #=> :else
Supervisor.count_children(sup_pid)
#=> %{active: 2, specs: 1, supervisors: 0, workers: 2}
## Exit reasons
From the example above, you may have noticed that the transient restart
strategy for the worker does not restart the child in case it crashes with
reason `:normal`, `:shutdown` or `{:shutdown, term}`.
So one may ask: which exit reason should I choose when exiting my worker?
There are three options:
* `:normal` - on such cases, the exit won't be logged, there is no restart
on transient mode and linked processes do not exit
* `:shutdown` or `{:shutdown, term}` - on such cases, the exit won't be
logged, there is no restart on transient mode and linked processes exit
with the same reason unless trapping exits
* any other term - on such cases, the exit will be logged, there are
restarts on transient mode and linked processes exit with the same reason
unless trapping exits
## Name Registration
A supervisor is bound to the same name registration rules as a `GenServer`.
Read more about it in the `GenServer` docs.
"""
@doc false
defmacro __using__(_) do
quote location: :keep do
@behaviour :supervisor
import Supervisor.Spec
end
end
@typedoc "Return values of `start_link` functions"
@type on_start :: {:ok, pid} | :ignore |
{:error, {:already_started, pid} | {:shutdown, term} | term}
@typedoc "Return values of `start_child` functions"
@type on_start_child :: {:ok, child} | {:ok, child, info :: term} |
{:error, {:already_started, child} | :already_present | term}
@type child :: pid | :undefined
@typedoc "The Supervisor name"
@type name :: atom | {:global, term} | {:via, module, term}
@typedoc "Options used by the `start*` functions"
@type options :: [name: name,
strategy: Supervisor.Spec.strategy,
max_restarts: non_neg_integer,
max_seconds: non_neg_integer]
@typedoc "The supervisor reference"
@type supervisor :: pid | name | {atom, node}
@doc """
Starts a supervisor with the given children.
A strategy is required to be given as an option. Furthermore,
the `:max_restarts` and `:max_seconds` value can be configured
as described in `Supervisor.Spec.supervise/2` docs.
The options can also be used to register a supervisor name.
The supported values are described under the `Name Registration`
section in the `GenServer` module docs.
If the supervisor and its child processes are successfully created
(i.e. if the start function of all child processes returns `{:ok, child}`,
`{:ok, child, info}`, or `:ignore`) the function returns
`{:ok, pid}`, where `pid` is the pid of the supervisor. If there
already exists a process with the specified name, the function returns
`{:error, {:already_started, pid}}`, where pid is the pid of that
process.
If any of the child process start functions fail or return an error tuple or
an erroneous value, the supervisor will first terminate all already
started child processes with reason `:shutdown` and then terminate
itself and return `{:error, {:shutdown, reason}}`.
Note that the `Supervisor` is linked to the parent process
and will exit not only on crashes but also if the parent process
exits with `:normal` reason.
"""
@spec start_link([Supervisor.Spec.spec], options) :: on_start
def start_link(children, options) when is_list(children) do
spec = Supervisor.Spec.supervise(children, options)
start_link(Supervisor.Default, spec, options)
end
@doc """
Starts a supervisor module with the given `arg`.
To start the supervisor, the `init/1` callback will be invoked
in the given module. The `init/1` callback must return a
supervision specification which can be created with the help
of `Supervisor.Spec` module.
If the `init/1` callback returns `:ignore`, this function returns
`:ignore` as well and the supervisor terminates with reason `:normal`.
If it fails or returns an incorrect value, this function returns
`{:error, term}` where `term` is a term with information about the
error, and the supervisor terminates with reason `term`.
The `:name` option can also be given in order to register a supervisor
name, the supported values are described under the `Name Registration`
section in the `GenServer` module docs.
Other failure conditions are specified in `start_link/2` docs.
"""
@spec start_link(module, term) :: on_start
@spec start_link(module, term, options) :: on_start
def start_link(module, arg, options \\ []) when is_list(options) do
case Keyword.get(options, :name) do
nil ->
:supervisor.start_link(module, arg)
atom when is_atom(atom) ->
:supervisor.start_link({:local, atom}, module, arg)
other when is_tuple(other) ->
:supervisor.start_link(other, module, arg)
end
end
@doc """
Dynamically adds and starts a child specification to the supervisor.
`child_spec` should be a valid child specification (unless the supervisor
is a `:simple_one_for_one` supervisor, see below). The child process will
be started as defined in the child specification.
In the case of `:simple_one_for_one`, the child specification defined in
the supervisor will be used and instead of a `child_spec`, an arbitrary list
of terms is expected. The child process will then be started by appending
the given list to the existing function arguments in the child specification.
If a child specification with the specified id already exists,
`child_spec` is discarded and the function returns `{:error, {:already_started, child}}`
if the corresponding child process is running, or `{:error, :already_present}` if it is not.
If the child process start function returns `{:ok, child}` or `{:ok, child, info}`,
the child specification and pid are added to the supervisor and the function returns
the same value.
If the child process start function returns `:ignore`, the child specification is
added to the supervisor, the pid is set to undefined and the function returns
`{:ok, :undefined}`.
If the child process start function returns an error tuple or an erroneous value,
or if it fails, the child specification is discarded and the function returns
`{:error, error}` where `error` is a term containing information about the error
and child specification.
"""
@spec start_child(supervisor, Supervisor.Spec.spec | [term]) :: on_start_child
def start_child(supervisor, child_spec_or_args) do
call(supervisor, {:start_child, child_spec_or_args})
end
@doc """
Terminates the given pid or child id.
If the supervisor is not a `simple_one_for_one`, the child id is expected
and the process, if there is one, is terminated; the child specification is
kept unless the child is temporary.
In case of a `simple_one_for_one` supervisor, a pid is expected. If the child
specification identifier is given instead of a `pid`, the function will
return `{:error, :simple_one_for_one}`.
A non-temporary child process may later be restarted by the supervisor. The child
process can also be restarted explicitly by calling `restart_child/2`. Use
`delete_child/2` to remove the child specification.
If successful, the function returns `:ok`. If there is no child specification or
pid, the function returns `{:error, :not_found}`.
"""
@spec terminate_child(supervisor, pid | Supervisor.Spec.child_id) :: :ok | {:error, error}
when error: :not_found | :simple_one_for_one
def terminate_child(supervisor, pid_or_child_id) do
call(supervisor, {:terminate_child, pid_or_child_id})
end
@doc """
Deletes the child specification identified by `child_id`.
The corresponding child process must not be running, use `terminate_child/2`
to terminate it.
If successful, the function returns `:ok`. This function may error with an
appropriate error tuple if the `child_id` is not found, or if the current
process is running or being restarted.
This operation is not supported by `simple_one_for_one` supervisors.
"""
@spec delete_child(supervisor, Supervisor.Spec.child_id) :: :ok | {:error, error}
when error: :not_found | :simple_one_for_one | :running | :restarting
def delete_child(supervisor, child_id) do
call(supervisor, {:delete_child, child_id})
end
@doc """
Restarts a child process identified by `child_id`.
The child specification must exist and the corresponding child process must not
be running.
Note that for temporary children, the child specification is automatically deleted
when the child terminates, and thus it is not possible to restart such children.
If the child process start function returns `{:ok, child}` or
`{:ok, child, info}`, the pid is added to the supervisor and the function returns
the same value.
If the child process start function returns `:ignore`, the pid remains set to
`:undefined` and the function returns `{:ok, :undefined}`.
This function may error with an appropriate error tuple if the `child_id` is not
found, or if the current process is running or being restarted.
If the child process start function returns an error tuple or an erroneous value,
or if it fails, the function returns `{:error, error}`.
This operation is not supported by `simple_one_for_one` supervisors.
"""
@spec restart_child(supervisor, Supervisor.Spec.child_id) ::
{:ok, child} | {:ok, child, term} | {:error, error}
when error: :not_found | :simple_one_for_one | :running | :restarting | term
def restart_child(supervisor, child_id) do
call(supervisor, {:restart_child, child_id})
end
@doc """
Returns a list with information about all children.
Note that calling this function when supervising a large number of children
under low memory conditions can cause an out of memory exception.
This function returns a list of tuples containing:
* `id` - as defined in the child specification or `:undefined` in the case
of a `simple_one_for_one` supervisor
* `child` - the pid of the corresponding child process, the atom
`:restarting` if the process is about to be restarted, or `:undefined` if
there is no such process
* `type` - `:worker` or `:supervisor` as defined in the child specification
* `modules` - as defined in the child specification
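For the `:simple_one_for_one` stack example from the module documentation, the
result might look like this (the pid will differ):

    Supervisor.which_children(sup_pid)
    #=> [{:undefined, #PID<0.123.0>, :worker, [Stack]}]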
"""
@spec which_children(supervisor) ::
[{Supervisor.Spec.child_id | :undefined,
child | :restarting,
Supervisor.Spec.worker,
Supervisor.Spec.modules}]
def which_children(supervisor) do
call(supervisor, :which_children)
end
@doc """
Returns a map containing count values for the supervisor.
The map contains the following keys:
* `:specs` - the total count of children, dead or alive
* `:active` - the count of all actively running child processes managed by
this supervisor
* `:supervisors` - the count of all supervisors whether or not the child
process is still alive
* `:workers` - the count of all workers, whether or not the child process
is still alive
"""
@spec count_children(supervisor) ::
%{specs: non_neg_integer, active: non_neg_integer,
supervisors: non_neg_integer, workers: non_neg_integer}
def count_children(supervisor) do
call(supervisor, :count_children) |> :maps.from_list
end
@compile {:inline, call: 2}
defp call(supervisor, req) do
GenServer.call(supervisor, req, :infinity)
end
end
|
lib/elixir/lib/supervisor.ex
| 0.790611 | 0.684593 |
supervisor.ex
|
starcoder
|
defmodule Kuddle.Config.Types do
@moduledoc """
Annotation cast types.
User specific types can be registered by setting the kuddle_config types:
## Example
config :kuddle_config,
types: [
typename: {Module, cast_function_name},
geopoint: {MyGeoPoint, :cast},
]
The cast function must return {:ok, any()} or :error if it cannot cast the given value.
Kuddle Config has some default types; they can be overwritten by setting the default_types:
config :kuddle_config,
default_types: [
date: {Date, :from_iso8601},
utc_datetime: {Kuddle.Config.Types.DateTime, :cast},
naive_datetime: {NaiveDateTime, :from_iso8601},
time: {Time, :from_iso8601},
decimal: {Kuddle.Config.Types.Decimal, :cast},
atom: {Kuddle.Config.Types.Atom, :cast},
boolean: {Kuddle.Config.Types.Boolean, :cast},
tuple: {Kuddle.Config.Types.Tuple, :cast},
list: {Kuddle.Config.Types.List, :cast},
]
The purpose of the default_types is to provide some sane default which doesn't require any
additional configuration from you, the user.
However they can be disabled by setting the default_types config.
"""
@default_types [
date: {Date, :from_iso8601},
utc_datetime: {Kuddle.Config.Types.DateTime, :cast},
naive_datetime: {NaiveDateTime, :from_iso8601},
time: {Time, :from_iso8601},
decimal: {Kuddle.Config.Types.Decimal, :cast},
atom: {Kuddle.Config.Types.Atom, :cast},
boolean: {Kuddle.Config.Types.Boolean, :cast},
tuple: {Kuddle.Config.Types.Tuple, :cast},
list: {Kuddle.Config.Types.List, :cast},
]
default_types = Application.get_env(:kuddle_config, :default_types, @default_types)
user_types = Application.get_env(:kuddle_config, :types, [])
all_types = Keyword.merge(default_types, user_types)
@doc """
Casts a given value to a different type; normally the input will be a string.
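For example, with the default `:date` type:

    Kuddle.Config.Types.cast(:date, "2021-01-06")
    #=> {:ok, ~D[2021-01-06]}

    Kuddle.Config.Types.cast("date", "not a date")
    #=> :error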
"""
@spec cast(atom() | String.t(), any()) :: {:ok, any()} | :error
def cast(type, value) do
case internal_cast(type, value) do
{:ok, value} ->
{:ok, value}
{:error, _reason} ->
:error
:error ->
:error
end
end
@spec internal_cast(atom() | String.t(), any()) :: {:ok, any()} | {:error, term} | :error
def internal_cast(type, value)
for {type, {module, func_name}} <- all_types do
str = Atom.to_string(type)
def internal_cast(unquote(str), value) do
internal_cast(unquote(type), value)
end
def internal_cast(unquote(type), value) do
unquote(module).unquote(func_name)(value)
end
end
def internal_cast(_type, _) do
:error
end
end
|
lib/kuddle/config/types.ex
| 0.883707 | 0.413566 |
types.ex
|
starcoder
|
defprotocol Ecto.DataType do
@moduledoc """
Casts and dumps a given struct into an Ecto type.
While `Ecto.Type` allows developers to cast/load/dump
any value from the storage into the struct based on the
schema, `Ecto.DataType` allows developers to convert
existing data types into primitive Ecto types without
the schema information.
For example, Elixir's native `Date` struct implements
the Ecto.DataType protocol so it is properly converted
to a tuple when directly passed to adapters:
defimpl Ecto.DataType, for: Date do
def dump(%Date{day: day, month: month, year: year}) do
{:ok, {year, month, day}}
end
end
"""
@fallback_to_any true
@doc """
Invoked when the data structure has not been dumped along
the way and must fallback to its database representation.
"""
@spec dump(term) :: {:ok, term} | :error
def dump(value)
end
defimpl Ecto.DataType, for: Any do
# The default representation is itself, which
# means we are delegating to the database. If
# the database does not support, it will raise.
def dump(value) do
{:ok, value}
end
end
defimpl Ecto.DataType, for: List do
def dump(list), do: dump(list, [])
defp dump([h|t], acc) do
case Ecto.DataType.dump(h) do
{:ok, h} -> dump(t, [h|acc])
:error -> :error
end
end
defp dump([], acc) do
{:ok, Enum.reverse(acc)}
end
end
defimpl Ecto.DataType, for: NaiveDateTime do
def dump(%NaiveDateTime{year: year, month: month, day: day,
hour: hour, minute: minute, second: second, microsecond: {usec, _}}) do
{:ok, {{year, month, day}, {hour, minute, second, usec}}}
end
end
defimpl Ecto.DataType, for: DateTime do
def dump(%DateTime{year: year, month: month, day: day, time_zone: "Etc/UTC",
hour: hour, minute: minute, second: second, microsecond: {usec, _}}) do
{:ok, {{year, month, day}, {hour, minute, second, usec}}}
end
end
defimpl Ecto.DataType, for: Date do
def dump(%Date{year: year, month: month, day: day}) do
{:ok, {year, month, day}}
end
end
defimpl Ecto.DataType, for: Time do
def dump(%Time{hour: hour, minute: minute, second: second, microsecond: {usec, _}}) do
{:ok, {hour, minute, second, usec}}
end
end
|
deps/ecto/lib/ecto/data_type.ex
| 0.873754 | 0.615723 |
data_type.ex
|
starcoder
|
defmodule Releases.Plugin.LinkConfig do
@moduledoc """
Distillery plugin to link the `vm.args` or `sys.config` file on deploy hosts.
Because distillery uses `:systools_make.make_tar(...)` to create the release
tar which resoves all links using the `:dereference` option, the release
tar needs to be repackaged including the links. To be able use this plugin,
it must be added in the `rel/config.exs` distillery config as plugin like this:
```
environment :prod do
..
plugin Releases.Plugin.LinkConfig
end
```
"""
use Distillery.Releases.Plugin
def before_assembly(_, _), do: nil
def after_assembly(_, _), do: nil
def before_package(_, _), do: nil
def after_package(%Release{version: version, profile: profile, name: name}, _) do
# repackage release tar including link, because tar is generated using `:systools_make.make_tar(...)`
# which resolves the links using the `:dereference` option when creating the tar using the
# `:erl_tar` module.
output_dir = profile.output_dir
tmp_dir = "_edeliver_release_patch"
tmp_path = Path.join [output_dir, "releases", version, tmp_dir]
files_to_link = [
{System.get_env("LINK_VM_ARGS"), Path.join([tmp_path, "releases", version, "vm.args"])},
{System.get_env("LINK_SYS_CONFIG"), Path.join([tmp_path, "releases", version, "sys.config"])},
] |> Enum.filter(fn {source, _dest} ->
case source do
<<_,_::binary>> -> true
_ -> false
end
end)
if Enum.count(files_to_link) > 0 do
info "Repackaging release with links to config files"
try do
tar_file = Path.join [output_dir, "releases", version, "#{name}.tar.gz"]
true = File.exists? tar_file
:ok = File.mkdir_p tmp_path
ln_binary = <<_,_::binary>> = System.find_executable "ln"
debug "Extracting release tar to #{tmp_dir}"
:ok = :erl_tar.extract(tar_file, [{:cwd, to_charlist(tmp_path)}, :compressed])
directories_to_include = for dir <- File.ls!(tmp_path), do: {to_charlist(dir), to_charlist(Path.join(tmp_path, dir))}
for {source, destination} <- files_to_link do
debug "Linking #{source} to #{destination}"
{_, 0} = System.cmd ln_binary, ["-sf", source, destination], stderr_to_stdout: true
end
debug "Recreating release tar including links"
:ok = :erl_tar.create(tar_file, directories_to_include, [:compressed])
after
tmp_path_exists? = File.exists?(tmp_path) && File.dir?(tmp_path)
tmp_path_empty? = tmp_path_exists? && File.ls!(tmp_path) == []
tmp_path_contains_rel? = File.exists?(Path.join(tmp_path, "lib")) || File.exists?(Path.join(tmp_path, "releases"))
if tmp_path_exists? && (tmp_path_empty? || tmp_path_contains_rel?) do
debug "Removing tmp dir used for repackaging tar: #{tmp_path}"
File.rm_rf!(tmp_path)
end
end
end
nil
end
def after_package(_, _), do: nil
def after_cleanup(_, _), do: nil
end
|
lib/distillery/plugins/link_config.ex
| 0.569134 | 0.662972 |
link_config.ex
|
starcoder
|
defmodule Scenic.Component.Input.Toggle do
@moduledoc """
Add toggle to a Scenic graph.
## Data
`on?`
* `on?` - `true` if the toggle is on, pass `false` if not.
## Styles
Toggles honor the following styles. The `:light` and `:dark` styles look nice. The other bundled themes...not so much. You can also [supply your own theme](Scenic.Toggle.Components.html#toggle/3-theme).
* `:hidden` - If `false` the toggle is rendered. If true, it is skipped. The default
is `false`.
* `:theme` - The color set used to draw. See below. The default is `:dark`
## Additional Styles
Toggles also honor the following additional styles.
* `:border_width` - the border width. Defaults to `2`.
* `:padding` - the space between the border and the thumb. Defaults to `2`
* `:thumb_radius` - the radius of the thumb. This determines the size of the entire toggle. Defaults to `10`.
## Theme
To pass in a custom theme, supply a map with at least the following entries:
* `:border` - the color of the border around the toggle
* `:background` - the color of the track when the toggle is `off`.
* `:text` - the color of the thumb.
* `:thumb` - the color of the track when the toggle is `on`.
Optionally, you can supply the following entries:
* `:thumb_pressed` - the color of the thumb when pressed. Defaults to `:gainsboro`.
## Usage
You should add/modify components via the helper functions in
[`Scenic.Components`](Scenic.Components.html#toggle/3)
## Examples
The following example creates a toggle.
graph
|> toggle(true, translate: {20, 20})
The next example makes a larger toggle.
graph
|> toggle(true, translate: {20, 20}, thumb_radius: 14)
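The next example supplies a custom theme; the colors here are arbitrary, but the
map provides the required `:border`, `:background`, `:text` and `:thumb` entries.
graph
|> toggle(true, theme: %{border: :black, background: :gainsboro, text: :white, thumb: :green}, translate: {20, 20})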
"""
use Scenic.Component, has_children: false
alias Scenic.Graph
alias Scenic.Primitive
alias Scenic.Primitive.Group
alias Scenic.Primitive.Style.Theme
alias Scenic.ViewPort
import Scenic.Primitives
@default_thumb_pressed_color :gainsboro
@default_thumb_radius 8
@default_padding 2
@default_border_width 2
defmodule State do
@moduledoc false
defstruct graph: nil,
contained?: false,
id: nil,
on?: false,
pressed?: false,
theme: nil,
thumb_translate: nil,
color: nil
@type t :: %__MODULE__{
graph: Graph.t(),
contained?: boolean,
id: atom,
on?: boolean,
pressed?: boolean,
theme: map,
thumb_translate: %{on: {number, number}, off: {number, number}},
color: %{
thumb: %{default: term, active: term},
border: term,
track: %{off: term, on: term}
}
}
end
# --------------------------------------------------------
@doc false
def info(data) do
"""
#{IO.ANSI.red()}Toggle data must be: on?
#{IO.ANSI.yellow()}Received: #{inspect(data)}
#{IO.ANSI.default_color()}
"""
end
# --------------------------------------------------------
@doc false
@spec verify(any) :: {:ok, boolean} | :invalid_data
def verify(on? = data) when is_boolean(on?) do
{:ok, data}
end
def verify(_), do: :invalid_data
# --------------------------------------------------------
@doc false
@spec init(any, Keyword.t() | map | nil) :: {:ok, State.t()}
def init(on?, opts) do
id = opts[:id]
styles = opts[:styles]
# theme is passed in as an inherited style
theme =
(styles[:theme] || Theme.preset(:primary))
|> Theme.normalize()
# get toggle specific styles
thumb_radius = Map.get(styles, :thumb_radius, @default_thumb_radius)
padding = Map.get(styles, :padding, @default_padding)
border_width = Map.get(styles, :border_width, @default_border_width)
# calculate the dimensions of the track
track_height = thumb_radius * 2 + 2 * padding + 2 * border_width
track_width = thumb_radius * 4 + 2 * padding + 2 * border_width
track_border_radius = thumb_radius + padding + border_width
color = %{
thumb: %{
default: theme.text,
pressed: Map.get(theme, :thumb_pressed, @default_thumb_pressed_color)
},
border: theme.border,
track: %{
off: theme.background,
on: theme.thumb
}
}
thumb_translate = %{
off: {thumb_radius + padding + border_width, thumb_radius + padding + border_width},
on: {thumb_radius * 3 + padding + border_width, thumb_radius + padding + border_width}
}
{initial_track_fill, initial_thumb_translate} =
case on? do
true -> {color.track.on, thumb_translate.on}
false -> {color.track.off, thumb_translate.off}
end
graph =
Graph.build()
|> Group.add_to_graph(
fn graph ->
graph
|> rrect({track_width, track_height, track_border_radius},
fill: initial_track_fill,
stroke: {border_width, theme.border},
id: :track
)
|> circle(thumb_radius,
fill: color.thumb.default,
id: :thumb,
translate: initial_thumb_translate
)
end,
translate: {border_width, -(thumb_radius + padding + border_width)}
)
# |> text(text, fill: theme.text, translate: {20, 0})
state = %State{
contained?: false,
id: id,
graph: graph,
on?: on?,
pressed?: false,
theme: theme,
thumb_translate: thumb_translate,
color: color
}
{:ok, state, push: graph}
end
# --------------------------------------------------------
@doc false
def handle_input({:cursor_enter, _uid}, _, %{pressed?: true} = state) do
state = Map.put(state, :contained?, true)
graph = update_graph(state)
{:noreply, %{state | graph: graph}, push: graph}
end
# --------------------------------------------------------
def handle_input({:cursor_exit, _uid}, _, %{pressed?: true} = state) do
state = Map.put(state, :contained?, false)
graph = update_graph(state)
{:noreply, %{state | graph: graph}, push: graph}
end
# --------------------------------------------------------
def handle_input({:cursor_button, {:left, :press, _, _}}, context, state) do
state =
state
|> Map.put(:pressed?, true)
|> Map.put(:contained?, true)
graph = update_graph(state)
ViewPort.capture_input(context, [:cursor_button, :cursor_pos])
{:noreply, %{state | graph: graph}, push: graph}
end
# --------------------------------------------------------
def handle_input(
{:cursor_button, {:left, :release, _, _}},
context,
%{contained?: contained?, id: id, on?: on?, pressed?: pressed?} = state
) do
state = Map.put(state, :pressed?, false)
ViewPort.release_input(context, [:cursor_button, :cursor_pos])
# only do the action if the cursor is still contained in the target
state =
case pressed? && contained? do
true ->
on? = !on?
send_event({:value_changed, id, on?})
Map.put(state, :on?, on?)
false ->
state
end
graph = update_graph(state)
{:noreply, %{state | graph: graph}, push: graph}
end
# --------------------------------------------------------
def handle_input(_event, _context, state) do
{:noreply, state}
end
@spec update_graph(State.t()) :: Graph.t()
defp update_graph(%{
color: color,
contained?: contained?,
graph: graph,
on?: on?,
pressed?: pressed?,
thumb_translate: thumb_translate
}) do
graph =
case pressed? && contained? do
true ->
Graph.modify(graph, :thumb, &Primitive.put_style(&1, :fill, color.thumb.pressed))
false ->
Graph.modify(graph, :thumb, &Primitive.put_style(&1, :fill, color.thumb.default))
end
case on? do
true ->
graph
|> Graph.modify(:track, &Primitive.put_style(&1, :fill, color.track.on))
|> Graph.modify(:thumb, &Primitive.put_transform(&1, :translate, thumb_translate.on))
false ->
graph
|> Graph.modify(:track, &Primitive.put_style(&1, :fill, color.track.off))
|> Graph.modify(:thumb, &Primitive.put_transform(&1, :translate, thumb_translate.off))
end
end
end
|
lib/scenic/component/input/toggle.ex
| 0.908171 | 0.681091 |
toggle.ex
|
starcoder
|
defmodule ExPersona.Client.Parser do
@moduledoc """
This module contains functions used in parsing the results of API calls.
In this context, "parsing" occurs after JSON responses have been decoded into a `Map`.
"""
alias ExPersona.Client.Result
@typedoc """
A single result from a parsed API call.
"""
@type parsed_single_result :: {:ok, struct()} | {:error, String.t()}
@typedoc """
A result from a parsed API call that is streamable (i.e., a paginated list).
"""
@type parsed_list_result :: {:ok, struct(), Streamable.t()} | {:error, String.t()}
@typedoc """
Either a single or list result.
"""
@type parsed_result :: parsed_single_result | parsed_list_result
@type parser_func :: (Result.t() -> parsed_result)
@doc """
Provide a default parser in case an `ExPersona.Client.Operation` doesn't specify one.
This just returns the body of the response. This is useful for downloading files, for
instance, where there's no transformation that should be done on the result.
"""
@spec default_parse(Result.t()) :: parsed_result()
def default_parse(%Result{body: body}), do: {:ok, body}
@doc """
Turn a parser designed for a single record into one that can handle lists.
This is used in cases where the expected result of an `ExPersona.Client.Operation` is one
that can handle pagination, and we'd like to just specify a single parser. For instance,
the `ExPersona.Inquiry.list/0` function just creates this struct:
%Operation{path: "inquiries", parser: Parser.list_parser(&Inquiry.parse/1)}
where `Inquiry.parse/1` describes how to handle creating one `ExPersona.Inquiry` from a
`ExPersona.Client.Result`.
"""
@spec list_parser(parser_func()) :: (Result.t() -> parsed_list_result())
def list_parser(func) do
fn %Result{parsed: %{"data" => data}} = result ->
data
|> Enum.reduce_while([], fn datum, acc -> reduce_result(datum, acc, result, func) end)
|> case do
{:error, msg} ->
{:error, msg}
list ->
{:ok, list, Result.to_streamable(result)}
end
end
end
defp reduce_result(datum, acc, result, parser) do
%Result{result | parsed: %{"data" => datum}}
|> parser.()
|> case do
{:ok, resp} ->
{:cont, acc ++ [resp]}
{:error, msg} ->
{:halt, {:error, msg}}
end
end
end
|
lib/ex_persona/client/parser.ex
| 0.826922 | 0.548129 |
parser.ex
|
starcoder
|
defmodule Beeline.Honeycomb do
@moduledoc """
A Honeycomb.io exporter for Beeline telemetry
This exporter works by attaching a `:telemetry` handler with
`:telemetry.attach/4`. This attaches a function to handle events to each
`Beeline.HealthChecker` process. The work of creating and emitting the event
to Honeycomb is done in the HealthChecker process.
This module defines a module-based Task which can be started in a supervision
tree. For example, in your `MyApp.Application`'s `start/2` function, you
can add this module to the list of `children`:
```elixir
def start(_type, _args) do
children = [
{Beeline.Honeycomb, []},
{MyApp.MyBeelineTopology, []}
]
opts = [strategy: :one_for_one, name: MyApp.Supervisor]
Supervisor.start_link(children, opts)
end
```
## Options
The `Opencensus.Honeycomb.Event` `:samplerate` key can be configured in the
keyword list passed to `start_link/1` or as the list in
`{Beeline.Honeycomb, []}`. `:samplerate` should be a positive integer
and is defaulted to `1`, meaning that all events are recorded. See
the `t:Opencensus.Honeycomb.Event.t/0` documentation for more information.
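For example, to tag emitted events with a Honeycomb sample rate of 4 (the value
here is illustrative):

```elixir
{Beeline.Honeycomb, [samplerate: 4]}
```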
"""
use Task
@sender Application.get_env(
:beeline_honeycomb,
:honeycomb_sender,
Opencensus.Honeycomb.Sender
)
@doc false
def start_link(opts) do
Task.start_link(__MODULE__, :attach, [opts])
end
@doc false
def attach(opts) do
:telemetry.attach(
"beeline-honeycomb-exporter",
[:beeline, :health_check, :stop],
&__MODULE__.handle_event/4,
opts
)
end
@doc false
def handle_event(_event, measurement, metadata, state) do
previous_local_event_number =
case metadata[:prior_position] do
# coveralls-ignore-start
n when n >= 0 ->
n
_ ->
nil
# coveralls-ignore-stop
end
event = %Opencensus.Honeycomb.Event{
time: metadata[:measurement_time],
samplerate: state[:samplerate] || 1,
data: %{
event_listener: inspect(metadata[:producer]),
hostname: metadata[:hostname],
interval: metadata[:interval],
drift: metadata[:drift],
previous_local_event_number: previous_local_event_number,
local_event_number: metadata[:current_position],
latest_event_number: metadata[:head_position],
listener_is_alive: metadata[:alive?],
listener_has_moved:
metadata[:current_position] != metadata[:prior_position],
delta: metadata[:head_position] - metadata[:current_position],
durationMs:
System.convert_time_unit(
measurement.duration,
:native,
:microsecond
) / 1_000
}
}
@sender.send_batch([event])
state
end
end
|
lib/beeline/honeycomb.ex
| 0.892852 | 0.799011 |
honeycomb.ex
|
starcoder
|
defmodule Exq.Redis.Connection do
@moduledoc """
The Connection module encapsulates interaction with a live Redis connection or pool.
"""
require Logger
alias Exq.Support.Config
def flushdb!(redis) do
{:ok, res} = q(redis, ["flushdb"])
res
end
def decr!(redis, key) do
{:ok, count} = q(redis, ["DECR", key])
count
end
def incr!(redis, key) do
{:ok, count} = q(redis, ["INCR", key])
count
end
def get!(redis, key) do
{:ok, val} = q(redis, ["GET", key])
val
end
def set!(redis, key, val \\ 0) do
q(redis, ["SET", key, val])
end
def del!(redis, key) do
q(redis, ["DEL", key])
end
def expire!(redis, key, time \\ 10) do
q(redis, ["EXPIRE", key, time])
end
def llen!(redis, list) do
{:ok, len} = q(redis, ["LLEN", list])
len
end
def keys!(redis, search \\ "*") do
{:ok, keys} = q(redis, ["KEYS", search])
keys
end
def scan!(redis, cursor, search, count) do
{:ok, keys} = q(redis, ["SCAN", cursor, "MATCH", search, "COUNT", count])
keys
end
def scard!(redis, set) do
{:ok, count} = q(redis, ["SCARD", set])
count
end
def smembers!(redis, set) do
{:ok, members} = q(redis, ["SMEMBERS", set])
members
end
def sadd!(redis, set, member) do
{:ok, res} = q(redis, ["SADD", set, member])
res
end
def srem!(redis, set, member) do
{:ok, res} = q(redis, ["SREM", set, member])
res
end
def sismember!(redis, set, member) do
{:ok, res} = q(redis, ["SISMEMBER", set, member])
res
end
def lrange!(redis, list, range_start \\ "0", range_end \\ "-1") do
{:ok, items} = q(redis, ["LRANGE", list, range_start, range_end])
items
end
def lrem!(redis, list, value, count \\ 1) do
{:ok, res} = q(redis, ["LREM", list, count, value])
res
end
def rpush!(redis, key, value) do
{:ok, res} = q(redis, ["RPUSH", key, value])
res
end
def lpush!(redis, key, value) do
{:ok, res} = q(redis, ["LPUSH", key, value])
res
end
def lpop(redis, key) do
q(redis, ["LPOP", key])
end
def rpoplpush(redis, key, backup) do
q(redis, ["RPOPLPUSH", key, backup])
end
def zadd(redis, set, score, member) do
q(redis, ["ZADD", set, score, member])
end
def zadd!(redis, set, score, member) do
{:ok, res} = q(redis, ["ZADD", set, score, member])
res
end
def zcard!(redis, set) do
{:ok, count} = q(redis, ["ZCARD", set])
count
end
def zcount!(redis, set, min \\ "-inf", max \\ "+inf") do
{:ok, count} = q(redis, ["ZCOUNT", set, min, max])
count
end
def zrangebyscore!(redis, set, min \\ "0", max \\ "+inf") do
{:ok, items} = q(redis, ["ZRANGEBYSCORE", set, min, max])
items
end
def zrangebyscore(redis, set, min \\ "0", max \\ "+inf") do
q(redis, ["ZRANGEBYSCORE", set, min, max])
end
def zrangebyscorewithscore!(redis, set, min \\ "0", max \\ "+inf") do
{:ok, items} = q(redis, ["ZRANGEBYSCORE", set, min, max, "WITHSCORES"])
items
end
def zrange!(redis, set, range_start \\ "0", range_end \\ "-1") do
{:ok, items} = q(redis, ["ZRANGE", set, range_start, range_end])
items
end
def zrem!(redis, set, member) do
{:ok, res} = q(redis, ["ZREM", set, member])
res
end
def zrem(redis, set, member) do
q(redis, ["ZREM", set, member])
end
def q(redis, command) do
Redix.command(redis, command, timeout: Config.get(:redis_timeout))
end
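# Usage sketch (assumes `redis` is a Redix connection started elsewhere, e.g. via
# `Redix.start_link/1`; the bang helpers above simply unwrap the `{:ok, value}` tuples):
#
#   {:ok, conn} = Redix.start_link()
#   Connection.set!(conn, "exq:stat:processed", 0)
#   Connection.incr!(conn, "exq:stat:processed")
#   #=> 1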
def qp(redis, command) do
Redix.pipeline(redis, command, timeout: Config.get(:redis_timeout))
end
def qp!(redis, command) do
Redix.pipeline!(redis, command, timeout: Config.get(:redis_timeout))
end
end
|
lib/exq/redis/connection.ex
| 0.709824 | 0.614076 |
connection.ex
|
starcoder
|
defmodule Data.Constructor do
alias Data.Parser.KV
alias FE.Result
@doc """
Define and run a smart constructor on a Key-Value input, returning either
well-defined `structs` or descriptive errors. The motto: parse, don't validate!
Given a list of `Data.Parser.KV.field_spec/2`s, a `module`, and an input
`map` or `Keyword`, create and run a parser which will either parse
successfully and return an `{:ok, %__MODULE__{}}` struct, or fail and return
an `{:error, Error.t}` with details about the parsing failure.
## Examples
iex> defmodule SensorReading do
...> defstruct [:sensor_id, :microfrobs, :datetime]
...> def new(input) do
...> Data.Constructor.struct([
...> {:sensor_id, Data.Parser.BuiltIn.string()},
...> {:microfrobs, Data.Parser.BuiltIn.integer()},
...> {:datetime, Data.Parser.BuiltIn.datetime()}],
...> __MODULE__,
...> input)
...> end
...> end
...>
...> {:ok, reading} = SensorReading.new(sensor_id: "1234-1234-1234",
...> microfrobs: 23,
...> datetime: ~U[2018-12-20 12:00:00Z])
...>
...> reading.datetime
~U[2018-12-20 12:00:00Z]
...>
...> reading.microfrobs
23
...> reading.sensor_id
"1234-1234-1234"
...> {:error, e} = SensorReading.new(%{"sensor_id" => nil,
...> "microfrobs" => 23,
...> "datetime" => "2018-12-20 12:00:00Z"})
...> Error.reason(e)
:failed_to_parse_field
...> Error.details(e)
%{field: :sensor_id,
input: %{"datetime" => "2018-12-20 12:00:00Z",
"microfrobs" => 23,
"sensor_id" => nil}}
...> {:just, inner_error} = Error.caused_by(e)
...> Error.reason(inner_error)
:not_a_string
"""
@spec struct([KV.field_spec(any, any)], module(), KV.input()) :: Result.t(struct, Error.t())
def struct(field_specs, struct_module, input) do
field_specs
|> KV.new()
|> Result.and_then(fn parser -> parser.(input) end)
|> Result.map(&struct(struct_module, &1))
end
end
|
lib/data/constructor.ex
| 0.789153 | 0.542136 |
constructor.ex
|
starcoder
|
defmodule Re.Listings.Queries do
@moduledoc """
Module for grouping listing queries
"""
alias Re.{
Images,
Listing
}
import Ecto.Query
@full_preload [
:address,
:listings_favorites,
:tags,
:interests,
images: Images.Queries.listing_preload()
]
@orderable_fields ~w(id price property_tax maintenance_fee rooms bathrooms restrooms area
garage_spots suites dependencies balconies updated_at price_per_area
inserted_at floor)a
def active(query \\ Listing), do: where(query, [l], l.status == "active")
def order_by(query, %{order_by: orders}) do
orders
|> Enum.reduce(query, &order_by_criterias/2)
|> order_by()
end
def order_by(query, _), do: order_by(query)
defp order_by_criterias(%{field: field, type: type}, query) when field in @orderable_fields do
order_by(query, [l], {^type, ^field})
end
defp order_by_criterias(_, query), do: query
def order_by(query \\ Listing) do
query
|> order_by([l], desc_nulls_last: l.liquidity_ratio)
end
def order_by_id(query \\ Listing), do: order_by(query, [l], asc: l.id)
def preload_relations(query \\ Listing, relations \\ @full_preload)
def preload_relations(query, relations), do: preload(query, ^relations)
def randomize_within_score(%{entries: entries} = result) do
randomized_entries =
entries
|> Enum.chunk_by(& &1.score)
|> Enum.map(&Enum.shuffle/1)
|> List.flatten()
%{result | entries: randomized_entries}
end
def excluding(query, %{"excluded_listing_ids" => excluded_listing_ids}),
do: from(l in query, where: l.id not in ^excluded_listing_ids)
def excluding(query, %{excluded_listing_ids: excluded_listing_ids}),
do: from(l in query, where: l.id not in ^excluded_listing_ids)
def excluding(query, _), do: query
def max_id(query), do: from(l in query, select: max(l.id))
def limit(query, %{"page_size" => page_size}), do: from(l in query, limit: ^page_size)
def limit(query, %{page_size: page_size}), do: from(l in query, limit: ^page_size)
def limit(query, _), do: query
def offset(query, %{"offset" => offset}), do: from(l in query, offset: ^offset)
def offset(query, %{offset: offset}), do: from(l in query, offset: ^offset)
def offset(query, _), do: query
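# Composition sketch (assumes an Ecto repo such as `Re.Repo`; values illustrative):
#
#   Listing
#   |> active()
#   |> order_by(%{order_by: [%{field: :price, type: :asc}]})
#   |> limit(%{page_size: 10})
#   |> offset(%{offset: 0})
#   |> preload_relations()
#   |> Re.Repo.all()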
def remaining_count(query) do
query
|> exclude(:preload)
|> exclude(:order_by)
|> exclude(:limit)
|> exclude(:distinct)
|> exclude(:group_by)
|> count()
end
def count(query \\ Listing), do: from(l in query, select: count(l.id, :distinct))
def per_development(query \\ Listing, development_uuid),
do: from(l in query, where: l.development_uuid == ^development_uuid)
def per_user(query \\ Listing, user_id), do: from(l in query, where: l.user_id == ^user_id)
def by_city(query, listing) do
from(
l in query,
join: a in assoc(l, :address),
where: ^listing.address.city == a.city
)
end
def average_price_per_area_by_neighborhood() do
active()
|> join(:inner, [l], a in assoc(l, :address))
|> select(
[l, a],
%{
neighborhood_slug: a.neighborhood_slug,
average_price_per_area: fragment("avg(?/?)::float", l.price, l.area)
}
)
|> group_by([l, a], a.neighborhood_slug)
end
@doc """
To be able to keep the uuids order: https://stackoverflow.com/questions/866465/order-by-the-in-value-list
"""
def with_uuids(query, uuids) do
uuids_formatted = Enum.map(uuids, &(&1 |> Ecto.UUID.dump() |> elem(1)))
from(
l in query,
where: l.uuid in ^uuids,
order_by: fragment("array_position(?::uuid[], ?::uuid)", ^uuids_formatted, l.uuid)
)
end
end
|
apps/re/lib/listings/queries/queries.ex
| 0.728748 | 0.546254 |
queries.ex
|
starcoder
|
require Logger
defmodule IntercomX.Company do
use IntercomX.Client
@doc """
Create / update an Company
## Parameters
* `remote_created_at`, `timestamp` - The time the company was created by you
* `company_id`, `String` - (Required) The company id you have defined for the company
* `name`, `String` - (Required) The name of the company
* `monthly_spend`, `Integer` - How much revenue the company generates for your business. Note that this will truncate floats, i.e. it only allows whole integers; 155.98 will be truncated to 155. Note that this has an upper limit of 2**31-1 or 2147483647.
* `plan`, `String` - The name of the plan you have associated with the company
* `size`, `Integer` - The number of employees in this company
* `website`, `String` - The URL for this company's website. Please note that the value specified here is not validated. Accepts any string.
* `industry`, `String` - The industry that this company operates in
* `custom_attributes`, `Object` - A hash of key/value pairs containing any other data about the company you want Intercom to store.
## Example
iex> IntercomX.Company.create(%{:name => "Elixir Inc.", :company_id => "5"})
"""
def update(params) when is_map(params) do
create(params)
end
def create(params) when is_map(params) do
with {:ok, res} <- post("/companies", params) do
{:ok, res.body}
else
{:error, reason} ->
{:error, reason}
end
end
@doc """
List companies
## Parameters
* `page`, `Integer` - What page of results to fetch, defaults to first page.
* `per_page`, `Integer` - How many results per page, defaults to 50.
* `order`, `String` - `asc` or `desc` Return the companies in ascending or descending order, defaults to desc.
## Example
iex> IntercomX.Company.list(%{page: 2})
"""
def list(params \\ %{}) do
with {:ok, res} <- get("/companies", params) do
{:ok, res.body}
else
{:error, reason} ->
{:error, reason}
end
end
@doc """
Find a Company by company_id or name
## Default
* `id`, `String` -> Value of the company's `id` field
## With parameters
* `company_id`, `String` - The company id you have given to the company
* `name`, `String` - The name of the company
## Example
iex> IntercomX.Company.find("234234sdf") // by id
iex> IntercomX.Company.find(%{company_id: "6"}) // by params
"""
def find(id) when is_bitstring(id) do
with {:ok, res} <- get("/companies/" <> id) do
{:ok, res.body}
else
{:error, reason} ->
{:error, reason}
end
end
def find(params) when is_map(params) do
queryString =
params
|> Map.keys()
|> Enum.reduce("?", fn opt, qs -> qs <> "#{opt}=#{params[opt]}&" end)
|> String.slice(0..-2)
with {:ok, res} <- get("/companies" <> queryString) do
{:ok, res.body}
else
{:error, reason} ->
{:error, reason}
end
end
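# For example, `find(%{company_id: "6"})` issues a GET to "/companies?company_id=6".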
@doc """
List Company users
## Parameters
* `company_id`, `String` - (Required) Your company id, example: "5da99211909121c6fff7aacd"
* `type`, `???` - (Required) The value must be user
## Example
iex> IntercomX.Company.listUsers(%{:company_id => "5da99211909121c6fff7aacd", :type => "user"})
"""
def listUsers(params) when is_map(params) do
with {:ok, res} <- get("/companies/" <> params[:company_id] <> "/users", %{type: params[:type]}) do
{:ok, res.body}
else
{:error, reason} ->
{:error, reason}
end
end
def process_response_body(body) do
Poison.decode(body, keys: :atoms)
end
end
|
lib/intercomx/resources/company.ex
| 0.841956 | 0.423577 |
company.ex
|
starcoder
|
defmodule Que.Worker do
@moduledoc """
Defines a Worker for processing Jobs.
The defined worker is responsible for processing passed jobs, and
handling the job's success and failure callbacks. The defined
worker must export a `perform/1` callback otherwise compilation
will fail.
## Basic Worker
```
defmodule MyApp.Workers.SignupMailer do
use Que.Worker
def perform(email) do
Mailer.send_email(to: email, message: "Thank you for signing up!")
end
end
```
You can also pattern match and use guard clauses like normal methods:
```
defmodule MyApp.Workers.NotificationSender do
use Que.Worker
def perform(type: :like, to: user, count: count) do
User.notify(user, "You have \#{count} new likes on your posts")
end
def perform(type: :message, to: user, from: sender) do
User.notify(user, "You received a new message from \#{sender.name}")
end
def perform(to: user) do
User.notify(user, "New activity on your profile")
end
end
```
## Concurrency
By default, workers process one Job at a time. You can specify a custom
value by passing the `concurrency` option.
```
defmodule MyApp.Workers.PageScraper do
use Que.Worker, concurrency: 4
def perform(url), do: Scraper.scrape(url)
end
```
If you want all Jobs to be processed concurrently without any limit,
you can set the concurrency option to `:infinity`. The concurrency
option must either be a positive integer or `:infinity`, otherwise
it will raise an error during compilation.
## Handle Job Success & Failure
The worker can also export optional `on_success/1` and `on_failure/2`
callbacks that handle appropriate cases.
```
defmodule MyApp.Workers.CampaignMailer do
use Que.Worker
def perform({campaign, user}) do
Mailer.send_campaign_email(campaign, user: user)
end
def on_success({campaign, user}) do
CampaignReport.compile(campaign, status: :success, user: user)
end
def on_failure({campaign, user}, error) do
CampaignReport.compile(campaign, status: :failed, user: user)
Logger.debug("Campaign email to \#{user.id} failed: \#{inspect(error)}")
end
end
```
## Failed Job Retries
Failed Jobs are NOT automatically retried. If you want a job to be
retried when it fails, you can simply enqueue it again.
To get a list of all failed jobs, you can call `Que.Persistence.failed/0`.
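A minimal retry sketch along those lines (assuming the worker simply re-enqueues
its own arguments with `Que.add/2`):
```
defmodule MyApp.Workers.FlakyMailer do
  use Que.Worker

  def perform(email), do: Mailer.send_email(to: email)

  def on_failure(email, _error) do
    # Naive retry: put the job straight back on the queue
    Que.add(__MODULE__, email)
  end
end
```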
"""
@typedoc "A valid worker module"
@type t :: module
@doc """
Checks if the specified module is a valid Que Worker
## Example
```
defmodule MyWorker do
use Que.Worker
def perform(_args), do: nil
end
Que.Worker.valid?(MyWorker)
# => true
Que.Worker.valid?(SomeOtherModule)
# => false
```
"""
@spec valid?(module :: module) :: boolean
def valid?(module) do
try do
module.__que_worker__
rescue
UndefinedFunctionError -> false
end
end
@doc """
Raises an error if the passed module is not a valid `Que.Worker`
"""
@spec validate!(module :: module) :: :ok | no_return
def validate!(module) do
if Que.Worker.valid?(module) do
:ok
else
raise Que.Error.InvalidWorker, "#{ExUtils.Module.name(module)} is an Invalid Worker"
end
end
@doc false
defmacro __using__(opts \\ []) do
quote bind_quoted: [opts: opts] do
@after_compile __MODULE__
@concurrency opts[:concurrency] || 1
def concurrency, do: @concurrency
def __que_worker__, do: true
## Default no-op implementations of the optional callbacks
def on_success(_arg) do
end
def on_failure(_arg, _err) do
end
def on_setup(_job) do
end
def on_teardown(_job) do
end
defoverridable [on_success: 1, on_failure: 2, on_setup: 1, on_teardown: 1]
# Make sure the Worker is valid
def __after_compile__(_env, _bytecode) do
# Raises error if the Worker doesn't export a perform/1 method
unless Module.defines?(__MODULE__, {:perform, 1}) do
raise Que.Error.InvalidWorker,
"#{ExUtils.Module.name(__MODULE__)} must export a perform/1 method"
end
# Raise error if the concurrency option is invalid
unless @concurrency == :infinity or (is_integer(@concurrency) and @concurrency > 0) do
raise Que.Error.InvalidWorker,
"#{ExUtils.Module.name(__MODULE__)} has an invalid concurrency value"
end
end
end
end
@doc """
Main callback that processes the Job.
This is a required callback that must be implemented by the worker.
If the worker doesn't export `perform/1` method, compilation will
fail. It takes one argument which is whatever that's passed to
`Que.add`.
You can define it like any other method, use guard clauses and also
use pattern matching with multiple method definitions.
"""
@callback perform(arguments :: term) :: term
@doc """
Optional callback that is executed when the job is processed
successfully.
"""
@callback on_success(arguments :: term) :: term
@doc """
Optional callback that is executed if an error is raised during
job is processed (in `perform` callback)
"""
@callback on_failure(arguments :: term, error :: tuple) :: term
@doc """
Optional callback that is executed before the job is started.
"""
@callback on_setup(job :: term) :: term
@doc """
Optional callback that is executed after the job finishes,
both on success and failure.
"""
@callback on_teardown(job :: term) :: term
end
|
lib/que/worker.ex
| 0.76454 | 0.761538 |
worker.ex
|
starcoder
|
defmodule PollutionDataStream do
@moduledoc """
Loading data from file to the pollution server using Stream.
"""
@doc """
Gets lines from CSV file as stream.
Returns stream.
"""
def import_lines_from_CSV do
File.stream!("pollution.csv")
end
@doc """
Makes map containing information about 1 measurement from given `line`.
Returns map with three items: `:datetime`, `:location`, `:pollution_level`.
"""
def parse_line(line) do
[date_str, time_str, x_str, y_str, value_str] = String.split(line, ",")
date = date_str
|> String.split("-")
|> Enum.reverse()
|> Enum.map(&Integer.parse/1)
|> Enum.map(&(elem(&1, 0)))
|> List.to_tuple()
time = time_str
|> String.split(":")
|> Enum.map(&Integer.parse/1)
|> Enum.map(&(elem(&1, 0)))
|> List.to_tuple()
datetime = {date, time}
location = [x_str, y_str]
|> Enum.map(&Float.parse/1)
|> Enum.map(&(elem(&1, 0)))
|> List.to_tuple()
pollution_level = elem(Integer.parse(value_str), 0)
%{:datetime => datetime, :location => location, :pollution_level => pollution_level}
end
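# Example sketch (assuming a CSV line of the form "DD-MM-YYYY,HH:MM,x,y,value"):
#
#   parse_line("01-05-2024,10:30,20.06,49.986,59")
#   #=> %{datetime: {{2024, 5, 1}, {10, 30}},
#   #     location: {20.06, 49.986},
#   #     pollution_level: 59}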
@doc """
Extracts all unique stations from `stream`.
Returns stream of tuples {x, y} where x and y are coordinates of a station.
"""
def get_stations(stream) do
stations_coords = stream
|> Stream.flat_map(&(String.split(&1, ",")))
|> Stream.map(&Float.parse/1)
|> Stream.filter(&(elem(&1, 1) == ""))
|> Stream.map(fn {x, _} -> x end)
first_coordinate = Stream.take_every(stations_coords, 2)
second_coordinate = Stream.drop_every(stations_coords, 2)
stations = Stream.zip(first_coordinate, second_coordinate)
|> Stream.uniq()
stations
end
@doc """
Makes name of station from given `station_location`.
Returns string which is a name of the station.
"""
def generate_station_name(station_location) do
"station_#{elem(station_location, 0)}_#{elem(station_location, 1)}"
end
@doc """
Adds given `stations` (stream of tuples representing locations) to the pollution server.
"""
def add_stations(stations) do
add_station_fn = fn station -> :pollution_gen_server.addStation(generate_station_name(station), station) end
Enum.each(stations, add_station_fn)
end
@doc """
Adds given `measurements` (stream of maps) to the pollution server.
"""
def add_measurements(measurements) do
add_measurement_fn = fn measurement ->
:pollution_gen_server.addValue(measurement.location, measurement.datetime, "PM10", measurement.pollution_level)
end
Enum.each(measurements, add_measurement_fn)
end
@doc """
Main function which gets stream from the file and saves all measurements to the pollution server.
Function prints time needed to load stations and measurements to the pollution server.
"""
def add_measurements_from_file do
stream = import_lines_from_CSV()
stations = get_stations stream
measurements = Stream.map(stream, &parse_line/1)
:pollution_sup.start_link()
add_stations_time = fn -> add_stations(stations) end
|> :timer.tc
|> elem(0)
|> Kernel./(1_000_000)
add_measurements_time = fn -> add_measurements(measurements) end
|> :timer.tc
|> elem(0)
|> Kernel./(1_000_000)
:timer.sleep(200)
IO.puts "Time of adding stations: #{add_stations_time}"
IO.puts "Time of adding measurements: #{add_measurements_time}"
end
end
|
src/pollution_data_stream.ex
| 0.890862 | 0.60542 |
pollution_data_stream.ex
|
starcoder
|
defmodule Braintree.Search do
@moduledoc """
This module performs advanced search on a resource.
For additional reference see:
https://developers.braintreepayments.com/reference/general/searching/search-fields/ruby
"""
alias Braintree.HTTP
alias Braintree.ErrorResponse, as: Error
@doc """
Perform an advanced search on a given resource and create new structs
based on the initializer given.
## Example
search_params = %{first_name: %{is: "Jenna"}}
{:ok, customers} = Braintree.Search.perform(search_params, "customers", &Braintree.Customer.new/1)
"""
@spec perform(map, String.t(), fun(), Keyword.t()) :: {:ok, [any]} | {:error, Error.t()}
def perform(params, resource, initializer, opts \\ []) when is_map(params) do
with {:ok, payload} <- HTTP.post(resource <> "/advanced_search_ids", %{search: params}, opts) do
fetch_all_records(payload, resource, initializer, opts)
end
end
defp fetch_all_records(%{"search_results" => %{"ids" => []}}, _resource, _initializer, _opts) do
{:error, :not_found}
end
defp fetch_all_records(
%{"search_results" => %{"page_size" => page_size, "ids" => ids}},
resource,
initializer,
opts
) do
records =
ids
|> Enum.chunk_every(page_size)
|> Enum.flat_map(fn ids_chunk ->
fetch_records_chunk(ids_chunk, resource, initializer, opts)
end)
{:ok, records}
end
# Credit card verification is an odd case because path to endpoints is
# different from the object name in the XML.
defp fetch_records_chunk(ids, "verifications", initializer, opts) when is_list(ids) do
search_params = %{search: %{ids: ids}}
with {:ok, %{"credit_card_verifications" => data}} <-
HTTP.post("verifications/advanced_search", search_params, opts) do
initializer.(data)
end
end
defp fetch_records_chunk(ids, resource, initializer, opts) when is_list(ids) do
search_params = %{search: %{ids: ids}}
with {:ok, %{^resource => data}} <-
HTTP.post(resource <> "/advanced_search", search_params, opts) do
initializer.(data)
end
end
end
|
lib/search.ex
| 0.87596 | 0.436022 |
search.ex
|
starcoder
|
defmodule ETH.Transaction.Parser do
import ETH.Utils
@moduledoc """
This module converts the input to a transaction map encoded with ethereum hex encodings.
It can also convert the input to a transaction list if needed.
"""
def parse("0x" <> encoded_transaction_rlp) do
[nonce, gas_price, gas_limit, to, value, data, v, r, s] =
encoded_transaction_rlp
|> Base.decode16!(case: :mixed)
|> ExRLP.decode()
%{
nonce: nonce,
gas_price: gas_price,
gas_limit: gas_limit,
to: to,
value: value,
data: data,
v: v,
r: r,
s: s
}
end
def parse(<<transaction_rlp::binary>>) do
[nonce, gas_price, gas_limit, to, value, data, v, r, s] =
transaction_rlp
|> ExRLP.decode()
%{
nonce: nonce,
gas_price: gas_price,
gas_limit: gas_limit,
to: to,
value: value,
data: data,
v: v,
r: r,
s: s
}
end
def parse(_transaction_list = [nonce, gas_price, gas_limit, to, value, data]) do
%{
nonce: to_buffer(nonce),
gas_price: to_buffer(gas_price),
gas_limit: to_buffer(gas_limit),
to: to_buffer(to),
value: to_buffer(value),
data: to_buffer(data)
}
end
def parse(_transaction_list = [nonce, gas_price, gas_limit, to, value, data, v, r, s]) do
%{
nonce: to_buffer(nonce),
gas_price: to_buffer(gas_price),
gas_limit: to_buffer(gas_limit),
to: to_buffer(to),
value: to_buffer(value),
data: to_buffer(data),
v: to_buffer(v),
r: to_buffer(r),
s: to_buffer(s)
}
end
def parse(
_transaction = %{
nonce: nonce,
gas_price: gas_price,
gas_limit: gas_limit,
to: to,
value: value,
data: data,
v: v,
r: r,
s: s
}
) do
%{
nonce: to_buffer(nonce),
gas_price: to_buffer(gas_price),
gas_limit: to_buffer(gas_limit),
to: to_buffer(to),
value: to_buffer(value),
data: to_buffer(data),
v: to_buffer(v),
r: to_buffer(r),
s: to_buffer(s)
}
end
def parse(
_transaction = %{
nonce: nonce,
gas_price: gas_price,
gas_limit: gas_limit,
to: to,
value: value,
data: data,
chain_id: chain_id
}
) do
%{
nonce: to_buffer(nonce),
gas_price: to_buffer(gas_price),
gas_limit: to_buffer(gas_limit),
to: to_buffer(to),
value: to_buffer(value),
data: to_buffer(data),
chain_id: chain_id
}
end
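# Usage sketch (field values illustrative; `to_buffer/1` from ETH.Utils normalises
# integers and "0x"-prefixed strings into binaries, while `chain_id` is kept as given;
# `recipient` stands for a 20-byte address binary):
#
#   Parser.parse(%{nonce: 0, gas_price: 20_000_000_000, gas_limit: 21_000,
#                  to: recipient, value: 1_000, data: "", chain_id: 3})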
def to_list("0x" <> encoded_transaction_rlp) do
encoded_transaction_rlp |> Base.decode16!(case: :mixed) |> ExRLP.decode()
end
def to_list(<<transaction_rlp::binary>>), do: transaction_rlp |> ExRLP.decode()
def to_list(
_transaction = %{
nonce: nonce,
gas_price: gas_price,
gas_limit: gas_limit,
to: to,
value: value,
data: data,
v: v,
r: r,
s: s,
chain_id: chain_id
}
) do
[nonce, gas_price, gas_limit, to, value, data, v, r, s, chain_id]
|> Enum.map(fn value -> to_buffer(value) end)
end
def to_list(
transaction = %{
nonce: nonce,
gas_price: gas_price,
gas_limit: gas_limit,
value: value,
data: data,
chain_id: chain_id
}
) do
to = Map.get(transaction, :to, "")
v = Map.get(transaction, :v, <<28>>)
r = Map.get(transaction, :r, "")
s = Map.get(transaction, :s, "")
[nonce, gas_price, gas_limit, to, value, data, v, r, s, chain_id]
|> Enum.map(fn value -> to_buffer(value) end)
end
end
|
lib/eth/transaction/parser.ex
| 0.507568 | 0.531331 |
parser.ex
|
starcoder
|
defmodule StarkInfra.CreditNote.Invoice do
alias __MODULE__, as: Invoice
alias StarkInfra.Utils.Check
alias StarkInfra.Utils.API
@moduledoc """
Groups Invoice related functions
"""
@doc """
When you initialize an Invoice struct, the entity will not be automatically
sent to the Stark Infra API. The 'create' function sends the structs
to the Stark Infra API and returns the list of created structs.
To create scheduled Invoices, which will display the discount, interest, etc. on the final users banking interface,
use dates instead of datetimes on the "due" and "discounts" fields.
## Parameters (required):
- `:amount` [integer]: Invoice value in cents. Minimum = 0 (any value will be accepted). ex: 1234 (= R$ 12.34)
- `:due` [DateTime, Date or string, default now + 2 days]: Invoice due date in UTC ISO format. ex: ~U[2021-03-26 19:32:35.418698Z] for immediate invoices and ~D[2020-10-28] for scheduled invoices
## Parameters (optional):
- `:expiration` [integer, default 59 days]: time interval in seconds between due date and expiration date. ex 123456789
- `:fine` [float, default 0.0]: Invoice fine for overdue payment in %. ex: 2.5
- `:interest` [float, default 0.0]: Invoice monthly interest for overdue payment in %. ex: 5.2
- `:tags` [list of strings, default nil]: list of strings for tagging
- `:descriptions` [list of Descriptions or maps, default nil]: list of Descriptions or maps with "key":string and (optional) "value":string pairs
## Attributes (return-only):
- `:id` [string, default nil]: unique id returned when Invoice is created. ex: "5656565656565656"
- `:tax_id` [string]: payer tax ID (CPF or CNPJ) with or without formatting. ex: "01234567890" or "20.018.183/0001-80"
- `:name` [string]: payer name. ex: "<NAME>."
- `:pdf` [string]: public Invoice PDF URL. ex: "https://invoice.starkbank.com/pdf/d454fa4e524441c1b0c1a729457ed9d8"
- `:link` [string]: public Invoice webpage URL. ex: "https://my-workspace.sandbox.starkbank.com/invoicelink/d454fa4e524441c1b0c1a729457ed9d8"
- `:nominal_amount` [integer]: Invoice emission value in cents (will change if invoice is updated, but not if it's paid). ex: 400000
- `:fine_amount` [integer]: Invoice fine value calculated over nominal_amount. ex: 20000
- `:interest_amount` [integer]: Invoice interest value calculated over nominal_amount. ex: 10000
- `:discount_amount` [integer]: Invoice discount value calculated over nominal_amount. ex: 3000
- `:discounts` [list of Discounts or maps, default nil]: list of Discounts or maps with "percentage":float and "due":string pairs
- `:brcode` [string]: BR Code for the Invoice payment. ex: "00020101021226800014br.gov.bcb.pix2558invoice.starkbank.com/f5333103-3279-4db2-8389-5efe335ba93d5204000053039865802BR5913Arya Stark6009Sao Paulo6220051656565656565656566304A9A0"
- `:status` [string]: current Invoice status. ex: "registered" or "paid"
- `:fee` [integer]: fee charged by this Invoice. ex: 200 (= R$ 2.00)
- `:transaction_ids` [list of strings]: ledger transaction ids linked to this Invoice (if there are more than one, all but the first are reversals or failed reversal chargebacks). ex: ["19827356981273"]
- `:created` [DateTime]: creation DateTime for the CreditNote. ex: ~U[2020-3-10 10:30:0:0]
- `:updated` [DateTime]: latest update DateTime for the CreditNote. ex: ~U[2020-3-10 10:30:0:0]
"""
@enforce_keys [
:amount,
:due
]
defstruct [
:id,
:amount,
:due,
:expiration,
:fine,
:interest,
:tags,
:descriptions,
:tax_id,
:name,
:pdf,
:link,
:nominal_amount,
:fine_amount,
:interest_amount,
:discount_amount,
:discounts,
:brcode,
:status,
:fee,
:transaction_ids,
:created,
:updated
]
@type t() :: %__MODULE__{}
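# Construction sketch (only :amount and :due are enforced; values follow the
# examples given in the moduledoc above):
#
#   %Invoice{amount: 1234, due: ~D[2020-10-28]}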
@doc false
def resource_maker(json) do
%Invoice{
id: json[:id],
amount: json[:amount],
due: json[:due] |> Check.date_or_datetime(),
expiration: json[:expiration],
fine: json[:fine],
interest: json[:interest],
tags: json[:tags],
descriptions: json[:descriptions] |> Enum.map(fn descriptions -> API.from_api_json(descriptions, &Invoice.Description.resource_maker/1) end),
tax_id: json[:tax_id],
name: json[:name],
pdf: json[:pdf],
link: json[:link],
nominal_amount: json[:nominal_amount],
fine_amount: json[:fine_amount],
interest_amount: json[:interest_amount],
discount_amount: json[:discount_amount],
discounts: json[:discounts] |> Enum.map(fn discounts -> API.from_api_json(discounts, &Invoice.Discount.resource_maker/1) end),
brcode: json[:brcode],
status: json[:status],
fee: json[:fee],
transaction_ids: json[:transaction_ids],
created: json[:created] |> Check.datetime(),
updated: json[:updated] |> Check.datetime()
}
end
end
|
lib/credit_note/invoice/invoice.ex
| 0.822831 | 0.708339 |
invoice.ex
|
starcoder
|
defmodule AWS.CloudTrail do
@moduledoc """
AWS CloudTrail
This is the CloudTrail API Reference.
It provides descriptions of actions, data types, common parameters, and common
errors for CloudTrail.
CloudTrail is a web service that records AWS API calls for your AWS account and
delivers log files to an Amazon S3 bucket. The recorded information includes the
identity of the user, the start time of the AWS API call, the source IP address,
the request parameters, and the response elements returned by the service.
As an alternative to the API, you can use one of the AWS SDKs, which consist of
libraries and sample code for various programming languages and platforms (Java,
Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient way to create
programmatic access to AWSCloudTrail. For example, the SDKs take care of
cryptographically signing requests, managing errors, and retrying requests
automatically. For information about the AWS SDKs, including how to download and
install them, see the [Tools for Amazon Web Services page](http://aws.amazon.com/tools/).
See the [AWS CloudTrail User Guide](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-user-guide.html)
for information about the data that is included with each AWS API call listed in
the log files.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: "CloudTrail",
api_version: "2013-11-01",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "cloudtrail",
global?: false,
protocol: "json",
service_id: "CloudTrail",
signature_version: "v4",
signing_name: "cloudtrail",
target_prefix: "com.amazonaws.cloudtrail.v20131101.CloudTrail_20131101"
}
end
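# Usage sketch (the client setup shown is typical for aws-elixir, but check the
# version in use; credentials and region are illustrative):
#
#   client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")
#   AWS.CloudTrail.list_trails(client, %{})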
@doc """
Adds one or more tags to a trail, up to a limit of 50.
Overwrites an existing tag's value when a new value is specified for an existing
tag key. Tag key names must be unique for a trail; you cannot have two keys with
the same name but different values. If you specify a key without a value, the
tag will be created with the specified key and a value of null. You can tag a
trail that applies to all AWS Regions only from the Region in which the trail
was created (also known as its home region).
"""
def add_tags(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AddTags", input, options)
end
@doc """
Creates a trail that specifies the settings for delivery of log data to an
Amazon S3 bucket.
"""
def create_trail(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateTrail", input, options)
end
@doc """
Deletes a trail.
This operation must be called from the region in which the trail was created.
`DeleteTrail` cannot be called on the shadow trails (replicated trails in other
regions) of a trail that is enabled in all regions.
"""
def delete_trail(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteTrail", input, options)
end
@doc """
Retrieves settings for one or more trails associated with the current region for
your account.
"""
def describe_trails(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeTrails", input, options)
end
@doc """
Describes the settings for the event selectors that you configured for your
trail.
The information returned for your event selectors includes the following:
* If your event selector includes read-only events, write-only
events, or all events. This applies to both management events and data events.
* If your event selector includes management events.
* If your event selector includes data events, the Amazon S3 objects
or AWS Lambda functions that you are logging for data events.
For more information, see [Logging Data and Management Events for Trails
](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html)
in the *AWS CloudTrail User Guide*.
"""
def get_event_selectors(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetEventSelectors", input, options)
end
@doc """
Describes the settings for the Insights event selectors that you configured for
your trail.
`GetInsightSelectors` shows if CloudTrail Insights event logging is enabled on
the trail, and if it is, which insight types are enabled. If you run
`GetInsightSelectors` on a trail that does not have Insights events enabled, the
operation throws the exception `InsightNotEnabledException`
For more information, see [Logging CloudTrail Insights Events for Trails
](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-insights-events-with-cloudtrail.html)
in the *AWS CloudTrail User Guide*.
"""
def get_insight_selectors(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetInsightSelectors", input, options)
end
@doc """
Returns settings information for a specified trail.
"""
def get_trail(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetTrail", input, options)
end
@doc """
Returns a JSON-formatted list of information about the specified trail.
Fields include information on delivery errors, Amazon SNS and Amazon S3 errors,
and start and stop logging times for each trail. This operation returns trail
status from a single region. To return trail status from all regions, you must
call the operation on each region.
"""
def get_trail_status(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetTrailStatus", input, options)
end
@doc """
Returns all public keys whose private keys were used to sign the digest files
within the specified time range.
The public key is needed to validate digest files that were signed with its
corresponding private key.
CloudTrail uses different private/public key pairs per region. Each digest file
is signed with a private key unique to its region. Therefore, when you validate
a digest file from a particular region, you must look in the same region for its
corresponding public key.
"""
def list_public_keys(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListPublicKeys", input, options)
end
@doc """
Lists the tags for the trail in the current region.
"""
def list_tags(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTags", input, options)
end
@doc """
Lists trails that are in the current account.
"""
def list_trails(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTrails", input, options)
end
@doc """
Looks up [management events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-management-events)
or [CloudTrail Insights events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-insights-events)
that are captured by CloudTrail.
You can look up events that occurred in a region within the last 90 days. Lookup
supports the following attributes for management events:
* AWS access key
* Event ID
* Event name
* Event source
* Read only
* Resource name
* Resource type
* User name
Lookup supports the following attributes for Insights events:
* Event ID
* Event name
* Event source
All attributes are optional. The default number of results returned is 50, with
a maximum of 50 possible. The response includes a token that you can use to get
the next page of results.
The rate of lookup requests is limited to two per second, per account, per
region. If this limit is exceeded, a throttling error occurs.
"""
def lookup_events(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "LookupEvents", input, options)
end
@doc """
Configures an event selector or advanced event selectors for your trail.
Use event selectors or advanced event selectors to specify management and data
event settings for your trail. By default, trails created without specific event
selectors are configured to log all read and write management events, and no
data events.
When an event occurs in your account, CloudTrail evaluates the event selectors
or advanced event selectors in all trails. For each trail, if the event matches
any event selector, the trail processes and logs the event. If the event doesn't
match any event selector, the trail doesn't log the event.
Example
1. You create an event selector for a trail and specify that you
want write-only events.
2. The EC2 `GetConsoleOutput` and `RunInstances` API operations
occur in your account.
3. CloudTrail evaluates whether the events match your event
selectors.
4. The `RunInstances` is a write-only event and it matches your
event selector. The trail logs the event.
5. The `GetConsoleOutput` is a read-only event that doesn't match
your event selector. The trail doesn't log the event.
The `PutEventSelectors` operation must be called from the region in which the
trail was created; otherwise, an `InvalidHomeRegionException` exception is
thrown.
You can configure up to five event selectors for each trail. For more
information, see [Logging data and management events for trails
](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html)
and [Quotas in AWS CloudTrail](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/WhatIsCloudTrail-Limits.html)
in the *AWS CloudTrail User Guide*.
You can add advanced event selectors, and conditions for your advanced event
selectors, up to a maximum of 500 values for all conditions and selectors on a
trail. You can use either `AdvancedEventSelectors` or `EventSelectors`, but not
both. If you apply `AdvancedEventSelectors` to a trail, any existing
`EventSelectors` are overwritten. For more information about advanced event
selectors, see [Logging data events for trails](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html)
in the *AWS CloudTrail User Guide*.
"""
def put_event_selectors(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutEventSelectors", input, options)
end
@doc """
Lets you enable Insights event logging by specifying the Insights selectors that
you want to enable on an existing trail.
You also use `PutInsightSelectors` to turn off Insights event logging, by
passing an empty list of insight types. In this release, only
`ApiCallRateInsight` is supported as an Insights selector.
"""
def put_insight_selectors(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutInsightSelectors", input, options)
end
@doc """
Removes the specified tags from a trail.
"""
def remove_tags(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RemoveTags", input, options)
end
@doc """
Starts the recording of AWS API calls and log file delivery for a trail.
For a trail that is enabled in all regions, this operation must be called from
the region in which the trail was created. This operation cannot be called on
the shadow trails (replicated trails in other regions) of a trail that is
enabled in all regions.
"""
def start_logging(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartLogging", input, options)
end
@doc """
Suspends the recording of AWS API calls and log file delivery for the specified
trail.
Under most circumstances, there is no need to use this action. You can update a
trail without stopping it first. This action is the only way to stop recording.
For a trail enabled in all regions, this operation must be called from the
region in which the trail was created, or an `InvalidHomeRegionException` will
occur. This operation cannot be called on the shadow trails (replicated trails
in other regions) of a trail enabled in all regions.
"""
def stop_logging(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopLogging", input, options)
end
@doc """
Updates the settings that specify delivery of log files.
Changes to a trail do not require stopping the CloudTrail service. Use this
action to designate an existing bucket for log delivery. If the existing bucket
has previously been a target for CloudTrail log files, an IAM policy exists for
the bucket. `UpdateTrail` must be called from the region in which the trail was
created; otherwise, an `InvalidHomeRegionException` is thrown.
"""
def update_trail(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateTrail", input, options)
end
end
|
lib/aws/generated/cloud_trail.ex
| 0.850841 | 0.667784 |
cloud_trail.ex
|
starcoder
|
defmodule ExBech32 do
@moduledoc """
Nif for Bech32 format encoding and decoding.
It uses https://github.com/rust-bitcoin/rust-bech32 rust library
"""
alias ExBech32.Impl
@doc """
Encodes into Bech32 format
It accepts the following three parameters:
- human-readable part
- data to be encoded
- bech32 variant. it can be `:bech32` (BIP-0173) or `:bech32m` (BIP-0350)
## Examples
iex> ExBech32.encode("bech32", <<0, 1, 2>>)
{:ok, "bech321qqqsyrhqy2a"}
iex> ExBech32.encode("bech32m", <<0, 1, 2>>, :bech32m)
{:ok, "bech32m1qqqsyy9kzpq"}
"""
@spec encode(String.t(), binary(), atom()) :: {:ok, String.t()} | {:error, atom()}
def encode(hrp, data, variant \\ :bech32)
def encode(hrp, data, variant) when variant in [:bech32, :bech32m] do
Impl.encode(hrp, data, Atom.to_string(variant))
end
def encode(_hrp, _data, _variant) do
{:error, :unknown_variant}
end
@doc """
Encodes into Bech32 format appending the version to data
It accepts the following four parameters:
- human-readable part
- witness version
- data to be encoded
- bech32 variant. it can be `:bech32` (BIP-0173) or `:bech32m` (BIP-0350)
## Examples
iex> hash = <<155, 40, 145, 113, 34, 76, 127, 94, 72, 185, 33, 104, 237, 9, 209, 84, 242, 199, 72, 211>>
iex> ExBech32.encode_with_version("bc", 0, hash)
{:ok, "bc1qnv5fzufzf3l4uj9ey95w6zw32nevwjxn9497vk"}
iex> ExBech32.encode_with_version("bc", 0, <<1>>, :bech32k)
{:error, :unknown_variant}
"""
@spec encode_with_version(String.t(), non_neg_integer(), binary(), atom()) ::
{:ok, String.t()} | {:error, atom()}
def encode_with_version(hrp, version, data, variant \\ :bech32)
def encode_with_version(hrp, version, data, variant) when variant in [:bech32, :bech32m] do
Impl.encode_with_version(hrp, version, data, Atom.to_string(variant))
end
def encode_with_version(_hrp, _version, _data, _variant) do
{:error, :unknown_variant}
end
@doc """
Decodes bech32 decoded string
## Examples
iex> ExBech32.decode("bech321qqqsyrhqy2a")
{:ok, {"bech32", <<0, 1, 2>>, :bech32}}
iex> ExBech32.decode("bech32m1qqqsyy9kzpq")
{:ok, {"bech32m", <<0, 1, 2>>, :bech32m}}
"""
@spec decode(String.t()) :: {:ok, {String.t(), binary(), atom()}} | {:error, atom()}
def decode(encoded) do
with {:ok, {hrp, data, variant}} <- Impl.decode(encoded) do
{:ok, {hrp, data, String.to_atom(variant)}}
end
end
@doc """
Decodes bech32 decoded string with witness version
## Examples
iex> ExBech32.decode_with_version("bc1qnv5fzufzf3l4uj9ey95w6zw32nevwjxn9497vk")
{:ok, {"bc", 0, <<155, 40, 145, 113, 34, 76, 127, 94, 72, 185, 33, 104, 237, 9, 209, 84, 242, 199, 72, 211>>, :bech32}}
"""
@spec decode_with_version(String.t()) ::
{:ok, {String.t(), non_neg_integer(), binary(), atom()}} | {:error, atom()}
def decode_with_version(encoded) do
with {:ok, {hrp, version, data, variant}} <- Impl.decode_with_version(encoded) do
{:ok, {hrp, version, data, String.to_atom(variant)}}
end
end
end
|
lib/ex_bech32.ex
| 0.914996 | 0.625081 |
ex_bech32.ex
|
starcoder
|
defmodule Crawly.DataStorage.Worker do
@moduledoc """
A worker process which stores items for individual spiders. All items
are pre-processed by item_pipelines.
All pipelines use the state of this process for their internal needs
(persistence).
For example, it might be useful to include:
1) DuplicatesFilter pipeline (it filters out already seen items)
2) JSONEncoder pipeline (it converts items to JSON)
"""
alias Crawly.DataStorage.Worker
require Logger
use GenServer
defstruct fd: nil, stored_items: 0
def start_link(spider_name: spider_name) do
GenServer.start_link(__MODULE__, spider_name: spider_name)
end
@spec stats(pid()) :: {:stored_items, non_neg_integer()}
def stats(pid), do: GenServer.call(pid, :stats)
@spec store(pid(), map()) :: :ok
def store(pid, item) do
GenServer.cast(pid, {:store, item})
end
def init(spider_name: spider_name) do
Process.flag(:trap_exit, true)
# Specify a path where items are stored on filesystem
base_path = Application.get_env(:crawly, :base_store_path, "/tmp/")
format = Application.get_env(:crawly, :output_format, "jl")
# Open file descriptor to write items
{:ok, fd} =
File.open("#{base_path}#{inspect(spider_name)}.#{format}", [
:binary,
:write,
:delayed_write,
:utf8
])
case format do
"csv" ->
# Special case. Need to insert headers.
item =
Enum.reduce(Application.get_env(:crawly, :item), "", fn
field, "" ->
"#{inspect(field)}"
field, acc ->
acc <> "," <> "#{inspect(field)}"
end)
write_item(fd, item)
_other ->
:ok
end
{:ok, %Worker{fd: fd}}
end
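# Config sketch mirroring the keys read in init/1 and handle_cast/2 above
# (the pipeline module names are examples; check the Crawly version in use):
#
#   config :crawly,
#     base_store_path: "/tmp/",
#     output_format: "jl",
#     item: [:title, :url],
#     pipelines: [Crawly.Pipelines.DuplicatesFilter, Crawly.Pipelines.JSONEncoder]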
def handle_cast({:store, item}, state) do
pipelines = Application.get_env(:crawly, :pipelines, [])
state =
case Crawly.Utils.pipe(pipelines, item, state) do
{false, new_state} ->
new_state
{new_item, new_state} ->
write_item(state.fd, new_item)
%Worker{new_state | stored_items: state.stored_items + 1}
end
{:noreply, state}
end
def handle_call(:stats, _from, state) do
{:reply, {:stored_items, state.stored_items}, state}
end
def handle_info({:EXIT, _from, _reason}, state) do
File.close(state.fd)
{:stop, :normal, state}
end
defp write_item(fd, item) when is_binary(item) do
do_write_item(fd, item)
end
defp write_item(fd, item) do
do_write_item(fd, Kernel.inspect(item))
end
defp do_write_item(fd, item) do
try do
IO.write(fd, item)
IO.write(fd, "\n")
Logger.debug(fn -> "Scraped #{inspect(item)}" end)
catch
error, reason ->
stacktrace = __STACKTRACE__
Logger.error(
"Could not write item: #{inspect(error)}, reason: #{inspect(reason)}, stacktrace: #{
inspect(stacktrace)
}
"
)
end
end
end
|
lib/crawly/data_storage/data_storage_worker.ex
| 0.628407 | 0.400808 |
data_storage_worker.ex
|
starcoder
|
defmodule Erlef.Agenda.Parser do
@moduledoc false
# n.b., This module (at this time) makes no attempt to validate ICS contents and as such
# the results of the combination (at least currently) can not be guaranteed
# to be valid. Rather, it depends on well formed ICS inputs
@doc """
Given a list of ics strings, where each string is an ics file in its entirety, this function combines
all ics vevents and vtimezones into a single ics string. All other calendar objects are ignored.
"""
@spec combine([String.t()]) :: String.t()
def combine(cals, opts \\ []) do
cals
|> Enum.map(&split_and_trim/1)
|> Enum.flat_map(&get_events_and_timezones/1)
|> Enum.flat_map(fn x -> x end)
|> Enum.reverse()
|> Enum.join("\n")
|> wrap(opts)
end
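# Usage sketch (the variables stand in for full .ics files read from disk or over HTTP):
#
#   Parser.combine([group_a_ics, group_b_ics], name: "Working Groups")
#   #=> one VCALENDAR string containing every VEVENT and VTIMEZONE from both inputs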
defp split_and_trim(ics_str),
do: Enum.map(String.split(ics_str, "\n"), fn s -> String.trim(s) end)
defp wrap(body, opts) do
name = Keyword.get(opts, :name, "Erlef Calendar")
"""
BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
METHOD:PUBLISH
PRODID:-//Erlef/1.0/EN
X-WR-CALNAME:#{name}
#{body}
END:VCALENDAR
"""
end
defp get_events_and_timezones(lines), do: get_events_and_timezones(lines, [])
defp get_events_and_timezones([], acc), do: acc
defp get_events_and_timezones([<<"BEGIN:VTIMEZONE">> = line | lines], acc) do
{lines, timezone} = collect_timezone(lines, [line])
get_events_and_timezones(lines, [timezone | acc])
end
defp get_events_and_timezones([<<"BEGIN:VEVENT">> = line | lines], acc) do
{lines, event} = collect_event(lines, [line])
get_events_and_timezones(lines, [event | acc])
end
defp get_events_and_timezones([_ | lines], acc), do: get_events_and_timezones(lines, acc)
defp collect_event([<<"END:VEVENT">> = line | lines], acc), do: {lines, [line | acc]}
defp collect_event([line | lines], acc), do: collect_event(lines, [line | acc])
defp collect_timezone([<<"END:VTIMEZONE">> = line | lines], acc), do: {lines, [line | acc]}
defp collect_timezone([line | lines], acc), do: collect_timezone(lines, [line | acc])
end
|
lib/erlef/agenda/parser.ex
| 0.610918 | 0.437824 |
parser.ex
|
starcoder
|
defmodule StateMachine.Event do
@moduledoc """
Event is a container for transitions. It is identified by name (atom) and can contain an arbitrary number of transitions.
One important thing is that it's disallowed to have more than one unguarded transition from the state, since this
would introduce a "non-determinism" (or rather just discard the latter transition). We loosely allow guarded transitions
from the same state, but it doesn't guarantee anything: if guards always return true, we're back to where we were before.
"""
alias StateMachine.{Event, Transition, Context, Callback, Guard}
@type t(model) :: %__MODULE__{
name: atom,
transitions: list(Transition.t(model)),
before: list(Callback.t(model)),
after: list(Callback.t(model)),
guards: list(Guard.t(model)),
}
@type callback_pos() :: :before | :after
@enforce_keys [:name]
defstruct [
:name,
transitions: [],
before: [],
after: [],
guards: []
]
@doc """
Checks if the event is allowed in the current context. First it makes sure that all guards
of the event return `true`, then it scans transitions for the matching one. Match is determined
by the source state and passing of all guards as well.
"""
@spec is_allowed?(Context.t(model), t(model) | atom) :: boolean when model: var
def is_allowed?(ctx, event) do
!is_nil(find_transition(ctx, event))
end
@doc """
This is an entry point for transition. By running this function with populated context, event
and optional payload, you tell state machine to try to move to the next state.
It returns an updated context.
"""
@spec trigger(Context.t(model), atom, any) :: Context.t(model) when model: var
def trigger(ctx, event, payload \\ nil) do
context = %{ctx | payload: payload, event: event}
with {_, %Event{} = e} <- {:event, Map.get(context.definition.events, event)},
{_, %Transition{} = t} <- {:transition, find_transition(context, e)}
do
Transition.run(%{context | transition: t})
else
{item, _} -> %{context | status: :failed, error: {item, "Couldn't resolve #{item}"}}
end
end
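# Usage sketch (assumes `ctx` is a populated StateMachine.Context whose definition
# declares a :confirm event; the payload map is illustrative):
#
#   ctx = Event.trigger(ctx, :confirm, %{actor: current_user})
#   # on failure the returned context carries status: :failed and an :error tuple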
@doc """
Private function for running Event callbacks.
"""
@spec callback(Context.t(model), callback_pos()) :: Context.t(model) when model: var
def callback(ctx, pos) do
callbacks = Map.get(ctx.definition.events[ctx.event], pos)
Callback.apply_chain(ctx, callbacks, :"#{pos}_event")
end
@spec find_transition(Context.t(model), t(model)) :: Transition.t(model) | nil when model: var
defp find_transition(ctx, event) do
if Guard.check(ctx, event) do
state = ctx.definition.state_getter.(ctx)
Enum.find(event.transitions, fn transition ->
transition.from == state && Transition.is_allowed?(ctx, transition)
end)
end
end
end
|
lib/state_machine/event.ex
| 0.847195 | 0.469824 |
event.ex
|
starcoder
|
defmodule Postgrex.Date do
@moduledoc """
Struct for Postgres date.
## Fields
* `year`
* `month`
* `day`
"""
@type t :: %__MODULE__{year: 0..10000, month: 1..12, day: 1..31}
defstruct [
year: 0,
month: 1,
day: 1]
end
defmodule Postgrex.Time do
@moduledoc """
Struct for Postgres time.
## Fields
* `hour`
* `min`
* `sec`
* `usec`
"""
@type t :: %__MODULE__{hour: 0..23, min: 0..59, sec: 0..59, usec: 0..999_999}
defstruct [
hour: 0,
min: 0,
sec: 0,
usec: 0]
end
defmodule Postgrex.Timestamp do
@moduledoc """
Struct for Postgres timestamp.
## Fields
* `year`
* `month`
* `day`
* `hour`
* `min`
* `sec`
* `usec`
"""
@type t :: %__MODULE__{year: 0..10000, month: 1..12, day: 1..31,
hour: 0..23, min: 0..59, sec: 0..59, usec: 0..999_999}
defstruct [
year: 0,
month: 1,
day: 1,
hour: 0,
min: 0,
sec: 0,
usec: 0]
end
defmodule Postgrex.Interval do
@moduledoc """
Struct for Postgres interval.
## Fields
* `months`
* `days`
* `secs`
"""
@type t :: %__MODULE__{months: integer, days: integer, secs: integer}
defstruct [
months: 0,
days: 0,
secs: 0]
end
defmodule Postgrex.Range do
@moduledoc """
Struct for Postgres range.
## Fields
* `lower`
* `upper`
* `lower_inclusive`
* `upper_inclusive`
"""
@type t :: %__MODULE__{lower: term, upper: term, lower_inclusive: boolean,
upper_inclusive: boolean}
defstruct [
lower: nil,
upper: nil,
lower_inclusive: true,
upper_inclusive: true]
end
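# Construction sketch (an int4range-style value; fields as defined above):
#
#   %Postgrex.Range{lower: 1, upper: 10, lower_inclusive: true, upper_inclusive: false}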
defmodule Postgrex.INET do
@moduledoc """
Struct for Postgres inet.
## Fields
* `address`
"""
@type t :: %__MODULE__{address: :inet.ip_address}
defstruct [address: nil]
end
defmodule Postgrex.CIDR do
@moduledoc """
Struct for Postgres cidr.
## Fields
* `address`
* `netmask`
"""
@type t :: %__MODULE__{address: :inet.ip_address,
netmask: 0..128}
defstruct [
address: nil,
netmask: nil]
end
defmodule Postgrex.MACADDR do
@moduledoc """
Struct for Postgres macaddr.
## Fields
* `address`
"""
@type macaddr :: {0..255, 0..255, 0..255, 0..255, 0..255, 0..255}
@type t :: %__MODULE__{address: macaddr }
defstruct [address: nil]
end
defmodule Postgrex.Point do
@moduledoc """
Struct for Postgres point.
## Fields
* `x`
* `y`
"""
@type t :: %__MODULE__{x: float, y: float}
defstruct [
x: nil,
y: nil]
end
defmodule Postgrex.Polygon do
@moduledoc """
Struct for Postgres polygon.
## Fields
* `vertices`
"""
@type t :: %__MODULE__{vertices: [Postgrex.Point.t]}
defstruct [vertices: nil]
end
defmodule Postgrex.Line do
@moduledoc """
Struct for Postgres line.
Note, lines are stored in Postgres in the form `{a,b,c}`, which
parameterizes a line as `a*x + b*y + c = 0`.
## Fields
* `a`
* `b`
* `c`
"""
@type t :: %__MODULE__{a: float, b: float, c: float}
defstruct [
a: nil,
b: nil,
c: nil]
end
defmodule Postgrex.LineSegment do
@moduledoc """
Struct for Postgres line segment.
## Fields
* `point1`
* `point2`
"""
@type t :: %__MODULE__{point1: Postgrex.Point.t, point2: Postgrex.Point.t}
defstruct [
point1: nil,
point2: nil
]
end
defmodule Postgrex.Box do
@moduledoc """
Struct for Postgres rectangular box.
## Fields
* `upper_right`
* `bottom_left`
"""
@type t :: %__MODULE__{
upper_right: Postgrex.Point.t,
bottom_left: Postgrex.Point.t
}
defstruct [
upper_right: nil,
bottom_left: nil
]
end
defmodule Postgrex.Path do
@moduledoc """
Struct for Postgres path.
## Fields
* `open`
* `points`
"""
@type t :: %__MODULE__{points: [Postgrex.Point.t], open: boolean}
defstruct [
points: nil,
open: nil
]
end
defmodule Postgrex.Circle do
@moduledoc """
Struct for Postgres circle.
## Fields
* `center`
* `radius`
"""
@type t :: %__MODULE__{center: Postgrex.Point.t, radius: number}
defstruct [
center: nil,
radius: nil
]
end
defmodule Postgrex.Lexeme do
@moduledoc """
Struct for Postgres Lexeme (A Tsvector type is composed of multiple lexemes)
## Fields
* `word`
* `positions`
"""
@type t :: %__MODULE__{word: String.t, positions: [{pos_integer, :A | :B | :C | nil}]}
defstruct [
word: nil,
positions: nil
]
end
|
lib/postgrex/builtins.ex
| 0.897472 | 0.631963 |
builtins.ex
|
starcoder
|
name: {0_5} 2
I: { 1} 1
T: { 1} 14
I: { 1} 14
T: { 1} 28
;
name: {1_5} 2
I: { 1} 0
T: { 1} 9
I: { 1} 9
T: { 1} 27
;
name: {2_5} 2
I: { 1} 2
T: { 1} 26
I: { 1} 26
T: { 1} 29
;
name: {3_5} 2
I: { 1} 1
T: { 1} 3
I: { 1} 3
T: { 1} 28
;
name: {4_5} 2
I: { 1} 2
T: { 1} 3
I: { 1} 3
T: { 1} 29
;
name: {5_5} 2
I: { 1} 0
T: { 1} 15
I: { 1} 15
T: { 1} 27
;
name: {6_5} 2
I: { 1} 0
T: { 1} 7
I: { 1} 7
T: { 1} 27
;
name: {7_5} 2
I: { 1} 1
T: { 1} 6
I: { 1} 6
T: { 1} 28
;
name: {8_5} 2
I: { 1} 0
T: { 1} 20
I: { 1} 20
T: { 1} 27
;
name: {9_5} 2
I: { 1} 0
T: { 1} 18
I: { 1} 18
T: { 1} 27
;
name: {10_5} 2
I: { 1} 2
T: { 1} 22
I: { 1} 22
T: { 1} 29
;
name: {11_5} 2
I: { 1} 2
T: { 1} 11
I: { 1} 11
T: { 1} 29
;
name: {12_5} 2
I: { 1} 2
T: { 1} 21
I: { 1} 21
T: { 1} 29
;
name: {13_5} 2
I: { 1} 0
T: { 1} 4
I: { 1} 4
T: { 1} 27
;
name: {14_5} 2
I: { 1} 2
T: { 1} 14
I: { 1} 14
T: { 1} 29
;
name: {15_5} 2
I: { 1} 2
T: { 1} 13
I: { 1} 13
T: { 1} 29
;
name: {16_5} 2
I: { 1} 2
T: { 1} 15
I: { 1} 15
T: { 1} 29
;
name: {17_5} 2
I: { 1} 1
T: { 1} 18
I: { 1} 18
T: { 1} 28
;
name: {18_5} 2
I: { 1} 1
T: { 1} 20
I: { 1} 20
T: { 1} 28
;
name: {19_5} 2
I: { 1} 1
T: { 1} 7
I: { 1} 7
T: { 1} 28
;
name: {20_5} 2
I: { 1} 0
T: { 1} 14
I: { 1} 14
T: { 1} 27
;
name: {21_5} 2
I: { 1} 0
T: { 1} 25
I: { 1} 25
T: { 1} 27
;
name: {22_5} 2
I: { 1} 1
T: { 1} 23
I: { 1} 23
T: { 1} 28
;
name: {23_5} 2
I: { 1} 0
T: { 1} 5
I: { 1} 5
T: { 1} 27
;
name: {24_5} 2
I: { 1} 0
T: { 1} 6
I: { 1} 6
T: { 1} 27
;
name: {25_5} 2
I: { 1} 2
T: { 1} 6
I: { 1} 6
T: { 1} 29
;
name: {26_5} 2
I: { 1} 2
T: { 1} 5
I: { 1} 5
T: { 1} 29
;
name: {27_5} 2
I: { 1} 0
T: { 1} 22
I: { 1} 22
T: { 1} 27
;
name: {28_5} 2
I: { 1} 0
T: { 1} 8
I: { 1} 8
T: { 1} 27
;
name: {29_5} 2
I: { 1} 0
T: { 1} 3
I: { 1} 3
T: { 1} 27
;
name: {30_5} 2
I: { 1} 1
T: { 1} 19
I: { 1} 19
T: { 1} 28
;
name: {31_5} 2
I: { 1} 1
T: { 1} 11
I: { 1} 11
T: { 1} 28
;
name: {32_5} 2
I: { 1} 1
T: { 1} 12
I: { 1} 12
T: { 1} 28
;
name: {33_5} 2
I: { 1} 1
T: { 1} 15
I: { 1} 15
T: { 1} 28
;
name: {34_5} 2
I: { 1} 1
T: { 1} 17
I: { 1} 17
T: { 1} 28
;
name: {35_5} 2
I: { 1} 2
T: { 1} 16
I: { 1} 16
T: { 1} 29
;
name: {36_5} 2
I: { 1} 1
T: { 1} 5
I: { 1} 5
T: { 1} 28
;
name: {37_5} 2
I: { 1} 0
T: { 1} 11
I: { 1} 11
T: { 1} 27
;
name: {38_5} 2
I: { 1} 0
T: { 1} 16
I: { 1} 16
T: { 1} 27
;
name: {39_5} 2
I: { 1} 2
T: { 1} 17
I: { 1} 17
T: { 1} 29
;
name: {40_5} 2
I: { 1} 2
T: { 1} 8
I: { 1} 8
T: { 1} 29
;
name: {41_5} 2
I: { 1} 1
T: { 1} 24
I: { 1} 24
T: { 1} 28
;
name: {42_5} 2
I: { 1} 1
T: { 1} 16
I: { 1} 16
T: { 1} 28
;
name: {43_5} 2
I: { 1} 0
T: { 1} 21
I: { 1} 21
T: { 1} 27
;
name: {44_5} 2
I: { 1} 0
T: { 1} 26
I: { 1} 26
T: { 1} 27
;
name: {45_5} 2
I: { 1} 1
T: { 1} 10
I: { 1} 10
T: { 1} 28
;
name: {46_5} 2
I: { 1} 2
T: { 1} 24
I: { 1} 24
T: { 1} 29
;
name: {47_5} 2
I: { 1} 2
T: { 1} 25
I: { 1} 25
T: { 1} 29
;
name: {48_5} 2
I: { 1} 2
T: { 1} 9
I: { 1} 9
T: { 1} 29
;
name: {49_5} 2
I: { 1} 1
T: { 1} 8
I: { 1} 8
T: { 1} 28
;
name: {50_5} 2
I: { 1} 2
T: { 1} 10
I: { 1} 10
T: { 1} 29
;
name: {51_5} 2
I: { 1} 2
T: { 1} 7
I: { 1} 7
T: { 1} 29
;
name: {52_5} 2
I: { 1} 2
T: { 1} 20
I: { 1} 20
T: { 1} 29
;
name: {53_5} 2
I: { 1} 1
T: { 1} 9
I: { 1} 9
T: { 1} 28
;
name: {54_5} 2
I: { 1} 0
T: { 1} 10
I: { 1} 10
T: { 1} 27
;
name: {55_5} 2
I: { 1} 1
T: { 1} 13
I: { 1} 13
T: { 1} 28
;
name: {56_5} 2
I: { 1} 0
T: { 1} 13
I: { 1} 13
T: { 1} 27
;
name: {57_5} 2
I: { 1} 2
T: { 1} 19
I: { 1} 19
T: { 1} 29
;
name: {58_5} 2
I: { 1} 1
T: { 1} 21
I: { 1} 21
T: { 1} 28
;
name: {59_5} 2
I: { 1} 0
T: { 1} 23
I: { 1} 23
T: { 1} 27
;
name: {60_5} 2
I: { 1} 0
T: { 1} 24
I: { 1} 24
T: { 1} 27
;
name: {61_5} 2
I: { 1} 0
T: { 1} 19
I: { 1} 19
T: { 1} 27
;
name: {62_5} 2
I: { 1} 0
T: { 1} 17
I: { 1} 17
T: { 1} 27
;
name: {63_5} 2
I: { 1} 1
T: { 1} 4
I: { 1} 4
T: { 1} 28
;
name: {64_5} 2
I: { 1} 0
T: { 1} 12
I: { 1} 12
T: { 1} 27
;
name: {65_5} 2
I: { 1} 2
T: { 1} 12
I: { 1} 12
T: { 1} 29
;
name: {66_5} 2
I: { 1} 1
T: { 1} 26
I: { 1} 26
T: { 1} 28
;
name: {67_5} 2
I: { 1} 2
T: { 1} 18
I: { 1} 18
T: { 1} 29
;
name: {68_5} 2
I: { 1} 1
T: { 1} 22
I: { 1} 22
T: { 1} 28
;
name: {69_5} 2
I: { 1} 1
T: { 1} 25
I: { 1} 25
T: { 1} 28
;
name: {70_5} 2
I: { 1} 2
T: { 1} 23
I: { 1} 23
T: { 1} 29
;
name: {71_5} 2
I: { 1} 2
T: { 1} 4
I: { 1} 4
T: { 1} 29
;
|
q5_train_block5.ex
| 0.502441 | 0.648898 |
q5_train_block5.ex
|
starcoder
|
defmodule Legato.Query do
defstruct profile: nil, view_id: nil, sampling_level: :default,
metrics: [], dimensions: [], date_ranges: [], order_bys: [], segments: [],
filters: %{
metrics: %Legato.Query.FilterSet{as: :metrics},
dimensions: %Legato.Query.FilterSet{as: :dimensions}
}
defimpl Poison.Encoder, for: __MODULE__ do
def encode(struct, options) do
# This is the format for GA report json
Poison.Encoder.Map.encode(%{
reportRequests: [
%{
view_id: to_string(struct.view_id),
# these are derived:
metrics: struct.metrics,
dimensions: struct.dimensions,
date_ranges: struct.date_ranges,
order_bys: struct.order_bys,
segments: struct.segments,
sampling_level: struct.sampling_level
}
]
}, options)
end
end
alias Legato.Profile
alias Legato.Query.DateRange
alias Legato.Query.Order
alias Legato.Query.Segment
alias Legato.Query.Metric
alias Legato.Query.MetricFilter
alias Legato.Query.Dimension
alias Legato.Query.DimensionFilter
alias Legato.Query.FilterSet
# TODO: Fetch this list from metadata api v3
# https://developers.google.com/analytics/devguides/reporting/metadata/v3/
@dimensions [
# User
:user_type,
:session_count,
:days_since_last_session,
:user_defined_value,
:user_bucket,
# Session
:session_duration_bucket,
# Traffic Sources
:referral_path,
:full_referrer,
:campaign,
:source,
:medium,
:source_medium,
:keyword,
:ad_content,
:social_network,
:has_social_source_referral,
:campaign_code,
# Adwords
:ad_group,
:ad_slot,
:ad_distribution_network,
:ad_match_type,
:ad_keyword_match_type,
:ad_matched_query,
:ad_placement_domain,
:ad_placement_url,
:ad_format,
:ad_targeting_type,
:ad_targeting_option,
:ad_display_url,
:ad_destination_url,
:adwords_customer_id,
:adwords_campaign_id,
:adwords_ad_group_id,
:adwords_creative_id,
:adwords_criteria_id,
:ad_query_word_count,
:is_true_view_video_ad,
# Goal Conversion
:goal_completion_location,
:goal_previous_step1,
:goal_previous_step2,
:goal_previous_step3,
# Platform or Device
:browser,
:browser_version,
:operating_system,
:operating_system_version,
:mobile_device_branding,
:mobile_device_model,
:mobile_input_selector,
:mobile_device_info,
:mobile_device_marketing_name,
:device_category,
:browser_size,
:data_source,
# Geo Network
:continent,
:sub_continent,
:country,
:region,
:metro,
:city,
:latitude,
:longitude,
:network_domain,
:network_location,
:city_id,
:continent_id,
:country_iso_code,
:metro_id,
:region_id,
:region_iso_code,
:sub_continent_code,
# System
:flash_version,
:java_enabled,
:language,
:screen_colors,
:source_property_display_name,
:source_property_tracking_id,
:screen_resolution,
# Page Tracking
:hostname,
:page_path,
:page_path_level1,
:page_path_level2,
:page_path_level3,
:page_path_level4,
:page_title,
:landing_page_path,
:second_page_path,
:exit_page_path,
:previous_page_path,
:page_depth,
# Content Grouping
# :landing_content_group_x_x,
# :previous_content_group_x_x,
# :content_group_x_x,
# Internal Search
:search_used,
:search_keyword,
:search_keyword_refinement,
:search_category,
:search_start_page,
:search_destination_page,
:search_after_destination_page,
# App Tracking
:app_installer_id,
:app_version,
:app_name,
:app_id,
:screen_name,
:screen_depth,
:landing_screen_name,
:exit_screen_name,
# Event Tracking
:event_category,
:event_action,
:event_label,
# Ecommerce
:transaction_id,
:affiliation,
:sessions_to_transaction,
:days_to_transaction,
:product_sku,
:product_name,
:product_category,
:currency_code,
:checkout_options,
:internal_promotion_creative,
:internal_promotion_id,
:internal_promotion_name,
:internal_promotion_position,
:order_coupon_code,
:product_brand,
:product_category_hierarchy,
# :product_category_level_x_x,
:product_coupon_code,
:product_list_name,
:product_list_position,
:product_variant,
:shopping_stage,
# Social Interactions
:social_interaction_network,
:social_interaction_action,
:social_interaction_network_action,
:social_interaction_target,
:social_engagement_type,
# User Timings
:user_timing_category,
:user_timing_label,
:user_timing_variable,
# Exceptions
:exception_description,
# Content Experiments
:experiment_id,
:experiment_variant,
# Custom Variables or Columns
# :dimension_x_x,
# :custom_var_name_x_x,
# :custom_var_value_x_x,
# Time
:date,
:year,
:month,
:week,
:day,
:hour,
:minute,
:nth_month,
:nth_week,
:nth_day,
:nth_minute,
:day_of_week,
:day_of_week_name,
:date_hour,
:year_month,
:year_week,
:iso_week,
:iso_year,
:iso_year_iso_week,
:nth_hour,
# DoubleClick Campaign Manager
:dcm_click_ad,
:dcm_click_ad_id,
:dcm_click_ad_type,
:dcm_click_ad_type_id,
:dcm_click_advertiser,
:dcm_click_advertiser_id,
:dcm_click_campaign,
:dcm_click_campaign_id,
:dcm_click_creative_id,
:dcm_click_creative,
:dcm_click_rendering_id,
:dcm_click_creative_type,
:dcm_click_creative_type_id,
:dcm_click_creative_version,
:dcm_click_site,
:dcm_click_site_id,
:dcm_click_site_placement,
:dcm_click_site_placement_id,
:dcm_click_spot_id,
:dcm_floodlight_activity,
:dcm_floodlight_activity_and_group,
:dcm_floodlight_activity_group,
:dcm_floodlight_activity_group_id,
:dcm_floodlight_activity_id,
:dcm_floodlight_advertiser_id,
:dcm_floodlight_spot_id,
:dcm_last_event_ad,
:dcm_last_event_ad_id,
:dcm_last_event_ad_type,
:dcm_last_event_ad_type_id,
:dcm_last_event_advertiser,
:dcm_last_event_advertiser_id,
:dcm_last_event_attribution_type,
:dcm_last_event_campaign,
:dcm_last_event_campaign_id,
:dcm_last_event_creative_id,
:dcm_last_event_creative,
:dcm_last_event_rendering_id,
:dcm_last_event_creative_type,
:dcm_last_event_creative_type_id,
:dcm_last_event_creative_version,
:dcm_last_event_site,
:dcm_last_event_site_id,
:dcm_last_event_site_placement,
:dcm_last_event_site_placement_id,
:dcm_last_event_spot_id,
# Audience
:user_age_bracket,
:user_gender,
:interest_other_category,
:interest_affinity_category,
:interest_in_market_category,
# Lifetime Value and Cohorts
:acquisition_campaign,
:acquisition_medium,
:acquisition_source,
:acquisition_source_medium,
:acquisition_traffic_channel,
:cohort,
:cohort_nth_day,
:cohort_nth_month,
:cohort_nth_week,
# Channel Grouping
:channel_grouping,
# Related Products
:correlation_model_id,
:query_product_id,
:query_product_name,
:query_product_variation,
:related_product_id,
:related_product_name,
:related_product_variation,
# DoubleClick Bid Manager
:dbm_click_advertiser,
:dbm_click_advertiser_id,
:dbm_click_creative_id,
:dbm_click_exchange,
:dbm_click_exchange_id,
:dbm_click_insertion_order,
:dbm_click_insertion_order_id,
:dbm_click_line_item,
:dbm_click_line_item_id,
:dbm_click_site,
:dbm_click_site_id,
:dbm_last_event_advertiser,
:dbm_last_event_advertiser_id,
:dbm_last_event_creative_id,
:dbm_last_event_exchange,
:dbm_last_event_exchange_id,
:dbm_last_event_insertion_order,
:dbm_last_event_insertion_order_id,
:dbm_last_event_line_item,
:dbm_last_event_line_item_id,
:dbm_last_event_site,
:dbm_last_event_site_id,
# DoubleClick Search
:ds_ad_group,
:ds_ad_group_id,
:ds_advertiser,
:ds_advertiser_id,
:ds_agency,
:ds_agency_id,
:ds_campaign,
:ds_campaign_id,
:ds_engine_account,
:ds_engine_account_id,
:ds_keyword,
:ds_keyword_id
]
@metrics [
# User
:users,
:new_users,
:percent_new_sessions,
:"1day_users",
:"7day_users",
:"14day_users",
:"30day_users",
:sessions_per_user,
# Session
:sessions,
:bounces,
:bounce_rate,
:session_duration,
:avg_session_duration,
:unique_dimension_combinations,
:hits,
# Traffic Sources
:organic_searches,
# Adwords
:impressions,
:ad_clicks,
:ad_cost,
:cpm,
:cpc,
:ctr,
:cost_per_transaction,
:cost_per_goal_conversion,
:cost_per_conversion,
:rpc,
:roas,
# Goal Conversions
# :goal_x_x_starts,
:goal_starts_all,
# :goal_x_x_completions,
:goal_completions_all,
# :goal_x_x_value,
:goal_value_all,
:goal_value_per_session,
# :goal_x_x_conversion_rate,
:goal_conversion_rate_all,
# :goal_x_x_abandons,
:goal_abandons_all,
# :goal_x_x_abandon_rate,
:goal_abandon_rate_all,
# Page Tracking
:page_value,
:entrances,
:entrance_rate,
:pageviews,
:pageviews_per_session,
:unique_pageviews,
:time_on_page,
:avg_time_on_page,
:exits,
:exit_rate,
# Content Grouping
# :content_group_unique_views_x_x,
# Internal Search
:search_result_views,
:search_uniques,
:avg_search_result_views,
:search_sessions,
:percent_sessions_with_search,
:search_depth,
:avg_search_depth,
:search_refinements,
:percent_search_refinements,
:search_duration,
:avg_search_duration,
:search_exits,
:search_exit_rate,
# :search_goal_x_x_conversion_rate,
:search_goal_conversion_rate_all,
:goal_value_all_per_search,
# Site Speed
:page_load_time,
:page_load_sample,
:avg_page_load_time,
:domain_lookup_time,
:avg_domain_lookup_time,
:page_download_time,
:avg_page_download_time,
:redirection_time,
:avg_redirection_time,
:server_connection_time,
:avg_server_connection_time,
:server_response_time,
:avg_server_response_time,
:speed_metrics_sample,
:dom_interactive_time,
:avg_dom_interactive_time,
:dom_content_loaded_time,
:avg_dom_content_loaded_time,
:dom_latency_metrics_sample,
# App Tracking
:screenviews,
:unique_screenviews,
:screenviews_per_session,
:time_on_screen,
:avg_screenview_duration,
# Event Tracking
:total_events,
:unique_events,
:event_value,
:avg_event_value,
:sessions_with_event,
:events_per_session_with_event,
# Ecommerce
:transactions,
:transactions_per_session,
:transaction_revenue,
:revenue_per_transaction,
:transaction_revenue_per_session,
:transaction_shipping,
:transaction_tax,
:total_value,
:item_quantity,
:unique_purchases,
:revenue_per_item,
:item_revenue,
:items_per_purchase,
:local_transaction_revenue,
:local_transaction_shipping,
:local_transaction_tax,
:local_item_revenue,
:buy_to_detail_rate,
:cart_to_detail_rate,
:internal_promotion_c_t_r,
:internal_promotion_clicks,
:internal_promotion_views,
:local_product_refund_amount,
:local_refund_amount,
:product_adds_to_cart,
:product_checkouts,
:product_detail_views,
:product_list_c_t_r,
:product_list_clicks,
:product_list_views,
:product_refund_amount,
:product_refunds,
:product_removes_from_cart,
:product_revenue_per_purchase,
:quantity_added_to_cart,
:quantity_checked_out,
:quantity_refunded,
:quantity_removed_from_cart,
:refund_amount,
:revenue_per_user,
:total_refunds,
:transactions_per_user,
# Social Interactions
:social_interactions,
:unique_social_interactions,
:social_interactions_per_session,
# User Timings
:user_timing_value,
:user_timing_sample,
:avg_user_timing_value,
# Exceptions
:exceptions,
:exceptions_per_screenview,
:fatal_exceptions,
:fatal_exceptions_per_screenview,
# Custom Variables or Columns
# :metric_x_x,
# :calc_metric_,
# DoubleClick Campaign Manager
:dcm_floodlight_quantity,
:dcm_floodlight_revenue,
:dcm_c_p_c,
:dcm_c_t_r,
:dcm_clicks,
:dcm_cost,
:dcm_impressions,
:dcm_r_o_a_s,
:dcm_r_p_c,
# Adsense
:adsense_revenue,
:adsense_ad_units_viewed,
:adsense_ads_viewed,
:adsense_ads_clicks,
:adsense_page_impressions,
:adsense_c_t_r,
:adsense_e_c_p_m,
:adsense_exits,
:adsense_viewable_impression_percent,
:adsense_coverage,
# Ad Exchange
:adx_impressions,
:adx_coverage,
:adx_monetized_pageviews,
:adx_impressions_per_session,
:adx_viewable_impressions_percent,
:adx_clicks,
:adx_c_t_r,
:adx_revenue,
:adx_revenue_per1000_sessions,
:adx_e_c_p_m,
# DoubleClick for Publishers
:dfp_impressions,
:dfp_coverage,
:dfp_monetized_pageviews,
:dfp_impressions_per_session,
:dfp_viewable_impressions_percent,
:dfp_clicks,
:dfp_c_t_r,
:dfp_revenue,
:dfp_revenue_per1000_sessions,
:dfp_e_c_p_m,
# DoubleClick for Publishers Backfill
:backfill_impressions,
:backfill_coverage,
:backfill_monetized_pageviews,
:backfill_impressions_per_session,
:backfill_viewable_impressions_percent,
:backfill_clicks,
:backfill_c_t_r,
:backfill_revenue,
:backfill_revenue_per1000_sessions,
:backfill_e_c_p_m,
# Lifetime Value and Cohorts
:cohort_active_users,
:cohort_appviews_per_user,
:cohort_appviews_per_user_with_lifetime_criteria,
:cohort_goal_completions_per_user,
:cohort_goal_completions_per_user_with_lifetime_criteria,
:cohort_pageviews_per_user,
:cohort_pageviews_per_user_with_lifetime_criteria,
:cohort_retention_rate,
:cohort_revenue_per_user,
:cohort_revenue_per_user_with_lifetime_criteria,
:cohort_session_duration_per_user,
:cohort_session_duration_per_user_with_lifetime_criteria,
:cohort_sessions_per_user,
:cohort_sessions_per_user_with_lifetime_criteria,
:cohort_total_users,
:cohort_total_users_with_lifetime_criteria,
# Related Products
:correlation_score,
:query_product_quantity,
:related_product_quantity,
# DoubleClick Bid Manager
:dbm_c_p_a,
:dbm_c_p_c,
:dbm_c_p_m,
:dbm_c_t_r,
:dbm_clicks,
:dbm_conversions,
:dbm_cost,
:dbm_impressions,
:dbm_r_o_a_s,
# DoubleClick Search
:ds_c_p_c,
:ds_c_t_r,
:ds_clicks,
:ds_cost,
:ds_impressions,
:ds_profit,
:ds_return_on_ad_spend,
:ds_revenue_per_click
]
@doc ~S"""
Start a query with a given Legato.Profile and metrics
## Examples
iex> %Legato.Profile{access_token: "abcde", view_id: 177817} |> Legato.Query.metrics([:pageviews])
%Legato.Query{
profile: %Legato.Profile{access_token: "abcde", view_id: 177817},
view_id: 177817,
metrics: [%Legato.Query.Metric{expression: "ga:pageviews"}]
}
"""
def metrics(%Profile{} = profile, names) do
%__MODULE__{profile: profile, view_id: profile.view_id} |> metrics(names)
end
@doc ~S"""
Add metrics to an existing Legato.Query
## Examples
iex> %Legato.Query{} |> Legato.Query.metrics([:pageviews]) |> Legato.Query.metrics([:exits])
%Legato.Query{
metrics: [%Legato.Query.Metric{expression: "ga:pageviews"}, %Legato.Query.Metric{expression: "ga:exits"}]
}
"""
def metrics(%__MODULE__{} = query, names) do
%{query | metrics: Metric.add(query.metrics, names)}
end
@doc ~S"""
Start a query with a given Legato.Profile and dimensions
## Examples
iex> %Legato.Profile{access_token: "abcde", view_id: 177817} |> Legato.Query.dimensions([:country])
%Legato.Query{
profile: %Legato.Profile{access_token: "abcde", view_id: 177817},
view_id: 177817,
dimensions: [%Legato.Query.Dimension{name: "ga:country"}]
}
"""
def dimensions(%Profile{} = profile, names) do
%__MODULE__{profile: profile, view_id: profile.view_id} |> dimensions(names)
end
@doc ~S"""
Add dimensions to an existing Legato.Query
## Examples
iex> %Legato.Query{} |> Legato.Query.dimensions([:country]) |> Legato.Query.dimensions([:city])
%Legato.Query{
dimensions: [%Legato.Query.Dimension{name: "ga:country"}, %Legato.Query.Dimension{name: "ga:city"}]
}
"""
def dimensions(%__MODULE__{} = query, names) do
%{query | dimensions: Dimension.add(query.dimensions, names)}
end
@doc ~S"""
Add filter to set for dimensions and metrics
Checks for the name of the dimension or metric in a predefined set.
If the value is dynamic (e.g. custom variables like `:dimension_x_x`)
it will not know if it is a dimension or metric name.
  This limitation can be circumvented by creating the MetricFilter or
DimensionFilter struct yourself.
## Examples
iex> %Legato.Query{} |> Legato.Query.filter(:pageviews, :gt, 10)
%Legato.Query{
filters: %{
dimensions: %Legato.Query.FilterSet{as: :dimensions},
metrics: %Legato.Query.FilterSet{as: :metrics, operator: :or, filters: [
%Legato.Query.MetricFilter{
metric_name: :pageviews,
not: false,
operator: :gt,
comparison_value: 10
}
]}
}
}
iex> %Legato.Query{} |> Legato.Query.filter(:continent, :like, ["North America", "Europe"])
%Legato.Query{
filters: %{
metrics: %Legato.Query.FilterSet{as: :metrics},
dimensions: %Legato.Query.FilterSet{as: :dimensions, operator: :or, filters: [
%Legato.Query.DimensionFilter{
dimension_name: :continent,
not: false,
operator: :like,
case_sensitive: false,
expressions: ["North America", "Europe"]
}
]}
}
}
"""
def filter(query, name, operator, value) when name in(@metrics) do
filter(query, %MetricFilter{
metric_name: name,
operator: (operator || :equal),
comparison_value: value
})
end
def filter(query, name, operator, expressions) when name in(@dimensions) do
filter(query, %DimensionFilter{
dimension_name: name,
operator: (operator || :regexp),
expressions: expressions
})
end
def filter(query, %MetricFilter{} = filter) do
update_in(query.filters.metrics, &FilterSet.add(&1, filter))
end
def filter(query, %DimensionFilter{} = filter) do
update_in(query.filters.dimensions, &FilterSet.add(&1, filter))
end
# add to existing date ranges
def between(query, start_date, end_date) do
%{query | date_ranges: DateRange.add(query.date_ranges, start_date, end_date)}
end
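  # For illustration, an ordering can be appended either as a prebuilt %Order{}
  # struct or by field name; the :descending atom below mirrors the default
  # :ascending and is an assumption, not confirmed by this file.
  #
  #     query |> Legato.Query.order_by(:pageviews)
  #     query |> Legato.Query.order_by(:pageviews, :descending)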
def order_by(query, %Order{} = value) do
%{query | order_bys: query.order_bys ++ [value]}
end
def order_by(query, name, direction \\ :ascending) when is_atom(name) do
%{query | order_bys: query.order_bys ++ [%Order{field_name: name, sort_order: direction}]}
end
@doc ~S"""
Adds a single segment id
## Examples
iex> %Legato.Query{} |> Legato.Query.segment(-3)
%Legato.Query{segments: [%Legato.Query.Segment{segment_id: "gaid:-3"}]}
"""
def segment(query, id) when is_integer(id) do
%{query | segments: [Segment.build(id)]}
end
# :default, :small, :large
def sampling(query, level) do
%{query | sampling_level: level}
end
# TODO: validate presence of profile, view_id, metrics, dimensions
def to_json(query), do: Poison.encode!(query)
end
|
lib/legato/query.ex
| 0.533641 | 0.402157 |
query.ex
|
starcoder
|
defprotocol C3P0.ID do
@moduledoc """
Formalizes fetching the ID from data.
For maps the keys `id`, `"id"` are considered id fields and `guid`, `"guid"` are considered guid fields.
When requesting a guid, if one cannot be found by default it will fall back to the id.
## Using with your own structs
By default your own structs will behave the same way as a map.
However if you need to redefine which field should be considered the id/guid fields you'll need to derive the protocol.
```elixir
defmodule MyStruct do
@derive {C3P0.ID, id_field: :token, guid_field: :arn}
defstruct [:token, :arn, :name]
end
```
"""
@fallback_to_any true
@doc "Find the id of a piece of data"
@spec id(term) :: binary | nil
def id(data)
@doc "Find a global id for a piece of data"
@spec guid(term) :: binary | nil
def guid(data)
end
defimpl C3P0.ID, for: [PID, Reference] do
def id(pid), do: pid
def guid(pid), do: pid
end
defimpl C3P0.ID, for: Any do
defmacro __deriving__(module, _struct, options) do
quote do
defimpl C3P0.ID, for: unquote(module) do
opts = unquote(options)
id_field = Keyword.get(opts, :id_field)
guid_field = Keyword.get(opts, :guid_field) || id_field
unless id_field, do: raise "id field not provided to derive #{unquote(module)}"
@id_field id_field
@guid_field guid_field
def id(item), do: Map.get(item, @id_field)
def guid(item), do: Map.get(item, @guid_field)
end
end
end
def id(id) when is_binary(id), do: id
def id(id) when is_atom(id), do: id
def id(id) when is_number(id), do: id
def id(%{id: id}), do: id
def id(%{"id" => id}), do: id
def id(%{uuid: id}), do: id
def id(%{"uuid" => id}), do: id
def id(%{guid: id}), do: id
def id(%{"guid" => id}), do: id
def id(%{}), do: nil
def id(nil), do: nil
def id(v) do
raise Protocol.UndefinedError, protocol: C3P0.ID, value: v, description: "unknown value for id"
end
def guid(%{guid: id}), do: id
def guid(%{"guid" => id}), do: id
def guid(%{uuid: id}), do: id
def guid(%{"uuid" => id}), do: id
def guid(id), do: C3P0.ID.id(id)
end
|
lib/c3p0/id.ex
| 0.815122 | 0.756268 |
id.ex
|
starcoder
|
defmodule Chaperon.Util do
@moduledoc """
Helper functions used throughout `Chaperon`'s codebase.
"""
@spec preserve_vals_merge(map, map) :: map
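  # A sketch of the merge behaviour implemented below (clashing keys collect
  # both values, keeping map2's value first; example values are hypothetical):
  #
  #     preserve_vals_merge(%{a: 1}, %{a: 2, b: 3})
  #     #=> %{a: [2, 1], b: 3}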
def preserve_vals_merge(map1, map2) do
new_map =
for {k, v2} <- map2 do
case map1[k] do
nil ->
{k, v2}
v1 when is_list(v1) and is_list(v2) ->
{k, v2 ++ v1}
v1 when is_list(v1) ->
{k, [v2 | v1]}
v1 ->
{k, [v2, v1]}
end
end
|> Enum.into(%{})
map1
|> Map.merge(new_map)
end
@doc """
Converts a map's values to be prefixed (put in a tuple as the first element).
## Examples
iex> Chaperon.Util.map_prefix_value(%{foo: 1, bar: 2}, :wat)
%{foo: {:wat, 1}, bar: {:wat, 2}}
"""
@spec map_prefix_value(map, any) :: map
def map_prefix_value(map, prefix) do
for {k, v} <- map do
{k, {prefix, v}}
end
|> Enum.into(%{})
end
@doc """
  Inserts a given key-value pair (`{k2, v2}`) under any values within `map` that
  are also maps.
## Example
iex> m = %{a: 1, b: %{baz: 3}, c: %{foo: 1, bar: 2}}
iex> Chaperon.Util.map_nested_put(m, :baz, 10)
%{a: 1, b: %{baz: 10}, c: %{foo: 1, bar: 2, baz: 10}}
iex> Chaperon.Util.map_nested_put(m, :foo, "ok")
%{a: 1, b: %{baz: 3, foo: "ok"}, c: %{foo: "ok", bar: 2}}
"""
@spec map_nested_put(map, any, any) :: map
def map_nested_put(map, k2, v2) do
for {k, v} <- map do
case v do
v when is_map(v) ->
{k, Map.put(v, k2, v2)}
v ->
{k, v}
end
end
|> Enum.into(%{})
end
@doc """
Returns last `amount` elements in a given `Enum` as a `List`.
## Example
iex> alias Chaperon.Util
iex> [] |> Util.last(1)
[]
iex> [1] |> Util.last(1)
[1]
iex> [1,2,3,4] |> Util.last(1)
[4]
iex> [1,2,3,4] |> Util.last(2)
[3,4]
iex> [1,2,3,4] |> Util.last(3)
[2,3,4]
iex> [1,2,3,4] |> Util.last(4)
[1,2,3,4]
iex> [1,2,3,4] |> Util.last(5)
[1,2,3,4]
"""
def last(enum, amount) when is_list(enum) do
case Enum.count(enum) - amount do
n when n > 0 ->
enum
|> Enum.drop(n)
_ ->
enum
end
end
@spec shortened_module_name(module | map, non_neg_integer) :: String.t()
def shortened_module_name(mod, max_nesting \\ 2)
def shortened_module_name(%{name: name}, max_nesting) when is_binary(name) do
name
|> String.split(".")
|> last(max_nesting)
|> Enum.join(".")
end
def shortened_module_name(mod, max_nesting) do
mod
|> Module.split()
|> last(max_nesting)
|> Enum.join(".")
end
@spec module_name(module | %{name: String.t()}) :: String.t()
def module_name(%{name: name}) when is_binary(name), do: name
def module_name(mod) when is_atom(mod) do
mod
|> Module.split()
|> Enum.join(".")
end
@spec local_pid?(pid) :: boolean
def local_pid?(pid) do
case inspect(pid) do
"#PID<0." <> _ ->
true
_ ->
false
end
end
def percentile_name(percentile) do
p =
percentile
|> to_string
|> String.replace(".", "_")
:"percentile_#{p}"
end
end
|
lib/chaperon/util.ex
| 0.846578 | 0.486819 |
util.ex
|
starcoder
|
defmodule Txpost.Envelope do
@moduledoc """
CBOR Envelope module, implements BRFC `5b82a2ed7b16` ([CBOR Tx Envelope](cbor-tx-envelope.md)).
BRFC `5b82a2ed7b16` defines a standard for serializing a CBOR payload in order
to have consistnency when signing the payload with a ECDSA keypair.
The `:payload` attribute is a CBOR encoded binary [`Payload`](`t:Txpost.Payload.t/0`).
The `:pubkey` and `:signature` attributes are optional binaries.
## Examples
Example envelope with an unsigned payload.
%Txpost.Envelope{
payload: <<161, 100, 100, 97, 116, 97, 161, 101, 114, 97, 119, 116, 120, 106, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0>>
}
Example envelope with an signed payload.
%Txpost.Envelope{
payload: <<161, 100, 100, 97, 116, 97, 161, 101, 114, 97, 119, 116, 120, 106, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0>>,
pubkey: <<2, 170, 75, 142, 232, 142, 111, 76, 138, 31, 212, 197, 4, 20, 227, 157, 8, 252, 150, 79, 61, 83, 205, 99, 54, 225, 193, 254, 122, 200, 147, 51, 180>>,
signature: <<48, 68, 2, 32, 24, 134, 241, 47, 243, 122, 86, 199, 199, 220, 173, 209, 38, 189, 238, 84, 197, 20, 218, 193, 190, 35, 88, 95, 214, 137, 204, 206, 156, 21, 223, 5, 2, 32, 67, 243, 10, 255, 17, 52, 68, 176, 250, 253, 199, 208, 16, 167, 132, 183, 206, 49, 147, 241, 61, 117, 231, 254, 197, 52, 109, 45, 247, 78, 210, 62>>
}
"""
alias Txpost.Payload
import Txpost.Utils.Params
import Txpost.Utils.Tags
defstruct [:payload, :pubkey, :signature]
@typedoc "CBOR Envelope"
@type t :: %__MODULE__{
payload: binary,
pubkey: binary | nil,
signature: binary | nil
}
@doc """
Validates the given parameters and returns an [`Envelope`](`t:t/0`) struct or
returns a validation error message.
Parameters can be passed as either a map or keyword list. The payload
attribute can be an already encoded CBOR binary or a [`Payload`](`t:Txpost.Payload.t/0`) struct.
## Examples
iex> Txpost.Envelope.build(%{
...> payload: %Txpost.Payload{data: %{"rawtx" => <<1, 0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 >>}}
...> })
{:ok, %Txpost.Envelope{
payload: <<161, 100, 100, 97, 116, 97, 161, 101, 114, 97, 119, 116, 120, 74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0>>,
pubkey: nil,
signature: nil
}}
Returns an error when given invalid params.
iex> Txpost.Envelope.build(payload: ["not a valid payload"])
{:error, "Invalid param: payload"}
"""
@spec build(map | keyword) :: {:ok, t} | {:error, String.t}
def build(params) when is_map(params) or is_list(params) do
params
|> normalize_params([:payload, :pubkey, :signature])
|> encode_payload
|> validate_param(:payload, &is_binary/1)
|> validate_param(:pubkey, &is_binary/1, allow_blank: true)
|> validate_param(:signature, &is_binary/1, allow_blank: true)
|> case do
{:ok, params} ->
{:ok, struct(__MODULE__, params)}
{:error, reason} ->
{:error, reason}
end
end
@doc """
Decodes the given CBOR binary and returns an [`Envelope`](`t:t/0`) struct or
returns a validation error message.
## Examples
iex> Txpost.Envelope.decode(<<161, 103, 112, 97, 121, 108, 111, 97, 100, 120, 24, 161, 100, 100, 97, 116, 97, 161, 101, 114, 97, 119, 116, 120, 106, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0>>)
{:ok, %Txpost.Envelope{
payload: <<161, 100, 100, 97, 116, 97, 161, 101, 114, 97, 119, 116, 120, 106, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0>>,
pubkey: nil,
signature: nil
}}
Returns an error when given invalid binary.
iex> Txpost.Envelope.decode(<<0,1,2,3>>)
{:error, "Invalid payload binary"}
"""
@spec decode(binary) :: {:ok, t} | {:error, any}
def decode(data) when is_binary(data) do
case CBOR.decode(data) do
{:ok, data, _} when is_map(data) ->
data
|> detag
|> build
{:ok, _, _} ->
{:error, "Invalid payload binary"}
{:error, _reason} ->
{:error, "Invalid payload binary"}
end
end
@doc """
Decodes the payload of the given [`Envelope`](`t:t/0`) struct and returns a
[`Payload`](`t:t/0`) struct or returns a validation error message.
## Examples
iex> Txpost.Envelope.decode_payload(%Txpost.Envelope{
...> payload: <<161, 100, 100, 97, 116, 97, 161, 101, 114, 97, 119, 116, 120, 106, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0>>
...> })
{:ok, %Txpost.Payload{
data: %{"rawtx" => <<1, 0, 0, 0, 0, 0, 0, 0, 0, 0>>},
meta: %{}
}}
"""
@spec decode_payload(t) :: {:ok, Payload.t} | {:error, any}
def decode_payload(%__MODULE__{payload: data}),
do: Payload.decode(data)
@doc """
Encodes the given [`Envelope`](`t:t/0`) struct and returns a CBOR binary.
## Examples
iex> Txpost.Envelope.encode(%Txpost.Envelope{
...> payload: <<161, 100, 100, 97, 116, 97, 161, 101, 114, 97, 119, 116, 120, 74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0>>
...> })
<<161, 103, 112, 97, 121, 108, 111, 97, 100, 88, 24, 161, 100, 100, 97, 116, 97, 161, 101, 114, 97, 119, 116, 120, 74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0>>
"""
@spec encode(t) :: binary
def encode(%__MODULE__{} = env) do
env
|> to_map
|> entag
|> CBOR.encode
end
@doc """
Returns the given [`Envelope`](`t:t/0`) struct as a map with stringified keys.
The pubkey and signature attributes are removed if they are nil.
## Examples
iex> Txpost.Envelope.to_map(%Txpost.Envelope{
...> payload: <<161, 100, 100, 97, 116, 97, 161, 101, 114, 97, 119, 116, 120, 74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0>>
...> })
%{
"payload" => <<161, 100, 100, 97, 116, 97, 161, 101, 114, 97, 119, 116, 120, 74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0>>
}
"""
@spec to_map(t) :: map
def to_map(%__MODULE__{} = env) do
env
|> Map.from_struct
|> Enum.reject(fn {_k, v} -> is_nil(v) end)
|> Enum.map(fn {k, v} -> {Atom.to_string(k), v} end)
|> Enum.into(%{})
end
@doc """
Signs the [`Envelope`](`t:t/0`) payload with the given ECDSA private key.
NOT YET IMPLEMENTED
"""
@spec sign(t, binary) :: t
def sign(%__MODULE__{} = env, _private_key) do
IO.warn("Txpost.Envelope.sign/2 not yet implemented", [{__MODULE__, :sign, 2, []}])
{:ok, env}
#|> Map.put(:pubkey, "TODO")
#|> Map.put(:signature, "TODO")
end
@doc """
Verifies the [`Envelope`](`t:t/0`) signature against its payload and public
key, returning a boolean.
If no signature or public key is present, returns `false`.
## Examples
iex> Txpost.Envelope.verify(%Txpost.Envelope{
...> payload: <<161, 100, 100, 97, 116, 97, 161, 101, 114, 97, 119, 116, 120, 106, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0>>,
...> pubkey: <<2, 170, 75, 142, 232, 142, 111, 76, 138, 31, 212, 197, 4, 20, 227, 157, 8, 252, 150, 79, 61, 83, 205, 99, 54, 225, 193, 254, 122, 200, 147, 51, 180>>,
...> signature: <<48, 68, 2, 32, 24, 134, 241, 47, 243, 122, 86, 199, 199, 220, 173, 209, 38, 189, 238, 84, 197, 20, 218, 193, 190, 35, 88, 95, 214, 137, 204, 206, 156, 21, 223, 5, 2, 32, 67, 243, 10, 255, 17, 52, 68, 176, 250, 253, 199, 208, 16, 167, 132, 183, 206, 49, 147, 241, 61, 117, 231, 254, 197, 52, 109, 45, 247, 78, 210, 62>>
...> })
true
"""
@spec verify(t) :: boolean
def verify(%__MODULE__{pubkey: pubkey, signature: sig})
when is_nil(pubkey) or is_nil(sig),
do: false
def verify(%__MODULE__{payload: payload, pubkey: pubkey, signature: sig}),
do: :crypto.verify(:ecdsa, :sha256, payload, sig, [pubkey, :secp256k1])
# Encodes the payload struct as a CBOR binary
defp encode_payload(%{payload: %Payload{}} = params),
do: update_in(params.payload, &Payload.encode/1)
defp encode_payload(params), do: params
end
defmodule Txpost.Envelope.InvalidSignatureError do
@moduledoc "Error raised when Envelope signature is invalid."
defexception message: "invalid CBOR Envelope signature", plug_status: 403
end
|
lib/txpost/envelope.ex
| 0.922426 | 0.54825 |
envelope.ex
|
starcoder
|
defmodule RF24.Util do
alias Circuits.{
SPI,
GPIO
}
use Bitwise
import RF24Registers
@doc """
Write a register.
  `addr` can be an atom found in RF24Registers.reg/1 or a uint8 value.
  `value` can be a uint8 or a binary.
"""
def write_reg(rf24, addr, value) when is_atom(addr) do
write_reg(rf24, reg(addr), value)
end
def write_reg(rf24, addr, value) when value <= 255 do
write_reg(rf24, addr, <<value::8>>)
end
def write_reg(rf24, addr, value) when addr <= 31 and is_binary(value) do
rf24 = select(rf24)
    {:ok, _status} = spi_transfer(rf24.spi, <<0b001::3, addr::5>>)
{:ok, _return} = spi_transfer(rf24.spi, value)
unselect(rf24)
end
@doc "Reads a value in a register. Takes the same addr values as write_reg"
def read_reg(rf24, addr) do
<<value>> = read_reg_bin(rf24, addr)
value
end
@doc "Same as read_reg but returns the binary value"
def read_reg_bin(rf24, addr) when is_atom(addr) do
read_reg_bin(rf24, reg(addr))
end
def read_reg_bin(rf24, addr) when addr <= 31 do
rf24 = select(rf24)
    {:ok, _status} = spi_transfer(rf24.spi, <<0b000::3, addr::5>>)
{:ok, value} = spi_transfer(rf24.spi, <<0xFF::8>>)
unselect(rf24)
value
end
@doc "Reads the SETUP_AW register and returns the value in bits"
def read_addr_width(rf24) do
case read_reg_bin(rf24, :SETUP_AW) do
<<_::5, 0::3>> -> raise("invalid address width?")
# 3 bytes 24 bits
<<_::5, 0b01::3>> -> 24
# 4 bytes 32 bits
<<_::5, 0b10::3>> -> 32
# 5 bytes 40 bits
<<_::5, 0b11::3>> -> 40
end
end
@doc "Writes the address width. width must be one of 3, 4, or 5"
def write_address_width(rf24, 3) do
write_reg(rf24, :SETUP_AW, <<0::6, 0b01::2>>)
end
def write_address_width(rf24, 4) do
write_reg(rf24, :SETUP_AW, <<0::6, 0b10::2>>)
end
def write_address_width(rf24, 5) do
write_reg(rf24, :SETUP_AW, <<0::6, 0b11::2>>)
end
def write_rx_pipe_addr(rf24, pipe, addr) when pipe <= 5 and byte_size(addr) in [3, 4, 5] do
reg =
case pipe do
0x0 -> :RX_ADDR_P0
0x1 -> :RX_ADDR_P1
0x2 -> :RX_ADDR_P2
0x3 -> :RX_ADDR_P3
0x4 -> :RX_ADDR_P4
0x5 -> :RX_ADDR_P5
end
write_rx_pipe_addr(rf24, reg, addr)
end
def write_rx_pipe_addr(rf24, pipe, addr) when is_atom(pipe) and byte_size(addr) in [3, 4, 5] do
rf24
|> write_address_width(byte_size(addr))
|> write_reg(pipe, addr)
end
@doc "Wrapper around read_reg to set the rx addr on a pipe"
def read_rx_pipe_addr(rf24, pipe) when pipe <= 5 do
reg =
case pipe do
0x0 -> :RX_ADDR_P0
0x1 -> :RX_ADDR_P1
0x2 -> :RX_ADDR_P2
0x3 -> :RX_ADDR_P3
0x4 -> :RX_ADDR_P4
0x5 -> :RX_ADDR_P5
end
read_rx_pipe_addr(rf24, reg)
end
def read_rx_pipe_addr(rf24, pipe) when pipe in [:RX_ADDR_P0, :RX_ADDR_P1] do
addr_width = read_addr_width(rf24)
rf24 = select(rf24)
    {:ok, _status} = spi_transfer(rf24.spi, <<0b000::3, reg(pipe)::5>>)
{:ok, value} = spi_transfer(rf24.spi, <<0xFF::size(addr_width)>>)
unselect(rf24)
value
end
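  # Pipes 2-5 only store their least significant address byte in their own
  # register; the remaining high bytes are shared with pipe 1, so the clause
  # below reads pipe 1 first and splices in this pipe's final byte.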
def read_rx_pipe_addr(rf24, pipe)
when pipe in [:RX_ADDR_P2, :RX_ADDR_P3, :RX_ADDR_P4, :RX_ADDR_P5] do
addr_width = read_addr_width(rf24)
base_width = addr_width - 8
rf24 = select(rf24)
    {:ok, _status} = spi_transfer(rf24.spi, <<0b000::3, reg(:RX_ADDR_P1)::5>>)
{:ok, <<base::size(base_width), _::8>>} = spi_transfer(rf24.spi, <<0xFF::size(addr_width)>>)
unselect(rf24)
<<addr::8>> = read_reg_bin(rf24, pipe)
<<base::size(base_width), addr::8>>
end
@doc "Wrapper around write_reg to set the tx addr"
def write_tx_addr(rf24, address) when byte_size(address) in [3, 4, 5] do
rf24 = write_address_width(rf24, byte_size(address))
rf24 = select(rf24)
{:ok, _status} = spi_transfer(rf24.spi, <<0b001::3, reg(:TX_ADDR)::5>>)
{:ok, _} = spi_transfer(rf24.spi, address)
unselect(rf24)
end
@doc "Wrapper around read_reg to get the tx addr"
def read_tx_addr(rf24) do
    address_width = read_addr_width(rf24)
rf24 = select(rf24)
{:ok, _status} = spi_transfer(rf24.spi, <<0b000::3, reg(:TX_ADDR)::5>>)
    {:ok, value} = spi_transfer(rf24.spi, <<0xFF::size(address_width)>>)
unselect(rf24)
value
end
@doc "Reads the length of the data in the R_RX_PAYLOAD register"
def read_payload_length(rf24) do
select(rf24)
{:ok, <<_, length>>} = spi_transfer(rf24.spi, <<0b01100000, 0xFF>>)
unselect(rf24)
length
end
@doc "Reads `length` payload from the :R_RX_PAYLOAD"
def read_payload(rf24, length) do
rf24 = select(rf24)
{:ok, _status} = spi_transfer(rf24.spi, <<instr(:R_RX_PAYLOAD)>>)
{:ok, payload} = spi_transfer(rf24.spi, :binary.copy(<<0xFF>>, length))
unselect(rf24)
payload
end
@doc """
Set SETUP_RETR register.
Will write the values stored in state if not supplied explicitly.
"""
def write_retries(%{} = rf24) do
write_retries(rf24, rf24.auto_retransmit_delay, rf24.auto_retransmit_count)
end
def write_retries(rf24, delay, count) when delay <= 15 and count <= 15 do
# write_reg(SETUP_RETR, (delay & 0xf) << ARD | (count & 0xf) << ARC);
write_reg(
%{rf24 | auto_retransmit_delay: delay, auto_retransmit_count: count},
:SETUP_RETR,
<<delay::4, count::4>>
)
end
@doc "Sets the RF_CH register. `channel` must be <= 125"
def write_channel(rf24) do
write_channel(rf24, rf24.channel)
end
def write_channel(rf24, channel) when channel <= 125 do
write_reg(%{rf24 | channel: channel}, :RF_CH, channel)
end
@doc """
Sets the datarate.
channel must be one of
:RF24_250KBPS, :RF24_1MBPS, :RF24_2MBPS
"""
def write_data_rate(rf24) do
write_data_rate(rf24, rf24.data_rate)
end
def write_data_rate(rf24, data_rate)
when data_rate in [:RF24_250KBPS, :RF24_1MBPS, :RF24_2MBPS] do
<<cont_wave::1, _::1, _rf_dr_low::1, pll_lock::1, _rf_dr_high::1, rf_pwr::2, _::1>> =
read_reg_bin(rf24, :RF_SETUP)
case data_rate do
:RF24_250KBPS ->
        value = <<cont_wave::1, 0::1, 1::1, pll_lock::1, 0::1, rf_pwr::2, 0::1>>
write_reg(%{rf24 | data_rate: :RF24_250KBPS}, :RF_SETUP, value)
:RF24_2MBPS ->
        value = <<cont_wave::1, 0::1, 0::1, pll_lock::1, 1::1, rf_pwr::2, 0::1>>
write_reg(%{rf24 | data_rate: :RF24_2MBPS}, :RF_SETUP, value)
:RF24_1MBPS ->
value = <<cont_wave::1, 0::1, 0::1, pll_lock::1, 0::1, rf_pwr::2, 0::1>>
write_reg(%{rf24 | data_rate: :RF24_1MBPS}, :RF_SETUP, value)
end
end
@doc "Configures crc enabled and crc encoding"
def write_crc(rf24) do
<<_::4, _en_crc::1, _crco::1, pwr_up::1, prim_rx::1>> = read_reg_bin(rf24, :NRF_CONFIG)
crc? = if rf24.crc?, do: 1, else: 0
crc_2_bit? = if rf24.crc_2_bit?, do: 1, else: 0
write_reg(rf24, :NRF_CONFIG, <<0::4, crc?::1, crc_2_bit?::1, pwr_up::1, prim_rx::1>>)
end
@doc """
Enter transmit mode.
Packets will not be sent until there is a pulse on the CE pin
"""
def enable_ptx(rf24) do
# unset bit 0 on NRF_CONFIG
<<head::7, _prim_rx::1>> = read_reg_bin(rf24, :NRF_CONFIG)
write_reg(rf24, :NRF_CONFIG, <<head::7, 0::1>>)
end
@doc """
Enter receive mode.
Packets will not be received until there is a pulse on the CE pin
"""
def enable_prx(rf24) do
# set bit 0 on NRF_CONFIG
<<head::7, _prim_rx::1>> = read_reg_bin(rf24, :NRF_CONFIG)
write_reg(rf24, :NRF_CONFIG, <<head::7, 1::1>>)
end
@doc """
  Sets the `PWR_UP` bit on the CONFIG register if it is not already set.
  """
  def power_up(rf24) do
    # sets bit 1 (PWR_UP) on NRF_CONFIG if it is currently low
    case read_reg_bin(rf24, :NRF_CONFIG) do
      # bit 1 is low, so set it
      <<head::6, 0::1, prim_rx::1>> ->
        write_reg(rf24, :NRF_CONFIG, <<head::6, 1::1, prim_rx::1>>)
      # bit 1 is already high
      <<_head::6, 1::1, _prim_rx::1>> ->
rf24
end
end
@doc "drop the RX fifo"
def flush_rx(rf24) do
rf24 = select(rf24)
{:ok, _} = spi_transfer(rf24.spi, <<instr(:FLUSH_RX)>>)
unselect(rf24)
end
@doc "drop the TX fifo"
def flush_tx(rf24) do
rf24 = select(rf24)
{:ok, _} = spi_transfer(rf24.spi, <<instr(:FLUSH_TX)>>)
unselect(rf24)
end
@doc """
Send a payload.
  If ack? is true, the W_TX_PAYLOAD instruction is used,
  else the W_TX_PAYLOAD_NO_ACK instruction is used.
User is responsible for ensuring the correct flags are set
in the FEATURE register
"""
def send_payload(rf24, payload, ack?)
def send_payload(rf24, payload, true) when byte_size(payload) <= 32 do
send_payload(rf24, payload, 0b10100000)
end
def send_payload(rf24, payload, false) when byte_size(payload) <= 32 do
send_payload(rf24, payload, 0b10110000)
end
def send_payload(rf24, payload, instr) do
rf24 = enable_ptx(rf24)
rf24 = select(rf24)
{:ok, status} = spi_transfer(rf24.spi, <<instr>>)
{:ok, _} = spi_transfer(rf24.spi, payload)
unselect(rf24)
gpio_write(rf24.ce, 0)
Process.sleep(10)
gpio_write(rf24.ce, 1)
status
end
@doc "Write an ack packet for a pipe"
def send_ack_payload(rf24, pipe, payload) when byte_size(payload) <= 32 do
rf24 = enable_ptx(rf24)
rf24 = select(rf24)
{:ok, _} = spi_transfer(rf24.spi, <<0b10101::5, pipe::3>>)
{:ok, _} = spi_transfer(rf24.spi, payload)
gpio_write(rf24.ce, 0)
Process.sleep(10)
gpio_write(rf24.ce, 1)
unselect(rf24)
end
# this register is undocumented. Stuff doesn't work without it tho.
# i have no idea what it is
@doc false
def toggle_features(rf24) do
rf24 = select(rf24)
{:ok, _} = spi_transfer(rf24.spi, <<instr(:ACTIVATE), 0x73::8>>)
unselect(rf24)
end
@doc false
def select(rf24) do
gpio_write(rf24.csn, 0)
rf24
end
@doc false
def unselect(rf24) do
gpio_write(rf24.csn, 1)
rf24
end
@doc false
def gpio_open(pin, mode, opts \\ []) do
GPIO.open(pin, mode, opts)
end
@doc false
def gpio_write(gpio, value) do
GPIO.write(gpio, value)
end
@doc false
def gpio_set_interrupts(gpio, mode) do
GPIO.set_interrupts(gpio, mode)
end
@doc false
def spi_open(bus, opts) do
SPI.open(bus, opts)
end
@doc false
def spi_transfer(spi, data) do
SPI.transfer(spi, data)
end
@doc "Initialize the radio. Doesn't actually check for success."
def radio_init(rf24) do
with {:ok, ce} <- gpio_open(rf24.ce_pin, :output, initial_value: 0),
{:ok, csn} <- gpio_open(rf24.csn_pin, :output, initial_value: 1),
{:ok, irq} <- gpio_open(rf24.irq_pin, :input),
:ok <- gpio_set_interrupts(irq, :falling),
{:ok, spi} <- spi_open(rf24.spi_bus_name, mode: 0, speed_hz: 10_000_000) do
%{rf24 | ce: ce, csn: csn, irq: irq, spi: spi}
end
end
end
|
lib/rf24/util.ex
| 0.553264 | 0.535341 |
util.ex
|
starcoder
|
defmodule UderzoExample.Thermostat do
@moduledoc """
A basic thermostat display, mostly fake, to show off Uderzo
"""
use Clixir
@clixir_header "thermostat"
# A sample thermostat display.
def temp(t) do
# Fake the temperature
25 * :math.sin(t / 10)
end
def tim_init() do
base_dir = Application.app_dir(:uderzo_example, ".")
priv_dir = Path.absname("priv", base_dir)
create_font("sans", Path.join(priv_dir, "SourceCodePro-Regular.ttf"))
end
def_c create_font(name, file_name) do
cdecl "char *": [name, file_name]
cdecl int: retval
assert(nvgCreateFont(vg, name, file_name) >= 0)
end
def tim_render(win_width, win_height, t) do
inside = temp(t)
outside = temp(t - 10)
burn = inside < outside
draw_inside_temp(inside, win_width, win_height)
draw_outside_temp(outside, win_width, win_height)
draw_burn_indicator(burn, win_width, win_height)
end
defp left_align(x), do: 0.1 * x
defp display_temp(t), do: "#{:erlang.float_to_binary(t, [decimals: 1])}°C"
def draw_inside_temp(temp, w, h) do
left_align = left_align(w)
draw_small_text("Inside temp", left_align, 0.1 * h)
draw_big_text(display_temp(temp), left_align, 0.14 * h)
end
def draw_outside_temp(temp, w, h) do
left_align = left_align(w)
draw_small_text("Outside temp", left_align, 0.3 * h)
draw_big_text(display_temp(temp), left_align, 0.34 * h)
end
def draw_burn_indicator(_burn = true, w, h), do: nil #show_flame(w, h)
def draw_burn_indicator(_burn = false, _w, _h), do: nil
def draw_small_text(t, x, y), do: draw_text(t, String.length(t), 16.0, x, y)
def draw_big_text(t, x, y), do: draw_text(t, String.length(t), 40.0, x, y)
def_c show_flame(w, h) do
cdecl double: [w, h]
fprintf(stderr, "Here is where we draw a flame..;")
end
def_c draw_text(t, tl, sz, x, y) do
cdecl "char *": t
cdecl long: tl
cdecl double: [sz, x, y]
nvgFontSize(vg, sz)
nvgFontFace(vg, "sans")
nvgTextAlign(vg, NVG_ALIGN_LEFT|NVG_ALIGN_TOP)
nvgFillColor(vg, nvgRGBA(255, 255, 255, 255))
nvgText(vg, x, y, t, t + tl)
end
end
|
uderzo_example/lib/thermostat.ex
| 0.745861 | 0.456107 |
thermostat.ex
|
starcoder
|
defmodule ExPng.RawData do
@moduledoc """
  This struct provides an intermediate data format between a PNG image file and
  an `ExPng.Image` struct. Raw image file data is parsed into this struct
when reading from a PNG file, and when turning an `ExPng.Image` into a
saveable image file.
This data can be accessed via the `raw_data` field on an `ExPng.Image` struct,
but users have no need to manipulate this data directly.
"""
@signature <<0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A>>
alias ExPng.Chunks
alias Chunks.{Ancillary, End, Header, ImageData, Palette, Transparency}
@type t :: %__MODULE__{
header_chunk: Header.t(),
data_chunk: ImageData.t(),
palette_chunk: ExPng.maybe(Palette.t()),
transparency_chunk: ExPng.maybe(Transparency.t()),
ancillary_chunks: ExPng.maybe([Ancillary.t()]),
end_chunk: End.t()
}
defstruct [
:header_chunk,
:data_chunk,
:palette_chunk,
:transparency_chunk,
:ancillary_chunks,
:end_chunk
]
@doc false
def from_file(filename) do
with {:ok, @signature <> data} <- File.read(filename),
{:ok, header_chunk, data} <- parse_ihdr(data),
{:ok, chunks} <- parse_chunks(data, []),
{:ok, raw_data} <- from_chunks(header_chunk, chunks) do
{:ok, raw_data}
else
# file cannot be read
{:error, error} -> {:error, error, filename}
# file read, but cannot be parsed as a valid PNG
{:ok, _data} -> {:error, "malformed PNG signature", filename}
# error during parsing the PNG data
{:error, error, _data} -> {:error, error, filename}
end
end
@doc false
def from_binary(pngdata) do
with @signature <> data <- pngdata,
{:ok, header_chunk, data} <- parse_ihdr(data),
{:ok, chunks} <- parse_chunks(data, []),
{:ok, raw_data} <- from_chunks(header_chunk, chunks) do
{:ok, raw_data}
else
# data cannot be parsed as a valid PNG
{:ok, _data} -> {:error, "malformed PNG signature", pngdata}
# error during parsing the PNG data
{:error, error, _data} -> {:error, error, pngdata}
{:error, error} -> {:error, error, pngdata}
end
end
@doc false
def to_file(%__MODULE__{} = raw_data, filename, encoding_options \\ []) do
image_data = ImageData.to_bytes(raw_data.data_chunk, encoding_options)
palette_data =
case raw_data.palette_chunk do
nil -> ""
palette -> Palette.to_bytes(palette)
end
transparency_data =
case raw_data.transparency_chunk do
nil -> ""
transparency -> Transparency.to_bytes(transparency)
end
data =
@signature <>
Header.to_bytes(raw_data.header_chunk) <>
palette_data <>
transparency_data <>
image_data <>
End.to_bytes(raw_data.end_chunk)
File.write(filename, data)
end
@doc false
def to_binary(%__MODULE__{} = raw_data, encoding_options \\ []) do
image_data = ImageData.to_bytes(raw_data.data_chunk, encoding_options)
palette_data =
case raw_data.palette_chunk do
nil -> ""
palette -> Palette.to_bytes(palette)
end
transparency_data =
case raw_data.transparency_chunk do
nil -> ""
transparency -> Transparency.to_bytes(transparency)
end
data =
@signature <>
Header.to_bytes(raw_data.header_chunk) <>
palette_data <>
transparency_data <>
image_data <>
End.to_bytes(raw_data.end_chunk)
data
end
## PRIVATE
defp parse_ihdr(
<<13::32, "IHDR"::bytes, header_data::bytes-size(13), crc::32, rest::binary>> = data
) do
case validate_crc("IHDR", header_data, crc) do
true ->
case Chunks.from_type(:IHDR, header_data) do
{:ok, header_chunk} -> {:ok, header_chunk, rest}
error -> error
end
false ->
{:error, "malformed IHDR", data}
end
end
defp parse_ihdr(data), do: {:error, "malformed IHDR", data}
defp parse_chunks(_, [%End{} | _] = chunks), do: {:ok, Enum.reverse(chunks)}
defp parse_chunks(
<<size::32, type::bytes-size(4), chunk_data::bytes-size(size), crc::32, rest::binary>> =
data,
chunks
) do
case validate_crc(type, chunk_data, crc) do
true ->
with {:ok, new_chunk} <- Chunks.from_type(String.to_atom(type), chunk_data) do
parse_chunks(rest, [new_chunk | chunks])
end
false ->
{:error, "malformed #{type}", data}
end
end
defp from_chunks(header_chunk, chunks) do
with {:ok, data_chunks, chunks} <- find_image_data(chunks),
{:ok, end_chunk, chunks} <- find_end(chunks),
{:ok, transparency, chunks} <- find_transparency(chunks),
{:ok, palette, chunks} <- find_palette(chunks, header_chunk) do
{:ok, transparency} = Transparency.parse_data(transparency, header_chunk)
palette = Palette.parse_data(palette, transparency, header_chunk)
{
:ok,
%__MODULE__{
header_chunk: header_chunk,
data_chunk: ImageData.merge(data_chunks),
end_chunk: end_chunk,
palette_chunk: palette,
transparency_chunk: transparency,
ancillary_chunks: chunks
}
}
else
{:error, error} -> {:error, error}
end
end
defp find_image_data(chunks) do
case Enum.split_with(chunks, &(&1.type == :IDAT)) do
{[], _} -> {:error, "missing IDAT chunks"}
{image_data, chunks} -> {:ok, image_data, chunks}
end
end
defp find_end(chunks) do
case find_chunk(chunks, :IEND) do
nil -> {:error, "missing IEND chunk"}
chunk -> {:ok, chunk, Enum.reject(chunks, fn c -> c == chunk end)}
end
end
defp find_transparency(chunks) do
with transparency_chunk <- find_chunk(chunks, :tRNS) do
{:ok, transparency_chunk, Enum.reject(chunks, fn chunk -> chunk == transparency_chunk end)}
end
end
defp find_palette(chunks, %{color_mode: color_mode}) do
case {find_chunk(chunks, :PLTE), color_mode} do
{nil, 3} ->
{:error, "missing PLTE for color type 3"}
{plt, ct} when not is_nil(plt) and ct in [0, 4] ->
{:error, "PLTE present for grayscale image"}
{plt, _} ->
{:ok, plt, Enum.reject(chunks, fn c -> c == plt end)}
end
end
defp find_chunk(chunks, type) do
Enum.find(chunks, &(&1.type == type))
end
defp validate_crc(type, data, crc) do
:erlang.crc32([type, data]) == crc
end
end
|
lib/ex_png/raw_data.ex
| 0.814016 | 0.69438 |
raw_data.ex
|
starcoder
|
defmodule Conduit.Util do
@moduledoc """
Provides utilities to wait for something to happen
"""
@type attempt_function :: (() -> {:error, term} | term | no_return) | (integer() -> {:error, term} | term | no_return)
@doc """
Runs a function until it returns a truthy value.
A timeout can optionally be specified to limit how long a function is attempted.
## Examples
Conduit.Util.wait_until(fn ->
table
|> :ets.lookup(:thing)
|> List.first()
end)
Conduit.Util.wait_until(30_000, fn ->
table
|> :ets.lookup(:thing)
|> List.first()
end)
"""
@spec wait_until(timeout :: integer() | :infinity, attempt_function) :: :ok | {:error, term}
def wait_until(timeout \\ :infinity, fun) when is_function(fun) do
attempts = if(is_number(timeout), do: div(timeout, 10), else: timeout)
retry([backoff_factor: 1, attempts: attempts], fn delay ->
fun
|> is_function(0)
|> if(do: fun.(), else: fun.(delay))
|> case do
falsey when falsey in [nil, false] -> {:error, :timeout}
_ -> :ok
end
end)
end
@doc """
  Attempts to run a function and retries if it fails.
Allows the following options:
## Options
* `attempts` - Number of times to run the function before giving up. (defaults to 3)
  * `backoff_factor` - The factor by which the delay grows on each successive
    attempt. With a backoff factor of 2, each retry doubles the previous delay. Set to
1 to use the same delay each retry.
(defaults to 2)
* `jitter` - Size of randomness applied to delay. This is useful to prevent multiple
processes from retrying at the same time. (defaults to 0)
  * `delay` - How long to wait between attempts. (defaults to 10ms, capped at `max_delay` of 1000ms)
## Examples
Conduit.Util.retry(fn ->
# thing that sometimes fails
end)
Conduit.Util.retry([attempts: 20, delay: 100], fn ->
# thing that sometimes fails
end)
"""
@default_retry_opts %{
delay: 10,
backoff_factor: 2,
jitter: 0,
max_delay: 1_000,
attempts: 3
}
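  # With these defaults the attempt delays (before jitter) work out to 0ms,
  # 20ms and 40ms: the first attempt runs immediately and each retry scales
  # the base delay by backoff_factor^retries, capped at max_delay.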
@spec retry(opts :: Keyword.t(), attempt_function) :: term
def retry(opts \\ [], fun) when is_function(fun) do
opts = Map.merge(@default_retry_opts, Map.new(opts))
sequence()
|> delay(opts.delay, opts.backoff_factor)
|> jitter(opts.jitter)
|> max_delay(opts.max_delay)
|> limit(opts.attempts)
|> attempt(fun)
end
defp sequence do
Stream.iterate(0, &Kernel.+(&1, 1))
end
defp delay(stream, delay, backoff_factor) do
Stream.map(stream, fn
0 -> 0
retries -> delay * :math.pow(backoff_factor, retries)
end)
end
defp jitter(stream, jitter) do
Stream.map(stream, &round(:rand.uniform() * &1 * jitter + &1))
end
defp max_delay(stream, max_delay) do
Stream.map(stream, &min(&1, max_delay))
end
defp limit(stream, :infinity), do: stream
defp limit(stream, attempts) do
Stream.take(stream, attempts)
end
defp attempt(stream, fun) do
Enum.reduce_while(stream, nil, fn
0, _ ->
do_attempt(fun, 0)
delay, _ ->
Process.sleep(delay)
do_attempt(fun, delay)
end)
end
defp do_attempt(fun, delay) do
fun
|> is_function(0)
|> if(do: fun.(), else: fun.(delay))
|> case do
{:error, reason} ->
{:cont, {:error, reason}}
result ->
{:halt, result}
end
catch
:error, reason ->
{:cont, {:error, reason}}
end
end
|
lib/conduit/util.ex
| 0.871119 | 0.563678 |
util.ex
|
starcoder
|
defmodule Ecto.Entity do
@moduledoc """
This module is used to define an entity. An entity is a record with associated
meta data that is persisted to a repository.
Every entity is also a record, that means that you work with entities just
like you would work with records, to set the default values for the record
fields the `default` option is set in the `field` options.
## Example
defmodule User.Entity do
use Ecto.Entity
field :name, :string
field :age, :integer, default: 0
has_many :posts, Post
end
User.Entity.new
#=> User.Entity[]
In the majority of the cases though, an entity is defined inlined in a model:
defmodule User do
use Ecto.Model
queryable "users" do
field :name, :string
field :age, :integer, default: 0
has_many :posts, Post
end
end
User.Entity.new
#=> User.Entity[]
When used, it allows the following options:
* `:model` - Sets the default associated model;
* `:primary_key` - Sets the primary key, if this option is not set a primary
key named *id* of type *integer* will be generated. If
set to `false` no primary key will be generated, to set
a custom primary key give `{ name, type }` to the option.
In addition to the record functionality, Ecto also defines accessors and updater
functions for the primary key will be generated on the entity, specifically
`primary_key/1`, `primary_key/2` and `update_primary_key/2`.
"""
@type t :: Record.t
## API
@doc """
Defines a field on the entity with given name and type, will also create a
  record field. If the type is `:virtual` it won't be persisted.
## Options
* `:default` - Sets the default value on the entity and the record;
* `:primary_key` - Sets the field to be the primary key, the default
    primary key has to be overridden by setting its name to `nil`;
"""
defmacro field(name, type // :string, opts // []) do
quote do
Ecto.Entity.__field__(__MODULE__, unquote(name), unquote(type), unquote(opts))
end
end
@doc %S"""
Indicates a one-to-many association with another queryable, where this entity
has zero or more records of the queryable structure. The other queryable often
has a `belongs_to` field with the reverse association.
Creates a virtual field called `name`. The association can be accessed via
this field, see `Ecto.Associations.HasMany` for more information. Check the
examples to see how to perform queries on the association and
`Ecto.Query.join/3` for joins.
A `field/2` function will be generated with the name of the field; this
function will update what is loaded on the association. Note that Ecto never
persists associations when an entity is persisted so this function should be
used with care.
## Options
* `:foreign_key` - Sets the foreign key, this should map to a field on the
other entity, defaults to: `:"#{model}_id"`;
* `:primary_key` - Sets the key on the current entity to be used for the
association, defaults to the primary key on the entity;
## Examples
defmodule Post do
queryable "posts" do
has_many :comments, Comment
end
end
# Get all comments for a given post
post = Repo.get(Post, 42)
comments = Repo.all(post.comments)
# The comments can come preloaded on the post record
[post] = Repo.all(from(p in Post, where: p.id == 42, preload: :comments))
post.comments.to_list #=> [ Comment.Entity[...], ... ]
# Or via an association join
[post] = Repo.all(from(p in Post,
where: p.id == 42,
left_join: c in p.comments,
select: assoc(p, c)))
post.comments.to_list #=> [ Comment.Entity[...], ... ]
"""
defmacro has_many(name, queryable, opts // []) do
quote do
Ecto.Entity.__has_many__(__MODULE__, unquote(name), unquote(queryable), unquote(opts))
end
end
@doc %S"""
Indicates a one-to-one association with another queryable, where this entity
has zero or one records of the queryable structure. The other queryable often
has a `belongs_to` field with the reverse association.
Creates a virtual field called `name`. The association can be accessed via
this field, see `Ecto.Associations.HasOne` for more information. Check the
examples to see how to perform queries on the association and
`Ecto.Query.join/3` for joins.
A `field/2` function will be generated with the name of the field; this
function will update what is loaded on the association. Note that Ecto never
persists associations when an entity is persisted so this function should be
used with care.
## Options
* `:foreign_key` - Sets the foreign key, this should map to a field on the
other entity, defaults to: `:"#{model}_id"`;
* `:primary_key` - Sets the key on the current entity to be used for the
association, defaults to the primary key on the entity;
## Examples
defmodule Post do
queryable "posts" do
has_one :permalink, Permalink
end
end
# The permalink can come preloaded on the post record
[post] = Repo.all(from(p in Post, where: p.id == 42, preload: :permalink))
post.permalink.get #=> Permalink.Entity[...]
# Or via an association join
[post] = Repo.all(from(p in Post,
where: p.id == 42,
left_join: pl in p.permalink,
select: assoc(p, pl)))
post.permalink.get #=> Permalink.Entity[...]
"""
defmacro has_one(name, queryable, opts // []) do
quote do
Ecto.Entity.__has_one__(__MODULE__, unquote(name), unquote(queryable), unquote(opts))
end
end
@doc %S"""
Indicates a one-to-one association with another queryable, this entity
belongs to zero or one records of the queryable structure. The other queryable
often has a `has_one` or a `has_many` field with the reverse association.
Compared to `has_one` this association should be used where you would place
the foreign key on a SQL table.
Creates a virtual field called `name`. The association can be accessed via
this field, see `Ecto.Associations.BelongsTo` for more information. Check the
examples to see how to perform queries on the association and
`Ecto.Query.join/3` for joins.
Will generate a foreign key field. A `field/2` function will be generated with
the name of the field; this function will update what is loaded on the
association. Note that Ecto never persists associations when an entity is
persisted so this function should be used with care.
## Options
* `:foreign_key` - Sets the foreign key field name, defaults to:
`:"#{other_entity}_id"`;
* `:primary_key` - Sets the key on the other entity to be used for the
association, defaults to: `:id`;
## Examples
defmodule Comment do
queryable "comments" do
belongs_to :post, Post
end
end
# The post can come preloaded on the comment record
[comment] = Repo.all(from(c in Comment, where: c.id == 42, preload: :post))
comment.post.get #=> Post.Entity[...]
# Or via an association join
[comment] = Repo.all(from(c in Comment,
where: c.id == 42,
left_join: p in c.post,
select: assoc(c, p)))
comment.post.get #=> Post.Entity[...]
"""
defmacro belongs_to(name, queryable, opts // []) do
quote do
Ecto.Entity.__belongs_to__(__MODULE__, unquote(name), unquote(queryable), unquote(opts))
end
end
## Callbacks
@types %w(boolean string integer float binary list datetime interval virtual)a
@doc false
defmacro __using__(opts) do
quote bind_quoted: [opts: opts] do
import Ecto.Entity
@before_compile Ecto.Entity
@ecto_fields []
@record_fields []
@ecto_primary_key nil
Module.register_attribute(__MODULE__, :ecto_assocs, accumulate: true)
@ecto_model opts[:model]
field(:model, :virtual, default: opts[:model])
case opts[:primary_key] do
nil ->
field(:id, :integer, primary_key: true)
false ->
:ok
{ name, type } ->
field(name, type, primary_key: true)
other ->
raise ArgumentError, message: ":primary_key must be false or { name, type }"
end
end
end
@doc false
defmacro __before_compile__(env) do
mod = env.module
primary_key = Module.get_attribute(mod, :ecto_primary_key)
all_fields = Module.get_attribute(mod, :ecto_fields) |> Enum.reverse
assocs = Module.get_attribute(mod, :ecto_assocs) |> Enum.reverse
record_fields = Module.get_attribute(mod, :record_fields)
Record.deffunctions(record_fields, env)
fields = Enum.filter(all_fields, fn({ _, opts }) -> opts[:type] != :virtual end)
[ ecto_fields(fields),
ecto_assocs(assocs, primary_key, fields),
ecto_primary_key(primary_key),
ecto_helpers(fields, all_fields) ]
end
# TODO: Check that the opts are valid for the given type,
# especially check the default value
@doc false
def __field__(mod, name, type, opts) do
check_type!(type)
fields = Module.get_attribute(mod, :ecto_fields)
if opts[:primary_key] do
if pk = Module.get_attribute(mod, :ecto_primary_key) do
raise ArgumentError, message: "primary key already defined as `#{pk}`"
else
Module.put_attribute(mod, :ecto_primary_key, name)
end
end
clash = Enum.any?(fields, fn({ prev, _ }) -> name == prev end)
if clash do
raise ArgumentError, message: "field `#{name}` was already set on entity"
end
record_fields = Module.get_attribute(mod, :record_fields)
Module.put_attribute(mod, :record_fields, record_fields ++ [{ name, opts[:default] }])
opts = Enum.reduce([:default, :primary_key], opts, &Dict.delete(&2, &1))
Module.put_attribute(mod, :ecto_fields, [{ name, [type: type] ++ opts }|fields])
end
@doc false
def __has_many__(mod, name, queryable, opts) do
check_foreign_key!(mod, opts[:foreign_key])
assoc = Ecto.Associations.HasMany.__assoc__(:new, name, mod)
__field__(mod, :"__#{name}__", :virtual, default: assoc)
opts = [type: :has_many, queryable: queryable] ++ opts
Module.put_attribute(mod, :ecto_assocs, { name, opts })
end
@doc false
def __has_one__(mod, name, queryable, opts) do
check_foreign_key!(mod, opts[:foreign_key])
assoc = Ecto.Associations.HasOne.__assoc__(:new, name, mod)
__field__(mod, :"__#{name}__", :virtual, default: assoc)
opts = [type: :has_one, queryable: queryable] ++ opts
Module.put_attribute(mod, :ecto_assocs, { name, opts })
end
@doc false
def __belongs_to__(mod, name, queryable, opts) do
assoc_name = queryable |> Module.split |> List.last |> String.downcase
opts = opts
|> Keyword.put_new(:primary_key, :id)
|> Keyword.put_new(:foreign_key, :"#{assoc_name}_id")
__field__(mod, opts[:foreign_key], :integer, [])
assoc = Ecto.Associations.BelongsTo.__assoc__(:new, name, mod)
__field__(mod, :"__#{name}__", :virtual, default: assoc)
opts = [type: :belongs_to, queryable: queryable] ++ opts
Module.put_attribute(mod, :ecto_assocs, { name, opts })
end
## Helpers
defp check_type!({ outer, inner }) when is_atom(outer) do
check_type!(outer)
check_type!(inner)
end
defp check_type!(type) do
unless type in @types do
raise ArgumentError, message: "`#{Macro.to_string(type)}` is not a valid field type"
end
end
defp check_foreign_key!(mod, foreign_key) do
model = Module.get_attribute(mod, :ecto_model)
if nil?(model) and nil?(foreign_key) do
raise ArgumentError, message: "need to set `foreign_key` option for " <>
"assocation when model name can't be infered"
end
end
defp ecto_fields(fields) do
quoted = Enum.map(fields, fn({ name, opts }) ->
quote do
def __entity__(:field, unquote(name)), do: unquote(opts)
def __entity__(:field_type, unquote(name)), do: unquote(opts[:type])
end
end)
field_names = Enum.map(fields, &elem(&1, 0))
quoted ++ [ quote do
def __entity__(:field, _), do: nil
def __entity__(:field_type, _), do: nil
def __entity__(:field_names), do: unquote(field_names)
end ]
end
defp ecto_assocs(assocs, primary_key, fields) do
quoted = Enum.map(assocs, fn({ name, opts, }) ->
quote bind_quoted: [name: name, opts: opts, primary_key: primary_key, fields: fields] do
pk = opts[:primary_key] || primary_key
virtual_name = :"__#{name}__"
if nil?(pk) do
raise ArgumentError, message: "need to set `primary_key` option for " <>
"association when entity has no primary key"
end
if opts[:type] in [:has_many, :has_one] do
unless Enum.any?(fields, fn { name, _ } -> pk == name end) do
raise ArgumentError, message: "`primary_key` option on association " <>
"doesn't match any field on the entity"
end
end
refl = Ecto.Associations.create_reflection(opts[:type], name, @ecto_model,
__MODULE__, pk, opts[:queryable], opts[:foreign_key])
def __entity__(:association, unquote(name)) do
unquote(refl |> Macro.escape)
end
# TODO: Simplify this once Elixir 0.11 is out
record_args = quote do: [{unquote(virtual_name), assoc}]
if opts[:type] in [:has_many, :has_one] do
record_args = quote(do: [{unquote(pk), pk}]) ++ record_args
def unquote(name)(__MODULE__[unquote_splicing(record_args)]) do
if nil?(pk) do
raise ArgumentError, message: "cannot access association when its " <>
"primary key is not set on the entity"
end
assoc.__assoc__(:primary_key, pk)
end
else
def unquote(name)(__MODULE__[unquote_splicing(record_args)]) do
assoc
end
end
if opts[:type] == :has_many do
def unquote(name)(value, __MODULE__[unquote_splicing(record_args)])
when is_list(value) do
assoc = assoc.__assoc__(:loaded, value)
__MODULE__[unquote_splicing(record_args)]
end
else
def unquote(name)(value, __MODULE__[unquote_splicing(record_args)])
when is_record(value, unquote(opts[:queryable])) do
assoc = assoc.__assoc__(:loaded, value)
__MODULE__[unquote_splicing(record_args)]
end
end
end
end)
quoted ++ [ quote do
def __entity__(:association, _), do: nil
end ]
end
defp ecto_primary_key(primary_key) do
quote do
def __entity__(:primary_key), do: unquote(primary_key)
if unquote(primary_key) do
def primary_key(record), do: unquote(primary_key)(record)
def primary_key(value, record), do: unquote(primary_key)(value, record)
def update_primary_key(fun, record), do: unquote(:"update_#{primary_key}")(fun, record)
else
def primary_key(_record), do: nil
def primary_key(_value, record), do: record
def update_primary_key(_fun, record), do: record
end
end
end
defp ecto_helpers(fields, all_fields) do
field_names = Enum.map(fields, &elem(&1, 0))
all_field_names = Enum.map(all_fields, &elem(&1, 0))
quote do
# TODO: This can be optimized
def __entity__(:allocate, values) do
zip = Enum.zip(unquote(field_names), values)
__MODULE__.new(zip)
end
def __entity__(:entity_kw, entity, opts // []) do
filter_pk = opts[:primary_key] == false
primary_key = __entity__(:primary_key)
[_module|values] = tuple_to_list(entity)
zipped = Enum.zip(unquote(all_field_names), values)
Enum.filter(zipped, fn { field, _ } ->
__entity__(:field, field) &&
(not filter_pk || (filter_pk && field != primary_key))
end)
end
end
end
end
|
lib/ecto/entity.ex
| 0.88897 | 0.61477 |
entity.ex
|
starcoder
|
defmodule Mix.Tasks.Escript.Build do
use Mix.Task
use Bitwise, only_operators: true
@shortdoc "Builds an escript for the project"
@recursive true
@moduledoc ~S"""
Builds an escript for the project.
An escript is an executable that can be invoked from the
command line. An escript can run on any machine that has
Erlang installed and by default does not require Elixir to
be installed, as Elixir is embedded as part of the escript.
This task guarantees the project and its dependencies are
compiled and packages them inside an escript.
## Command line options
* `--force` - forces compilation regardless of modification times
* `--no-compile` - skips compilation to .beam files
## Configuration
The following option must be specified in your `mix.exs` under `:escript`
key:
* `:main_module` - the module to be invoked once the escript starts.
The module must contain a function named `main/1` that will receive the
command line arguments as binaries.
The remaining options can be specified to further customize the escript:
* `:name` - the name of the generated escript.
Defaults to app name.
* `:path` - the path to write the escript to.
Defaults to app name.
* `:app` - the app to start with the escript.
Defaults to app name. Set it to `nil` if no application should
be started.
* `:embed_elixir` - if `true` embed elixir and its children apps
(`ex_unit`, `mix`, etc.) mentioned in the `:applications` list inside the
`application` function in `mix.exs`.
Defaults to `true`.
* `:shebang` - shebang interpreter directive used to execute the escript.
Defaults to `"#! /usr/bin/env escript\n"`.
* `:comment` - comment line to follow shebang directive in the escript.
Defaults to `""`.
* `:emu_args` - emulator arguments to embed in the escript file.
Defaults to `""`.
## Example
defmodule MyApp.Mixfile do
def project do
[app: :myapp,
version: "0.0.1",
escript: escript]
end
def escript do
[main_module: MyApp.CLI]
end
end
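With a configuration like the one above, the escript can be built and invoked
from the command line (the file name comes from the app name, `myapp` here):
$ mix escript.build
$ ./myapp
Any command line arguments are passed to `MyApp.CLI.main/1` as a list of binaries.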
"""
def run(args) do
{opts, _, _} = OptionParser.parse(args, switches: [force: :boolean, compile: :boolean])
# Require the project to be available
Mix.Project.get!
if Keyword.get(opts, :compile, true) do
Mix.Task.run :compile, args
end
escriptize(Mix.Project.config, opts[:force])
end
defp escriptize(project, force) do
escript_opts = project[:escript] || []
script_name = to_string(escript_opts[:name] || project[:app])
filename = escript_opts[:path] || script_name
main = escript_opts[:main_module]
app = Keyword.get(escript_opts, :app, project[:app])
files = project_files()
escript_mod = String.to_atom(Atom.to_string(app) <> "-escript-main")
cond do
!script_name ->
Mix.raise "Could not generate escript, no name given, " <>
"set :name escript option or :app in the project settings"
!main or !Code.ensure_loaded?(main) ->
Mix.raise "Could not generate escript, please set :main_module " <>
"in your project configuration (under `:escript` option) to a module that implements main/1"
force || Mix.Utils.stale?(files, [filename]) ->
tuples = gen_main(escript_mod, main, app) ++
to_tuples(files) ++ deps_tuples() ++ embed_tuples(escript_opts)
case :zip.create 'mem', tuples, [:memory] do
{:ok, {'mem', zip}} ->
shebang = escript_opts[:shebang] || "#! /usr/bin/env escript\n"
comment = build_comment(escript_opts[:comment])
emu_args = build_emu_args(escript_opts[:emu_args], escript_mod)
script = IO.iodata_to_binary([shebang, comment, emu_args, zip])
File.mkdir_p!(Path.dirname(filename))
File.write!(filename, script)
set_perms(filename)
{:error, error} ->
Mix.raise "Error creating escript: #{error}"
end
Mix.shell.info "Generated escript #{filename}"
:ok
true ->
:noop
end
end
defp project_files do
get_files(Mix.Project.app_path)
end
defp get_files(app) do
Path.wildcard("#{app}/ebin/*.{app,beam}") ++
(Path.wildcard("#{app}/priv/**/*") |> Enum.filter(&File.regular?/1))
end
defp get_tuples(app) do
get_files(app) |> to_tuples
end
defp to_tuples(files) do
for f <- files do
{String.to_char_list(Path.basename(f)), File.read!(f)}
end
end
defp set_perms(filename) do
stat = File.stat!(filename)
:ok = File.chmod(filename, stat.mode ||| 0o111)
end
defp deps_tuples do
deps = Mix.Dep.loaded(env: Mix.env) || []
Enum.flat_map(deps, fn dep -> get_tuples(dep.opts[:build]) end)
end
defp embed_tuples(escript_opts) do
if Keyword.get(escript_opts, :embed_elixir, true) do
Enum.flat_map [:elixir|extra_apps()], &app_tuples(&1)
else
[]
end
end
defp extra_apps() do
mod = Mix.Project.get!
extra_apps =
if function_exported?(mod, :application, 0) do
mod.application[:applications]
end
Enum.filter(extra_apps || [], &(&1 in [:eex, :ex_unit, :mix, :iex, :logger]))
end
defp app_tuples(app) do
case :code.where_is_file('#{app}.app') do
:non_existing -> Mix.raise "Could not find application #{app}"
file -> get_tuples(Path.dirname(Path.dirname(file)))
end
end
defp build_comment(user_comment) do
"%% #{user_comment}\n"
end
defp build_emu_args(user_args, escript_mod) do
"%%! -escript main #{escript_mod} #{user_args}\n"
end
defp gen_main(name, module, app) do
{:module, ^name, binary, _} =
defmodule name do
@module module
@app app
def main(args) do
case :application.ensure_all_started(:elixir) do
{:ok, _} ->
start_app(@app)
args = Enum.map(args, &List.to_string(&1))
Kernel.CLI.run fn _ -> @module.main(args) end, true
_ ->
io_error "Elixir is not available, aborting."
System.halt(1)
end
end
defp start_app(nil) do
:ok
end
defp start_app(app) do
case :application.ensure_all_started(app) do
{:ok, _} -> :ok
{:error, {app, reason}} ->
io_error "Could not start application #{app}: " <>
Application.format_error(reason)
System.halt(1)
end
end
defp io_error(message) do
IO.puts :stderr, IO.ANSI.format([:red, :bright, message])
end
end
[{'#{name}.beam', binary}]
end
end
|
lib/mix/lib/mix/tasks/escript.build.ex
| 0.772015 | 0.411879 |
escript.build.ex
|
starcoder
|
defmodule Formular do
require Logger
@kernel_functions Formular.DefaultFunctions.kernel_functions()
@kernel_macros Formular.DefaultFunctions.kernel_macros()
@default_eval_options []
@default_max_heap_size :infinity
@default_timeout :infinity
@moduledoc """
A tiny extendable DSL evaluator. It's a wrapper around Elixir's `Code.eval_string/3` or `Code.eval_quoted/3`, with the following limitations:
- No calling module functions;
- No calling some functions which can cause VM to exit;
- No sending messages;
- (optional) memory usage limit;
- (optional) execution time limit.
Here's an example using this module to evaluate a discount number against an order struct:
```elixir
iex> discount_formula = ~s"
...> case order do
...> # old books get a big promotion
...> %{book: %{year: year}} when year < 2000 ->
...> 0.5
...>
...> %{book: %{tags: tags}} ->
...> # Elixir books!
...> if ~s{elixir} in tags do
...> 0.9
...> else
...> 1.0
...> end
...>
...> _ ->
...> 1.0
...> end
...> "
...>
...> book_order = %{
...> book: %{
...> title: "Elixir in Action", year: 2019, tags: ["elixir"]
...> }
...> }
...>
...> Formular.eval(discount_formula, [order: book_order])
{:ok, 0.9}
```
The code being evaluated is just a piece of Elixir code, so it can be expressive when describing business rules.
## Literals
```elixir
# number
iex> Formular.eval("1", [])
{:ok, 1} # <- note that it's an integer
# plain string
iex> Formular.eval(~s["some text"], [])
{:ok, "some text"}
# atom
iex> Formular.eval(":foo", [])
{:ok, :foo}
# list
iex> Formular.eval("[:foo, Bar]", [])
{:ok, [:foo, Bar]}
# keyword list
iex> Formular.eval("[a: 1, b: :hi]", [])
{:ok, [a: 1, b: :hi]}
```
## Variables
Variables can be passed within the `binding` parameter.
```elixir
# bound value
iex> Formular.eval("1 + foo", [foo: 42])
{:ok, 43}
```
## Functions in the code
### Kernel functions and macros
Kernel functions and macros are supported to a limited extent. Only a selected list of them is available out of the box, so that dangerous functions such as `Kernel.exit/1` cannot be invoked.
Supported functions from `Kernel` are:
```elixir
#{inspect(@kernel_functions, pretty: true)}
```
Supported macros from `Kernel` are:
```elixir
#{inspect(@kernel_macros, pretty: true)}
```
Example:
```elixir
# Kernel function
iex> Formular.eval("min(5, 100)", [])
{:ok, 5}
iex> Formular.eval("max(5, 100)", [])
{:ok, 100}
```
### Custom functions
Custom functions can be provided in two ways, either in a binding lambda:
```elixir
# bound function
iex> Formular.eval("1 + add.(-1, 5)", [add: &(&1 + &2)])
{:ok, 5}
```
... or with a context module:
```elixir
iex> defmodule MyContext do
...> def foo() do
...> 42
...> end
...> end
...> Formular.eval("10 + foo", [], context: MyContext)
{:ok, 52}
```
**Directly calling module functions in the code is disallowed** for security reasons. For example:
```elixir
iex> Formular.eval("Map.new", [])
{:error, :no_calling_module_function}
iex> Formular.eval("min(0, :os.system_time())", [])
{:error, :no_calling_module_function}
```
## Evaluating AST instead of plain string code
You may want to use an AST instead of a string for performance reasons. In this case, the AST can be passed to `eval/3`:
```elixir
iex> "a = b = 10; a * b" |> Code.string_to_quoted!() |> Formular.eval([])
{:ok, 100}
```
...so that you don't have to parse it every time before evaluating it.
## Compiling the code into an Elixir module
In most cases the `Code.eval_*` functions are fast enough for your application. However, compiling to an Elixir module will significantly improve performance.
Code can be compiled into an Elixir module via `Formular.compile_to_module!/3` function, as the following:
```elixir
iex> code = quote do: min(a, b)
...> compiled = Formular.compile_to_module!(code, MyCompiledMod)
{:module, MyCompiledMod}
...> Formular.eval(compiled, [a: 5, b: 15], timeout: 5_000)
{:ok, 5}
```
Alternatively, you can directly call `MyCompiledMod.run(a: 5, b: 15)`
in which case no CPU or memory limits apply.
## Limiting execution time
The execution time can be limited with the `:timeout` option:
```elixir
iex> sleep = fn -> :timer.sleep(:infinity) end
...> Formular.eval("sleep.()", [sleep: sleep], timeout: 10)
{:error, :timeout}
```
If no `:timeout` option is given, the evaluation time is not limited (the default is `:infinity`).
## Limiting heap usage
The evaluation can also be limited in heap size with the `:max_heap_size` option. When the limit is exceeded, an error `{:error, :killed}` will be returned.
Example:
```elixir
iex> code = "for a <- 0..999_999_999_999, do: to_string(a)"
...> Formular.eval(code, [], timeout: :infinity, max_heap_size: 1_000)
{:error, :killed}
```
If no `:max_heap_size` option is given, heap usage is not limited (the default is `:infinity`).
"""
@supervisor Formular.Tasks
@type code :: binary() | Macro.t() | {:module, module()}
@type option ::
{:context, context()}
| {:max_heap_size, non_neg_integer() | :infinity}
| {:timeout, non_neg_integer() | :infinity}
@type context :: module()
@type options :: [option()]
@type eval_result :: {:ok, term()} | {:error, term()}
@doc """
Evaluate the code with binding context.
## Parameters
- `code` : code to eval. Could be a binary, or parsed AST.
- `binding` : the variable binding to support the evaluation
- `options` : current these options are supported:
- `context` : The module to import before evaluation.
- `timeout` : Terminates the evaluation after the given number of milliseconds. `#{@default_timeout}` by default.
- `max_heap_size` : A limit on heap memory usage, in words. If set to zero, the max heap size limit is disabled. `#{@default_max_heap_size}` by default.
## Examples
```elixir
iex> Formular.eval("1", [])
{:ok, 1}
iex> Formular.eval(~s["some text"], [])
{:ok, "some text"}
iex> Formular.eval("min(5, 100)", [])
{:ok, 5}
iex> Formular.eval("max(5, 100)", [])
{:ok, 100}
iex> Formular.eval("count * 5", [count: 6])
{:ok, 30}
iex> Formular.eval("add.(1, 2)", [add: &(&1 + &2)])
{:ok, 3}
iex> Formular.eval("Map.new", [])
{:error, :no_calling_module_function}
iex> Formular.eval("Enum.count([1])", [])
{:error, :no_calling_module_function}
iex> Formular.eval("min(0, :os.system_time())", [])
{:error, :no_calling_module_function}
iex> Formular.eval("inspect.(System.A)", [inspect: &Kernel.inspect/1])
{:ok, "System.A"}
iex> Formular.eval "f = &IO.inspect/1", []
{:error, :no_calling_module_function}
iex> Formular.eval("mod = IO; mod.inspect(1)", [])
{:error, :no_calling_module_function}
iex> "a = b = 10; a * b" |> Code.string_to_quoted!() |> Formular.eval([])
{:ok, 100}
```
"""
@spec eval(code, binding :: keyword(), options()) :: eval_result()
def eval(code, binding, opts \\ @default_eval_options)
def eval({:module, mod}, binding, opts),
do: spawn_and_exec(fn -> {:ok, mod.run(binding)} end, opts)
def eval(text, binding, opts) when is_binary(text) do
with {:ok, ast} <- Code.string_to_quoted(text) do
eval_ast(ast, binding, opts)
end
end
def eval(ast, binding, opts),
do: eval_ast(ast, binding, opts)
defp eval_ast(ast, binding, opts) do
with :ok <- valid?(ast) do
spawn_and_exec(
fn -> do_eval(ast, binding, opts[:context]) end,
opts
)
end
end
defp spawn_and_exec(fun, opts) do
timeout = Keyword.get(opts, :timeout, @default_timeout)
max_heap_size = Keyword.get(opts, :max_heap_size, @default_max_heap_size)
case {timeout, max_heap_size} do
{:infinity, :infinity} ->
fun.()
_ ->
{pid, ref} = spawn_task(fun, max_heap_size)
receive do
{:result, ret} ->
Process.demonitor(ref, [:flush])
ret
{:DOWN, ^ref, :process, ^pid, reason} ->
Logger.error("Evaluating process killed, reason: #{inspect(reason)}")
{:error, :killed}
after
timeout ->
Process.demonitor(ref, [:flush])
:ok = Task.Supervisor.terminate_child(@supervisor, pid)
{:error, :timeout}
end
end
end
defp spawn_task(fun, max_heap_size) do
parent = self()
{:ok, pid} =
Task.Supervisor.start_child(
@supervisor,
fn ->
if max_heap_size != :infinity do
Process.flag(:max_heap_size, max_heap_size)
end
ret = fun.()
send(parent, {:result, ret})
end
)
ref = Process.monitor(pid)
{pid, ref}
end
defp do_eval(ast, binding, context) do
{ret, _binding} =
ast
|> Code.eval_quoted(
binding,
functions: imported_functions(context),
macros: imported_macros(context),
requires: [Elixir.Kernel]
)
{:ok, ret}
rescue
err ->
{:error, err}
end
defp imported_functions(nil),
do: [{Elixir.Kernel, @kernel_functions}]
defp imported_functions(mod) when is_atom(mod),
do: [
{mod, mod.__info__(:functions)},
{Elixir.Kernel, @kernel_functions}
]
defp imported_macros(nil),
do: [{Elixir.Kernel, @kernel_macros}]
defp imported_macros(mod) when is_atom(mod),
do: [
{mod, mod.__info__(:macros)},
{Elixir.Kernel, @kernel_macros}
]
defp valid?(ast) do
# credo:disable-for-next-line
case check_rules(ast) do
false ->
:ok
ret ->
{:error, ret}
end
end
defp check_rules({:., _pos, [Access, :get]}),
do: false
defp check_rules({:., _pos, [_callee, func]}) when is_atom(func),
do: :no_calling_module_function
defp check_rules({:import, _pos, [_ | _]}),
do: :no_import_or_require
defp check_rules({:require, _pos, [_]}),
do: :no_import_or_require
defp check_rules({op, _pos, args}),
do: check_rules(op) || check_rules(args)
defp check_rules([]),
do: false
defp check_rules([ast | rest]),
do: check_rules(ast) || check_rules(rest)
defp check_rules(_),
do: false
@doc """
Compile the code into an Elixir module function.
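A minimal sketch (the module name `MyMod` and the expression are arbitrary):
```elixir
{:module, MyMod} = Formular.compile_to_module!("a + b", MyMod)
MyMod.run(a: 1, b: 2)
#=> 3
```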
"""
@spec compile_to_module!(code(), module(), context()) :: {:module, module()}
def compile_to_module!(code, mod, context \\ nil)
def compile_to_module!(code, mod, context) when is_binary(code),
do: code |> Code.string_to_quoted!() |> compile_to_module!(mod, context)
def compile_to_module!(ast, mod, context) do
with :ok <- valid?(ast) do
env = %Macro.Env{
functions: imported_functions(context),
macros: imported_macros(context),
requires: [Elixir.Kernel]
}
Formular.Compiler.create_module(mod, ast, env)
end
end
@doc """
Returns used variables in the code. This can be helpful if
you intend to build some UI based on the variables, or to
validate if the code is using variables within the allowed
list.
## Example
```elixir
iex> code = "f.(a + b)"
...> Formular.used_vars(code)
[:a, :b, :f]
```
"""
@spec used_vars(code()) :: [atom()]
def used_vars(code) when is_binary(code),
do: code |> Code.string_to_quoted!() |> used_vars()
def used_vars(code),
do: Formular.Compiler.extract_vars(code)
end
|
lib/formular.ex
| 0.891599 | 0.84241 |
formular.ex
|
starcoder
|
defmodule Sanbase.Signal do
@moduledoc """
Dispatch module used for fetching signals.
As there is a single signal adapter now, the dispatching is done directly.
After a second signals source is introduced, a dispatching logic similar
to the one found in Sanbase.Metric should be implemented.
"""
alias Sanbase.Signal.SignalAdapter
alias Sanbase.Signal.Behaviour, as: Type
@type datetime :: DateTime.t()
@type signal :: Type.signal()
@type signals :: :all | list(signal)
@type aggregation :: Type.aggregation()
@type interval :: Type.interval()
@type selector :: Type.selector()
@type raw_signals_selector :: :all | selector()
@type opts :: Keyword.t()
@spec has_signal?(signal) :: true | {:error, String.t()}
def has_signal?(signal), do: SignalAdapter.has_signal?(signal)
@doc ~s"""
Get available aggregations
"""
@spec available_aggregations() :: list(aggregation)
def available_aggregations(), do: SignalAdapter.available_aggregations()
@doc ~s"""
Get the human readable name representation of a given signal
"""
@spec human_readable_name(signal) :: {:ok, String.t()}
def human_readable_name(signal), do: {:ok, SignalAdapter.human_readable_name(signal)}
@doc ~s"""
Get a list of the free signals
"""
@spec free_signals() :: list(signal)
def free_signals(), do: SignalAdapter.free_signals()
@doc ~s"""
Get a list of the restricted signals
"""
@spec restricted_signals() :: list(signal)
def restricted_signals(), do: SignalAdapter.restricted_signals()
@doc ~s"""
Get a map where the key is a signal and the value is its access restriction
"""
@spec access_map() :: map()
def access_map(), do: SignalAdapter.access_map()
@doc ~s"""
Checks if historical data is allowed for a given `signal`
"""
@spec is_historical_data_allowed?(signal) :: boolean
def is_historical_data_allowed?(signal) do
get_in(access_map(), [signal, "historical"]) === :free
end
@doc ~s"""
Checks if realtime data is allowed for a given `signal`
"""
@spec is_realtime_data_allowed?(signal) :: boolean
def is_realtime_data_allowed?(signal) do
get_in(access_map(), [signal, "realtime"]) === :free
end
@doc ~s"""
Get a map where the key is a signal and the value is the min plan it is
accessible in.
"""
@spec min_plan_map() :: map()
def min_plan_map() do
SignalAdapter.min_plan_map()
end
@doc ~s"""
Get all available signals in the json files
"""
@spec available_signals() :: list(signal)
def available_signals() do
SignalAdapter.available_signals()
end
@doc ~s"""
Get all available signals for a given slug selector
"""
@spec available_signals(map()) :: Type.available_signals_result()
def available_signals(selector) do
SignalAdapter.available_signals(selector)
end
@doc ~s"""
Get available signals with timeseries data types
"""
@spec available_timeseries_signals() :: list(signal)
def available_timeseries_signals() do
SignalAdapter.available_timeseries_signals()
end
@doc ~s"""
Get all available slugs for a given signal
"""
@spec available_slugs(signal()) :: Type.available_slugs_result()
def available_slugs(signal) do
SignalAdapter.available_slugs(signal)
end
@doc ~s"""
Get metadata for a given signal
"""
@spec metadata(signal) :: {:ok, Type.metadata()} | {:error, String.t()}
def metadata(signal) do
case SignalAdapter.has_signal?(signal) do
true -> SignalAdapter.metadata(signal)
{:error, error} -> {:error, error}
end
end
@doc ~s"""
Get the first datetime for which a given signal is available for a given slug
"""
@spec first_datetime(signal, map) :: Type.first_datetime_result()
def first_datetime(signal, selector) do
SignalAdapter.first_datetime(signal, selector)
end
@doc ~s"""
Return all or a subset of the raw signals for all assets.
Raw signal means that no aggregation is applied and the values and the metadata
for every signal are returned without combining them with the data of other signals.
If the `signals` argument has the atom value :all, then all available signals
that occurred in the given from-to interval are returned.
If the `signals` argument has a list of signals as a value, then all of those
signals that occurred in the given from-to interval are returned.
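For example (the datetimes are illustrative):
raw_data(:all, :all, ~U[2021-01-01 00:00:00Z], ~U[2021-01-02 00:00:00Z])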
"""
@spec raw_data(signals, raw_signals_selector, datetime, datetime) :: Type.raw_data_result()
def raw_data(signals, selector, from, to) do
SignalAdapter.raw_data(signals, selector, from, to)
end
@doc ~s"""
Returns timeseries data (pairs of datetime and float value) for a given set
of arguments.
Get a given signal for an interval and time range. The signal's aggregation
function can be changed by providing the :aggregation key in the last argument.
If no aggregation is provided, a default one will be used (currently COUNT).
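For example (the signal name and selector below are illustrative):
timeseries_data("some_signal", %{slug: "bitcoin"}, from, to, "1d", aggregation: :sum)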
"""
@spec timeseries_data(signal, selector, datetime, datetime, interval, opts) ::
Type.timeseries_data_result()
def timeseries_data(signal, selector, from, to, interval, opts) do
SignalAdapter.timeseries_data(signal, selector, from, to, interval, opts)
end
@doc ~s"""
Get the aggregated value for a signal, an selector and time range.
The signal's aggregation function can be changed by the last optional parameter.
If no aggregation is provided, a default one will be used (currently COUNT).
"""
@spec aggregated_timeseries_data(signal, selector, datetime, datetime, opts) ::
Type.aggregated_timeseries_data_result()
def aggregated_timeseries_data(signal, selector, from, to, opts) do
SignalAdapter.aggregated_timeseries_data(signal, selector, from, to, opts)
end
end
|
lib/sanbase/signal/signal.ex
| 0.867204 | 0.715225 |
signal.ex
|
starcoder
|
defmodule Remit.Commit do
use Remit, :schema
schema "commits" do
field :sha, :string
field :usernames, {:array, :string}, default: []
field :owner, :string
field :repo, :string
field :message, :string
field :committed_at, :utc_datetime_usec
field :url, :string
field :payload, :map
field :unlisted, :boolean
field :review_started_at, :utc_datetime_usec
field :reviewed_at, :utc_datetime_usec
field :review_started_by_username, :string
field :reviewed_by_username, :string
field :date_separator_before, :date, virtual: true
timestamps()
end
def latest_listed(q \\ __MODULE__, count), do: q |> latest(count) |> listed()
def latest(q \\ __MODULE__, count), do: from q, limit: ^count, order_by: [desc: :id]
def listed(q \\ __MODULE__), do: from q, where: [unlisted: false]
def authored_by?(_commit, nil), do: false
def authored_by?(commit, username), do: commit.usernames |> Enum.map(&String.downcase/1) |> Enum.member?(String.downcase(username))
def being_reviewed_by?(%Commit{review_started_by_username: username, reviewed_at: nil}, username) when not is_nil(username), do: true
def being_reviewed_by?(_, _), do: false
def oldest_unreviewed_for(_commits, nil), do: nil
def oldest_unreviewed_for(commits, username) do
commits
|> Enum.reverse()
|> Enum.find(& !&1.reviewed_at && !authored_by?(&1, username) && (being_reviewed_by?(&1, username) || !&1.review_started_at))
end
@overlong_in_review_over_minutes 15
@overlong_in_review_over_seconds @overlong_in_review_over_minutes * 60
def oldest_overlong_in_review_by(commits, username, now \\ DateTime.utc_now())
def oldest_overlong_in_review_by(_commits, nil, _now), do: nil
def oldest_overlong_in_review_by(commits, username, now) do
commits
|> Enum.reverse()
|> Enum.find(& being_reviewed_by?(&1, username) && DateTime.diff(now, &1.review_started_at) > @overlong_in_review_over_seconds)
end
def bot?(username), do: String.ends_with?(username, "[bot]")
def botless_username(username), do: String.replace_trailing(username, "[bot]", "")
def message_summary(commit), do: commit.message |> String.split(~r/[\r\n]/) |> hd
def add_date_separators(commits) do
{new_commits, _acc} =
Enum.map_reduce(commits, nil, fn (commit, prev_date) ->
date = Remit.Utils.to_date(commit.committed_at)
separator = if date == prev_date, do: nil, else: date
commit = %{commit | date_separator_before: separator}
{commit, date}
end)
new_commits
end
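# For example, given two commits on 2020-01-01 followed by one on 2020-01-02,
# only the first commit of each date gets `date_separator_before` set to that
# date; the second 2020-01-01 commit keeps it as nil.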
end
|
lib/remit/commit.ex
| 0.51879 | 0.462534 |
commit.ex
|
starcoder
|
defmodule Automata.Blackboard do
@moduledoc """
A global Blackboard for knowledge representations
Memory and Interaction Protocols
With large trees, we face another challenge: storage. In an ideal world,
each AI would have an entire tree allocated to it, with each behavior having
a persistent amount of storage allocated to it, so that any state necessary
for its functioning would simply always be available. However, assuming
about 100 actors allocated at a time about 60 behaviors in the average tree,
and each behavior taking up about 32 bytes of memory, this gives us about
192K of persistent behavior storage. Clearly, as the tree grows even further
this becomes even more of a memory burden, initially for a platform like
the Xbox.
We can cut down on this burden considerably if we note that in the vast majority
of cases, we are only really interested in a small number of behaviors - those
that are actually running (the current leaf, its parent, it grandparent and so
on up the tree). The obvious optimization to make is to create a small pool of
state memory for each actor divided into chunks corresponding to levels of the
hierarchy. The tree becomes a free-standing static structure (i.e. is not
allocated per actor) and the behaviors themselves become code fragments that
operate on a chunk. (The same sort of memory usage can be obtained in an object
oriented way if parent behavior objects only instantiate their children at the
time that the children are selected. This was the approach taken in [Alt04]).
Our memory usage suddenly becomes far more efficient: 100 actors times 64 bytes
(an upper bound on the amount behavior storage needed) times 4 layers (in the
case of Halo 2), or about 25K. Very importantly, this number only grows with the
maximum depth of the tree, not the number of behaviors.
This leaves us with another problem however, the problem of persistent
behavior state. There are numerous instances in the Halo 2 repertoire
where behaviors are disallowed for a certain amount of time after their
last successful performance (grenade-throwing, for example). In the ideal
world, this information about "last execution time" would be stored in the
persistently allocated grenade behavior. However, as that storage in the
above scheme is only temporarily allocated, we need somewhere else to
store the persistent behavior data.
There is an even worse example - what about per-target persistent behavior
state? Consider the search behavior. Search would like to indicate when it
fails in its operation on a particular target. This lets the actor know to
forget about that target and concentrate its efforts elsewhere. However,
this doesn't preclude the actor going and searching for a different target -
so the behavior cannot simply be turned off once it has failed.
Memory - in the psychological sense of stored information on past actions
and events, not in the sense of RAM - presents a problem that is inherent to
the tree structure. The solution in any world besides the ideal one is to
create a memory pool - or a number of memory pools - outside the tree to act
as its storage proxy.
When we consider our memory needs more generally, we can quickly distinguish
at least four different categories:
Per-behavior (persistent): grenade throws, recent vehicle actions
Per-behavior (short-term): state lost when the behavior finishes
Per-object: perception information, last seen position, last seen orientation
Per-object per-behavior: last-meleed time, search failures, pathfinding-to failures
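One way to picture such external memory pools in Elixir terms (a purely
illustrative sketch, not an API of this module) is a set of maps keyed by
increasingly specific identifiers:
%{behavior_id => persistent_state} # per-behavior (persistent)
%{object_id => perception_state} # per-object
%{{object_id, behavior_id} => state} # per-object per-behavior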
"""
defmacro __using__(_automaton_config) do
end
end
|
lib/automata/knowledge/blackboards/automata_blackboard.ex
| 0.807271 | 0.835953 |
automata_blackboard.ex
|
starcoder
|
defmodule Race do
@moduledoc """
Simulates a race.
This is meant to be run in IEx in a console which supports printing of
UTF-8 characters.
"""
alias Race.Arena
@racer_count 10
@refresh_delay 50
@goal_line 80
@doc ~S"""
Starts a new race.
## Examples
iex> Race.start_race
With options:
iex> Race.start_race [racers: 20, goal: 10]
## Options
| Option | Description | Default value |
| ------------ | ------------------------------------ | -------------:|
| **`racers`** | Number of `racer`s in the race. | 10 |
| **`delay`** | Delay between updates in ms. | 50 |
| **`goal`** | Number of columns to the goal line. | 80 |
"""
def start_race(opts \\ []) do
racers = Keyword.get(opts, :racers, @racer_count)
delay = Keyword.get(opts, :delay, @refresh_delay)
goal_line = Keyword.get(opts, :goal, @goal_line)
{:ok, arena} = Arena.open
Arena.add_racers(arena, racers)
draw(arena, delay, goal_line)
end
defp draw(arena, delay, goal_line) do
:timer.sleep delay
refresh(get_positions(arena, goal_line), goal_line)
Arena.update(arena)
case Arena.check_winners(arena, goal_line) do
[] -> draw(arena, delay, goal_line)
[winner] -> IO.puts "The winner is: " <>
get_racer_char(winner + 1) <>
" !"
winners -> Enum.map(winners, &(&1 + 1))
|> Enum.map(&get_racer_char/1)
|> Enum.join(", ")
|> (fn ws -> IO.puts "The winners are: #{ws} !" end).()
end
end
defp get_positions(arena, goal_line) do
Arena.get_positions(arena)
|> Enum.map(fn p -> goal_line - p end) # should run right -> left
|> Enum.with_index
|> Enum.map(fn {pos, idx} -> {pos, idx+1} end)
end
defp refresh(arena, goal_line) do
IEx.Helpers.clear
IO.puts generate_arena(arena, goal_line)
end
# box outline chars
@horizontal << 0x2501 :: utf8 >>
@vertical << 0x2503 :: utf8 >>
@topleft << 0x250f :: utf8 >>
@topright << 0x2513 :: utf8 >>
@bottomright << 0x251b :: utf8 >>
@bottomleft << 0x2517 :: utf8 >>
@init_racer 0x1f420
defp generate_arena(positions, goal_line) do
line_end = goal_line + 1
max_x = goal_line
max_y = length(positions) + 1
for y <- 0..max_y, x <- 0..line_end do
if {x, y} in positions do
get_racer_char(y)
else
case {x, y} do
{^line_end, _}
-> "\n"
{0 , 0 } -> @topleft
{0 , ^max_y} -> @bottomleft
{^max_x, 0 } -> @topright
{^max_x, ^max_y} -> @bottomright
{ _ , 0 } -> @horizontal
{ _ , ^max_y} -> @horizontal
{0 , _ } -> @vertical
{^max_x, _ } -> @vertical
_ -> " "
end
end
end
end
defp get_racer_char(char_offset) do
<< @init_racer + char_offset :: utf8 >>
end
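# e.g. get_racer_char(1) returns codepoint 0x1F421 (one past the base emoji at
# 0x1F420) encoded as a UTF-8 binary.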
end
|
lib/race.ex
| 0.670608 | 0.499756 |
race.ex
|
starcoder
|
defmodule Fly.RPC do
@moduledoc """
Provides an RPC interface for executing an MFA on a node within a region.
## Configuration
Assumes each node is running the `Fly.RPC` server in its supervision tree and
exports `FLY_REGION` environment variable to identify the fly region.
To run code on a specific region call `rpc_region/4`. A node found within the
given region will be chosen at random. Raises if no nodes exist on the given
region.
The special `:primary` region may be passed to run the rpc against the region
identified by the `PRIMARY_REGION` environment variable.
## Examples
> rpc_region("hkg", String, :upcase, ["fly"])
"FLY"
> rpc_region(Fly.primary_region(), String, :upcase, ["fly"])
"FLY"
> rpc_region(:primary, String, :upcase, ["fly"])
"FLY"
## Server
The GenServer's responsibility is just to monitor other nodes as they enter
and leave the cluster. It maintains a list of nodes and the Fly.io region
where they are deployed in an ETS table that other processes can use to find
and initiate their own RPC calls to.
"""
use GenServer
require Logger
@tab :fly_regions
def start_link(opts) do
GenServer.start_link(__MODULE__, opts, name: __MODULE__)
end
@doc """
Returns the Elixir OTP nodes registered in the region. Reads from a local cache.
"""
def region_nodes(tab \\ @tab, region) do
case :ets.lookup(tab, region) do
[{^region, nodes}] -> nodes
[] -> []
end
end
@doc """
Asks a node what Fly region it's running in.
Returns `:error` if RPC is not supported on remote node.
"""
def region(node) do
if is_rpc_supported?(node) do
{:ok, rpc(node, Fly, :my_region, [])}
else
Logger.info("Detected Fly RPC support is not available on node #{inspect(node)}")
:error
end
end
def rpc_region(region, module, func, args, opts \\ [])
def rpc_region(:primary, module, func, args, opts) do
rpc_region(Fly.primary_region(), module, func, args, opts)
end
def rpc_region(region, module, func, args, opts) when is_binary(region) do
if region == Fly.my_region() do
apply(module, func, args)
else
timeout = Keyword.get(opts, :timeout, 5_000)
available_nodes = region_nodes(region)
if Enum.empty?(available_nodes),
do: raise(ArgumentError, "no node found running in region #{inspect(region)}")
node = Enum.random(available_nodes)
rpc(node, module, func, args, timeout)
end
end
@doc """
Executes the function on the remote node and waits for the response.
Exits after `timeout` milliseconds.
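For example (the node name is illustrative):
> rpc(:"app@fra-1234", String, :upcase, ["fly"])
"FLY"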
"""
def rpc(node, module, func, args, timeout \\ 5000) do
verbose_log(:info, fn ->
"Starting RPC from #{Fly.my_region()} for #{Fly.mfa_string(module, func, args)}"
end)
caller = self()
ref = make_ref()
# Perform the RPC call to the remote node and wait for the response
_pid =
Node.spawn_link(node, __MODULE__, :__local_rpc__, [
[caller, ref, module, func | args]
])
receive do
{^ref, result} ->
verbose_log(:info, fn ->
"RECEIVED RPC in #{Fly.my_region()} for #{Fly.mfa_string(module, func, args)}"
end)
result
after
timeout ->
verbose_log(:error, fn ->
"TIMEOUT for RPC in #{Fly.my_region()} calling #{Fly.mfa_string(module, func, args)}"
end)
exit(:timeout)
end
end
@doc false
# Private function that can be executed on a remote node in the cluster. Used
# to execute arbitrary function from a trusted caller.
def __local_rpc__([caller, ref, module, func | args]) do
result = apply(module, func, args)
send(caller, {ref, result})
end
@doc """
Executes a function on the remote node to determine if the RPC API support is
available.
Support may not exist on the remote node in a "first roll out" scenario.
"""
def is_rpc_supported?(node) do
# note: use :erpc.call once Erlang 23+ is required
case :rpc.call(node, Kernel, :function_exported?, [Fly, :my_region, 0], 5000) do
result when is_boolean(result) ->
result
{:badrpc, reason} ->
Logger.warn("Failed RPC supported test on node #{inspect(node)}, got: #{inspect(reason)}")
false
end
end
## RPC calls run on local node
def init(_opts) do
tab = :ets.new(@tab, [:named_table, :public, read_concurrency: true])
# monitor new node up/down activity
:global_group.monitor_nodes(true)
{:ok, %{nodes: MapSet.new(), tab: tab}, {:continue, :get_node_regions}}
end
def handle_continue(:get_node_regions, state) do
new_state =
Enum.reduce(Node.list(), state, fn node_name, acc ->
put_node(acc, node_name)
end)
{:noreply, new_state}
end
def handle_info({:nodeup, node_name}, state) do
Logger.debug("nodeup #{node_name}")
# Only react/track visible nodes (hidden ones are for IEx, etc)
new_state =
if node_name in Node.list(:visible) do
put_node(state, node_name)
else
state
end
{:noreply, new_state}
end
def handle_info({:nodedown, node_name}, state) do
Logger.debug("nodedown #{node_name}")
{:noreply, drop_node(state, node_name)}
end
# Executed when a new node shows up in the cluster. Asks the node what region
# it's running in. If the request isn't supported by the node, do nothing.
# This happens when this node is the first node with this new code. It reaches
# out to the other nodes (they show up as having just appeared) but they don't
# yet have the new code. So this ignores that node until it gets new code,
# restarts and will then again show up as a new node.
@doc false
def put_node(state, node_name) do
case region(node_name) do
{:ok, region} ->
Logger.info("Discovered node #{inspect(node_name)} in region #{region}")
region_nodes = region_nodes(state.tab, region)
:ets.insert(state.tab, {region, [node_name | region_nodes]})
%{state | nodes: MapSet.put(state.nodes, {node_name, region})}
:error ->
state
end
end
@doc false
def drop_node(state, node_name) do
# find the node information for the node going down.
case get_node(state, node_name) do
{^node_name, region} ->
Logger.info("Dropping node #{inspect(node_name)} for region #{region}")
# get the list of nodes currently registered in that region
region_nodes = region_nodes(state.tab, region)
# Remove the node from the known regions and update the local cache
new_regions = Enum.reject(region_nodes, fn n -> n == node_name end)
:ets.insert(state.tab, {region, new_regions})
# Remove the node entry from the GenServer's state
new_nodes =
Enum.reduce(state.nodes, state.nodes, fn
{^node_name, ^region}, acc -> MapSet.delete(acc, {node_name, region})
{_node, _region}, acc -> acc
end)
# Return the new state
%{state | nodes: new_nodes}
# Node is not known to us. Ignore it.
nil ->
state
end
end
defp get_node(state, name) do
Enum.find(state.nodes, fn {n, _region} -> n == name end)
end
defp verbose_log(kind, func) do
if Application.get_env(:fly_rpc, :verbose_logging) do
Logger.log(kind, func)
end
end
end
|
fly_rpc/lib/fly_rpc.ex
| 0.802788 | 0.615666 |
fly_rpc.ex
|
starcoder
|
defmodule Syslog.Parser do
@moduledoc """
Module to handle the parsing of various acceptable forms of a Syslog message
as described in [RFC3164](https://tools.ietf.org/html/rfc3164) and
[RFC5426](https://tools.ietf.org/html/rfc5426). Additionally, various other
forms of syslog messages are accepted, as found "in the wild".
Improperly formatted Syslog messages are normalized while parsing, so that
the output `Syslog.Entry` returned is completely populated. The methods
for normalization are handled in the manner described in the RFCs above.
The parser also handles a special form of structured data content, which is
a form that pre-dates [RFC5424](https://tools.ietf.org/html/rfc5424). All
key-value pairs found inside a message
are extracted in to the `Syslog.Entry` returned, under the `kvps` Map
field.
"""
@doc """
Parses a binary blob containing a Syslog Message.
Returns `Syslog.Entry`.
## Examples
iex> Syslog.Parser.parse('<12>Jul 7 16:05:00.12312 myhostname tag[123]: a message')
%Syslog.Entry{arrival_datetime: nil, datetime: ~N[2017-07-07 16:05:00.123120],
facility: :user, hostname: "myhostname", ip: nil, kvps: %{},
message: "a message", pid: "123", port: nil, priority: 12, process: "tag",
severity: :warn}
"""
@spec parse(binary()) :: Syslog.Entry.t
def parse(raw) do
state = %{priority: [], datetime: [], hostname: nil, tag: nil, process: nil, pid: nil, kvps: %{}}
{:ok, message, state} =
with {raw, state} <- parse_priority(raw, state),
{raw, state} <- parse_datetime(raw, state),
{raw, state} <- parse_host_and_tag(raw, state),
{raw, state} <- parse_kw(raw, state),
do: {:ok, raw, state}
# Priority is two components, split them
{priority, _} =
state.priority
|> to_string
|> Integer.parse
{facility, severity} = {decode_facility(div(priority, 8)),
decode_severity(rem(priority, 8))}
%Syslog.Entry{datetime: state.datetime,
priority: priority,
facility: facility,
severity: severity,
hostname: state.hostname,
process: state.process,
pid: state.pid,
message: message,
kvps: state.kvps}
end
@spec decode_facility(integer) :: atom
# credo:disable-for-next-line
defp decode_facility(facility) do
case facility do
0 -> :kern
1 -> :user
2 -> :mail
3 -> :system
4 -> :auth
5 -> :internal
6 -> :lpr
7 -> :nns
8 -> :uucp
9 -> :clock
10 -> :authpriv
11 -> :ftp
12 -> :ntp
13 -> :audit
14 -> :alert
15 -> :clock2 # ?
16 -> :local0
17 -> :local1
18 -> :local2
19 -> :local3
20 -> :local4
21 -> :local5
22 -> :local6
23 -> :local7
_ -> :undefined
end
end
@spec decode_severity(integer) :: atom
# credo:disable-for-next-line
defp decode_severity(severity) do
case severity do
0 -> :emerg
1 -> :alert
2 -> :crit
3 -> :err
4 -> :warn
5 -> :notice
6 -> :info
7 -> :debug
_ -> :undefined
end
end
@spec parse_priority(list(binary()), map()) :: {nonempty_maybe_improper_list(), map()}
defp parse_priority([h | t], state) do
case h do
?< -> parse_priority_1(t, state)
_ -> raise ArgumentError, "invalid priority opening character"
end
end
@spec parse_priority_1(list(binary()), map()) :: {nonempty_maybe_improper_list(), map()}
defp parse_priority_1([h | t], %{priority: p} = state) do
case h do
x when x in ?0..?9 -> parse_priority_1(t, %{state | priority: p ++ [x]})
?> -> {t, state}
end
end
@months ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
@spec parse_datetime(binary, map) :: {nonempty_maybe_improper_list(), map()}
defp parse_datetime(chars, state) do
case Regex.run(~r/(?<month>Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s(?|(?:\s)(?<day>[1-9])?|(?<day>[1-9]\d))\s(?<hour>[0-2]\d):(?<minute>[0-5][0-9]):(?<second>[0-5][0-9])(?|(?:\.)(?<fractional_seconds>\d{1,9})?|)\s/iu, to_string(chars)) do
nil -> raise ArgumentError, "invalid date and time formatting"
elements ->
# first is the entire match sequence
continuation = to_charlist(String.slice(to_string(chars), String.length(hd(elements))..-1))
year = NaiveDateTime.utc_now().year
month = Enum.find_index(@months, fn(m) -> m == Enum.at(elements, 1) end) + 1
{day, _} = elements |> Enum.at(2) |> Integer.parse
{hour, _} = elements |> Enum.at(3) |> Integer.parse
{minutes, _} = elements |> Enum.at(4) |> Integer.parse
{seconds, _} = elements |> Enum.at(5) |> Integer.parse
{microseconds, _} =
(Enum.at(elements, 6) || "")
|> String.pad_trailing(6, "0")
|> to_charlist
|> Enum.take(6)
|> to_string
|> Integer.parse
{:ok, dt} = NaiveDateTime.new(year, month, day, hour, minutes, seconds, microseconds)
{continuation, %{state | datetime: dt}}
end
end
@spec parse_host_and_tag(binary, map) :: {[binary], map()}
defp parse_host_and_tag(chars, state) do
elements = chars
|> to_string
|> String.split
# first element could be a hostname
[h | t] = elements
{cont, state} =
case Regex.run(~r/^(?<process>.+)\[(?<pid>\d+)\]:?$/u, h) do
nil -> {true, %{state | hostname: h}}
elements ->
{false, %{state | tag: h, hostname: "localhost", process: Enum.at(elements, 1), pid: Enum.at(elements, 2)}}
end
case cont do
false ->
{t, state}
true ->
[h1 | t1] = t
case Regex.run(~r/^(?<process>.+)\[(?<pid>\d+)\]:?$/u, h1) do
nil -> {t, state}
elements ->
{t1, %{state | tag: h1, process: Enum.at(elements, 1), pid: Enum.at(elements, 2)}}
end
end
end
@spec parse_kw(binary, map) :: {binary, map}
defp parse_kw(chars, state) do
elements =
Regex.scan(~r/\s?(([a-zA-Z0-9]+)=("(?:[^"\\]|\\.)*"|[^ ]+))\s?/,
Enum.join(chars, " "))
state =
Enum.reduce(elements, state, fn(s, state) ->
k = Enum.at(s, 2)
v = Enum.at(s, 3)
%{state | kvps: Map.put(state.kvps, k, v |> String.trim("\""))}
end)
{Enum.join(chars, " "), state}
end
end
|
lib/esyslog/parser.ex
| 0.762866 | 0.624294 |
parser.ex
|
starcoder
|
defmodule BattleBox.Utilities.Graph.AStar do
@iter_limit 2500
defmodule State do
@enforce_keys [:start_loc, :end_loc, :open, :heuristic, :neighbors]
defstruct [
:closed,
:cost_from_start,
:end_loc,
:estimated_cost_to_end,
:heuristic,
:neighbors,
:open,
:start_loc,
came_from: %{},
iteration: 0
]
end
def a_star(start_loc, end_loc, _neighbors, _heuristic) when start_loc == end_loc do
{:ok, [start_loc]}
end
def a_star(start_loc, end_loc, neighbors, heuristic)
when is_function(neighbors, 1) and is_function(heuristic, 2),
do:
do_a_star(%State{
start_loc: start_loc,
end_loc: end_loc,
open: MapSet.new([start_loc]),
closed: MapSet.new(),
neighbors: neighbors,
heuristic: heuristic,
estimated_cost_to_end: %{start_loc => heuristic.(start_loc, end_loc)},
cost_from_start: %{start_loc => 0}
})
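# A minimal usage sketch on a 4-connected grid (the neighbor and heuristic
# functions below are illustrative, not part of this module):
#
# neighbors = fn {x, y} -> [{x + 1, y}, {x - 1, y}, {x, y + 1}, {x, y - 1}] end
# heuristic = fn {x1, y1}, {x2, y2} -> abs(x1 - x2) + abs(y1 - y2) end
# a_star({0, 0}, {2, 3}, neighbors, heuristic)
# #=> {:ok, [{0, 0}, ..., {2, 3}]}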
defp do_a_star(%State{iteration: iteration}) when iteration > @iter_limit,
do: {:error, :iterations_exceeded}
defp do_a_star(%State{} = state) do
state = update_in(state.iteration, &(&1 + 1))
with {:open_empty?, false} <- {:open_empty?, MapSet.size(state.open) == 0},
best = Enum.min_by(state.open, &state.estimated_cost_to_end[&1]),
{:complete?, false} <- {:complete?, best == state.end_loc} do
neighbors = state.neighbors.(best)
state = update_in(state.open, &MapSet.delete(&1, best))
state = update_in(state.closed, &MapSet.put(&1, best))
neighbors
|> Enum.reject(fn neighbor -> neighbor in state.closed end)
|> Enum.reduce(state, fn neighbor, state ->
state = update_in(state.open, &MapSet.put(&1, neighbor))
candidate_cost = state.cost_from_start[best] + 1
case state.cost_from_start[neighbor] do
better_cost when not is_nil(better_cost) and better_cost <= candidate_cost ->
state
_ ->
estimated_to_end = candidate_cost + state.heuristic.(neighbor, state.end_loc)
state = update_in(state.came_from, &Map.put(&1, neighbor, best))
state = update_in(state.cost_from_start, &Map.put(&1, neighbor, candidate_cost))
update_in(state.estimated_cost_to_end, &Map.put(&1, neighbor, estimated_to_end))
end
end)
|> do_a_star
else
{:open_empty?, true} ->
{:error, :no_path}
{:complete?, true} ->
path = [
state.end_loc
| Stream.unfold({state.end_loc, state.came_from}, fn {current, came_from} ->
if node = came_from[current], do: {node, {node, came_from}}
end)
|> Enum.to_list()
]
{:ok, Enum.reverse(path)}
end
end
end
|
lib/battle_box/utilities/graph/a_star.ex
| 0.556882 | 0.509642 |
a_star.ex
|
starcoder
|
defmodule Bunch.KVEnum do
@moduledoc """
A bunch of helper functions for manipulating key-value enums (including keyword
enums).
Key-value enums are represented as enums of 2-element tuples, where the first
element of each tuple is a key, and the second is a value.
"""
@type t(_key, _value) :: Enumerable.t()
@doc """
Returns all keys from the `enum`.
Duplicated keys appear duplicated in the final enum of keys.
## Examples
iex> #{inspect(__MODULE__)}.keys(a: 1, b: 2)
[:a, :b]
iex> #{inspect(__MODULE__)}.keys(a: 1, b: 2, a: 3)
[:a, :b, :a]
"""
@spec keys(t(key, value)) :: [key] when key: any, value: any
def keys(enum) do
Enum.map(enum, &Bunch.key/1)
end
@doc """
Returns all values from the `enum`.
Values from duplicated keys will be kept in the final enum of values.
## Examples
iex> #{inspect(__MODULE__)}.values(a: 1, b: 2)
[1, 2]
iex> #{inspect(__MODULE__)}.values(a: 1, b: 2, a: 3)
[1, 2, 3]
"""
@spec values(t(key, value)) :: [value] when key: any, value: any
def values(enum) do
Enum.map(enum, &Bunch.value/1)
end
@doc """
Maps keys of `enum` using function `f`.
## Example
iex> #{inspect(__MODULE__)}.map_keys([{1, :a}, {2, :b}], & &1+1)
[{2, :a}, {3, :b}]
"""
@spec map_keys(t(k1, v), (k1 -> k2)) :: t(k2, v) when k1: any, k2: any, v: any
def map_keys(enum, f) do
enum |> Enum.map(fn {key, value} -> {f.(key), value} end)
end
@doc """
Maps values of `enum` using function `f`.
## Example
iex> #{inspect(__MODULE__)}.map_values([a: 1, b: 2], & &1+1)
[a: 2, b: 3]
"""
@spec map_values(t(k, v1), (v1 -> v2)) :: t(k, v2) when k: any, v1: any, v2: any
def map_values(enum, f) do
enum |> Enum.map(fn {key, value} -> {key, f.(value)} end)
end
@doc """
Filters elements of `enum` by keys using function `f`.
## Example
iex> #{inspect(__MODULE__)}.filter_by_keys([a: 1, b: 2, a: 3], & &1 == :a)
[a: 1, a: 3]
"""
@spec filter_by_keys(t(k, v), (k -> as_boolean(term))) :: t(k, v) when k: any, v: any
def filter_by_keys(enum, f) do
enum |> Enum.filter(&apply_to_key(&1, f))
end
@doc """
Filters elements of `enum` by values using function `f`.
## Example
iex> #{inspect(__MODULE__)}.filter_by_values([a: 1, b: 2, a: 3], & &1 |> rem(2) == 0)
[b: 2]
"""
@spec filter_by_values(t(k, v), (v -> as_boolean(term))) :: t(k, v) when k: any, v: any
def filter_by_values(enum, f) do
enum |> Enum.filter(&apply_to_value(&1, f))
end
@doc """
Executes `f` for each key in `enum`.
## Example
iex> #{inspect(__MODULE__)}.each_key([a: 1, b: 2, a: 3], & send(self(), &1))
iex> [:a, :b, :a] |> Enum.each(&receive do ^&1 -> :ok end)
:ok
"""
@spec each_key(t(k, v), (k -> any | no_return)) :: :ok when k: any, v: any
def each_key(enum, f) do
enum |> Enum.each(&apply_to_key(&1, f))
end
@doc """
Executes `f` for each value in `enum`.
## Example
iex> #{inspect(__MODULE__)}.each_value([a: 1, b: 2, a: 3], & send(self(), &1))
iex> 1..3 |> Enum.each(&receive do ^&1 -> :ok end)
:ok
"""
@spec each_value(t(k, v), (v -> any | no_return)) :: :ok when k: any, v: any
def each_value(enum, f) do
enum |> Enum.each(&apply_to_value(&1, f))
end
@doc """
Returns `true` if `f` returns truthy value for any key from `enum`, otherwise `false`.
## Example
iex> #{inspect(__MODULE__)}.any_key?([a: 1, b: 2, a: 3], & &1 == :b)
true
iex> #{inspect(__MODULE__)}.any_key?([a: 1, b: 3, a: 5], & &1 == :c)
false
"""
@spec any_key?(t(k, v), (k -> as_boolean(term))) :: boolean when k: any, v: any
def any_key?(enum, f \\ & &1) do
enum |> Enum.any?(&apply_to_key(&1, f))
end
@doc """
Returns `true` if `f` returns truthy value for any value from `enum`, otherwise `false`.
## Example
iex> #{inspect(__MODULE__)}.any_value?([a: 1, b: 2, a: 3], & &1 |> rem(2) == 0)
true
iex> #{inspect(__MODULE__)}.any_value?([a: 1, b: 3, a: 5], & &1 |> rem(2) == 0)
false
"""
@spec any_value?(t(k, v), (v -> as_boolean(term))) :: boolean when k: any, v: any
def any_value?(enum, f \\ & &1) do
enum |> Enum.any?(&apply_to_value(&1, f))
end
defp apply_to_key({key, _value}, f), do: f.(key)
defp apply_to_value({_key, value}, f), do: f.(value)
end
|
lib/bunch/kv_enum.ex
| 0.909496 | 0.704707 |
kv_enum.ex
|
starcoder
|
defmodule Stump do
@moduledoc """
  Stump allows Maps and Strings to be passed into the Elixir Logger and returns logs in JSON format.
"""
@moduledoc since: "1.6.2"
@doc """
The `log` method formats your given error message whether it be a Map or a String then passes it to Elixirs own Logger.
Usage for the module is as follows:
```
Stump.log(level, message)
```
The level can be any of four `:debug/:info/:warn/:error`
Message can be either a `String` or `Map`
```
Stump.log(:error, 'Error Logged')
{'message':'Error Logged','level':'error','datetime':'2019-03-06T12:21:52.661587Z'}
```
"""
import Logger, only: [log: 2]
def log(level, data, metadata \\ []) when level in [:error, :warn, :info, :debug] do
Logger.log(level, format(level, data), metadata)
end
def metadata(keyword) do
Logger.metadata(keyword)
end
def metadata() do
Map.new(Logger.metadata())
end
defp format(level, data) when data == nil or data == "" do
format(level, "Event Logger received log level, but no error message was provided")
end
defp format(level, %_{} = struct) do
format(level, Map.from_struct(struct))
end
defp format(level, data) when is_map(data) do
data
|> destruct()
|> Map.merge(%{datetime: time(), level: to_string(level), metadata: metadata()})
|> encode()
end
defp format(level, data) when is_bitstring(data) or is_binary(data) do
%{message: data, datetime: time(), level: to_string(level), metadata: metadata()}
|> encode()
end
defp destruct(struct = %_{}) do
struct
|> Map.from_struct()
|> destruct()
end
defp destruct(map) when is_map(map) do
Enum.into(map, %{}, fn {k, v} -> {k, destruct(v)} end)
end
defp destruct(data) when is_tuple(data) do
data
|> Tuple.to_list()
|> destruct()
end
defp destruct(data) when is_list(data) do
Enum.map(data, fn x -> destruct(x) end)
end
defp destruct(data) when is_reference(data) do
"#Ref<>"
end
defp destruct(data) when is_pid(data) do
"#Pid<>"
end
defp destruct(data), do: data
defp encode(map) do
case Jason.encode(map) do
{:ok, value} ->
value
{:error, _} ->
encode(%{
jason_error: "Jason returned an error encoding your log message",
raw_log: Kernel.inspect(map),
datetime: time()
})
end
end
@doc false
def time() do
Application.get_env(:stump, :time_api).utc_now()
end
end
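# Usage sketch (the metadata keyword and map fields are illustrative):
#
#     Stump.metadata(request_id: "req-123")
#     Stump.log(:info, %{message: "user created", user_id: 42})
#     # emits a JSON string containing the map fields plus "level", "datetime"
#     # and the current Logger metadata under "metadata"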
|
lib/stump.ex
| 0.798501 | 0.910346 |
stump.ex
|
starcoder
|
defmodule MapDiff do
@moduledoc """
  Computes nested differences between two maps or lists, returning changed values as `[old, new]` pairs.
"""
def diff(a, b) when a == b and is_map(a), do: %{}
def diff(a, b) when a == b and is_list(a), do: []
def diff(a, b) when is_map(a) and is_map(b) do
delta = Enum.reduce(a, %{}, &compare(&1, &2, b))
additions(a, b, delta)
end
def diff(a, b) when is_list(a) and is_list(b) do
delta = [a, b]
|> Enum.zip
|> Enum.reduce([], &compare(&1, &2))
additions(a, b, delta)
end
def diff(a, nil), do: [a, nil]
def diff(a, b) do
raise ArgumentError, message: "MapDiff type changed"
[a, b]
end
  def compare({a_key, value_a}, delta, b) when is_map(value_a) do
value_b = b |> Map.get(a_key)
changes = diff(value_a, value_b)
put_delta(delta, changes, a_key)
end
def put_delta(delta, result) when result == %{}, do: delta
def put_delta(delta, result, _) when result == [], do: delta
def put_delta(delta, result, key) when is_map(delta) do
delta
|> Map.put(key, result)
end
def put_delta(delta, result) when is_list(delta) do
delta ++ [result]
end
  def compare({a_key, value_a}, delta, b) when is_list(value_a) do
value_b = b |> Map.get(a_key)
changes = diff(value_a, value_b)
put_delta(delta, changes, a_key)
end
def compare({a, b}, delta) when is_list(delta) do
changes = diff(a, b)
put_delta(delta, changes)
end
  def compare({a_key, value_a}, delta, b) do
value_b = b |> Map.get(a_key)
if value_b != value_a do
delta
|> Map.put(a_key, [value_a, value_b])
else
delta
end
end
  def additions(a, b, delta) when is_map(a) do
    Enum.reduce(Map.keys(b) -- Map.keys(a), delta, fn _key, _delta ->
      raise ArgumentError, message: "MapDiff additions found in b"
    end)
  end
  def additions(a, b, delta) when is_list(a) do
    a_length = Enum.count(a) - 1
    new_in_b =
      b
      |> Enum.with_index()
      |> Enum.filter(fn {_value, index} -> index > a_length end)
      |> Enum.map(fn {value, _index} -> value end)
    Enum.reduce(new_in_b, delta, fn _key, _delta ->
      raise ArgumentError, message: "MapDiff additions found in b"
    end)
  end
end
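# Expected shape of a diff, as a sketch (values are illustrative):
#
#     MapDiff.diff(%{a: 1, b: 2}, %{a: 1, b: 3})
#     # => %{b: [2, 3]}            changed keys map to [old, new]
#
#     MapDiff.diff(%{a: %{x: 1}}, %{a: %{x: 2}})
#     # => %{a: %{x: [1, 2]}}      nested maps are diffed recursively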
|
lib/map_diff.ex
| 0.739611 | 0.583915 |
map_diff.ex
|
starcoder
|
defmodule AWS.Forecast do
@moduledoc """
Provides APIs for creating and managing Amazon Forecast resources.
"""
@doc """
Creates an Amazon Forecast dataset. The information about the dataset that
you provide helps Forecast understand how to consume the data for model
training. This includes the following:
<ul> <li> * `DataFrequency` * - How frequently your historical time-series
data is collected.
</li> <li> * `Domain` * and * `DatasetType` * - Each dataset has an
associated dataset domain and a type within the domain. Amazon Forecast
provides a list of predefined domains and types within each domain. For
each unique dataset domain and type within the domain, Amazon Forecast
requires your data to include a minimum set of predefined fields.
</li> <li> * `Schema` * - A schema specifies the fields in the dataset,
including the field name and data type.
</li> </ul> After creating a dataset, you import your training data into it
and add the dataset to a dataset group. You use the dataset group to create
a predictor. For more information, see `howitworks-datasets-groups`.
To get a list of all your datasets, use the `ListDatasets` operation.
For example Forecast datasets, see the [Amazon Forecast Sample GitHub
repository](https://github.com/aws-samples/amazon-forecast-samples).
<note> The `Status` of a dataset must be `ACTIVE` before you can import
training data. Use the `DescribeDataset` operation to get the status.
</note>
"""
def create_dataset(client, input, options \\ []) do
request(client, "CreateDataset", input, options)
end
@doc """
Creates a dataset group, which holds a collection of related datasets. You
can add datasets to the dataset group when you create the dataset group, or
later by using the `UpdateDatasetGroup` operation.
After creating a dataset group and adding datasets, you use the dataset
group when you create a predictor. For more information, see
`howitworks-datasets-groups`.
To get a list of all your datasets groups, use the `ListDatasetGroups`
operation.
<note> The `Status` of a dataset group must be `ACTIVE` before you can
  use the dataset group to create a predictor. To get the status, use
the `DescribeDatasetGroup` operation.
</note>
"""
def create_dataset_group(client, input, options \\ []) do
request(client, "CreateDatasetGroup", input, options)
end
@doc """
Imports your training data to an Amazon Forecast dataset. You provide the
location of your training data in an Amazon Simple Storage Service (Amazon
S3) bucket and the Amazon Resource Name (ARN) of the dataset that you want
to import the data to.
You must specify a `DataSource` object that includes an AWS Identity and
Access Management (IAM) role that Amazon Forecast can assume to access the
data, as Amazon Forecast makes a copy of your data and processes it in an
internal AWS system. For more information, see `aws-forecast-iam-roles`.
The training data must be in CSV format. The delimiter must be a comma (,).
You can specify the path to a specific CSV file, the S3 bucket, or to a
folder in the S3 bucket. For the latter two cases, Amazon Forecast imports
all files up to the limit of 10,000 files.
Because dataset imports are not aggregated, your most recent dataset import
is the one that is used when training a predictor or generating a forecast.
Make sure that your most recent dataset import contains all of the data you
want to model off of, and not just the new data collected since the
previous import.
To get a list of all your dataset import jobs, filtered by specified
criteria, use the `ListDatasetImportJobs` operation.
"""
def create_dataset_import_job(client, input, options \\ []) do
request(client, "CreateDatasetImportJob", input, options)
end
@doc """
Creates a forecast for each item in the `TARGET_TIME_SERIES` dataset that
was used to train the predictor. This is known as inference. To retrieve
the forecast for a single item at low latency, use the operation. To export
the complete forecast into your Amazon Simple Storage Service (Amazon S3)
bucket, use the `CreateForecastExportJob` operation.
The range of the forecast is determined by the `ForecastHorizon` value,
which you specify in the `CreatePredictor` request. When you query a
forecast, you can request a specific date range within the forecast.
To get a list of all your forecasts, use the `ListForecasts` operation.
<note> The forecasts generated by Amazon Forecast are in the same time zone
as the dataset that was used to create the predictor.
</note> For more information, see `howitworks-forecast`.
<note> The `Status` of the forecast must be `ACTIVE` before you can query
or export the forecast. Use the `DescribeForecast` operation to get the
status.
</note>
"""
def create_forecast(client, input, options \\ []) do
request(client, "CreateForecast", input, options)
end
@doc """
Exports a forecast created by the `CreateForecast` operation to your Amazon
Simple Storage Service (Amazon S3) bucket. The forecast file name will
match the following conventions:
<ForecastExportJobName>_<ExportTimestamp>_<PartNumber>
where the <ExportTimestamp> component is in Java SimpleDateFormat
(yyyy-MM-ddTHH-mm-ssZ).
You must specify a `DataDestination` object that includes an AWS Identity
and Access Management (IAM) role that Amazon Forecast can assume to access
the Amazon S3 bucket. For more information, see `aws-forecast-iam-roles`.
For more information, see `howitworks-forecast`.
To get a list of all your forecast export jobs, use the
`ListForecastExportJobs` operation.
<note> The `Status` of the forecast export job must be `ACTIVE` before you
can access the forecast in your Amazon S3 bucket. To get the status, use
the `DescribeForecastExportJob` operation.
</note>
"""
def create_forecast_export_job(client, input, options \\ []) do
request(client, "CreateForecastExportJob", input, options)
end
@doc """
Creates an Amazon Forecast predictor.
In the request, you provide a dataset group and either specify an algorithm
or let Amazon Forecast choose the algorithm for you using AutoML. If you
specify an algorithm, you also can override algorithm-specific
hyperparameters.
Amazon Forecast uses the chosen algorithm to train a model using the latest
version of the datasets in the specified dataset group. The result is
called a predictor. You then generate a forecast using the `CreateForecast`
operation.
After training a model, the `CreatePredictor` operation also evaluates it.
To see the evaluation metrics, use the `GetAccuracyMetrics` operation.
Always review the evaluation metrics before deciding to use the predictor
to generate a forecast.
Optionally, you can specify a featurization configuration to fill and
aggregate the data fields in the `TARGET_TIME_SERIES` dataset to improve
model training. For more information, see `FeaturizationConfig`.
For RELATED_TIME_SERIES datasets, `CreatePredictor` verifies that the
`DataFrequency` specified when the dataset was created matches the
`ForecastFrequency`. TARGET_TIME_SERIES datasets don't have this
restriction. Amazon Forecast also verifies the delimiter and timestamp
format. For more information, see `howitworks-datasets-groups`.
**AutoML**
If you want Amazon Forecast to evaluate each algorithm and choose the one
that minimizes the `objective function`, set `PerformAutoML` to `true`. The
`objective function` is defined as the mean of the weighted p10, p50, and
p90 quantile losses. For more information, see `EvaluationResult`.
When AutoML is enabled, the following properties are disallowed:
<ul> <li> `AlgorithmArn`
</li> <li> `HPOConfig`
</li> <li> `PerformHPO`
</li> <li> `TrainingParameters`
</li> </ul> To get a list of all of your predictors, use the
`ListPredictors` operation.
<note> Before you can use the predictor to create a forecast, the `Status`
of the predictor must be `ACTIVE`, signifying that training has completed.
To get the status, use the `DescribePredictor` operation.
</note>
"""
def create_predictor(client, input, options \\ []) do
request(client, "CreatePredictor", input, options)
end
@doc """
Deletes an Amazon Forecast dataset that was created using the
`CreateDataset` operation. You can only delete datasets that have a status
of `ACTIVE` or `CREATE_FAILED`. To get the status use the `DescribeDataset`
operation.
<note> Forecast does not automatically update any dataset groups that
contain the deleted dataset. In order to update the dataset group, use the
operation, omitting the deleted dataset's ARN.
</note>
"""
def delete_dataset(client, input, options \\ []) do
request(client, "DeleteDataset", input, options)
end
@doc """
Deletes a dataset group created using the `CreateDatasetGroup` operation.
You can only delete dataset groups that have a status of `ACTIVE`,
`CREATE_FAILED`, or `UPDATE_FAILED`. To get the status, use the
`DescribeDatasetGroup` operation.
This operation deletes only the dataset group, not the datasets in the
group.
"""
def delete_dataset_group(client, input, options \\ []) do
request(client, "DeleteDatasetGroup", input, options)
end
@doc """
Deletes a dataset import job created using the `CreateDatasetImportJob`
operation. You can delete only dataset import jobs that have a status of
`ACTIVE` or `CREATE_FAILED`. To get the status, use the
`DescribeDatasetImportJob` operation.
"""
def delete_dataset_import_job(client, input, options \\ []) do
request(client, "DeleteDatasetImportJob", input, options)
end
@doc """
Deletes a forecast created using the `CreateForecast` operation. You can
delete only forecasts that have a status of `ACTIVE` or `CREATE_FAILED`. To
get the status, use the `DescribeForecast` operation.
You can't delete a forecast while it is being exported. After a forecast is
deleted, you can no longer query the forecast.
"""
def delete_forecast(client, input, options \\ []) do
request(client, "DeleteForecast", input, options)
end
@doc """
Deletes a forecast export job created using the `CreateForecastExportJob`
operation. You can delete only export jobs that have a status of `ACTIVE`
or `CREATE_FAILED`. To get the status, use the `DescribeForecastExportJob`
operation.
"""
def delete_forecast_export_job(client, input, options \\ []) do
request(client, "DeleteForecastExportJob", input, options)
end
@doc """
Deletes a predictor created using the `CreatePredictor` operation. You can
delete only predictor that have a status of `ACTIVE` or `CREATE_FAILED`. To
get the status, use the `DescribePredictor` operation.
"""
def delete_predictor(client, input, options \\ []) do
request(client, "DeletePredictor", input, options)
end
@doc """
Describes an Amazon Forecast dataset created using the `CreateDataset`
operation.
In addition to listing the parameters specified in the `CreateDataset`
request, this operation includes the following dataset properties:
<ul> <li> `CreationTime`
</li> <li> `LastModificationTime`
</li> <li> `Status`
</li> </ul>
"""
def describe_dataset(client, input, options \\ []) do
request(client, "DescribeDataset", input, options)
end
@doc """
Describes a dataset group created using the `CreateDatasetGroup` operation.
In addition to listing the parameters provided in the `CreateDatasetGroup`
request, this operation includes the following properties:
<ul> <li> `DatasetArns` - The datasets belonging to the group.
</li> <li> `CreationTime`
</li> <li> `LastModificationTime`
</li> <li> `Status`
</li> </ul>
"""
def describe_dataset_group(client, input, options \\ []) do
request(client, "DescribeDatasetGroup", input, options)
end
@doc """
Describes a dataset import job created using the `CreateDatasetImportJob`
operation.
In addition to listing the parameters provided in the
`CreateDatasetImportJob` request, this operation includes the following
properties:
<ul> <li> `CreationTime`
</li> <li> `LastModificationTime`
</li> <li> `DataSize`
</li> <li> `FieldStatistics`
</li> <li> `Status`
</li> <li> `Message` - If an error occurred, information about the error.
</li> </ul>
"""
def describe_dataset_import_job(client, input, options \\ []) do
request(client, "DescribeDatasetImportJob", input, options)
end
@doc """
Describes a forecast created using the `CreateForecast` operation.
In addition to listing the properties provided in the `CreateForecast`
request, this operation lists the following properties:
<ul> <li> `DatasetGroupArn` - The dataset group that provided the training
data.
</li> <li> `CreationTime`
</li> <li> `LastModificationTime`
</li> <li> `Status`
</li> <li> `Message` - If an error occurred, information about the error.
</li> </ul>
"""
def describe_forecast(client, input, options \\ []) do
request(client, "DescribeForecast", input, options)
end
@doc """
Describes a forecast export job created using the `CreateForecastExportJob`
operation.
In addition to listing the properties provided by the user in the
`CreateForecastExportJob` request, this operation lists the following
properties:
<ul> <li> `CreationTime`
</li> <li> `LastModificationTime`
</li> <li> `Status`
</li> <li> `Message` - If an error occurred, information about the error.
</li> </ul>
"""
def describe_forecast_export_job(client, input, options \\ []) do
request(client, "DescribeForecastExportJob", input, options)
end
@doc """
Describes a predictor created using the `CreatePredictor` operation.
In addition to listing the properties provided in the `CreatePredictor`
request, this operation lists the following properties:
<ul> <li> `DatasetImportJobArns` - The dataset import jobs used to import
training data.
</li> <li> `AutoMLAlgorithmArns` - If AutoML is performed, the algorithms
that were evaluated.
</li> <li> `CreationTime`
</li> <li> `LastModificationTime`
</li> <li> `Status`
</li> <li> `Message` - If an error occurred, information about the error.
</li> </ul>
"""
def describe_predictor(client, input, options \\ []) do
request(client, "DescribePredictor", input, options)
end
@doc """
Provides metrics on the accuracy of the models that were trained by the
`CreatePredictor` operation. Use metrics to see how well the model
performed and to decide whether to use the predictor to generate a
forecast. For more information, see `metrics`.
This operation generates metrics for each backtest window that was
evaluated. The number of backtest windows (`NumberOfBacktestWindows`) is
specified using the `EvaluationParameters` object, which is optionally
included in the `CreatePredictor` request. If `NumberOfBacktestWindows`
isn't specified, the number defaults to one.
The parameters of the `filling` method determine which items contribute to
the metrics. If you want all items to contribute, specify `zero`. If you
want only those items that have complete data in the range being evaluated
to contribute, specify `nan`. For more information, see
`FeaturizationMethod`.
<note> Before you can get accuracy metrics, the `Status` of the predictor
must be `ACTIVE`, signifying that training has completed. To get the
status, use the `DescribePredictor` operation.
</note>
"""
def get_accuracy_metrics(client, input, options \\ []) do
request(client, "GetAccuracyMetrics", input, options)
end
@doc """
Returns a list of dataset groups created using the `CreateDatasetGroup`
operation. For each dataset group, this operation returns a summary of its
properties, including its Amazon Resource Name (ARN). You can retrieve the
complete set of properties by using the dataset group ARN with the
`DescribeDatasetGroup` operation.
"""
def list_dataset_groups(client, input, options \\ []) do
request(client, "ListDatasetGroups", input, options)
end
@doc """
Returns a list of dataset import jobs created using the
`CreateDatasetImportJob` operation. For each import job, this operation
returns a summary of its properties, including its Amazon Resource Name
(ARN). You can retrieve the complete set of properties by using the ARN
with the `DescribeDatasetImportJob` operation. You can filter the list by
providing an array of `Filter` objects.
"""
def list_dataset_import_jobs(client, input, options \\ []) do
request(client, "ListDatasetImportJobs", input, options)
end
@doc """
Returns a list of datasets created using the `CreateDataset` operation. For
each dataset, a summary of its properties, including its Amazon Resource
Name (ARN), is returned. To retrieve the complete set of properties, use
the ARN with the `DescribeDataset` operation.
"""
def list_datasets(client, input, options \\ []) do
request(client, "ListDatasets", input, options)
end
@doc """
Returns a list of forecast export jobs created using the
`CreateForecastExportJob` operation. For each forecast export job, this
operation returns a summary of its properties, including its Amazon
Resource Name (ARN). To retrieve the complete set of properties, use the
ARN with the `DescribeForecastExportJob` operation. You can filter the list
using an array of `Filter` objects.
"""
def list_forecast_export_jobs(client, input, options \\ []) do
request(client, "ListForecastExportJobs", input, options)
end
@doc """
Returns a list of forecasts created using the `CreateForecast` operation.
For each forecast, this operation returns a summary of its properties,
including its Amazon Resource Name (ARN). To retrieve the complete set of
properties, specify the ARN with the `DescribeForecast` operation. You can
filter the list using an array of `Filter` objects.
"""
def list_forecasts(client, input, options \\ []) do
request(client, "ListForecasts", input, options)
end
@doc """
Returns a list of predictors created using the `CreatePredictor` operation.
For each predictor, this operation returns a summary of its properties,
including its Amazon Resource Name (ARN). You can retrieve the complete set
of properties by using the ARN with the `DescribePredictor` operation. You
can filter the list using an array of `Filter` objects.
"""
def list_predictors(client, input, options \\ []) do
request(client, "ListPredictors", input, options)
end
@doc """
Lists the tags for an Amazon Forecast resource.
"""
def list_tags_for_resource(client, input, options \\ []) do
request(client, "ListTagsForResource", input, options)
end
@doc """
Associates the specified tags to a resource with the specified
`resourceArn`. If existing tags on a resource are not specified in the
request parameters, they are not changed. When a resource is deleted, the
tags associated with that resource are also deleted.
"""
def tag_resource(client, input, options \\ []) do
request(client, "TagResource", input, options)
end
@doc """
Deletes the specified tags from a resource.
"""
def untag_resource(client, input, options \\ []) do
request(client, "UntagResource", input, options)
end
@doc """
Replaces the datasets in a dataset group with the specified datasets.
<note> The `Status` of the dataset group must be `ACTIVE` before you can
use the dataset group to create a predictor. Use the `DescribeDatasetGroup`
operation to get the status.
</note>
"""
def update_dataset_group(client, input, options \\ []) do
request(client, "UpdateDatasetGroup", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "forecast"}
host = build_host("forecast", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AmazonForecast.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
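# Call sketch for CreateDataset (the `client` struct must be configured for your
# AWS account; the field names follow the AWS Forecast API and the values are
# placeholders):
#
#     input = %{
#       "DatasetName" => "demand_history",
#       "Domain" => "RETAIL",
#       "DatasetType" => "TARGET_TIME_SERIES",
#       "DataFrequency" => "D",
#       "Schema" => %{"Attributes" => [%{"AttributeName" => "timestamp", "AttributeType" => "timestamp"}]}
#     }
#     {:ok, result, _response} = AWS.Forecast.create_dataset(client, input)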
|
lib/aws/generated/forecast.ex
| 0.945551 | 0.827271 |
forecast.ex
|
starcoder
|
defmodule AlertProcessor.ServiceInfo.CacheFile do
@moduledoc """
This module holds the logic for loading and saving cache files
in dev and test environments with the goal of decreasing startup
time so devs can work more quickly.
If expected API payloads change the cache files should be deleted/removed
to allowing caching of the changes.
"""
alias AlertProcessor.Helpers.EnvHelper
require Logger
@directory Path.join([File.cwd!(), "priv/service_info_cache"])
@dev_filename "dev_cache.terms"
@test_filename "test_cache.terms"
@doc """
Given a filename generates a filepath for saving cache info file.
"""
def generate_filepath(filename) when is_binary(filename) do
Path.join([@directory, filename])
end
@doc """
The application should use the file to load state for the ServiceInfoCache
if the the envs are :dev or :test.
iex> Mix.env
:test
iex> CacheFile.should_use_file?
true
"""
def should_use_file? do
EnvHelper.is_env?(:dev) || EnvHelper.is_env?(:test)
end
@doc """
  The environment specific cache file name, or nil if the current env has no cache file.
"""
def cache_filename() do
cond do
EnvHelper.is_env?(:dev) -> @dev_filename
EnvHelper.is_env?(:test) -> @test_filename
true -> nil
end
end
@doc """
Attempt to load a cache file.
This will attempt to load a cache file in dev and test. It will fail
if the Mix.env is not dev or test, it will fail if the file does not exist
and it will fail if the loaded term is not a map (minimal validation).
The validation for loading this file must be appended in later code. No
such validator currently exists.
"""
def load_service_info() do
Logger.info(fn -> "Loading service info cache from default path" end)
filename = cache_filename()
filepath = generate_filepath(filename)
load_service_info(filepath)
end
def load_service_info(filepath) when is_binary(filepath) do
Logger.info(fn -> "Loading service info cache from file #{filepath}" end)
with {:ok, binary_cache} <- File.read(filepath),
{:ok, state} when is_map(state) <- binary_to_term(binary_cache) do
Logger.info(fn -> "Loaded service info cache from file #{filepath}" end)
{:ok, state}
else
_ ->
Logger.info(fn -> "Failed to load service info cache from file #{filepath}" end)
{:error, :cache_not_loaded}
end
end
def load_service_info(_) do
Logger.info(fn -> "Failed to load service info cache from file" end)
{:error, :cache_not_loaded}
end
@doc """
Attempt to save a cache file.
"""
def save_service_info(state) do
filename = cache_filename()
if is_binary(filename) do
filepath = generate_filepath(filename)
Logger.info(fn -> "Saving service info cache to file #{filepath}" end)
save_service_info(state, filepath)
else
{:error, :cache_file_not_saved}
end
end
def save_service_info(state, filepath) when is_map(state) and is_binary(filepath) do
bin = :erlang.term_to_binary(state)
File.write(filepath, bin)
end
def save_service_info(_, _) do
{:error, :cache_file_not_saved}
end
defp binary_to_term(bin) do
try do
{:ok, :erlang.binary_to_term(bin)}
rescue
ArgumentError ->
{:error, :invalid_erlang_term_binary}
end
end
end
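# Usage sketch (only meaningful in :dev or :test, where `cache_filename/0`
# returns a file name; the cached state map is illustrative):
#
#     :ok = AlertProcessor.ServiceInfo.CacheFile.save_service_info(%{routes: [], stops: []})
#     {:ok, state} = AlertProcessor.ServiceInfo.CacheFile.load_service_info()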
|
apps/alert_processor/lib/service_info/cache_file.ex
| 0.630116 | 0.428742 |
cache_file.ex
|
starcoder
|
defmodule ExSaga.Step do
@moduledoc """
"""
alias ExSaga.{DryRun, Event, Stepable}
@typedoc """
"""
@type breakpoint_fun :: (Event.t(), Stepable.t() -> boolean)
@typedoc """
"""
@type breakpoint :: breakpoint_fun | {:before | :after, breakpoint_fun}
@typedoc """
"""
@type opt ::
Stepable.opt()
| {:subscribers, [Process.dest()]}
| {:breakpoints, breakpoint_fun | [breakpoint_fun]}
@typedoc """
"""
@type opts :: [opt]
@typedoc """
"""
@type mstep_result :: {Stepable.stage_result() | Stepable.step_result(), [Event.t()]}
@doc """
"""
@spec step(Stepable.t(), Event.t(), Stepable.opts()) :: Stepable.stage_result() | Stepable.step_result()
def step(stepable, event, opts \\ []) do
Stepable.step(stepable, event, opts)
end
@doc """
"""
@spec step_from(Stepable.t(), Stepable.stage_result(), Stepable.opts()) :: Stepable.step_result()
def step_from(stepable, stage_result, opts \\ []) do
Stepable.step_from(stepable, stage_result, opts)
end
@doc """
"""
@spec mstep_from(Stepable.t() | module, Stepable.stage_result(), opts) :: mstep_result
def mstep_from(stepable, stage_result, opts \\ [])
def mstep_from(stepable, stage_result, opts) when is_atom(stepable) do
stepable.create()
|> mstep_from(stage_result, opts)
end
def mstep_from(stepable, stage_result, opts) do
result = step_from(stepable, stage_result, opts)
mstep(result, opts)
end
@doc """
"""
@spec mstep_at(Stepable.t(), Event.t(), opts) :: mstep_result
def mstep_at(stepable, event, opts \\ []) do
mstep({:continue, event, stepable}, opts)
end
@doc """
"""
@spec mstep(Stepable.stage_result() | Stepable.step_result(), opts, [Event.t()]) :: mstep_result
def mstep(result, opts, events \\ [])
def mstep({:continue, nil, _stepable} = result, _opts, events) do
{result, Enum.reverse(events)}
end
def mstep({:continue, event, stepable} = result, opts, events) do
if break?(stepable, event, opts) do
{result, Enum.reverse([event | events])}
else
step_result = step(stepable, event, opts)
_ =
Keyword.get(opts, :subscribers, [])
|> publish(step_result)
mstep(step_result, opts, [event | events])
end
end
def mstep(result, _opts, events) do
{result, Enum.reverse(events)}
end
@doc """
"""
@spec break?(Stepable.t(), Event.t(), opts) :: boolean
def break?(stepable, event, opts) do
timeout = Keyword.get(opts, :timeout, :infinity)
breakpoints = Keyword.get(opts, :breakpoints, [])
break?(breakpoints, stepable, event, timeout)
end
@doc """
"""
@spec break?([breakpoint_fun] | breakpoint_fun, Stepable.t(), Event.t(), timeout) :: boolean
def break?([], _stepable, _event, _timeout), do: false
def break?([bp | bps], stepable, event, timeout) do
case DryRun.execute(bp, [], timeout) do
true -> true
_ -> break?(bps, stepable, event, timeout)
end
end
def break?(breakpoint, stepable, event, timeout) do
break?([breakpoint], stepable, event, timeout)
end
@doc false
@spec publish([Process.dest()], Stepable.stage_result() | Stepable.step_result()) :: :ok
def publish([], _step_result), do: :ok
def publish([sub | subs], step_result) do
_ = send(sub, {:step, ExSaga, self(), step_result})
publish(subs, step_result)
end
end
|
lib/ex_saga/step.ex
| 0.773901 | 0.605916 |
step.ex
|
starcoder
|
defmodule RedisMutex.Lock do
import Exredis.Script
@moduledoc """
This module contains the actual Redis locking business logic. The `with_lock`
macro is generally the only function that should be used from this module, as it
will handle the logic for setting and removing key/values in Redis.
"""
defredis_script :unlock_script, """
if redis.call("get", KEYS[1]) == ARGV[1] then
return redis.call("del", KEYS[1])
else
return 0
end
"""
@default_timeout :timer.seconds(40)
@expiry :timer.seconds(20)
@doc """
This macro takes in a key and a timeout.
A key might be be an id or a resource identifier that will
lock a particular resource. A good example might be an email or phone
number for a user, and you might want lock a db insert so that
multiple users aren't created for one email or phone number.
The timeout is in milliseconds and defaults to 40000 milliseconds.
There is a key expiration of 20 seconds, so the timeout should always
be greater than 20 seconds. The 20 second expiry allows the key to expire
in case the logic inside the `with_lock` macro throws an error or fails
to complete within 20 seconds, thereby freeing up the key so the lock
can be obtained by another request or resource.
The lock will be released after the logic inside the `with_lock` has
completed, or the timeout, whichever comes first. The return value
of the macro will be the return value of the logic inside
the 'with_lock' macro.
```elixir
defmodule PossumLodge do
use RedisMutex
alias PossumLodge.{Repo, Member}
def add_member(params) do
with_lock(params.phone_number) do
%Member{}
|> Member.changeset(params)
|> Repo.insert_or_update!
end
end
end
```
"""
@spec with_lock(any, integer) :: any
defmacro with_lock(key, timeout \\ @default_timeout, do: clause) do
quote do
key = unquote(key)
timeout = unquote(timeout)
uuid = UUID.uuid1()
RedisMutex.Lock.take_lock(key, uuid, timeout)
block_value = unquote(clause)
RedisMutex.Lock.unlock(key, uuid)
block_value
end
end
@doc """
This function takes in a key, unique string, and a timeout in milliseconds.
It will call itself recursively until it is able to set a lock
or the timeout expires.
"""
def take_lock(key, uuid, timeout \\ @default_timeout, start \\ nil, finish \\ nil)
def take_lock(key, uuid, timeout, nil, nil) do
start = Timex.now
finish = Timex.shift(start, milliseconds: timeout)
take_lock(key, uuid, timeout, start, finish)
end
def take_lock(key, uuid, timeout, start, finish) do
if Timex.before?(finish, start) do
raise RedisMutex.Error, message: "Unable to obtain lock."
end
if !lock(key, uuid) do
take_lock(key, uuid, timeout, start, finish)
end
end
@doc """
This function takes in a key and a unique identifier to set it in Redis.
This is how a lock is identified in Redis. If a key/value pair is able to be
set in Redis, `lock` returns `true`. If it isn't able to set in Redis, `lock`
returns `false`.
"""
def lock(key, value) do
client = Process.whereis(:redis_mutex_connection)
case Exredis.query(client, ["SET", key, value, "NX", "PX", "#{@expiry}"]) do
"OK" -> true
:undefined -> false
end
end
@doc """
This function takes in the key/value pair that are to be released in Redis
"""
def unlock(key, value) do
client = Process.whereis(:redis_mutex_connection)
case unlock_script(client, [key], [value]) do
"1" -> true
"0" -> false
end
end
end
|
lib/redis_mutex/lock.ex
| 0.675978 | 0.750576 |
lock.ex
|
starcoder
|
defmodule Narou.Entity do
@moduledoc """
  Base module for API data.
"""
import Narou.Util
@doc """
  Common processing for API data.
  ## param
  - validate: list(symbol) : specifies the columns to add validations for, e.g. [:st, :limit].
  - hoge_attr: default_val : specifies a property to add to the struct and its initial value.
  ## EXAMPLE
      defmodule MyStruct do
        use Narou.Entity, hoge: "", limit: 1, validate: [:limit]
      end
"""
defmacro __using__(attributes) do
quote do
import Narou.Entity
use Vex.Struct
{validate_info, attributes} = Keyword.split(unquote(attributes), [:validate, :validate_use_value])
[
type: nil,
out_type: :json,
maximum_fetch_mode: false
] ++ attributes |> defstruct
validates :type, inclusion: Narou.Entity.api_types
validates :out_type, inclusion: [:json]
add_validate_cols = Keyword.get(validate_info, :validate) |> List.wrap()
validations =
[
[:st, number: [greater_than_or_equal_to: 1, less_than_or_equal_to: 2000]],
[:limit, number: [greater_than_or_equal_to: 1, less_than_or_equal_to: 500]],
[:order, inclusion: validate_info[:validate_use_value][:order]],
[:select, by: &valid_select?/1]
]
Enum.each(validations, fn [key_name, opt] ->
if Enum.member?(add_validate_cols, key_name), do: validates(key_name, List.wrap(opt))
end)
def __drop_keys__, do: [:__struct__, :maximum_fetch_mode]
defoverridable __drop_keys__: 0
end
end
@spec init(keyword()) :: {:ok, map()} | {:error, binary()}
def init(opt) do
{[type: type], opt} = Keyword.split(opt, [:type])
if type in api_types() do
gen_struct(type, opt)
|> initialized()
else
{:error, "Unexpected type `#{type}`."}
end
end
def init_or_update(type_sym_or_querable, opt) do
if is_symbol?(type_sym_or_querable) do
init(opt ++ [type: type_sym_or_querable])
else
type_sym_or_querable
|> Map.merge(Map.new(opt))
|> initialized()
end
end
defp gen_struct(type, opt) do
to_submodule_name(type)
|> struct([type: type] ++ opt)
end
defp initialized(querable) do
querable
|> patch()
|> validate()
end
defp to_submodule_name(type), do: Module.concat(__MODULE__, type |> to_string |> Macro.camelize |> String.to_atom)
defp patch(struct), do: Enum.reduce([:maximum_fetch_mode], struct, &do_patch(&1, &2, Map.fetch!(struct, &1)))
defp do_patch(:maximum_fetch_mode, struct, mode) when is_boolean(mode) do
if(mode, do: %{struct | limit: 500}, else: struct) |> Map.merge(%{maximum_fetch_mode: mode})
end
defp do_patch(:maximum_fetch_mode, struct, _), do: struct
@doc """
  List of supported API types.
"""
@spec api_types() :: list(atom)
def api_types(), do: [:novel, :rank, :rankin, :user]
@spec validate(struct()) :: struct() | {:error, [any()]}
def validate(s), do: if valid?(s), do: s, else: {:error, errors(s)}
defp valid?(s) when is_struct(s), do: s.__struct__.valid?(s)
defp errors(s), do: Vex.errors(s)
def valid_select?(cols), do: Enum.all?(cols, &Narou.Util.is_symbol?/1)
def to_map_for_build_query(entity), do: Map.drop(entity, to_submodule_name(entity.type).__drop_keys__)
end
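# Usage sketch (assumes a Narou.Entity.Novel submodule that `use`s this module
# and defines :st and :limit, as in the EXAMPLE above; values are illustrative):
#
#     {:ok, novel} = Narou.Entity.init(type: :novel, st: 1, limit: 10)
#     Narou.Entity.init(type: :unknown)
#     # => {:error, "Unexpected type `unknown`."}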
|
lib/narou/entity.ex
| 0.646572 | 0.413744 |
entity.ex
|
starcoder
|
defmodule Ex2ms do
@moduledoc """
This module provides the `Ex2ms.fun/1` macro for translating Elixir functions
to match specifications.
"""
@bool_functions [
:is_atom,
:is_float,
:is_integer,
:is_list,
:is_number,
:is_pid,
:is_port,
:is_reference,
:is_tuple,
:is_binary,
:is_function,
:is_record,
:and,
:or,
:not,
:xor
]
@extra_guard_functions [
:abs,
:element,
:hd,
    :length,
:node,
:round,
:size,
:tl,
:trunc,
:+,
:-,
:*,
:/,
:div,
:rem,
:band,
:bor,
:bxor,
:bnot,
:bsl,
:bsr,
:>,
:>=,
:<,
:<=,
:===,
:==,
:!==,
:!=,
:self
]
@guard_functions @bool_functions ++ @extra_guard_functions
@action_functions [
:set_seq_token,
:get_seq_token,
:message,
:return_trace,
:exception_trace,
:process_dump,
:enable_trace,
:disable_trace,
:trace,
:display,
:caller,
:set_tcw,
:silent
]
@elixir_erlang [===: :"=:=", !==: :"=/=", !=: :"/=", <=: :"=<", and: :andalso, or: :orelse]
Enum.each(@guard_functions, fn atom ->
defp is_guard_function(unquote(atom)), do: true
end)
defp is_guard_function(_), do: false
Enum.each(@action_functions, fn atom ->
defp is_action_function(unquote(atom)), do: true
end)
defp is_action_function(_), do: false
Enum.each(@elixir_erlang, fn {elixir, erlang} ->
defp map_elixir_erlang(unquote(elixir)), do: unquote(erlang)
end)
defp map_elixir_erlang(atom), do: atom
@doc """
Translates an anonymous function to a match specification.
## Examples
iex> Ex2ms.fun do {x, y} -> x == 2 end
[{{:"$1", :"$2"}, [], [{:==, :"$1", 2}]}]
"""
defmacro fun(do: clauses) do
clauses
|> Enum.map(fn {:->, _, clause} -> translate_clause(clause, __CALLER__) end)
|> Macro.escape(unquote: true)
end
defmacrop is_literal(term) do
quote do
is_atom(unquote(term)) or is_number(unquote(term)) or is_binary(unquote(term))
end
end
defp translate_clause([head, body], caller) do
{head, conds, state} = translate_head(head, caller)
case head do
%{} ->
raise_parameter_error(head)
_ ->
body = translate_body(body, state)
{head, conds, body}
end
end
defp translate_body({:__block__, _, exprs}, state) when is_list(exprs) do
Enum.map(exprs, &translate_cond(&1, state))
end
defp translate_body(expr, state) do
[translate_cond(expr, state)]
end
defp translate_cond({name, _, context}, state) when is_atom(name) and is_atom(context) do
if match_var = state.vars[{name, context}] do
:"#{match_var}"
else
raise ArgumentError,
message:
"variable `#{name}` is unbound in matchspec (use `^` for outer variables and expressions)"
end
end
defp translate_cond({left, right}, state), do: translate_cond({:{}, [], [left, right]}, state)
defp translate_cond({:{}, _, list}, state) when is_list(list) do
{list |> Enum.map(&translate_cond(&1, state)) |> List.to_tuple()}
end
defp translate_cond({:^, _, [var]}, _state) do
{:const, {:unquote, [], [var]}}
end
defp translate_cond(fun_call = {fun, _, args}, state) when is_atom(fun) and is_list(args) do
cond do
is_guard_function(fun) ->
match_args = Enum.map(args, &translate_cond(&1, state))
match_fun = map_elixir_erlang(fun)
[match_fun | match_args] |> List.to_tuple()
expansion = is_expandable(fun_call, state.caller) ->
translate_cond(expansion, state)
is_action_function(fun) ->
match_args = Enum.map(args, &translate_cond(&1, state))
[fun | match_args] |> List.to_tuple()
true ->
raise_expression_error(fun_call)
end
end
defp translate_cond(list, state) when is_list(list) do
Enum.map(list, &translate_cond(&1, state))
end
defp translate_cond(literal, _state) when is_literal(literal) do
literal
end
defp translate_cond(expr, _state), do: raise_expression_error(expr)
defp translate_head([{:when, _, [param, cond]}], caller) do
{head, state} = translate_param(param, caller)
cond = translate_cond(cond, state)
{head, [cond], state}
end
defp translate_head([param], caller) do
{head, state} = translate_param(param, caller)
{head, [], state}
end
defp translate_head(expr, _caller), do: raise_parameter_error(expr)
defp translate_param(param, caller) do
param = Macro.expand(param, %{caller | context: :match})
{param, state} =
case param do
{:=, _, [{name, _, context}, param]} when is_atom(name) and is_atom(context) ->
state = %{vars: %{{name, context} => "$_"}, count: 0, caller: caller}
{Macro.expand(param, %{caller | context: :match}), state}
{:=, _, [param, {name, _, context}]} when is_atom(name) and is_atom(context) ->
state = %{vars: %{{name, context} => "$_"}, count: 0, caller: caller}
{Macro.expand(param, %{caller | context: :match}), state}
{name, _, context} when is_atom(name) and is_atom(context) ->
{param, %{vars: %{}, count: 0, caller: caller}}
{:{}, _, list} when is_list(list) ->
{param, %{vars: %{}, count: 0, caller: caller}}
{:%{}, _, list} when is_list(list) ->
{param, %{vars: %{}, count: 0, caller: caller}}
{_, _} ->
{param, %{vars: %{}, count: 0, caller: caller}}
_ ->
raise_parameter_error(param)
end
do_translate_param(param, state)
end
defp do_translate_param({:_, _, context}, state) when is_atom(context) do
{:_, state}
end
defp do_translate_param({name, _, context}, state) when is_atom(name) and is_atom(context) do
if match_var = state.vars[{name, context}] do
{:"#{match_var}", state}
else
match_var = "$#{state.count + 1}"
state = %{
state
| vars: Map.put(state.vars, {name, context}, match_var),
count: state.count + 1
}
{:"#{match_var}", state}
end
end
defp do_translate_param({left, right}, state) do
do_translate_param({:{}, [], [left, right]}, state)
end
defp do_translate_param({:{}, _, list}, state) when is_list(list) do
{list, state} = Enum.map_reduce(list, state, &do_translate_param(&1, &2))
{List.to_tuple(list), state}
end
defp do_translate_param({:^, _, [expr]}, state) do
{{:unquote, [], [expr]}, state}
end
defp do_translate_param(list, state) when is_list(list) do
Enum.map_reduce(list, state, &do_translate_param(&1, &2))
end
defp do_translate_param(literal, state) when is_literal(literal) do
{literal, state}
end
defp do_translate_param({:%{}, _, list}, state) do
Enum.reduce(list, {%{}, state}, fn {key, value}, {map, state} ->
{key, key_state} = do_translate_param(key, state)
{value, value_state} = do_translate_param(value, key_state)
{Map.put(map, key, value), value_state}
end)
end
defp do_translate_param(expr, _state), do: raise_parameter_error(expr)
defp is_expandable(ast, env) do
expansion = Macro.expand_once(ast, env)
if ast !== expansion, do: expansion, else: false
end
defp raise_expression_error(expr) do
message = "illegal expression in matchspec:\n#{Macro.to_string(expr)}"
raise ArgumentError, message: message
end
defp raise_parameter_error(expr) do
message =
"illegal parameter to matchspec (has to be a single variable or tuple):\n" <>
Macro.to_string(expr)
raise ArgumentError, message: message
end
end
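# A guard example, as a sketch of the match spec produced for a fun with a
# `when` clause (such specs can be passed to :ets.select/2):
#
#     Ex2ms.fun do {key, value} when value > 10 -> key end
#     # => [{{:"$1", :"$2"}, [{:>, :"$2", 10}], [:"$1"]}]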
|
lib/ex2ms.ex
| 0.659953 | 0.631395 |
ex2ms.ex
|
starcoder
|
defmodule Twinex do
@moduledoc """
"""
@doc """
Two strings are twins iff their odd **and** even substrings are anagrams
That is obviously true for empty strings:
iex(1)> twins?("", "")
true
However it is obviously false if the odds are not anagrams (we consider the odds
being the second splice, starting counting @ 0) ...
iex(2)> twins?("ab", "ac")
false
... and the same holds for evens, of course
iex(3)> twins?("ab", "cb")
false
Another _obvious_ absence of twinhood is if the strings are not of the same length
iex(4)> twins?("aaa", "aa")
false
Now a string composed from anagrams to demonstrate some twinhood
iex(5)> _evens = "banana"
...(5)> _odds = "ALPHA"
...(5)> _even_ana = "abaann"
...(5)> _odd_ana = "PHALA"
...(5)> twins?("bAaLnPaHnAa", "aPbHaAaLnAn")
true
"""
def twins?(lhs, rhs) do
if String.length(lhs) == String.length(rhs) do
_counts(lhs) == _counts(rhs)
else
false
end
end
@doc """
twin_pairs compares two lists of strings by means of the `twins?` predicate
iex(6)> list1 = ~w[alpha beta gamma omega]
...(6)> list2 = ~w[plaha tbea ammag phi]
...(6)> twin_pairs(list1, list2)
~w[Yo Na Yo Na]
And for those, not familiar with Austrian dialect
iex(7)> list1 = ~w[yes nnoo]
...(7)> list2 = ~w[sey xxxx yyy]
...(7)> twin_pairs(list1, list2, yes: 1, no: 0)
[1, 0]
If the first argument is longer than the second, the
expected will happen, that is if you expect the right
thing, of course:
iex(8)> list1 = ~w[yes]
...(8)> twin_pairs(list1, [])
[]
"""
def twin_pairs(lhs, rhs, options \\ []) do
yes_str = Keyword.get(options, :yes, "Yo")
no_str = Keyword.get(options, :no, "Na")
Enum.zip(lhs, rhs)
|> Enum.map(fn {le, re} -> if twins?(le, re), do: yes_str, else: no_str end)
end
defp _counts(string) do
string
|> String.graphemes
|> Enum.chunk_every(2, 2, [" "])
|> Enum.reduce({%{}, %{}}, &_update_counters/2)
end
defp _update_counter(counter, graph) do
Map.put(counter, graph, Map.get(counter, graph, 0) + 1)
end
defp _update_counters( [even_graph, odd_graph], {even_counter, odd_counter} ) do
{
_update_counter(even_counter, even_graph),
_update_counter(odd_counter, odd_graph)}
end
end
|
lib/twinex.ex
| 0.670932 | 0.408867 |
twinex.ex
|
starcoder
|
defmodule TextDelta.Transformation do
@moduledoc """
The transformation of two concurrent deltas such that they satisfy the
convergence properties of Operational Transformation.
Transformation allows optimistic conflict resolution in concurrent editing.
Given a delta A that occurred at the same time as delta B against the same
  text state, we can transform the operations of delta A such that the state
  of the text after applying delta A and then the transformation of delta B
  against delta A is the same as after applying delta B and then the
  transformation of delta A against delta B:
S ○ Oa ○ transform(Ob, Oa) = S ○ Ob ○ transform(Oa, Ob)
There is a great article written on [Operational Transformation][ot1] that
author of this library used. It is called [Understanding and Applying
Operational Transformation][ot2].
[tp1]: https://en.wikipedia.org/wiki/Operational_transformation#Convergence_properties
[ot1]: https://en.wikipedia.org/wiki/Operational_transformation
[ot2]: http://www.codecommit.com/blog/java/understanding-and-applying-operational-transformation
"""
alias TextDelta.{Operation, Attributes, Iterator}
@typedoc """
Atom representing transformation priority. Which delta came first?
"""
@type priority :: :left | :right
@doc """
Transforms `right` delta against the `left` one.
The function also takes a third `t:TextDelta.Transformation.priority/0`
argument that indicates which delta came first. This is important when
doing conflict resolution.
"""
@spec transform(TextDelta.t(), TextDelta.t(), priority) :: TextDelta.t()
def transform(left, right, priority) do
{TextDelta.operations(left), TextDelta.operations(right)}
|> iterate()
|> do_transform(priority, TextDelta.new())
|> TextDelta.trim()
end
defp do_transform({{_, _}, {nil, _}}, _, result) do
result
end
defp do_transform({{nil, _}, {op_b, remainder_b}}, _, result) do
List.foldl([op_b | remainder_b], result, &TextDelta.append(&2, &1))
end
defp do_transform(
{{%{insert: _} = ins_a, remainder_a},
{%{insert: _} = ins_b, remainder_b}},
:left,
result
) do
retain = make_retain(ins_a)
{remainder_a, [ins_b | remainder_b]}
|> iterate()
|> do_transform(:left, TextDelta.append(result, retain))
end
defp do_transform(
{{%{insert: _} = ins_a, remainder_a},
{%{insert: _} = ins_b, remainder_b}},
:right,
result
) do
{[ins_a | remainder_a], remainder_b}
|> iterate()
|> do_transform(:right, TextDelta.append(result, ins_b))
end
defp do_transform(
{{%{insert: _} = ins, remainder_a}, {%{retain: _} = ret, remainder_b}},
priority,
result
) do
retain = make_retain(ins)
{remainder_a, [ret | remainder_b]}
|> iterate()
|> do_transform(priority, TextDelta.append(result, retain))
end
defp do_transform(
{{%{insert: _} = ins, remainder_a}, {%{delete: _} = del, remainder_b}},
priority,
result
) do
retain = make_retain(ins)
{remainder_a, [del | remainder_b]}
|> iterate()
|> do_transform(priority, TextDelta.append(result, retain))
end
defp do_transform(
{{%{delete: _} = del, remainder_a}, {%{insert: _} = ins, remainder_b}},
priority,
result
) do
{[del | remainder_a], remainder_b}
|> iterate()
|> do_transform(priority, TextDelta.append(result, ins))
end
defp do_transform(
{{%{delete: _}, remainder_a}, {%{retain: _}, remainder_b}},
priority,
result
) do
{remainder_a, remainder_b}
|> iterate()
|> do_transform(priority, result)
end
defp do_transform(
{{%{delete: _}, remainder_a}, {%{delete: _}, remainder_b}},
priority,
result
) do
{remainder_a, remainder_b}
|> iterate()
|> do_transform(priority, result)
end
defp do_transform(
{{%{retain: _} = ret, remainder_a}, {%{insert: _} = ins, remainder_b}},
priority,
result
) do
{[ret | remainder_a], remainder_b}
|> iterate()
|> do_transform(priority, TextDelta.append(result, ins))
end
defp do_transform(
{{%{retain: _} = ret_a, remainder_a},
{%{retain: _} = ret_b, remainder_b}},
priority,
result
) do
retain = make_retain(ret_a, transform_attributes(ret_a, ret_b, priority))
{remainder_a, remainder_b}
|> iterate()
|> do_transform(priority, TextDelta.append(result, retain))
end
defp do_transform(
{{%{retain: _}, remainder_a}, {%{delete: _} = del, remainder_b}},
priority,
result
) do
{remainder_a, remainder_b}
|> iterate()
|> do_transform(priority, TextDelta.append(result, del))
end
defp iterate(stream), do: Iterator.next(stream, :insert)
defp make_retain(op, attrs \\ %{}) do
op
|> Operation.length()
|> Operation.retain(attrs)
end
defp transform_attributes(op_a, op_b, priority) do
attrs_a = Map.get(op_a, :attributes)
attrs_b = Map.get(op_b, :attributes)
Attributes.transform(attrs_a, attrs_b, priority)
end
end
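# A minimal convergence sketch (assumes TextDelta.new/0 and TextDelta.insert/2
# from this library; the deltas are illustrative):
#
#     a = TextDelta.insert(TextDelta.new(), "ab")
#     b = TextDelta.insert(TextDelta.new(), "cd")
#     b_at_a = TextDelta.Transformation.transform(a, b, :left)
#     a_at_b = TextDelta.Transformation.transform(b, a, :right)
#     # applying `a` then `b_at_a` to a document yields the same text as
#     # applying `b` then `a_at_b`, which is the convergence property above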
|
lib/text_delta/transformation.ex
| 0.869659 | 0.818519 |
transformation.ex
|
starcoder
|
defmodule EctoDot do
@moduledoc """
EctoDot provides an easy way to generate .dot, .png, .svg and .pdf diagrams directly from your ecto schema modules.
"""
alias EctoDot.Schema
alias EctoDot.Association
alias EctoDot.Diagram
@doc """
Creates the .dot representation including attributes and associations between the received schema module/s.
Note: only associations between received modules will be shown.
"""
def diagram(mods, opts \\ [])
def diagram(mod, opts) when is_atom(mod) do
diagram([mod], opts)
end
def diagram(mods, opts) when is_list(mods) do
name = Keyword.get(opts, :name, "Diagram")
schemas =
mods
|> Enum.map(&Schema.from_ecto/1)
associations =
mods
|> Enum.flat_map(&Association.from_ecto/1)
|> Enum.filter(fn assoc ->
Enum.member?(mods, assoc.to)
end)
%Diagram{name: name, schemas: schemas, associations: associations}
end
@doc """
  Creates the .dot representation including attributes and associations accessible from the received schema module/s.
"""
def expanded_diagram(mods, opts \\ [])
def expanded_diagram(mod, opts) when is_atom(mod) do
expanded_diagram([mod], opts)
end
def expanded_diagram(mods, opts) when is_list(mods) do
reachable_modules =
expand_modules(MapSet.new(), MapSet.new(mods))
|> MapSet.to_list()
diagram(reachable_modules, opts)
end
defp expand_modules(reachable, to_expand) do
if Enum.empty?(to_expand) do
reachable
else
will_expand = Enum.at(to_expand, 0)
new_reachable = MapSet.put(reachable, will_expand)
rest_to_expand = MapSet.delete(to_expand, will_expand)
new_to_expand =
Association.from_ecto(will_expand)
|> Enum.map(fn assoc -> assoc.to end)
|> Enum.into(MapSet.new())
|> MapSet.union(rest_to_expand)
|> MapSet.difference(new_reachable)
expand_modules(new_reachable, new_to_expand)
end
end
@doc """
Exports a diagram. Params:
* A diagram returned from `diagram/2` or `expanded_diagram/2`
* The path where the file/s will be generated, including the filename (no extension)
* A list with the formats you want to be exported: [:dot, :png, :svg, :pdf]
"""
def export(%Diagram{} = diag, path, formats \\ [:dot]) do
dot = Diagram.to_dot(diag)
output_file = fn format -> Path.rootname(path) <> ".#{format}" end
dot_file = output_file.(:dot)
# need to generate anyway
File.write(dot_file, dot)
try do
formats
|> Enum.each(fn
format when format in [:png, :svg, :pdf] ->
file = output_file.(format)
IO.puts("Generating #{file}")
System.cmd("dot", ["-T#{format}", dot_file, "-o#{file}"])
:dot ->
IO.puts("Generating #{dot_file}")
end)
rescue
_ ->
IO.puts(
"It seems dot is not installed or in PATH. Please install it if you want to generate png, svg or pdf files with it."
)
after
if :dot not in formats do
File.rm(dot_file)
end
end
:ok
end
end
|
lib/ecto_dot.ex
| 0.672547 | 0.408837 |
ecto_dot.ex
|
starcoder
|
defmodule EctoCommons.EmailValidator do
@moduledoc ~S"""
Validates emails.
## Options
There are various `:checks` depending on the strictness of the validation you require. Indeed, perfect email validation
does not exist (see StackOverflow questions about it):
- `:html_input`: Checks if the email follows the regular expression used by browsers for
their `type="email"` input fields. This is the default as it corresponds to most use-cases. It is quite strict
without being too narrow. It does not support unicode emails though. If you need better internationalization,
please use the `:pow` check as it is more flexible with international emails. Defaults to enabled.
- `:burner`: Checks if the email given is a burner email provider (uses the `Burnex` lib under the hood).
When enabled, will reject temporary email providers. Defaults to disabled.
  - `:check_mx_record`: Checks if the email domain exists in the DNS system (can be a bit slow). Defaults to disabled.
- `:pow`: Checks the email using the [`pow`](https://hex.pm/packages/pow) logic. Defaults to disabled.
The rules are the following:
- Split into local-part and domain at last `@` occurrence
- Local-part should;
- be at most 64 octets
- separate quoted and unquoted content with a single dot
- only have letters, digits, and the following characters outside quoted
content:
```text
!#$%&'*+-/=?^_`{|}~.
```
- not have any consecutive dots outside quoted content
- Domain should;
- be at most 255 octets
- only have letters, digits, hyphen, and dots
Unicode characters are permitted in both local-part and domain.
The implementation is based on [RFC 3696](https://tools.ietf.org/html/rfc3696#section-3).
IP addresses are not allowed as per the RFC 3696 specification: "The domain name can also be
replaced by an IP address in square brackets, but that form is strongly discouraged except
for testing and troubleshooting purposes.".
You're invited to compare the tests to see the difference between the `:html_input`
check and the `:pow` check. `:pow` is better suited for i18n and is more correct
in regards to the email specification but will allow valid emails many systems don't
manage correctly. `:html_input` is more basic but should be OK for most common use-cases.
## Example:
iex> types = %{email: :string}
iex> params = %{email: "<EMAIL>"}
iex> Ecto.Changeset.cast({%{}, types}, params, Map.keys(types))
...> |> validate_email(:email)
#Ecto.Changeset<action: nil, changes: %{email: "<EMAIL>"}, errors: [], data: %{}, valid?: true>
iex> types = %{email: :string}
iex> params = %{email: "@invalid_email"}
iex> Ecto.Changeset.cast({%{}, types}, params, Map.keys(types))
...> |> validate_email(:email)
#Ecto.Changeset<action: nil, changes: %{email: "@invalid_email"}, errors: [email: {"is not a valid email", [validation: :email]}], data: %{}, valid?: false>
iex> types = %{email: :string}
iex> params = %{email: "<EMAIL>"}
iex> Ecto.Changeset.cast({%{}, types}, params, Map.keys(types))
...> |> validate_email(:email, checks: [:html_input, :burner])
#Ecto.Changeset<action: nil, changes: %{email: "<EMAIL>"}, errors: [email: {"uses a forbidden provider", [validation: :email]}], data: %{}, valid?: false>
iex> types = %{email: :string}
iex> params = %{email: "<EMAIL>"}
iex> Ecto.Changeset.cast({%{}, types}, params, Map.keys(types))
...> |> validate_email(:email, checks: [:html_input, :pow])
#Ecto.Changeset<action: nil, changes: %{email: "<EMAIL>"}, errors: [], data: %{}, valid?: true>
"""
import Ecto.Changeset
# We use the regular expression of the html `email` field specification.
# See https://html.spec.whatwg.org/multipage/input.html#e-mail-state-(type=email)
# and https://stackoverflow.com/a/15659649/1656568
# credo:disable-for-next-line Credo.Check.Readability.MaxLineLength
@email_regex ~r/^[a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$/
def validate_email(%Ecto.Changeset{} = changeset, field, opts \\ []) do
validate_change(changeset, field, {:email, opts}, fn _, value ->
checks = Keyword.get(opts, :checks, [:html_input])
# credo:disable-for-lines:6 Credo.Check.Refactor.Nesting
Enum.reduce(checks, [], fn check, errors ->
case do_validate_email(value, check) do
:ok -> errors
{:error, msg} -> [{field, {message(opts, msg), [validation: :email]}} | errors]
end
end)
|> List.flatten()
end)
end
@spec do_validate_email(String.t(), atom()) :: :ok | {:error, String.t()}
defp do_validate_email(email, :burner) do
if Burnex.is_burner?(email) do
{:error, "uses a forbidden provider"}
else
:ok
end
end
defp do_validate_email(email, :html_input) do
if String.match?(email, @email_regex),
do: :ok,
else: {:error, "is not a valid email"}
end
defp do_validate_email(email, :pow) do
case pow_validate_email(email) do
:ok -> :ok
{:error, _msg} -> {:error, "is not a valid email"}
end
end
defp do_validate_email(email, :check_mx_record) do
case email
|> String.split("@")
|> Enum.reverse() do
[domain | _rest] ->
case Burnex.check_domain_mx_record(domain) do
:ok -> :ok
{:error, _msg} -> {:error, "is not a valid email domain"}
end
_else ->
{:error, "is not a valid email domain"}
end
end
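  # Example usage (network-dependent, so not exercised as a doctest):
  #   validate_email(changeset, :email, checks: [:html_input, :check_mx_record])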
# The code below is copied and adapted from the [pow](https://hex.pm/packages/pow) package
# We just don't want to import the whole `pow` package as a dependency.
defp pow_validate_email(email) do
[domain | local_parts] =
email
|> String.split("@")
|> Enum.reverse()
local_part =
local_parts
|> Enum.reverse()
|> Enum.join("@")
cond do
String.length(local_part) > 64 -> {:error, "local-part too long"}
String.length(domain) > 255 -> {:error, "domain too long"}
local_part == "" -> {:error, "invalid format"}
local_part_only_quoted?(local_part) -> validate_domain(domain)
true -> pow_validate_email(local_part, domain)
end
end
defp pow_validate_email(local_part, domain) do
sanitized_local_part =
local_part
|> remove_comments()
|> remove_quotes_from_local_part()
cond do
      local_part_consecutive_dots?(sanitized_local_part) ->
        {:error, "consecutive dots in local-part"}
local_part_valid_characters?(sanitized_local_part) ->
validate_domain(domain)
true ->
{:error, "invalid characters in local-part"}
end
end
defp local_part_only_quoted?(local_part),
do: local_part =~ ~r/^"[^\"]+"$/
defp remove_quotes_from_local_part(local_part),
do: Regex.replace(~r/(^\".*\"$)|(^\".*\"\.)|(\.\".*\"$)?/, local_part, "")
defp remove_comments(any),
do: Regex.replace(~r/(^\(.*\))|(\(.*\)$)?/, any, "")
  defp local_part_consecutive_dots?(local_part),
do: local_part =~ ~r/\.\./
defp local_part_valid_characters?(sanitized_local_part),
do: sanitized_local_part =~ ~r<^[\p{L}\p{M}0-9!#$%&'*+-/=?^_`{|}~\.]+$>u
defp validate_domain(domain) do
sanitized_domain = remove_comments(domain)
labels =
sanitized_domain
|> remove_comments()
|> String.split(".")
labels
|> validate_tld()
|> validate_dns_labels()
end
defp validate_tld(labels) do
labels
|> List.last()
|> Kernel.=~(~r/^[0-9]+$/)
|> case do
true -> {:error, "tld cannot be all-numeric"}
false -> {:ok, labels}
end
end
defp validate_dns_labels({:ok, labels}) do
Enum.reduce_while(labels, :ok, fn
label, :ok -> {:cont, validate_dns_label(label)}
_label, error -> {:halt, error}
end)
end
defp validate_dns_labels({:error, error}), do: {:error, error}
defp validate_dns_label(label) do
cond do
label == "" -> {:error, "dns label is too short"}
String.length(label) > 63 -> {:error, "dns label too long"}
String.first(label) == "-" -> {:error, "dns label begins with hyphen"}
String.last(label) == "-" -> {:error, "dns label ends with hyphen"}
dns_label_valid_characters?(label) -> :ok
true -> {:error, "invalid characters in dns label"}
end
end
defp dns_label_valid_characters?(label),
do: label =~ ~r/^[\p{L}\p{M}0-9-]+$/u
defp message(opts, key \\ :message, default) do
Keyword.get(opts, key, default)
end
end
|
lib/validators/email.ex
| 0.864896 | 0.714553 |
email.ex
|
starcoder
|
defprotocol Ecto.DataType do
@moduledoc """
Casts and dumps a given struct into an Ecto type.
While `Ecto.Type` allows developers to cast/load/dump
any value from the storage into the struct based on the
schema, `Ecto.DataType` allows developers to convert
existing data types into existing Ecto types without
the schema information.
For example, `Ecto.Date` is a custom type, represented
by the `%Ecto.Date{}` struct that can be used in place
of Ecto's primitive `:date` type. Therefore, we need to
tell Ecto how to convert `%Ecto.Date{}` into `:date`,
even in the absence of schema information, and such is
done with the `Ecto.DataType` protocol:
defimpl Ecto.DataType, for: Ecto.Date do
# Dumps to the default representation. In this case, :date.
def dump(value) do
cast(value, :date)
end
# Implement any other desired casting rule.
def cast(%Ecto.Date{day: day, month: month, year: year}, :date) do
{:ok, {year, month, day}}
end
def cast(_, _) do
:error
end
end
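  With such an implementation in place, dumping works without any schema
  information (the values are just for illustration):
      Ecto.DataType.dump(%Ecto.Date{year: 2015, month: 1, day: 1})
      #=> {:ok, {2015, 1, 1}}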
"""
@fallback_to_any true
@doc """
Invoked when the data structure has not been cast along the
way and must fallback to its database representation.
"""
@spec dump(term) :: {:ok, term} | :error
def dump(value)
@doc """
Invoked when attempting to cast this data structure to another type.
"""
# TODO: Deprecate this casting function when we migrate to Elixir v1.3.
@spec cast(term, Ecto.Type.t) :: {:ok, term} | :error
def cast(value, type)
end
defimpl Ecto.DataType, for: Any do
# We don't provide any automatic casting rule.
def cast(_value, _type) do
:error
end
# The default representation is itself, which
# means we are delegating to the database. If
# the database does not support, it will raise.
def dump(value) do
{:ok, value}
end
end
defimpl Ecto.DataType, for: List do
def dump(list), do: dump(list, [])
def cast(_, _), do: :error
defp dump([h|t], acc) do
case Ecto.DataType.dump(h) do
{:ok, h} -> dump(t, [h|acc])
:error -> :error
end
end
defp dump([], acc) do
{:ok, Enum.reverse(acc)}
end
end
defimpl Ecto.DataType, for: Ecto.DateTime do
def dump(value), do: cast(value, :datetime)
def cast(%Ecto.DateTime{year: year, month: month, day: day,
hour: hour, min: min, sec: sec, usec: usec}, :datetime) do
{:ok, {{year, month, day}, {hour, min, sec, usec}}}
end
def cast(_, _) do
:error
end
end
defimpl Ecto.DataType, for: Ecto.Date do
def dump(value), do: cast(value, :date)
def cast(%Ecto.Date{year: year, month: month, day: day}, :date) do
{:ok, {year, month, day}}
end
def cast(_, _) do
:error
end
end
defimpl Ecto.DataType, for: Ecto.Time do
def dump(value), do: cast(value, :time)
def cast(%Ecto.Time{hour: hour, min: min, sec: sec, usec: usec}, :time) do
{:ok, {hour, min, sec, usec}}
end
def cast(_, _) do
:error
end
end
|
deps/ecto/lib/ecto/data_type.ex
| 0.773259 | 0.640256 |
data_type.ex
|
starcoder
|
defmodule Ecto.Query.Planner do
# Normalizes a query and its parameters.
@moduledoc false
alias Ecto.Query.QueryExpr
alias Ecto.Query.JoinExpr
alias Ecto.Query.Util
alias Ecto.Query.Types
alias Ecto.Associations.Assoc
@doc """
Plans a model for query execution.
It is mostly a matter of casting the
field values.
"""
def model(kind, model, kw, dumper \\ &Types.dump/2) do
for {field, value} <- kw do
type = model.__schema__(:field_type, field)
unless type do
raise Ecto.InvalidModelError,
message: "field `#{inspect model}.#{field}` in `#{kind}` does not exist in the model source"
end
case dumper.(type, value) do
{:ok, value} ->
{field, value}
:error ->
raise Ecto.InvalidModelError,
message: "value `#{inspect value}` for `#{inspect model}.#{field}` " <>
"in `#{kind}` does not match type #{inspect type}"
end
end
end
@doc """
Plans the query for execution.
Planning happens in multiple steps:
1. First the query is prepared by retrieving
its cache key, casting and merging parameters
2. Then a cache lookup is done, if the query is
cached, we are done
3. If there is no cache, we need to actually
normalize and validate the query, before sending
it to the adapter
4. The query is sent to the adapter to be generated
Currently only steps 1 and 3 are implemented.
"""
def query(query, base, opts \\ []) do
{query, params} = prepare(query, base)
{normalize(query, base, opts), params}
end
@doc """
Prepares the query for cache.
This means all the parameters from query expressions are
  merged into a single value and their entries are pruned
from the query.
In the future, this function should also calculate a hash
to be used as cache key.
This function is called by the backend before invoking
any cache mechanism.
"""
def prepare(query, params) do
query
|> prepare_sources
|> traverse_exprs(params, &merge_params/4)
end
defp merge_params(kind, query, expr, params) when kind in ~w(select limit offset)a do
if expr do
{put_in(expr.params, nil),
cast_and_merge_params(kind, query, expr, params)}
else
{expr, params}
end
end
defp merge_params(kind, query, exprs, params) when kind in ~w(distinct where group_by having order_by)a do
Enum.map_reduce exprs, params, fn expr, acc ->
{put_in(expr.params, nil),
cast_and_merge_params(kind, query, expr, acc)}
end
end
defp merge_params(:join, query, exprs, params) do
Enum.map_reduce exprs, params, fn join, acc ->
{put_in(join.on.params, nil),
cast_and_merge_params(:join, query, join.on, acc)}
end
end
defp cast_and_merge_params(kind, query, expr, params) do
size = Map.size(params)
Enum.reduce expr.params, params, fn {k, {v, type}}, acc ->
Map.put acc, k + size, cast_param(kind, query, expr, v, type)
end
end
defp cast_param(kind, query, expr, v, {composite, {idx, field}}) when is_integer(idx) do
{_, model} = elem(query.sources, idx)
type = type!(kind, query, expr, model, field)
cast_param(kind, query, expr, v, {composite, type})
end
defp cast_param(kind, query, expr, v, {idx, field}) when is_integer(idx) do
{_, model} = elem(query.sources, idx)
type = type!(kind, query, expr, model, field)
cast_param(kind, query, expr, v, type)
end
defp cast_param(kind, query, expr, v, type) do
case Types.cast(type, v) do
{:ok, nil} ->
cast! query, expr, "value `nil` in `#{kind}` cannot be cast to type #{inspect type} " <>
"(if you want to check for nils, use is_nil/1 instead)"
{:ok, v} ->
v
:error ->
cast! query, expr, "value `#{inspect v}` in `#{kind}` cannot be cast to type #{inspect type}"
end
end
# Normalize all sources and adds a source
# field to the query for fast access.
defp prepare_sources(query) do
from = query.from || error!(query, "query must have a from expression")
{joins, sources} =
Enum.map_reduce(query.joins, [from], &prepare_join(&1, &2, query))
%{query | sources: sources |> Enum.reverse |> List.to_tuple, joins: joins}
end
defp prepare_join(%JoinExpr{assoc: {ix, assoc}} = join, sources, query) do
{_, model} = Enum.fetch!(Enum.reverse(sources), ix)
unless model do
error! query, join, "association join cannot be performed without a model"
end
refl = model.__schema__(:association, assoc)
unless refl do
error! query, join, "could not find association `#{assoc}` on model #{inspect model}"
end
associated = refl.associated
source = {associated.__schema__(:source), associated}
on = on_expr(join.on, refl, ix, length(sources))
{%{join | source: source, on: on}, [source|sources]}
end
defp prepare_join(%JoinExpr{source: {source, nil}} = join, sources, _query) when is_binary(source) do
source = {source, nil}
{%{join | source: source}, [source|sources]}
end
defp prepare_join(%JoinExpr{source: {nil, model}} = join, sources, _query) when is_atom(model) do
source = {model.__schema__(:source), model}
{%{join | source: source}, [source|sources]}
end
defp on_expr(on, refl, var_ix, assoc_ix) do
key = refl.key
var = {:&, [], [var_ix]}
assoc_key = refl.assoc_key
assoc_var = {:&, [], [assoc_ix]}
expr = quote do
unquote(assoc_var).unquote(assoc_key) == unquote(var).unquote(key)
end
case on.expr do
true -> %{on | expr: expr}
_ -> %{on | expr: quote do: unquote(on.expr) and unquote(expr)}
end
end
@doc """
Normalizes the query.
After the query was prepared and there is no cache
entry, we need to update its interpolations and check
its fields and associations exist and are valid.
"""
def normalize(query, base, opts) do
only_where? = Keyword.get(opts, :only_where, false)
query
|> traverse_exprs(map_size(base), &validate_and_increment/4)
|> elem(0)
|> normalize_select(only_where?)
|> only_where(only_where?)
end
defp validate_and_increment(kind, query, expr, counter) when kind in ~w(select limit offset)a do
if expr do
do_validate_and_increment(kind, query, expr, counter)
else
{nil, counter}
end
end
defp validate_and_increment(kind, query, exprs, counter) when kind in ~w(distinct where group_by having order_by)a do
Enum.map_reduce exprs, counter, &do_validate_and_increment(kind, query, &1, &2)
end
defp validate_and_increment(:join, query, exprs, counter) do
Enum.map_reduce exprs, counter, fn join, acc ->
{on, acc} = do_validate_and_increment(:join, query, join.on, acc)
{%{join | on: on}, acc}
end
end
defp do_validate_and_increment(kind, query, expr, counter) do
{inner, acc} = Macro.prewalk expr.expr, counter, fn
{:^, meta, [param]}, acc ->
{{:^, meta, [param + counter]}, acc + 1}
{{:., _, [{:&, _, [source]}, field]}, meta, []} = quoted, acc ->
validate_field(kind, query, expr, source, field, meta)
{quoted, acc}
other, acc ->
{other, acc}
end
{%{expr | expr: inner}, acc}
end
defp validate_field(kind, query, expr, source, field, meta) do
{_, model} = elem(query.sources, source)
if model do
type = type!(kind, query, expr, model, field)
if (expected = meta[:ecto_type]) && !Types.match?(type, expected) do
error! query, expr, "field `#{inspect model}.#{field}` in `#{kind}` does not type check. " <>
"It has type #{inspect type} but a type #{inspect expected} is expected"
end
end
end
# Normalize the select field.
defp normalize_select(query, only_where?) do
cond do
only_where? ->
query
query.select ->
Macro.prewalk(query.select.expr, &validate_select(&1, query))
query
true ->
%{query | select: %QueryExpr{expr: {:&, [], [0]}}}
end
end
defp validate_select({:assoc, _, [var, fields]}, query) do
validate_assoc(var, fields, query)
end
defp validate_select(other, _query) do
other
end
defp validate_assoc(parent_var, fields, query) do
Enum.each(fields, fn {field, nested} ->
{_, parent_model} = Util.find_source(query.sources, parent_var)
refl = parent_model.__schema__(:association, field)
unless refl do
error! query, query.select, "field `#{inspect parent_model}.#{field}` " <>
"in assoc/2 is not an association"
end
{child_var, child_fields} = Assoc.decompose_assoc(nested)
{_, child_model} = Util.find_source(query.sources, child_var)
unless refl.associated == child_model do
error! query, query.select, "association `#{inspect parent_model}.#{field}` " <>
"in assoc/2 doesn't match join model `#{child_model}`"
end
case find_source_expr(query, child_var) do
%JoinExpr{qual: qual} when qual in [:inner, :left] ->
:ok
%JoinExpr{qual: qual} ->
error! query, query.select, "association `#{inspect parent_model}.#{field}` " <>
"in assoc/2 requires an inner or left join, got #{qual} join"
_ ->
:ok
end
validate_assoc(child_var, child_fields, query)
end)
end
defp find_source_expr(query, {:&, _, [0]}) do
query.from
end
defp find_source_expr(query, {:&, _, [ix]}) do
Enum.fetch! query.joins, ix - 1
end
if map_size(%Ecto.Query{}) != 14 do
raise "Ecto.Query match out of date in planner"
end
defp only_where(query, false), do: query
defp only_where(query, true) do
case query do
%Ecto.Query{joins: [], select: nil, order_bys: [], limit: nil, offset: nil,
group_bys: [], havings: [], preloads: [], distincts: [], lock: nil} ->
query
_ ->
error! query, "only `where` expressions are allowed"
end
end
## Helpers
# Traverse all query components with expressions.
# Therefore from, preload and lock are not traversed.
defp traverse_exprs(original, acc, fun) do
query = original
{select, acc} = fun.(:select, original, original.select, acc)
query = %{query | select: select}
{distincts, acc} = fun.(:distinct, original, original.distincts, acc)
query = %{query | distincts: distincts}
{joins, acc} = fun.(:join, original, original.joins, acc)
query = %{query | joins: joins}
{wheres, acc} = fun.(:where, original, original.wheres, acc)
query = %{query | wheres: wheres}
{group_bys, acc} = fun.(:group_by, original, original.group_bys, acc)
query = %{query | group_bys: group_bys}
{havings, acc} = fun.(:having, original, original.havings, acc)
query = %{query | havings: havings}
{order_bys, acc} = fun.(:order_by, original, original.order_bys, acc)
query = %{query | order_bys: order_bys}
{limit, acc} = fun.(:limit, original, original.limit, acc)
query = %{query | limit: limit}
{offset, acc} = fun.(:offset, original, original.offset, acc)
{%{query | offset: offset}, acc}
end
defp type!(_kind, _query, _expr, nil, _field), do: :any
defp type!(kind, query, expr, model, field) do
if type = model.__schema__(:field_type, field) do
type
else
error! query, expr, "field `#{inspect model}.#{field}` in `#{kind}` does not exist in the model source"
end
end
def cast!(query, expr, message) do
message =
[message: message, query: query, file: expr.file, line: expr.line]
|> Ecto.QueryError.exception()
|> Exception.message
raise Ecto.CastError, message: message
end
defp error!(query, message) do
raise Ecto.QueryError, message: message, query: query
end
defp error!(query, expr, message) do
raise Ecto.QueryError, message: message, query: query, file: expr.file, line: expr.line
end
end
|
lib/ecto/query/planner.ex
| 0.821259 | 0.486819 |
planner.ex
|
starcoder
|
defmodule ExBinance do
@moduledoc """
Binance API client.
"""
defdelegate ping, to: ExBinance.Market, as: :ping
defdelegate get_time, to: ExBinance.Market, as: :get_time
defdelegate market_info, to: ExBinance.Market, as: :info
defdelegate order_book(x, y), to: ExBinance.Market, as: :order_book
defdelegate trades(x, y), to: ExBinance.Market, as: :trades
defdelegate aggregate_trades(x, y), to: ExBinance.Market, as: :aggregate_trades
defdelegate historical_trades(x, y), to: ExBinance.Market, as: :historical_trades
defdelegate klines(x, y, z), to: ExBinance.Market, as: :klines
defdelegate prices_24hr(x), to: ExBinance.Market, as: :prices_24hr
defdelegate all_prices, to: ExBinance.Market, as: :all_prices
defdelegate all_books, to: ExBinance.Market, as: :all_books
defdelegate price_ticker(x), to: ExBinance.Market, as: :price_ticker
defdelegate book_ticker(x), to: ExBinance.Market, as: :book_ticker
defdelegate account_info, to: ExBinance.User, as: :account_info
defdelegate my_trades(x, y), to: ExBinance.User, as: :trades
defdelegate create_order(symbol, side, type, time_in_force, quantity, price, params), to: ExBinance.User, as: :create_order
defdelegate test_create_order(symbol, side, type, time_in_force, quantity, price, params), to: ExBinance.User, as: :test_create_order
defdelegate check_order(x, y), to: ExBinance.User, as: :check_order
defdelegate check_client_order(x, y), to: ExBinance.User, as: :check_client_order
defdelegate cancel_order(x, y), to: ExBinance.User, as: :cancel_order
defdelegate cancel_client_order(x, y), to: ExBinance.User, as: :cancel_client_order
defdelegate open_orders(x), to: ExBinance.User, as: :open_orders
defdelegate all_orders(x,y), to: ExBinance.User, as: :all_orders
defdelegate withdraw(x), to: ExBinance.User, as: :withdraw
defdelegate deposit_history(x), to: ExBinance.User, as: :deposit_history
defdelegate withdraw_history(x), to: ExBinance.User, as: :withdraw_history
defdelegate deposit_address(x), to: ExBinance.User, as: :deposit_address
defdelegate start_user_data_stream, to: ExBinance.User, as: :start_user_data_stream
defdelegate keepalive_user_data_stream, to: ExBinance.User, as: :keepalive_user_data_stream
defdelegate close_user_data_stream, to: ExBinance.User, as: :close_user_data_stream
end
|
lib/ex_binance.ex
| 0.575588 | 0.494873 |
ex_binance.ex
|
starcoder
|
defmodule EDS.Remote.Spy.Bindings do
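  # Variable-binding helpers for the remote spy evaluator. The `add/3`,
  # `add_anonymous/2` and `find/2` clauses are unrolled over the first few list
  # positions so short binding lists are updated without deep recursion.
  # Usage sketch:
  #   bindings = EDS.Remote.Spy.Bindings.add(:x, 42, [])
  #   {:value, 42} = EDS.Remote.Spy.Bindings.find(:x, bindings)
  #   :unbound = EDS.Remote.Spy.Bindings.find(:y, bindings)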
def add(from, []), do: from
def add([{name, value} | from], to) do
add(from, add(name, value, to))
end
def add([], to), do: to
def add(name, value, [{name, _} | bindings]),
do: [{name, value} | bindings]
def add(name, value, [b1, {name, _} | bindings]),
do: [b1, {name, value} | bindings]
def add(name, value, [b1, b2, {name, _} | bindings]),
do: [b1, b2, {name, value} | bindings]
def add(name, value, [b1, b2, b3, {name, _} | bindings]),
do: [b1, b2, b3, {name, value} | bindings]
def add(name, value, [b1, b2, b3, b4, {name, _} | bindings]),
do: [b1, b2, b3, b4, {name, value} | bindings]
def add(name, value, [b1, b2, b3, b4, b5, {name, _} | bindings]),
do: [b1, b2, b3, b4, b5, {name, value} | bindings]
def add(name, value, [b1, b2, b3, b4, b5, b6 | bindings]),
do: [b1, b2, b3, b4, b5, b6 | add(name, value, bindings)]
def add(name, value, [b1, b2, b3, b4, b5 | bindings]),
do: [b1, b2, b3, b4, b5 | add(name, value, bindings)]
def add(name, value, [b1, b2, b3, b4 | bindings]),
do: [b1, b2, b3, b4 | add(name, value, bindings)]
def add(name, value, [b1, b2, b3 | bindings]),
do: [b1, b2, b3 | add(name, value, bindings)]
def add(name, value, [b1, b2 | bindings]),
do: [b1, b2 | add(name, value, bindings)]
def add(name, value, [b1 | bindings]),
do: [b1 | add(name, value, bindings)]
def add(name, value, []),
do: [{name, value}]
def add_anonymous(value, [{:_, _} | bindings]),
do: [{:_, value} | bindings]
def add_anonymous(value, [b1, {:_, _} | bindings]),
do: [b1, {:_, value} | bindings]
def add_anonymous(value, [b1, b2, {:_, _} | bindings]),
do: [b1, b2, {:_, value} | bindings]
def add_anonymous(value, [b1, b2, b3, {:_, _} | bindings]),
do: [b1, b2, b3, {:_, value} | bindings]
def add_anonymous(value, [b1, b2, b3, b4, {:_, _} | bindings]),
do: [b1, b2, b3, b4, {:_, value} | bindings]
def add_anonymous(value, [b1, b2, b3, b4, b5, {:_, _} | bindings]),
do: [b1, b2, b3, b4, b5, {:_, value} | bindings]
def add_anonymous(value, [b1, b2, b3, b4, b5, b6 | bindings]),
do: [b1, b2, b3, b4, b5, b6 | add_anonymous(value, bindings)]
def add_anonymous(value, [b1, b2, b3, b4, b5 | bindings]),
do: [b1, b2, b3, b4, b5 | add_anonymous(value, bindings)]
def add_anonymous(value, [b1, b2, b3, b4 | bindings]),
do: [b1, b2, b3, b4 | add_anonymous(value, bindings)]
def add_anonymous(value, [b1, b2, b3 | bindings]),
do: [b1, b2, b3 | add_anonymous(value, bindings)]
def add_anonymous(value, [b1, b2 | bindings]),
do: [b1, b2 | add_anonymous(value, bindings)]
def add_anonymous(value, [b1 | bindings]),
do: [b1 | add_anonymous(value, bindings)]
def add_anonymous(value, []),
do: [{:_, value}]
def find(name, [{name, value} | _]), do: {:value, value}
def find(name, [_, {name, value} | _]), do: {:value, value}
def find(name, [_, _, {name, value} | _]), do: {:value, value}
def find(name, [_, _, _, {name, value} | _]), do: {:value, value}
def find(name, [_, _, _, _, {name, value} | _]), do: {:value, value}
def find(name, [_, _, _, _, _, {name, value} | _]), do: {:value, value}
def find(name, [_, _, _, _, _, _ | bindings]), do: find(name, bindings)
def find(name, [_, _, _, _, _ | bindings]), do: find(name, bindings)
def find(name, [_, _, _, _ | bindings]), do: find(name, bindings)
def find(name, [_, _, _ | bindings]), do: find(name, bindings)
def find(name, [_, _ | bindings]), do: find(name, bindings)
def find(name, [_ | bindings]), do: find(name, bindings)
def find(_, []), do: :unbound
def merge(_eval, source, destination) do
source
|> Enum.reduce_while(destination, fn {name, variable}, acc ->
case {find(name, acc), name} do
{{:value, ^variable}, _name} ->
{:cont, acc}
{{:value, _}, :_} ->
          {:cont, [{name, variable} | List.keydelete(acc, :_, 0)]}
{{:value, _}, _name} ->
{:halt, {:error, variable, acc}}
{:unbound, _name} ->
{:cont, [{name, variable} | acc]}
end
end)
|> case do
{:halt, term} -> term
bindings -> bindings
end
end
def new(), do: :erl_eval.new_bindings()
end
|
lib/eds_remote/spy/bindings.ex
| 0.584983 | 0.437643 |
bindings.ex
|
starcoder
|
defmodule Ptolemy.Auth do
@moduledoc """
`Ptolemy.Auth` provides authentication implementations to a remote vault server.
## Usage
All token request should call the `Ptolemy.Auth.authenticate/4` function and *not* the `c:authenticate/3` callback found
in each modules implementing this behaviour!
Here are a few examples of the usage:
```elixir
#Approle, no IAP
  Ptolemy.Auth.authenticate(:Approle, "https://test-vault.com", %{secret_id: "test", role_id: "test"}, [])
#Approle with IAP
  Ptolemy.Auth.authenticate(:Approle, "https://test-vault.com", %{secret_id: "test", role_id: "test"}, [iap_svc_acc: @gcp_svc1_with_vault_perm, client_id: @fake_id, exp: 2000])
#Approle with IAP and `bearer` token being re-used
Ptolemy.Auth.authenticate(:Approle, "https://test-vault.com", %{secret_id: "test", role_id: "test"}, {"Authorization", "Bearer 98a4c7ab98a4c7ab98a4c7ab"})
#GCP with no IAP
Ptolemy.Auth.authenticate(:GCP, "https://test-vault.com", my_svc, [])
#GCP with IAP, 2 Google service accounts, one for vault one for IAP
Ptolemy.Auth.authenticate(:GCP, @vurl, %{gcp_svc_acc: @gcp_svc1_with_vault_perm, vault_role: "test", exp: 3000}, [iap_svc_acc: my_svc, client_id: @fake_id, exp: 2000])
#GCP with IAP, re-using the same GCP service account being used to authenticate to vault inorder to auth into IAP
Ptolemy.Auth.authenticate(:GCP, @vurl, %{gcp_svc_acc: @gcp_svc1_with_vault_perm, vault_role: "test", exp: 3000}, [iap_svc_acc: :reuse, client_id: @fake_id, exp: 2000])
#GCP with IAP and `bearer` token being re-used
Ptolemy.Auth.authenticate(:GCP, @vurl, %{gcp_svc_acc: @gcp_svc1_with_vault_perm, vault_role: "test", exp: 3000}, {"Authorization", "Bearer 98a4c7ab98a4c7ab98a4c<PASSWORD>"})
```
"""
@typedoc """
Vault authentication data.
"""
@type vault_auth_data :: %{
token: {String.t(), String.t()},
renewable: boolean(),
lease_duration: pos_integer()
}
@typedoc """
Google Identity Aware Proxy authentication data.
"""
@type iap_auth_data :: %{
token: {String.t(), String.t()}
}
@typedoc """
Credential data needed to authenticated to a remote vault server.
Each specific auth method's credential data have a different schema.
"""
@type cred_data ::
%{
gcp_svc_acc: map(),
vault_role: String.t(),
exp: pos_integer()
}
| %{
secret_id: String.t(),
role_id: String.t()
}
@typedoc """
Authentication options, used to specify IAP credentials and other future authentication options.
If under the `:iap_svc_acc` key `:reuse` is specified and the auth method was set to `:GCP`, `Ptolemy.Auth`
will attempt to re-use the GCP service account specified under the supplied `cred_data` type.
`:client_id` is the OAuth2 client id, this can be found in Security -> Identity-Aware-Proxy -> Select the IAP resource -> Edit OAuth client.
`:exp` is the validity period for the token in seconds, google's API specifies that a token can only be valid for up to 3600 seconds.
Specifying a tuple of type {"Authorization", "Bearer ....."} will notify `Ptolemy.Auth.authenticate/4` to reuse the token to prevent
  excessive authentication calls to IAP.
"""
@type auth_opts ::
[]
| [iap_svc_acc: map(), client_id: String.t(), exp: pos_integer()]
| [headers: list()]
| [http_opts: list()]
| [headers: list(), http_opts: list()]
| [headers: list(), iap_svc_acc: map(), client_id: String.t(), exp: pos_integer()]
| [http_opts: list(), iap_svc_acc: map(), client_id: String.t(), exp: pos_integer()]
| [
headers: list(),
http_opts: list(),
iap_svc_acc: map(),
client_id: String.t(),
exp: pos_integer()
]
| [iap_svc_acc: :reuse, client_id: String.t(), exp: pos_integer()]
| [headers: list(), iap_svc_acc: :reuse, client_id: String.t(), exp: pos_integer()]
| [http_opts: list(), iap_svc_acc: :reuse, client_id: String.t(), exp: pos_integer()]
| [
headers: list(),
http_opts: list(),
iap_svc_acc: :reuse,
client_id: String.t(),
exp: pos_integer()
]
| {String.t(), String.t()}
@typedoc """
Atoms representing the authentication methods that is currently supported on ptolemy.
Currently supported methods are:
- GCP -> `:GCP`
- Approle -> `:Approle`
"""
@type auth_method :: :GCP | :Approle
@typedoc """
List representing an IAP token.
  The token type returned from a successful IAP call will always be of type `Authorization Bearer`.
"""
@type iap_tok :: [] | [{String.t(), String.t()}]
@doc """
Authentication method specific callback to be implemented by different modules.
Each modules representing a specific authentication method should implement this callback in its own module.
"""
@callback authenticate(endpoint :: String.t(), cred_data, list(), list()) ::
vault_auth_data | {:error, String.t()}
@doc """
Authenticates against a remote vault server with specified auth strategy and options.
Currently the only supported options deals with IAP.
  Note: specifying an empty list or a tuple to this function under `auth_opts` will *NOT* return an IAP token and IAP credentials metadata.
"""
@spec authenticate(auth_method, String.t(), cred_data, auth_opts) ::
vault_auth_data
| %{vault: vault_auth_data, iap: iap_auth_data}
| {:error, String.t()}
def authenticate(method, url, credentials, opts) do
    opts =
      if is_tuple(opts) do
        [headers: [opts]]
      else
        opts
      end
split_opts = Keyword.split(opts, [:iap_svc_acc, :client_id, :exp])
iap_opts = elem(split_opts, 0)
headers = Keyword.get(elem(split_opts, 1), :headers, [])
http_opts = Keyword.get(elem(split_opts, 1), :http_opts, [])
    if length(iap_opts) == 3 do
      iap_authenticate(method, url, credentials, iap_opts, headers, http_opts)
    else
      opts_authenticate(method, url, credentials, headers, http_opts)
    end
end
  # IAP is enabled and has a separate service account.
defp iap_authenticate(
method,
url,
credentials,
[iap_svc_acc: svc, client_id: cid, exp: exp],
headers,
http_opts
)
when is_map(svc) do
iap_tok = Ptolemy.Auth.Google.authenticate(:iap, svc, cid, exp)
vault_tok = auth(method, url, credentials, [iap_tok] ++ headers, http_opts)
%{vault: vault_tok, iap: %{token: iap_tok}}
end
# IAP is enabled with instruction to re-use `credentials` as the IAP service account
defp iap_authenticate(
method,
url,
credentials,
[iap_svc_acc: :reuse, client_id: cid, exp: exp],
headers,
http_opts
) do
iap_tok = Ptolemy.Auth.Google.authenticate(:iap, credentials[:gcp_svc_acc], cid, exp)
vault_tok = auth(method, url, credentials, [iap_tok] ++ headers, http_opts)
%{vault: vault_tok, iap: %{token: iap_tok}}
end
def opts_authenticate(method, url, credentials, headers, http_opts) do
auth(method, url, credentials, headers, http_opts)
end
defp auth(method, url, credentials, headers, http_opts) do
auth_type = Module.concat(Ptolemy.Auth, method)
auth_type.authenticate(url, credentials, headers, http_opts)
end
@doc """
Sends a payload to a remote vault server's authentication endpoint.
"""
@spec login(%Tesla.Client{}, String.t(), map()) :: vault_auth_data | {:error, String.t()}
def login(client, auth_endp, payload) do
with {:ok, resp} <- Tesla.post(client, auth_endp, payload) do
case {resp.status, resp.body} do
{status, body} when status in 200..299 ->
parse_vault_resp(body)
{status, body} ->
message = Map.fetch!(body, "errors")
{:error, "Authentication failed, Status: #{status} with error: #{message}"}
end
else
err -> err
end
end
@doc """
Creates a `%Tesla.Client{}` pointing to a remote vault server.
"""
@spec vault_auth_client(String.t(), list(), list()) :: %Tesla.Client{}
def vault_auth_client(url, headers, opts) do
Tesla.client([
{Tesla.Middleware.BaseUrl, "#{url}/v1"},
{Tesla.Middleware.Headers, headers},
{Tesla.Middleware.Opts, opts},
{Tesla.Middleware.JSON, []}
])
end
# parses auth body to return relevant information
defp parse_vault_resp(body) do
%{
"auth" => %{
"client_token" => client_token,
"renewable" => renewable,
"lease_duration" => lease_duration
}
} = body
%{
token: {"X-Vault-Token", client_token},
renewable: renewable,
lease_duration: lease_duration
}
end
end
|
lib/auth/auth.ex
| 0.889816 | 0.609989 |
auth.ex
|
starcoder
|
defmodule AtomTweaks.Markdown do
@moduledoc """
A structure that represents a chunk of Markdown text in memory.
For the database type, see `AtomTweaks.Ecto.Markdown` instead.
The structure contains both the raw Markdown `text` and, potentially, the rendered `html`. Upon
a request to render the structure using either `to_html/1` or `to_iodata/1`, the `html` field is
given preference and returned unchanged, if available. If the `html` value is `nil`, then the
contents of the `text` field are rendered using `AtomTweaksWeb.MarkdownEngine.render/1` and
returned.
This type requires special handling in forms because Phoenix's form builder functions call
`Phoenix.HTML.html_escape/1` on all field values, which returns the `html` field on this type. But
what we want when we show an `AtomTweaks.Markdown` value in a form is the `text` field.
"""
alias AtomTweaksWeb.MarkdownEngine
@type t :: %__MODULE__{text: String.t(), html: nil | String.t()}
defstruct text: "", html: nil
@typedoc """
An `AtomTweaks.Markdown` struct or a string of Markdown text.
"""
@type markdown :: %__MODULE__{} | String.t()
@doc """
Renders the supplied Markdown as HTML.
## Examples
Render Markdown from a string:
```
iex> AtomTweaks.Markdown.to_html("# Foo")
"<h1>Foo</h1>\n"
```
Render Markdown from an unrendered `Markdown` struct:
```
iex> AtomTweaks.Markdown.to_html(%AtomTweaks.Markdown{text: "# Foo"})
"<h1>Foo</h1>\n"
```
Passes already rendered Markdown through unchanged:
```
iex> AtomTweaks.Markdown.to_html(%AtomTweaks.Markdown{html: "<p>foo</p>"})
"<p>foo</p>"
```
Returns an empty string for anything that isn't a string or a `Markdown` struct:
```
iex> AtomTweaks.Markdown.to_html(5)
""
```
"""
@spec to_html(markdown) :: binary
def to_html(markdown)
def to_html(%__MODULE__{html: html}) when is_binary(html), do: html
def to_html(%__MODULE__{text: text}) when is_binary(text), do: to_html(text)
def to_html(binary) when is_binary(binary) do
MarkdownEngine.render(binary)
end
def to_html(_), do: ""
@doc """
Renders a chunk of Markdown to its `iodata` representation.
"""
@spec to_iodata(markdown) :: iodata
def to_iodata(markdown = %__MODULE__{}), do: to_html(markdown)
defimpl Jason.Encoder do
def encode(markdown = %AtomTweaks.Markdown{}, opts) do
Jason.Encode.string(markdown.text, opts)
end
end
defimpl Phoenix.HTML.Safe do
def to_iodata(markdown = %AtomTweaks.Markdown{}) do
AtomTweaks.Markdown.to_iodata(markdown)
end
end
end
|
lib/atom_tweaks/markdown.ex
| 0.924526 | 0.832713 |
markdown.ex
|
starcoder
|
defmodule Telemetry do
@moduledoc """
`Telemetry` allows you to invoke certain functions whenever a particular event is emitted.
For more information see the documentation for `attach/5`, `attach_many/5` and `execute/3`.
"""
require Logger
alias Telemetry.HandlerTable
@type handler_id :: term()
@type event_name :: [atom()]
@type event_value :: number()
@type event_metadata :: map()
@type event_prefix :: [atom()]
## API
@doc """
Attaches the handler to the event.
`handler_id` must be unique, if another handler with the same ID already exists the
`{:error, :already_exists}` tuple is returned.
See `execute/3` to learn how the handlers are invoked.
"""
@spec attach(handler_id, event_name, module, function :: atom, config :: term) ::
:ok | {:error, :already_exists}
def attach(handler_id, event_name, module, function, config) do
attach_many(handler_id, [event_name], module, function, config)
end
@doc """
Attaches the handler to many events.
The handler will be invoked whenever any of the events in the `event_names` list is emitted. Note
  that failure of the handler on any of these invocations will detach it from all the events in
  `event_names` (the same applies to manual detaching using `detach/1`).
"""
@spec attach_many(handler_id, [event_name], module, function :: atom, config :: term) ::
:ok | {:error, :already_exists}
def attach_many(handler_id, event_names, module, function, config) do
Enum.each(event_names, &assert_event_name_or_prefix/1)
HandlerTable.insert(handler_id, event_names, module, function, config)
end
@doc """
Removes the existing handler.
If the handler with given ID doesn't exist, `{:error, :not_found}` is returned.
"""
@spec detach(handler_id) :: :ok | {:error, :not_found}
def detach(handler_id) do
HandlerTable.delete(handler_id)
end
@doc """
Emits the event, invoking handlers attached to it.
When the event is emitted, `module.function` provided to `attach/5` is called with four arguments:
* the event name
* the event value
* the event metadata
* the handler configuration given to `attach/5`
All the handlers are executed by the process calling this function. If the function fails (raises,
exits or throws) then the handler is removed.
Note that you should not rely on the order in which handlers are invoked.
"""
@spec execute(event_name, event_value) :: :ok
@spec execute(event_name, event_value, event_metadata) :: :ok
def execute(event_name, value, metadata \\ %{})
when is_number(value) and is_map(metadata) do
handlers = HandlerTable.list_for_event(event_name)
for {handler_id, _, module, function, config} <- handlers do
try do
apply(module, function, [event_name, value, metadata, config])
catch
class, reason ->
detach(handler_id)
stacktrace = System.stacktrace()
Logger.error(
"Handler #{inspect(module)}.#{function} with ID #{inspect(handler_id)} " <>
"has failed and has been detached\n" <> Exception.format(class, reason, stacktrace)
)
end
end
:ok
end
@doc """
Returns all handlers attached to events with given prefix.
Handlers attached to many events at once using `attach_many/5` will be listed once for each
event they're attached to.
Note that you can list all handlers by feeding this function an empty list.
"""
@spec list_handlers(event_prefix) :: [
{handler_id, event_name, module, function :: atom, config :: term}
]
def list_handlers(event_prefix) do
assert_event_name_or_prefix(event_prefix)
HandlerTable.list_by_prefix(event_prefix)
end
## Helpers
@spec assert_event_name_or_prefix(term()) :: :ok | no_return
defp assert_event_name_or_prefix(list) when is_list(list) do
if Enum.all?(list, &is_atom/1) do
:ok
else
raise ArgumentError, "Expected event name or prefix to be a list of atoms"
end
end
defp assert_event_name_or_prefix(_) do
raise ArgumentError, "Expected event name or prefix to be a list of atoms"
end
end
|
lib/telemetry.ex
| 0.885774 | 0.501038 |
telemetry.ex
|
starcoder
|
defmodule Broadway do
@moduledoc ~S"""
Broadway is a concurrent, multi-stage tool for building
data ingestion and data processing pipelines.
It allows developers to consume data efficiently from different
sources, such as Amazon SQS, Apache Kafka, Google Cloud PubSub,
RabbitMQ and others.
## Built-in features
* Back-pressure - by relying on `GenStage`, we only get the amount
of events necessary from upstream sources, never flooding the
pipeline.
* Automatic acknowledgements - Broadway automatically acknowledges
messages at the end of the pipeline or in case of errors.
* Batching - Broadway provides built-in batching, allowing you to
group messages either by size and/or by time. This is important
in systems such as Amazon SQS, where batching is the most efficient
way to consume messages, both in terms of time and cost.
* Fault tolerance with minimal data loss - Broadway pipelines are
carefully designed to minimize data loss. Producers are isolated
from the rest of the pipeline and automatically resubscribed to
in case of failures. On the other hand, user callbacks are stateless,
allowing us to handle any errors locally. Finally, in face of any
unforeseen bug, we restart only downstream components, avoiding
data loss.
* Graceful shutdown - Broadway integrates with the VM to provide graceful
shutdown. By starting Broadway as part of your supervision tree, it will
guarantee all events are flushed once the VM shuts down.
* Built-in testing - Broadway ships with a built-in test API, making it
easy to push test messages through the pipeline and making sure the
event was properly processed.
* Custom failure handling - Broadway provides a `c:handle_failed/2` callback
where developers can outline custom code to handle errors. For example,
if they want to move messages to another queue for further processing.
* Dynamic batching - Broadway allows developers to batch messages based on
custom criteria. For example, if your pipeline needs to build
batches based on the `user_id`, email address, etc, it can be done
by calling `Broadway.Message.put_batch_key/2`.
* Ordering and Partitioning - Broadway allows developers to partition
messages across workers, guaranteeing messages within the same partition
are processed in order. For example, if you want to guarantee all events
tied to a given `user_id` are processed in order and not concurrently,
you can set the `:partition_by` option. See ["Ordering and partitioning"](#module-ordering-and-partitioning).
  * Rate limiting: Broadway allows developers to rate limit all producers in
    a single node by a given number of messages in a time period, allowing
    developers to easily work with sources or sinks that cannot cope with a high
    number of requests. See the `:rate_limiting` option for producers in
    `start_link/2` and the short sketch right after this list.
* Metrics - Broadway uses the `:telemetry` library for instrumentation,
see ["Telemetry"](#module-telemetry) section below for more information.
* Back-off (TODO)
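  A sketch of the rate limit configuration on a producer (the numbers are
  arbitrary and allow 60 messages per minute):
      producer: [
        module: {Counter, []},
        concurrency: 1,
        rate_limiting: [allowed_messages: 60, interval: 60_000]
      ]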
## The Broadway Behaviour
In order to use Broadway, you need to:
1. Define your pipeline configuration
2. Define a module implementing the Broadway behaviour
### Example
Broadway is a process-based behaviour, and you begin by
defining a module that invokes `use Broadway`. Processes
defined by these modules will often be started by a
supervisor, and so a `start_link/1` function is frequently
also defined but not strictly necessary.
defmodule MyBroadway do
use Broadway
def start_link(_opts) do
Broadway.start_link(MyBroadway,
name: MyBroadwayExample,
producer: [
module: {Counter, []},
concurrency: 1
],
processors: [
default: [concurrency: 2]
]
)
end
...callbacks...
end
Then add your Broadway pipeline to your supervision tree
(usually in `lib/my_app/application.ex`):
children = [
{MyBroadway, []}
]
Supervisor.start_link(children, strategy: :one_for_one)
Adding your pipeline to your supervision tree in this way
calls the default `child_spec/1` function that is generated
when `use Broadway` is invoked. If you would like to customize
the child spec passed to the supervisor, you can override the
`child_spec/1` function in your module or explicitly pass a
child spec to the supervisor when adding it to your supervision tree.
The configuration above defines a pipeline with:
* One producer
* Two processors
Here is how this pipeline would be represented:
```asciidoc
[producer_1]
/ \
/ \
/ \
/ \
[processor_1] [processor_2] <- process each message
```
After the pipeline is defined, you need to implement the `c:handle_message/3`
callback which will be invoked by processors for each message.
`c:handle_message/3` receives every message as a `Broadway.Message`
struct and it must return an updated message.
## Batching
Depending on the scenario, you may want to group processed messages as
batches before publishing your data. This is common and especially
important when working with services like AWS S3 and SQS that provide a
specific API for sending and retrieving batches. This can drastically
increase throughput and consequently improve the overall performance of
your pipeline.
To create batches, define the `:batchers` configuration option:
defmodule MyBroadway do
use Broadway
def start_link(_opts) do
Broadway.start_link(MyBroadway,
name: MyBroadwayExample,
producer: [
module: {Counter, []},
concurrency: 1
],
processors: [
default: [concurrency: 2]
],
batchers: [
sqs: [concurrency: 2, batch_size: 10],
s3: [concurrency: 1, batch_size: 10]
]
)
end
# ...callbacks...
end
The configuration above defines a pipeline with:
* One producer
* Two processors
* One batcher named `:sqs` with two batch processors
* One batcher named `:s3` with one batch processor
Here is how this pipeline would be represented:
```asciidoc
[producer_1]
/ \
/ \
/ \
/ \
[processor_1] [processor_2] <- process each message
/\ /\
/ \ / \
/ \ / \
/ x \
/ / \ \
/ / \ \
/ / \ \
[batcher_sqs] [batcher_s3]
/\ \
/ \ \
/ \ \
/ \ \
[batch_sqs_1] [batch_sqs_2] [batch_s3_1] <- process each batch
```
Additionally, you have to define the `c:handle_batch/4` callback,
which batch processors invoke for each batch. You can then
call `Broadway.Message.put_batcher/2` inside `c:handle_message/3` to
control which batcher the message should go to.
The batcher receives processed messages and creates batches
specified by the `batch_size` and `batch_timeout` configuration. The
goal is to create a batch with at most `batch_size` entries within
`batch_timeout` milliseconds. Each message goes into a particular batch,
controlled by calling `Broadway.Message.put_batch_key/2` in
`c:handle_message/3`. Once a batch is created in the batcher, it is sent
to a separate process (the batch processor) that will call `c:handle_batch/4`,
passing the batcher, the batch itself (a list of messages), a `Broadway.BatchInfo`
struct, and the Broadway context.
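  As a sketch, grouping messages per user inside a batcher (assuming the
  message data carries a `user_id`) can be done from `c:handle_message/3`:
      Message.put_batch_key(message, message.data.user_id)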
For example, imagine your producer generates integers as `data`.
You want to route the odd integers to SQS and the even ones to
S3. Your pipeline would look like this:
defmodule MyBroadway do
use Broadway
import Integer
alias Broadway.Message
# ...start_link...
@impl true
def handle_message(_, %Message{data: data} = message, _) when is_odd(data) do
message
|> Message.update_data(&process_data/1)
|> Message.put_batcher(:sqs)
end
def handle_message(_, %Message{data: data} = message, _) when is_even(data) do
message
|> Message.update_data(&process_data/1)
|> Message.put_batcher(:s3)
end
defp process_data(data) do
# Do some calculations, generate a JSON representation, etc.
end
@impl true
def handle_batch(:sqs, messages, _batch_info, _context) do
# Send batch of successful messages as ACKs to SQS
# This tells SQS that this list of messages were successfully processed
end
def handle_batch(:s3, messages, _batch_info, _context) do
# Send batch of messages to S3
end
end
See the [callbacks documentation](#callbacks) for more information on the
arguments given to each callback and their expected return types.
### The default batcher
Once you define the `:batchers` configuration key for your Broadway pipeline,
then **all messages get batched**. By default, unless you call
`Broadway.Message.put_batcher/2`, messages have their batcher set to the
`:default` batcher. If you don't define configuration for it, Broadway is going
to raise an error.
For example, imagine you want to batch "special" messages and handle them differently
  than all other messages. You can configure your pipeline like this:
defmodule MyBroadway do
use Broadway
def start_link(_opts) do
Broadway.start_link(MyBroadway,
name: MyBroadwayExample,
producer: [
module: {Counter, []},
concurrency: 1
],
processors: [
default: [concurrency: 2]
],
batchers: [
special: [concurrency: 2, batch_size: 10],
default: [concurrency: 1, batch_size: 10]
]
)
end
def handle_message(_, message, _) do
if special?(message) do
            Broadway.Message.put_batcher(message, :special)
else
message
end
end
def handle_batch(:special, messages, _batch_info, _context) do
# Handle special batch
end
def handle_batch(:default, messages, _batch_info, _context) do
# Handle all other messages in batches
end
Now you are ready to get started. See the `start_link/2` function
for a complete reference on the arguments and options allowed.
  Also make sure to check out the GUIDES in the documentation sidebar
  for more examples, how-tos and more.
## Acknowledgements and failures
At the end of the pipeline, messages are automatically acknowledged.
If there are no batchers, the acknowledgement will be done by processors.
The number of messages acknowledged, assuming the pipeline is running
at full scale, will be `max_demand - min_demand`. Since the default values
are 10 and 5 respectively, we will be acknowledging in groups of 5.
If there are batchers, the acknowledgement is done by the batchers,
using the `batch_size`.
In case of failures, Broadway does its best to keep the failures
contained and avoid losing messages. The failed message or batch is
acknowledged as failed immediately. For every failure, a log report
is also emitted. If your Broadway module also defines the
`c:handle_failed/2` callback, that callback will be invoked with
all the failed messages before they get acknowledged.
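  For instance, a minimal `c:handle_failed/2` sketch that forwards failures to a
  hypothetical `DeadLetter` module could look like:
      @impl true
      def handle_failed(messages, _context) do
        Enum.each(messages, fn message -> DeadLetter.publish(message.data) end)
        messages
      end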
Note however, that `Broadway` does not provide any sort of retries
out of the box. This is left completely as a responsibility of the
producer. For instance, if you are using Amazon SQS, the default
behaviour is to retry unacknowledged messages after a user-defined
timeout. If you don't want unacknowledged messages to be retried,
  it is your responsibility to configure a dead-letter queue as the target
for those messages.
## Producer concurrency
Setting producer concurrency is a tradeoff between latency and internal
queueing.
For efficiency, you should generally limit the amount of internal queueing.
Whenever additional messages are sitting in a busy processor's mailbox, they
can't be delivered to another processor which may be available or become
available first.
One possible cause of internal queueing is multiple producers. This is because
each processor's demand will be sent to all producers. For example, if a
processor demands `2` messages and there are `2` producers, each producer
will try to produce `2` messages (for example, by pulling from a queue or
whatever the specific producer does) and give them to the processor. So the
processor may receive `max_demand * <producer concurrency>` messages.
Setting producer `concurrency: 1` will reduce internal queueing. This is
likely a good choice for producers which take minimal time to produce a
message, such as `BroadwayRabbitMQ`, which receives messages as they are
pushed by RabbitMQ and can specify how many to prefetch.
On the other hand, when using a producer such as `BroadwaySQS` which must
make a network round trip to fetch from an external source, it may be better
to use multiple producers and accept some internal queueing to avoid having
  to fetch messages whenever there is new demand.
Measure your system to decide which setting is most appropriate.
Adding another single-producer pipeline, or another node running the
pipeline, are other ways you may consider to increase throughput.
## Batcher concurrency
If a batcher's `concurrency` is greater than `1`, Broadway will use as few of
the batcher processes as possible at any given moment, attempting to satisfy
the `batch_size` of one batcher process within the `batch_timeout` before
sending messages to another.
## Testing
Many producers receive data from external systems and hitting the network
is usually undesirable when running the tests.
For testing purposes, we recommend developers to use `Broadway.DummyProducer`.
This producer does not produce any messages by itself and instead the
`test_message/3` and `test_batch/3` functions should be used to publish
messages.
With `test_message/3`, you can push a message into the pipeline and receive
a process message when the pipeline acknowledges the data you have pushed
has been processed.
Let's see an example. Imagine the following `Broadway` module:
defmodule MyBroadway do
use Broadway
def start_link() do
producer_module = Application.fetch_env!(:my_app, :producer_module)
Broadway.start_link(__MODULE__,
name: __MODULE__,
producer: [
module: producer_module
],
processors: [
default: []
],
batchers: [
default: [batch_size: 10]
]
)
end
@impl true
def handle_message(_processor, message, _context) do
message
end
@impl true
def handle_batch(_batcher, messages, _batch_info, _context) do
messages
end
end
Now in config/test.exs you could do:
config :my_app, :producer_module, {Broadway.DummyProducer, []}
And we can test it like this:
defmodule MyBroadwayTest do
use ExUnit.Case, async: true
test "test message" do
ref = Broadway.test_message(MyBroadway, 1)
assert_receive {:ack, ^ref, [%{data: 1}], []}
end
end
Note that at the end we received a message in the format of:
{:ack, ^ref, successful_messages, failure_messages}
You can use the acknowledgment to guarantee the message has been
processed and therefore any side-effect from the pipeline should be
visible.
When using `test_message/3`, the message will be delivered as soon as
possible, without waiting for the pipeline `batch_size` to be reached
or without waiting for `batch_timeout`. This behaviour is useful to test
and verify single messages, without imposing high timeouts to our test
suites.
In case you want to test multiple messages, then you need to use
`test_batch/3`. `test_batch/3` will respect the batching configuration,
which most likely means you need to increase your test timeouts:
test "batch messages" do
{:ok, pid} = MyBroadway.start_link()
ref = Broadway.test_batch(pid, [1, 2, 3])
assert_receive {:ack, ^ref, [%{data: 1}, %{data: 2}, %{data: 3}], []}, 1000
end
However, keep in mind that, generally speaking, there is no guarantee
the messages will arrive in the same order that you have sent them,
especially for large batches, as Broadway will process large batches
concurrently and order will be lost.
If you want to send more than one test message at once, then we recommend
setting the `:batch_mode` to `:bulk`, especially if you want to assert how
the code will behave with large batches. Otherwise the batcher will flush
messages as soon as possible and in small batches.
However, keep in mind that, regardless of the `:batch_mode`, you cannot
rely on ordering, as Broadway pipelines are inherently concurrent. For
example, if you send these messages:
test "multiple batch messages" do
{:ok, pid} = MyBroadway.start_link()
ref = Broadway.test_batch(pid, [1, 2, 3, 4, 5, 6, 7], batch_mode: :bulk)
assert_receive {:ack, ^ref, [%{data: 1}], []}, 1000
end
## Ordering and partitioning
By default, Broadway processes all messages and batches concurrently,
which means ordering is not guaranteed. Some producers may impose some
ordering (for instance, Apache Kafka), but if the ordering comes from a
business requirement, you will have to impose the ordering yourself.
This can be done with the `:partition_by` option, which enforces that
messages with a given property are always forwarded to the same stage.
In order to provide partitioning throughout the whole pipeline, just
set `:partition_by` at the root of your configuration:
defmodule MyBroadway do
use Broadway
def start_link(_opts) do
Broadway.start_link(MyBroadway,
name: MyBroadwayExample,
producer: [
module: {Counter, []},
concurrency: 1
],
processors: [
default: [concurrency: 2]
],
batchers: [
sqs: [concurrency: 2, batch_size: 10],
s3: [concurrency: 1, batch_size: 10]
],
partition_by: &partition/1
)
end
  defp partition(msg) do
    msg.data.user_id
  end
end
In the example above, we are partitioning the pipeline by `user_id`.
This means any message with the same `user_id` will be handled by
the same processor and batch processor.
The `partition` function must return a non-negative integer,
starting at zero, which is routed to a stage by using the `remainder`
option.
If the data you want to partition by is not an integer, you can
explicitly hash it by calling `:erlang.phash2/1`. However, note
that hashing does not guarantee an equal distribution of events
across partitions. So some partitions may be more overloaded than
others, slowing down the whole pipeline.
In the example above, we have set the same partition for all
processors and batchers. You can also specify the `:partition_by`
function for each "processor" and "batcher" individually.
Finally, beware of the error semantics when using partitioning.
If you require ordering and a message fails, the partition will
continue processing messages. Depending on the type of processing,
the end result may be inconsistent. If your producer supports
retrying, the failed message may be retried later, also out of
order. Those issues happen regardless of Broadway and solutions
to said problems almost always need to be addressed outside of
Broadway too.
## Telemetry
Broadway currently exposes the following Telemetry events:
* `[:broadway, :topology, :init]` - Dispatched when the topology for
a Broadway pipeline is initialized. The config key in the metadata
contains the configuration options that were provided to
`Broadway.start_link/2`.
* Measurement: `%{time: System.monotonic_time}`
* Metadata: `%{supervision: pid(), config: keyword()}`
* `[:broadway, :processor, :start]` - Dispatched by a Broadway processor
before the optional `c:prepare_messages/2`
* Measurement: `%{time: System.monotonic_time}`
* Metadata: `%{name: atom, messages: [Broadway.Message.t]}`
* `[:broadway, :processor, :stop]` - Dispatched by a Broadway processor
after `c:prepare_messages/2` and after the `c:handle_message/3` callback
has been invoked for all individual messages
* Measurement: `%{time: System.monotonic_time, duration: native_time}`
* Metadata:
```
%{
name: atom,
successful_messages_to_ack: [Broadway.Message.t],
successful_messages_to_forward: [Broadway.Message.t],
failed_messages: [Broadway.Message.t]
}
```
* `[:broadway, :processor, :message, :start]` - Dispatched by a Broadway processor
before your `c:handle_message/3` callback is invoked
* Measurement: `%{time: System.monotonic_time}`
* Metadata:
```
%{
processor_key: atom,
name: atom,
message: Broadway.Message.t
}
```
* `[:broadway, :processor, :message, :stop]` - Dispatched by a Broadway processor
after your `c:handle_message/3` callback has returned
* Measurement: `%{time: System.monotonic_time, duration: native_time}`
* Metadata:
```
%{
processor_key: atom,
name: atom,
message: Broadway.Message.t,
updated_message: Broadway.Message.t
}
```
* `[:broadway, :processor, :message, :exception]` - Dispatched by a Broadway processor
if your `c:handle_message/3` callback encounters an exception
* Measurement: `%{time: System.monotonic_time, duration: native_time}`
* Metadata:
```
%{
processor_key: atom,
name: atom,
message: Broadway.Message.t,
kind: kind,
reason: reason,
stacktrace: stacktrace
}
```
* `[:broadway, :consumer, :start]` - Dispatched by a Broadway consumer before your
`c:handle_batch/4` callback is invoked
* Measurement: `%{time: System.monotonic_time}`
* Metadata:
```
%{
name: atom,
messages: [Broadway.Message.t],
batch_info: Broadway.BatchInfo.t
}
```
* `[:broadway, :consumer, :stop]` - Dispatched by a Broadway consumer after your
`c:handle_batch/4` callback has returned
* Measurement: `%{time: System.monotonic_time, duration: native_time}`
* Metadata:
```
%{
name: atom,
successful_messages: [Broadway.Message.t],
failed_messages: [Broadway.Message.t],
batch_info: Broadway.BatchInfo.t
}
```
* `[:broadway, :batcher, :start]` - Dispatched by a Broadway batcher before
handling events
* Measurement: `%{time: System.monotonic_time}`
* Metadata: `%{name: atom, messages: [{Broadway.Message.t}]}`
* `[:broadway, :batcher, :stop]` - Dispatched by a Broadway batcher after
handling events
* Measurement: `%{time: System.monotonic_time, duration: native_time}`
* Metadata: `%{name: atom}`
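These events can be consumed with `:telemetry.attach/4`. A minimal sketch
that reports processor `:stop` durations (the handler id and the reporting
approach are illustrative):

    :telemetry.attach(
      "broadway-processor-stop-logger",
      [:broadway, :processor, :stop],
      fn _event, measurements, metadata, _config ->
        # Inspect how long the processor took, in native time units
        IO.inspect({metadata.name, measurements.duration}, label: "processor stop")
      end,
      nil
    )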
"""
alias Broadway.{BatchInfo, Message, Topology}
alias NimbleOptions.ValidationError
@typedoc """
Returned by `start_link/2`.
"""
@type on_start() :: {:ok, pid()} | :ignore | {:error, {:already_started, pid()} | term()}
@doc """
Invoked for preparing messages before handling (if defined).
It expects:
* `message` is the `Broadway.Message` struct to be processed.
* `context` is the user defined data structure passed to `start_link/2`.
This is the place to prepare and preload any information that will be used
by `c:handle_message/3`. For example, if you need to query the database,
instead of doing it once per message, you can do it on this callback.
The length of the list of messages received by this callback is based on
the `min_demand`/`max_demand` configuration in the processor. This callback
must always return all messages it receives, as `c:handle_message/3` is still
called individually for each message afterwards.
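A minimal sketch, assuming a hypothetical `MyApp.Accounts.users_by_ids/1`
helper that returns a map of users keyed by id:

    @impl true
    def prepare_messages(messages, _context) do
      # One query for the whole batch instead of one query per message
      user_ids = Enum.map(messages, & &1.data.user_id)
      users = MyApp.Accounts.users_by_ids(user_ids)

      Enum.map(messages, fn message ->
        Broadway.Message.update_data(message, fn data ->
          Map.put(data, :user, Map.get(users, data.user_id))
        end)
      end)
    end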
"""
@callback prepare_messages(messages :: [Message.t()], context :: term) :: [Message.t()]
@doc """
Invoked to handle/process individual messages sent from a producer.
It receives:
* `processor` is the key that defined the processor.
* `message` is the `Broadway.Message` struct to be processed.
* `context` is the user defined data structure passed to `start_link/2`.
And it must return the (potentially) updated `Broadway.Message` struct.
This is the place to do any kind of processing with the incoming message,
e.g., transform the data into another data structure, call specific business
logic to do calculations. Basically, any CPU bounded task that runs against
a single message should be processed here.
In order to update the data after processing, use the
`Broadway.Message.update_data/2` function. This way the new message can be
properly forwarded and handled by the batcher:
@impl true
def handle_message(_, message, _) do
message
|> update_data(&do_calculation_and_returns_the_new_data/1)
end
In case more than one batcher have been defined in the configuration,
you need to specify which of them the resulting message will be forwarded
to. You can do this by calling `put_batcher/2` and returning the new
updated message:
@impl true
def handle_message(_, message, _) do
# Do whatever you need with the data
...
message
|> put_batcher(:s3)
end
Any message that has not been explicitly failed will be forwarded to the next
step in the pipeline. If there are no extra steps, it will be automatically
acknowledged.
In case of errors in this callback, the error will be logged and that particular
message will be immediately acknowledged as failed, not proceeding to the next
steps of the pipeline. This callback also traps exits, so failures due to broken
links between processes do not automatically cascade.
"""
@callback handle_message(processor :: atom, message :: Message.t(), context :: term) ::
Message.t()
@doc """
Invoked to handle generated batches.
It expects:
* `batcher` is the key that defined the batcher. This value can be
set in the `handle_message/3` callback using `Broadway.Message.put_batcher/2`.
* `messages` is the list of `Broadway.Message` structs in the incoming batch.
* `batch_info` is a `Broadway.BatchInfo` struct containing extra information
about the incoming batch.
* `context` is the user defined data structure passed to `start_link/2`.
It must return an updated list of messages. All messages received must be returned,
otherwise an error will be logged. All messages after this step will be acknowledged
according to their status.
In case of errors in this callback, the error will be logged and the whole
batch will be failed. This callback also traps exits, so failures due to broken
links between processes do not automatically cascade.
For more information on batching, see the "Batching" section in the `Broadway`
documentation.
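A sketch of a batch handler that writes the whole batch at once (the
`MyApp.Warehouse.insert_all/1` function is hypothetical):

    @impl true
    def handle_batch(_batcher, messages, _batch_info, _context) do
      messages
      |> Enum.map(& &1.data)
      |> MyApp.Warehouse.insert_all()

      messages
    end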
"""
@callback handle_batch(
batcher :: atom,
messages :: [Message.t()],
batch_info :: BatchInfo.t(),
context :: term
) :: [Message.t()]
@doc """
Invoked for failed messages (if defined).
It expects:
* `messages` is the list of messages that failed. If a message is failed in
`c:handle_message/3`, this will be a list with a single message in it. If
some messages are failed in `c:handle_batch/4`, this will be the list of
failed messages.
* `context` is the user-defined data structure passed to `start_link/2`.
This callback must return the same messages given to it, possibly updated.
For example, you could update the message data or use `Broadway.Message.configure_ack/2`
in a centralized place to configure how to ack the message based on the failure
reason.
This callback is optional. If present, it's called **before** the messages
are acknowledged according to the producer. This gives you a chance to do something
with the message before it's acknowledged, such as storing it in an external
persistence layer or similar.
This callback is also invoked if `c:handle_message/3` or `c:handle_batch/4`
crash or raise an error. If this callback crashes or raises an error,
the messages are failed internally by Broadway to avoid crashing the process.
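A minimal sketch that only logs the failures before they are acknowledged
(any real handling is up to you):

    require Logger

    @impl true
    def handle_failed(messages, _context) do
      for message <- messages do
        # message.status holds {:failed, reason} at this point
        Logger.error(["Message failed: ", inspect(message.data), " ", inspect(message.status)])
      end

      messages
    end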
"""
@doc since: "0.5.0"
@callback handle_failed(messages :: [Message.t()], context :: term) :: [Message.t()]
@optional_callbacks prepare_messages: 2, handle_batch: 4, handle_failed: 2
@doc false
defmacro __using__(opts) do
quote location: :keep, bind_quoted: [opts: opts, module: __CALLER__.module] do
@behaviour Broadway
@doc false
def child_spec(arg) do
default = %{
id: unquote(module),
start: {__MODULE__, :start_link, [arg]},
shutdown: :infinity
}
Supervisor.child_spec(default, unquote(Macro.escape(opts)))
end
defoverridable child_spec: 1
end
end
@doc """
Starts a `Broadway` process linked to the current process.
* `module` is the module implementing the `Broadway` behaviour.
## Options
In order to set up how the pipeline created by Broadway should work,
you need to specify the blueprint of the pipeline. You can
do this by passing a set of options to `start_link/2`.
Each component of the pipeline has its own set of options.
The broadway options are:
#{NimbleOptions.docs(Broadway.Options.definition())}
"""
@spec start_link(module(), keyword()) :: on_start()
def start_link(module, opts) do
case NimbleOptions.validate(opts, Broadway.Options.definition()) do
{:error, error} ->
raise ArgumentError, format_error(error)
{:ok, opts} ->
opts =
opts
|> carry_over_one(:producer, [:hibernate_after, :spawn_opt])
|> carry_over_many(:processors, [:partition_by, :hibernate_after, :spawn_opt])
|> carry_over_many(:batchers, [:partition_by, :hibernate_after, :spawn_opt])
Topology.start_link(module, opts)
end
end
defp format_error(%ValidationError{keys_path: [], message: message}) do
"invalid configuration given to Broadway.start_link/2, " <> message
end
defp format_error(%ValidationError{keys_path: keys_path, message: message}) do
"invalid configuration given to Broadway.start_link/2 for key #{inspect(keys_path)}, " <>
message
end
defp carry_over_one(opts, key, keys) do
update_in(opts[key], fn value -> Keyword.merge(Keyword.take(opts, keys), value) end)
end
defp carry_over_many(opts, key, keys) do
update_in(opts[key], fn list ->
defaults = Keyword.take(opts, keys)
for {k, v} <- list, do: {k, Keyword.merge(defaults, v)}
end)
end
@doc """
Returns the names of producers.
## Examples
iex> Broadway.producer_names(MyBroadway)
[MyBroadway.Producer_0, MyBroadway.Producer_1, ..., MyBroadway.Producer_7]
"""
@spec producer_names(broadway :: atom()) :: [atom()]
def producer_names(broadway) when is_atom(broadway) do
Topology.producer_names(broadway)
end
@doc """
Returns the topology details for a pipeline.
The stages that have the "concurrency" field indicates a list of
processes running with that name prefix. Each process has "name" as
prefix plus "_" and the index of `0..(concurrency - 1)`, as atom.
For example, a producer named `MyBroadway.Broadway.Producer` with
concurrency of 1 represents only a process named
`MyBroadway.Broadway.Producer_0`.
Note that `Broadway` does not accept multiple producers nor
multiple processors, but we choose to keep them in a list for simplicity
and future-proofing.
## Examples
iex> Broadway.topology(MyBroadway)
[
producers: [%{name: MyBroadway.Broadway.Producer, concurrency: 1}],
processors: [%{name: MyBroadway.Broadway.Processor_default, concurrency: 10}],
batchers: [
%{
batcher_name: MyBroadway.Broadway.Batcher_default,
name: MyBroadway.Broadway.BatchProcessor_default,
concurrency: 5
},
%{
batcher_name: MyBroadway.Broadway.Batcher_s3,
name: MyBroadway.Broadway.BatchProcessor_s3,
concurrency: 3
}
]
]
"""
@spec topology(broadway :: atom()) :: [
{atom(),
[
%{
required(:name) => atom(),
optional(:concurrency) => pos_integer(),
optional(:batcher_name) => atom()
}
]}
]
def topology(broadway) when is_atom(broadway) do
Topology.topology(broadway)
end
@doc """
Sends a list of `Broadway.Message`s to the Broadway pipeline.
The producer is randomly chosen among all sets of producers/stages.
This is used to send out of band data to a Broadway pipeline.
"""
@spec push_messages(broadway :: atom(), messages :: [Message.t()]) :: :ok
def push_messages(broadway, messages) when is_atom(broadway) and is_list(messages) do
broadway
|> producer_names()
|> Enum.random()
|> Topology.ProducerStage.push_messages(messages)
end
@doc """
Sends a test message through the Broadway pipeline.
This is a convenience used for testing. The given data
is automatically wrapped in a `Broadway.Message` with
`Broadway.CallerAcknowledger` configured to send a message
back to the caller once the message has been fully processed.
The message is set to be flushed immediately, without waiting
for the Broadway pipeline `batch_size` to be filled or the
`batch_timeout` to be triggered.
It returns a reference that can be used to identify the ack
messages.
See ["Testing"](#module-testing) section in module documentation
for more information.
## Options
* `:metadata` - optionally a map of additional fields to add to the
message. This can be used, for example, when testing
`BroadwayRabbitMQ.Producer`.
* `:acknowledger` - optionally a function that generates the `ack` fields of
the `Broadway.Message.t()` that is sent. This function should have the following
spec: `(data :: term, {pid, reference()} -> {module, ack_ref :: term,
data :: term})`.
## Examples
For example, in your tests, you may do:
ref = Broadway.test_message(broadway, 1)
assert_receive {:ack, ^ref, [successful], []}
or if you want to override which acknowledger shall be called, you may do:
acknowledger = fn _data, ack_ref -> {MyAck, ack_ref, :ok} end
Broadway.test_message(broadway, 1, acknowledger: acknowledger)
Note that messages sent using this function will ignore demand and the
`:transform` option specified in the `:producer` option in `Broadway.start_link/2`.
"""
@spec test_message(broadway :: atom(), term, opts :: Keyword.t()) :: reference
def test_message(broadway, data, opts \\ []) when is_list(opts) do
test_messages(broadway, [data], :flush, opts)
end
@doc """
Sends a list of data as a batch of messages to the Broadway pipeline.
This is a convenience used for testing. Each message is automatically
wrapped in a `Broadway.Message` with `Broadway.CallerAcknowledger`
configured to send a message back to the caller once all batches
have been fully processed.
If there are more messages in the batch than the pipeline `batch_size`
or if the messages in the batch take more time to process than
`batch_timeout` then the caller will receive multiple messages.
It returns a reference that can be used to identify the ack
messages.
See ["Testing"](#module-testing) section in module documentation
for more information.
## Options
* `:batch_mode` - when set to `:flush`, the batch the message is
in is immediately delivered. When set to `:bulk`, batch is
delivered when its size or timeout is reached. Defaults to `:bulk`.
* `:metadata` - optionally a map of additional fields to add to the
message. This can be used, for example, when testing
`BroadwayRabbitMQ.Producer`.
* `:acknowledger` - optionally a function that generates the `ack` fields of
the `Broadway.Message.t()` that is sent. This function should have the following
spec: `(data :: term, {pid, reference()} -> {module, ack_ref :: term,
data :: term})`. See `test_message/3` for an example.
## Examples
For example, in your tests, you may do:
ref = Broadway.test_batch(broadway, [1, 2, 3])
assert_receive {:ack, ^ref, successful, failed}, 1000
assert length(successful) == 3
assert length(failed) == 0
Note that messages sent using this function will ignore demand and the
`:transform` option specified in the `:producer` option in `Broadway.start_link/2`.
"""
@spec test_batch(broadway :: atom(), data :: [term], opts :: Keyword.t()) :: reference
def test_batch(broadway, batch_data, opts \\ []) when is_list(batch_data) and is_list(opts) do
test_messages(broadway, batch_data, Keyword.get(opts, :batch_mode, :bulk), opts)
end
defp test_messages(broadway, data, batch_mode, opts) do
metadata = Map.new(Keyword.get(opts, :metadata, []))
acknowledger =
Keyword.get(opts, :acknowledger, fn _, ack_ref ->
{Broadway.CallerAcknowledger, ack_ref, :ok}
end)
ref = make_ref()
messages =
Enum.map(data, fn data ->
ack = acknowledger.(data, {self(), ref})
%Message{data: data, acknowledger: ack, batch_mode: batch_mode, metadata: metadata}
end)
:ok = push_messages(broadway, messages)
ref
end
@doc """
Gets the current values used for the producer rate limiting of the given pipeline.
Returns `{:ok, info}` if rate limiting is enabled for the given pipeline or
`{:error, reason}` if the given pipeline doesn't have rate limiting enabled.
The returned info is a map with the following keys:
* `:interval`
* `:allowed_messages`
See the `:rate_limiting` options in the module documentation for more information.
## Examples
Broadway.get_rate_limiting(broadway)
#=> {:ok, %{allowed_messages: 2000, interval: 1000}}
"""
@doc since: "0.6.0"
@spec get_rate_limiting(server :: atom()) ::
{:ok, rate_limiting_info} | {:error, :rate_limiting_not_enabled}
when rate_limiting_info: %{
required(:interval) => non_neg_integer(),
required(:allowed_messages) => non_neg_integer()
}
def get_rate_limiting(broadway) when is_atom(broadway) do
with {:ok, rate_limiter_name} <- Topology.get_rate_limiter(broadway) do
{:ok, Topology.RateLimiter.get_rate_limiting(rate_limiter_name)}
end
end
@doc """
Updates the producer rate limiting of the given pipeline at runtime.
Supports the following options (see the `:rate_limiting` options in the module
documentation for more information):
* `:allowed_messages`
* `:interval`
Returns an `{:error, reason}` tuple if the given `broadway` pipeline doesn't
have rate limiting enabled.
## Examples
Broadway.update_rate_limiting(broadway, allowed_messages: 100)
"""
@doc since: "0.6.0"
@spec update_rate_limiting(server :: atom(), opts :: Keyword.t()) ::
:ok | {:error, :rate_limiting_not_enabled}
def update_rate_limiting(broadway, opts) when is_atom(broadway) and is_list(opts) do
definition = [
allowed_messages: [type: :pos_integer],
interval: [type: :pos_integer]
]
with {:validate_opts, {:ok, opts}} <-
{:validate_opts, NimbleOptions.validate(opts, definition)},
{:get_name, {:ok, rate_limiter_name}} <- {:get_name, Topology.get_rate_limiter(broadway)} do
Topology.RateLimiter.update_rate_limiting(rate_limiter_name, opts)
else
{:validate_opts, {:error, %ValidationError{message: message}}} ->
raise ArgumentError, "invalid options, " <> message
{:get_name, {:error, reason}} ->
{:error, reason}
end
end
end
defmodule PaymentMessenger.Types.Validator do
@moduledoc """
The custom Ecto types validator for ISO-8583
"""
# Guard to check if given value is a valid tag
defguardp is_tag(tag) when byte_size(tag) == 3 or byte_size(tag) == 7
@typep value :: String.t() | integer()
@typep tlv :: {String.t(), pos_integer() | Range.t(), value()}
@typep success :: {:ok, tlv()}
@typep error :: {:error, keyword(String.t())}
@typep result :: success() | error()
@doc """
Casts a TLV tuple, validating its value against the given regex and the tag length.
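For illustration (the tag, length and regex below are made-up values):

    cast({"001", 3, "123"}, ~r/^[0-9]+$/)
    #=> {:ok, "123"}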
"""
@spec cast(tlv(), Regex.t()) :: result()
def cast({tag, tag_length, value}, regex) when is_tag(tag) do
validate_value(value, regex, tag_length)
end
def cast(tuple, _) when is_tuple(tuple) do
{:error, [message: "invalid tuple format"]}
end
def cast(_, _) do
{:error, [message: "isn't tuple"]}
end
@doc """
Load TLV tuple into valid TLV tuple
"""
@spec load(tlv()) :: result()
def load({tag, tag_length, value}) when is_tag(tag) do
{:ok, {tag, length_to_string(value, tag_length), value}}
end
def load(tuple) when is_tuple(tuple) do
{:error, [message: "invalid tuple format"]}
end
def load(_) do
{:error, [message: "isn't tuple"]}
end
@doc """
Dump TLV tuple into valid TLV tuple
"""
@spec dump(tlv()) :: result()
def dump({tag, tag_length, value}) when is_tag(tag) do
{:ok, {tag, length_to_string(value, tag_length), value}}
end
def dump(tuple) when is_tuple(tuple) do
{:error, [message: "invalid tuple format"]}
end
def dump(_) do
{:error, [message: "isn't tuple"]}
end
defp validate_value(value, regex, tag_length) do
string_value = to_string(value)
with {:match, true} <- {:match, String.match?(string_value, regex)},
{:length, true} <- {:length, valid_length?(string_value, tag_length)} do
{:ok, value}
else
{:match, false} ->
{:error, [message: "invalid format"]}
{:length, false} ->
{:error, [message: "invalid size"]}
end
end
defp valid_length?(value, tag_length = _start_size.._end_size) do
String.length(value) in tag_length
end
defp valid_length?(value, tag_length) when is_integer(tag_length) do
String.length(value) == tag_length
end
defp valid_length?(_, _), do: false
defp length_to_string(_value, tag_length) when is_integer(tag_length) do
tag_length
|> to_string()
|> String.pad_leading(3, "0")
end
defp length_to_string(value, _start_size.._end_size) do
value
|> String.length()
|> to_string()
|> String.pad_leading(3, "0")
end
end
defmodule OMG.Watcher.State.Core do
@moduledoc """
The state meant here is the state of the ledger (UTXO set), that determines spendability of coins and forms blocks.
All spend transactions, deposits and exits should sync on this for validity of moving funds.
### Notes on loading of the UTXO set
We experienced long startup times on a large UTXO set, which in some cases caused timeouts and a lethal `OMG.Watcher.State`
restart loop. To mitigate this issue we introduced loading the UTXO set on demand (see GH#1103) instead of a full load
on process startup.
During OMG.Watcher.State startup no UTXOs are fetched from DB, so startup no longer blocks significantly.
Then, during each of the 6 utxo-related operations (see below), the UTXO set is extended with UTXOs from DB to ensure the
operation's behavior hasn't changed.
Transaction processing populates the in-memory UTXO set and once a block is formed the newly created UTXOs are inserted
to DB, but are also kept in the process State. A service restart loses all UTXOs created by processed transactions as well
as mempool transactions, therefore DB content stays block-by-block consistent.
Operations that require full ledger information are:
- utxo_exists?
- exec
- form_block (and `close_block`)
- deposit
- exit_utxos
These operations assume that the passed `OMG.Watcher.State.Core` struct instance contains sufficient UTXO information to proceed.
Therefore the UTXOs that the in-memory state is unaware of are fetched from `OMG.DB` and then merged into the state.
As not every operation updates `OMG.DB` immediately, an additional `recently_spent` collection was added to the in-memory
state to defend against double spends in transactions within the same block.
After a block is formed `OMG.DB` contains full information up to the current block, so the in-memory info about
utxos and spends can be discarded. If the process gets restarted before `form_block` all mempool transactions along with
created and spent utxos are lost and the ledger state basically resets to the previous block.
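As a sketch, a caller that has already fetched the missing UTXOs from `OMG.DB` (the variable names below are
illustrative) would merge them in before executing a transaction:

    state
    |> Core.with_utxos(utxos_fetched_from_db)
    |> Core.exec(recovered_tx, fees)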
"""
defstruct [
:height,
:fee_claimer_address,
:child_block_interval,
utxos: %{},
pending_txs: [],
tx_index: 0,
utxo_db_updates: [],
recently_spent: MapSet.new(),
fees_paid: %{},
fee_claiming_started: false
]
alias OMG.Output
alias OMG.Watcher.Block
alias OMG.Watcher.Crypto
alias OMG.Watcher.Fees
alias OMG.Watcher.State.Core
alias OMG.Watcher.State.Transaction
alias OMG.Watcher.State.Transaction.Validator
alias OMG.Watcher.State.UtxoSet
alias OMG.Watcher.Utxo
require Logger
require Utxo
@type fee_summary_t() :: %{Transaction.Payment.currency() => pos_integer()}
@type t() :: %__MODULE__{
height: non_neg_integer(),
utxos: utxos,
pending_txs: list(Transaction.Recovered.t()),
tx_index: non_neg_integer(),
# NOTE: this list is built in reverse; in some cases it may matter. It is reversed just before
# it leaves this module in `form_block/3`
utxo_db_updates: list(db_update()),
# NOTE: because UTXO set is not loaded from DB entirely, we need to remember the UTXOs spent in already
# processed transaction before they get removed from DB on form_block.
recently_spent: MapSet.t(OMG.Watcher.Utxo.Position.t()),
# Summarizes fees paid by pending transactions that will be formed into current block. Fees will be claimed
# by appending `Transaction.Fee` txs after pending txs in current block.
fees_paid: fee_summary_t(),
# fees can be claimed at the end of the block, no other payments can be processed until next block
fee_claiming_started: boolean(),
fee_claimer_address: Crypto.address_t(),
child_block_interval: non_neg_integer()
}
@type deposit() :: %{
root_chain_txhash: Crypto.hash_t(),
log_index: non_neg_integer(),
blknum: non_neg_integer(),
currency: Crypto.address_t(),
owner: Crypto.address_t(),
amount: pos_integer(),
eth_height: pos_integer()
}
@type exit_t() :: %{utxo_pos: pos_integer()}
@type exit_finalization_t() :: %{utxo_pos: pos_integer()}
@type exiting_utxo_triggers_t() ::
[Utxo.Position.t()]
| [non_neg_integer()]
| [exit_t()]
| [exit_finalization_t()]
| [piggyback()]
| [in_flight_exit()]
@type in_flight_exit() :: %{in_flight_tx: binary()}
@type piggyback() :: %{tx_hash: Transaction.tx_hash(), output_index: non_neg_integer}
@type validities_t() :: {list(Utxo.Position.t()), list(Utxo.Position.t() | piggyback())}
@type utxos() :: %{Utxo.Position.t() => Utxo.t()}
@type db_update ::
{:put, :utxo, {Utxo.Position.db_t(), map()}}
| {:delete, :utxo, Utxo.Position.db_t()}
| {:put, :child_top_block_number, pos_integer()}
| {:put, :block, Block.db_t()}
@type exitable_utxos :: %{
owner: Crypto.address_t(),
currency: Crypto.address_t(),
amount: non_neg_integer(),
blknum: pos_integer(),
txindex: non_neg_integer(),
oindex: non_neg_integer()
}
@doc """
Initializes the state from the values stored in `OMG.DB`
"""
@spec extract_initial_state(
height_query_result :: non_neg_integer() | :not_found,
child_block_interval :: pos_integer(),
fee_claimer_address :: Crypto.address_t()
) :: {:ok, t()} | {:error, :top_block_number_not_found}
def extract_initial_state(height_query_result, child_block_interval, fee_claimer_address)
when is_integer(height_query_result) and is_integer(child_block_interval) do
state = %__MODULE__{
height: height_query_result + child_block_interval,
fee_claimer_address: fee_claimer_address,
child_block_interval: child_block_interval
}
{:ok, state}
end
def extract_initial_state(:not_found, _child_block_interval, _fee_claimer_address) do
{:error, :top_block_number_not_found}
end
@doc """
Tell whether utxo position was created or spent by current state.
"""
@spec utxo_processed?(OMG.Watcher.Utxo.Position.t(), t()) :: boolean()
def utxo_processed?(utxo_pos, %Core{utxos: utxos, recently_spent: recently_spent}) do
Map.has_key?(utxos, utxo_pos) or MapSet.member?(recently_spent, utxo_pos)
end
@doc """
Extends in-memory utxo set with needed utxos loaded from DB
See also: State.init_utxos_from_db/2
"""
@spec with_utxos(t(), utxos()) :: t()
def with_utxos(%Core{utxos: utxos} = state, db_utxos) do
%{state | utxos: UtxoSet.apply_effects(utxos, [], db_utxos)}
end
@doc """
Includes the transaction into the state when valid, rejects otherwise.
NOTE that tx is assumed to have distinct inputs, that should be checked in prior state-less validation
See docs/transaction_validation.md for more information about stateful and stateless validation.
"""
@spec exec(state :: t(), tx :: Transaction.Recovered.t(), fees :: Fees.optional_fee_t()) ::
{:ok, {Transaction.tx_hash(), pos_integer, non_neg_integer}, t()}
| {{:error, Validator.can_process_tx_error()}, t()}
def exec(%Core{} = state, %Transaction.Recovered{} = tx, fees) do
tx_hash = Transaction.raw_txhash(tx)
case Validator.can_process_tx(state, tx, fees) do
{:ok, fees_paid} ->
{:ok, {tx_hash, state.height, state.tx_index},
state
|> apply_tx(tx)
|> add_pending_tx(tx)
|> handle_fees(tx, fees_paid)}
{{:error, _reason}, _state} = error ->
error
end
end
@doc """
Filter user utxos from db response.
It may take a while for a large response from db
"""
@spec standard_exitable_utxos(UtxoSet.query_result_t(), Crypto.address_t()) ::
list(exitable_utxos)
def standard_exitable_utxos(utxos_query_result, address) do
utxos_query_result
|> UtxoSet.init()
|> UtxoSet.filter_owned_by(address)
|> UtxoSet.zip_with_positions()
|> Enum.map(fn {{_, utxo}, position} -> utxo_to_exitable_utxo_map(utxo, position) end)
end
@doc """
- Generates block and calculates it's root hash for submission
- generates requests to the persistence layer for a block
- processes pending txs gathered, updates height etc
- clears `recently_spent` collection
"""
@spec form_block(state :: t()) :: {:ok, {Block.t(), [db_update]}, new_state :: t()}
def form_block(state) do
txs = Enum.reverse(state.pending_txs)
block = Block.hashed_txs_at(txs, state.height)
db_updates_block = {:put, :block, Block.to_db_value(block)}
db_updates_top_block_number = {:put, :child_top_block_number, state.height}
db_updates = [db_updates_top_block_number, db_updates_block | state.utxo_db_updates] |> Enum.reverse()
new_state = %Core{
state
| tx_index: 0,
height: state.height + state.child_block_interval,
pending_txs: [],
utxo_db_updates: [],
recently_spent: MapSet.new(),
fees_paid: %{},
fee_claiming_started: false
}
_ = :telemetry.execute([:block_transactions, __MODULE__], %{txs: txs}, %{})
{:ok, {block, db_updates}, new_state}
end
@doc """
Processes a deposit event, introducing a UTXO into the ledger's state. From then on it is spendable on the child chain
**NOTE** this expects that each deposit event is fed to here exactly once, so this must be ensured elsewhere.
There's no double-checking of this constraint done here.
"""
@spec deposit(deposits :: [deposit()], state :: t()) :: {:ok, [db_update], new_state :: t()}
def deposit(deposits, %Core{utxos: utxos} = state) do
new_utxos_map = Enum.into(deposits, %{}, &deposit_to_utxo/1)
new_utxos = UtxoSet.apply_effects(utxos, [], new_utxos_map)
db_updates = UtxoSet.db_updates([], new_utxos_map)
_ = if deposits != [], do: Logger.info("Recognized deposits #{inspect(deposits)}")
new_state = %Core{state | utxos: new_utxos}
{:ok, db_updates, new_state}
end
@doc """
Retrieves exitable utxo positions from variety of exit events. Accepts either
- a list of utxo positions (decoded)
- a list of utxo positions (encoded)
- a list of full exit infos containing the utxo positions
- a list of full exit events (from ethereum listeners) containing the utxo positions
- a list of IFE started events
- a list of IFE input/output piggybacked events
NOTE: It is done like this to accommodate different clients of this function as they can either be
bare `EthereumEventListener` or `ExitProcessor`. Hence different forms it can get the exiting utxos delivered
"""
@spec extract_exiting_utxo_positions(exiting_utxo_triggers_t(), t()) :: list(Utxo.Position.t())
def extract_exiting_utxo_positions(exit_infos, state)
def extract_exiting_utxo_positions([], %Core{}), do: []
# list of full exit infos (from events) containing the utxo positions
def extract_exiting_utxo_positions([%{utxo_pos: _} | _] = utxo_position_events, state),
do: utxo_position_events |> Enum.map(& &1.utxo_pos) |> extract_exiting_utxo_positions(state)
# list of full exit events (from ethereum listeners)
def extract_exiting_utxo_positions([%{call_data: %{utxo_pos: _}} | _] = utxo_position_events, state),
do: utxo_position_events |> Enum.map(& &1.call_data) |> extract_exiting_utxo_positions(state)
# list of utxo positions (encoded)
def extract_exiting_utxo_positions([encoded_utxo_pos | _] = encoded_utxo_positions, %Core{})
when is_integer(encoded_utxo_pos),
do: Enum.map(encoded_utxo_positions, &Utxo.Position.decode!/1)
# list of IFE started events
def extract_exiting_utxo_positions([%{call_data: %{in_flight_tx: _}} | _] = start_ife_events, %Core{}) do
_ = Logger.info("Recognized exits from IFE starts #{inspect(start_ife_events)}")
Enum.flat_map(start_ife_events, fn %{call_data: %{in_flight_tx: tx_bytes}} ->
{:ok, tx} = Transaction.decode(tx_bytes)
Transaction.get_inputs(tx)
end)
end
# list of IFE input piggybacked events (they're ignored)
def extract_exiting_utxo_positions(
[%{tx_hash: _, omg_data: %{piggyback_type: :input}} | _] = piggyback_events,
%Core{}
) do
_ = Logger.info("Ignoring input piggybacks #{inspect(piggyback_events)}")
[]
end
# list of IFE output piggybacked events. This is used by the child chain only. `OMG.Watcher.ExitProcessor` figures out
# the utxo positions to exit on its own
def extract_exiting_utxo_positions(
[%{tx_hash: _, omg_data: %{piggyback_type: :output}} | _] = piggyback_events,
%Core{} = state
) do
_ = Logger.info("Recognized exits from piggybacks #{inspect(piggyback_events)}")
piggyback_events
|> Enum.map(&find_utxo_matching_piggyback(&1, state))
|> Enum.filter(fn utxo -> utxo != nil end)
|> Enum.map(fn {position, _} -> position end)
end
# list of utxo positions (decoded)
def extract_exiting_utxo_positions([Utxo.position(_, _, _) | _] = utxo_positions, %Core{}), do: utxo_positions
@doc """
Spends exited utxos.
Note: state passed here is already extended with DB.
"""
@spec exit_utxos(exiting_utxos :: list(Utxo.Position.t()), state :: t()) ::
{:ok, {[db_update], validities_t()}, new_state :: t()}
def exit_utxos([], %Core{} = state), do: {:ok, {[], {[], []}}, state}
def exit_utxos(
[Utxo.position(_, _, _) | _] = exiting_utxos,
%Core{utxos: utxos, recently_spent: recently_spent} = state
) do
_ = Logger.info("Recognized exits #{inspect(exiting_utxos)}")
{valid, _invalid} = validities = Enum.split_with(exiting_utxos, &utxo_exists?(&1, state))
new_utxos = UtxoSet.apply_effects(utxos, valid, %{})
new_spends = MapSet.union(recently_spent, MapSet.new(valid))
db_updates = UtxoSet.db_updates(valid, %{})
new_state = %{state | utxos: new_utxos, recently_spent: new_spends}
{:ok, {db_updates, validities}, new_state}
end
@doc """
Checks whether utxo exists in UTXO set.
Note: state passed here is already extended with DB.
"""
@spec utxo_exists?(Utxo.Position.t(), t()) :: boolean()
def utxo_exists?(Utxo.position(_blknum, _txindex, _oindex) = utxo_pos, %Core{utxos: utxos}) do
UtxoSet.exists?(utxos, utxo_pos)
end
@doc """
Gets the current block's height and whether at the beginning of the block
"""
@spec get_status(t()) :: {current_block_height :: non_neg_integer(), is_block_beginning :: boolean()}
def get_status(%__MODULE__{height: height, tx_index: tx_index, pending_txs: pending}) do
is_beginning = tx_index == 0 && Enum.empty?(pending)
{height, is_beginning}
end
defp add_pending_tx(%Core{pending_txs: pending_txs, tx_index: tx_index} = state, %Transaction.Recovered{} = new_tx) do
_ = :telemetry.execute([:pending_transactions, __MODULE__], %{new_tx: new_tx}, %{})
%Core{
state
| tx_index: tx_index + 1,
pending_txs: [new_tx | pending_txs]
}
end
defp apply_tx(
%Core{
height: blknum,
tx_index: tx_index,
utxos: utxos,
recently_spent: recently_spent,
utxo_db_updates: db_updates
} = state,
%Transaction.Recovered{signed_tx: %{raw_tx: tx}}
) do
{spent_input_pointers, new_utxos_map} = get_effects(tx, blknum, tx_index)
new_utxos = UtxoSet.apply_effects(utxos, spent_input_pointers, new_utxos_map)
new_db_updates = UtxoSet.db_updates(spent_input_pointers, new_utxos_map)
# NOTE: child chain mode don't need 'spend' data for now. Consider to add only in Watcher's modes - OMG-382
spent_blknum_updates = Enum.map(spent_input_pointers, &{:put, :spend, {Utxo.Position.to_input_db_key(&1), blknum}})
%Core{
state
| utxos: new_utxos,
recently_spent: MapSet.union(recently_spent, MapSet.new(spent_input_pointers)),
utxo_db_updates: new_db_updates ++ spent_blknum_updates ++ db_updates
}
end
# Post-processing step of transaction execution. It either claims (flushes) the collected fees for a
# `Transaction.Fee` tx or collects the fees paid by all other txs.
@spec handle_fees(state :: t(), Transaction.Recovered.t(), map()) :: t()
defp handle_fees(state, %Transaction.Recovered{signed_tx: %{raw_tx: %Transaction.Fee{}}} = tx, _fees_paid) do
[output] = Transaction.get_outputs(tx)
state
|> flush_collected_fees_for_token(output)
|> disallow_payments()
end
defp handle_fees(state, _tx, fees_paid) do
collect_fees(state, fees_paid)
end
# attempts to build a standard response data about a single UTXO, based on an abstract `output` structure
# so that the data can be useful to discover exitable UTXOs
defp utxo_to_exitable_utxo_map(%Utxo{output: %{output_type: otype} = output}, Utxo.position(blknum, txindex, oindex)) do
output
|> Map.from_struct()
|> Map.take([:owner, :currency, :amount])
|> Map.put(:otype, otype)
|> Map.put(:blknum, blknum)
|> Map.put(:txindex, txindex)
|> Map.put(:oindex, oindex)
end
defp collect_fees(%Core{fees_paid: fees_paid} = state, token_surpluses) do
fees_paid_with_new =
token_surpluses
|> Enum.reject(fn {_token, amount} -> amount == 0 end)
|> Map.new()
|> Map.merge(fees_paid, fn _token, collected, tx_surplus -> collected + tx_surplus end)
%Core{state | fees_paid: fees_paid_with_new}
end
defp disallow_payments(state), do: %Core{state | fee_claiming_started: true}
defp flush_collected_fees_for_token(state, %Output{currency: token}) do
%Core{state | fees_paid: Map.delete(state.fees_paid, token)}
end
# Effects of a payment transaction - spends all inputs and creates all outputs
# Relies on the polymorphic `get_inputs` and `get_outputs` of `Transaction`
defp get_effects(tx, blknum, tx_index) do
{Transaction.get_inputs(tx), utxos_from(tx, blknum, tx_index)}
end
defp utxos_from(tx, blknum, tx_index) do
hash = Transaction.raw_txhash(tx)
tx
|> Transaction.get_outputs()
|> Enum.with_index()
|> Enum.map(fn {output, oindex} ->
{Utxo.position(blknum, tx_index, oindex), output}
end)
|> Enum.into(%{}, fn {input_pointer, output} ->
{input_pointer, %Utxo{output: output, creating_txhash: hash}}
end)
end
defp deposit_to_utxo(deposit) do
%{blknum: blknum, currency: cur, owner: owner, amount: amount} = deposit
Transaction.Payment.new([], [{owner, cur, amount}])
|> utxos_from(blknum, 0)
|> Enum.at(0)
end
# We're looking for a UTXO that a piggyback of an in-flight IFE is referencing.
# This is useful when trying to do something with the outputs that are piggybacked (like exit them), without their
# position.
# Only relevant for output piggybacks
defp find_utxo_matching_piggyback(piggyback_events, state) do
%{omg_data: %{piggyback_type: :output}, tx_hash: tx_hash, output_index: oindex} = piggyback_events
UtxoSet.find_matching_utxo(state.utxos, tx_hash, oindex)
end
end
defmodule Logger do
@moduledoc ~S"""
A logger for Elixir applications.
It includes many features:
* Provides debug, info, warn, and error levels.
* Supports multiple backends which are automatically
supervised when plugged into `Logger`.
* Formats and truncates messages on the client
to avoid clogging `Logger` backends.
* Alternates between sync and async modes to remain
performant when required but also apply backpressure
when under stress.
* Wraps OTP's [`:error_logger`](http://erlang.org/doc/man/error_logger.html)
to prevent it from overflowing.
Logging is useful for tracking when an event of interest happens in your
system. For example, it may be helpful to log whenever a user is deleted.
def delete_user(user) do
Logger.info fn ->
"Deleting user from the system: #{inspect(user)}"
end
# ...
end
The `Logger.info/2` macro emits the provided message at the `:info`
level. There are additional macros for other levels. Notice the argument
passed to `Logger.info/2` in the above example is a zero argument function.
The `Logger` macros also accept messages as strings, but keep in mind that
strings are **always** evaluated regardless of log-level. As such, it is
recommended to use a function whenever the message is expensive to compute.
Another option that does not depend on the message type is to purge the log
calls at compile-time using the `:compile_time_purge_level` option (see
below).
## Levels
The supported levels are:
* `:debug` - for debug-related messages
* `:info` - for information of any kind
* `:warn` - for warnings
* `:error` - for errors
## Configuration
`Logger` supports a wide range of configurations.
This configuration is split in three categories:
* Application configuration - must be set before the `:logger`
application is started
* Runtime configuration - can be set before the `:logger`
application is started, but may be changed during runtime
* Error logger configuration - configuration for the
wrapper around OTP's [`:error_logger`](http://erlang.org/doc/man/error_logger.html)
### Application configuration
The following configuration must be set via config files (such as
`config/config.exs`) before the `:logger` application is started.
* `:backends` - the backends to be used. Defaults to `[:console]`.
See the "Backends" section for more information.
* `:compile_time_purge_level` - purges *at compilation time* all calls that
have log level lower than the value of this option. This means that
`Logger` calls with level lower than this option will be completely
removed at compile time, accruing no overhead at runtime. Defaults to
`:debug` and only applies to the `Logger.debug/2`, `Logger.info/2`,
`Logger.warn/2`, and `Logger.error/2` macros (for example, it doesn't apply to
`Logger.log/3`). Note that arguments passed to `Logger` calls that are
removed from the AST at compilation time are never evaluated, thus any
function call that occurs in these arguments is never executed. As a
consequence, avoid code that looks like `Logger.debug("Cleanup:
#{perform_cleanup()}")` as in the example `perform_cleanup/0` won't be
executed if the `:compile_time_purge_level` is `:info` or higher.
* `:compile_time_application` - sets the `:application` metadata value
to the configured value at compilation time. This configuration is
usually only useful for build tools to automatically add the
application to the metadata for `Logger.debug/2`, `Logger.info/2`, etc.
style of calls.
For example, to configure the `:backends` and `compile_time_purge_level`
options in a `config/config.exs` file:
config :logger,
backends: [:console],
compile_time_purge_level: :info
### Runtime Configuration
All configuration below can be set via config files (such as
`config/config.exs`) but also changed dynamically during runtime via
`Logger.configure/1`.
* `:level` - the logging level. Attempting to log any message
with severity less than the configured level will simply
cause the message to be ignored. Keep in mind that each backend
may have its specific level, too. Note that, unlike what happens with the
`:compile_time_purge_level` option, the argument passed to `Logger` calls
is evaluated even if the level of the call is lower than
`:level`. For this reason, messages that are expensive to
compute should be wrapped in 0-arity anonymous functions that are
evaluated only when the `:level` option demands it.
* `:utc_log` - when `true`, uses UTC in logs. By default it uses
local time (i.e., it defaults to `false`).
* `:truncate` - the maximum message size to be logged (in bytes). Defaults
to 8192 bytes. Note this configuration is approximate. Truncated messages
will have `" (truncated)"` at the end. The atom `:infinity` can be passed
to disable this behavior.
* `:sync_threshold` - if the `Logger` manager has more than
`:sync_threshold` messages in its queue, `Logger` will change
to *sync mode*, to apply backpressure to the clients.
`Logger` will return to *async mode* once the number of messages
in the queue is reduced to `sync_threshold * 0.75` messages.
Defaults to 20 messages. `:sync_threshold` can be set to `0` to force *sync mode*.
* `:discard_threshold` - if the `Logger` manager has more than
`:discard_threshold` messages in its queue, `Logger` will change
to *discard mode* and messages will be discarded directly in the
clients. `Logger` will return to *sync mode* once the number of
messages in the queue is reduced to `discard_threshold * 0.75`
messages. Defaults to 500 messages.
* `:translator_inspect_opts` - when translating OTP reports and
errors, the last message and state must be inspected in the
error reports. This configuration allow developers to change
how much and how the data should be inspected.
For example, to configure the `:level` and `:truncate` options in a
`config/config.exs` file:
config :logger,
level: :warn,
truncate: 4096
### Error logger configuration
The following configuration applies to `Logger`'s wrapper around
OTP's [`:error_logger`](http://erlang.org/doc/man/error_logger.html).
All the configurations below must be set before the `:logger` application starts.
* `:handle_otp_reports` - redirects OTP reports to `Logger` so
they are formatted in Elixir terms. This uninstalls OTP's
logger that prints terms to terminal. Defaults to `true`.
* `:handle_sasl_reports` - redirects supervisor, crash and
progress reports to `Logger` so they are formatted in Elixir
terms. Your application must guarantee `:sasl` is started before
`:logger`. This means you may see some initial reports written
in Erlang syntax until the Logger application kicks in and
uninstalls SASL's logger in favor of its own. Defaults to `false`.
* `:discard_threshold_for_error_logger` - if `:error_logger` has more than
`discard_threshold` messages in its inbox, messages will be dropped
until the message queue goes down to `discard_threshold * 0.75`
entries. The threshold will be checked once again after 10% of threshold
messages are processed, to avoid messages from being constantly dropped.
For example, if the threshold is 500 (the default) and the inbox has
600 messages, 250 messages will be dropped, bringing the inbox down to
350 (0.75 * threshold) entries and 50 (0.1 * threshold) messages will
be processed before the threshold is checked once again.
For example, to configure `Logger` to redirect all
[`:error_logger`](http://erlang.org/doc/man/error_logger.html) messages
using a `config/config.exs` file:
config :logger,
handle_otp_reports: true,
handle_sasl_reports: true
Furthermore, `Logger` allows messages sent by OTP's `:error_logger`
to be translated into an Elixir format via translators. Translators
can be dynamically added at any time with the `add_translator/1`
and `remove_translator/1` APIs. Check `Logger.Translator` for more
information.
## Backends
`Logger` supports different backends where log messages are written to.
The available backends by default are:
* `:console` - logs messages to the console (enabled by default)
Developers may also implement their own backends, an option that
is explored in more detail below.
The initial backends are loaded via the `:backends` configuration,
which must be set before the `:logger` application is started.
### Console backend
The console backend logs messages by printing them to the console. It supports
the following options:
* `:level` - the level to be logged by this backend.
Note that messages are filtered by the general
`:level` configuration for the `:logger` application first.
* `:format` - the format message used to print logs.
Defaults to: `"\n$time $metadata[$level] $levelpad$message\n"`.
It may also be a `{module, function}` tuple that is invoked
with the log level, the message, the current timestamp and
the metadata.
* `:metadata` - the metadata to be printed by `$metadata`.
Defaults to an empty list (no metadata).
Setting `:metadata` to `:all` prints all metadata.
* `:colors` - a keyword list of coloring options.
* `:device` - the device to log error messages to. Defaults to
`:user` but can be changed to something else such as `:standard_error`.
* `:max_buffer` - maximum events to buffer while waiting
for a confirmation from the IO device (default: 32).
Once the buffer is full, the backend will block until
a confirmation is received.
In addition to the keys provided by the user via `Logger.metadata/1`,
the following extra keys are available to the `:metadata` list:
* `:application` - the current application
* `:module` - the current module
* `:function` - the current function
* `:file` - the current file
* `:line` - the current line
* `:pid` - the current process ID
The supported keys in the `:colors` keyword list are:
* `:enabled` - boolean value that allows for switching the
coloring on and off. Defaults to: `IO.ANSI.enabled?/0`
* `:debug` - color for debug messages. Defaults to: `:cyan`
* `:info` - color for info messages. Defaults to: `:normal`
* `:warn` - color for warn messages. Defaults to: `:yellow`
* `:error` - color for error messages. Defaults to: `:red`
See the `IO.ANSI` module for a list of colors and attributes.
Here is an example of how to configure the `:console` backend in a
`config/config.exs` file:
config :logger, :console,
format: "\n$time $metadata[$level] $levelpad$message\n",
metadata: [:user_id]
### Custom formatting
The console backend allows you to customize the format of your log messages
with the `:format` option.
You may set `:format` to either a string or a `{module, function}` tuple if
you wish to provide your own format function. The `{module, function}` will be
invoked with the log level, the message, the current timestamp and the
metadata.
Here is an example of how to configure the `:console` backend in a
`config/config.exs` file:
config :logger, :console,
format: {MyConsoleLogger, :format}
And here is an example of how you can define `MyConsoleLogger.format/4` from the
above configuration:
defmodule MyConsoleLogger do
def format(level, message, timestamp, metadata) do
# Custom formatting logic...
end
end
It is extremely important that **the formatting function does not fail**, as
it will bring that particular logger instance down, causing your system to
temporarily lose messages. If necessary, wrap the function in a "rescue" and
log a default message instead:
defmodule MyConsoleLogger do
def format(level, message, timestamp, metadata) do
# Custom formatting logic...
rescue
_ -> "could not format: #{inspect {level, message, metadata}}"
end
end
You can read more about formatting in `Logger.Formatter`.
### Custom backends
Any developer can create their own `Logger` backend.
Since `Logger` is an event manager powered by `:gen_event`,
writing a new backend is a matter of creating an event
handler, as described in the [`:gen_event`](http://erlang.org/doc/man/gen_event.html)
documentation.
From now on, we will be using the term "event handler" to refer
to your custom backend, as we head into implementation details.
Once the `:logger` application starts, it installs all event handlers listed under
the `:backends` configuration into the `Logger` event manager. The event
manager and all added event handlers are automatically supervised by `Logger`.
Once initialized, the handler should be designed to handle events
in the following format:
{level, group_leader, {Logger, message, timestamp, metadata}} | :flush
where:
* `level` is one of `:debug`, `:info`, `:warn`, or `:error`, as previously
described
* `group_leader` is the group leader of the process which logged the message
* `{Logger, message, timestamp, metadata}` is a tuple containing information
about the logged message:
* the first element is always the atom `Logger`
* `message` is the actual message (as chardata)
* `timestamp` is the timestamp for when the message was logged, as a
`{{year, month, day}, {hour, minute, second, millisecond}}` tuple
* `metadata` is a keyword list of metadata used when logging the message
It is recommended that handlers ignore messages where
the group leader is in a different node than the one where
the handler is installed. For example:
def handle_event({_level, gl, {Logger, _, _, _}}, state)
when node(gl) != node() do
{:ok, state}
end
In the case of the event `:flush` handlers should flush any pending data. This
event is triggered by `flush/0`.
Furthermore, backends can be configured via the
`configure_backend/2` function which requires event handlers
to handle calls of the following format:
{:configure, options}
where `options` is a keyword list. The result of the call is
the result returned by `configure_backend/2`. The recommended
return value for successful configuration is `:ok`.
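For illustration, a sketch of such a call handler (assuming the backend keeps
its options in a map as its state):

      def handle_call({:configure, options}, state) do
        # Merge the new options into the backend state and reply :ok
        {:ok, :ok, Enum.into(options, state)}
      end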
It is recommended that backends support at least the following
configuration options:
* `:level` - the logging level for that backend
* `:format` - the logging format for that backend
* `:metadata` - the metadata to include in that backend
Check the implementation for `Logger.Backends.Console`, for
examples on how to handle the recommendations in this section
and how to process the existing options.
"""
@type backend :: :gen_event.handler()
@type message :: IO.chardata() | String.Chars.t()
@type level :: :error | :info | :warn | :debug
@type metadata :: keyword(String.Chars.t())
@levels [:error, :info, :warn, :debug]
@metadata :logger_metadata
@compile {:inline, __metadata__: 0}
defp __metadata__ do
Process.get(@metadata) || {true, []}
end
@doc """
Alters the current process metadata according to the given keyword list.
This function will merge the given keyword list into the existing metadata,
with the exception of setting a key to `nil`, which will remove that key
from the metadata.
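For example:

    Logger.metadata(user_id: 13)
    Logger.metadata(user_id: nil) # removes :user_id from the metadata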
"""
@spec metadata(metadata) :: :ok
def metadata(keyword) do
{enabled?, metadata} = __metadata__()
metadata =
Enum.reduce(keyword, metadata, fn
{key, nil}, acc -> Keyword.delete(acc, key)
{key, val}, acc -> Keyword.put(acc, key, val)
end)
Process.put(@metadata, {enabled?, metadata})
:ok
end
@doc """
Reads the current process metadata.
"""
@spec metadata() :: metadata
def metadata() do
__metadata__() |> elem(1)
end
@doc """
Resets the current process metadata to the given keyword list.
"""
@spec reset_metadata(metadata) :: :ok
def reset_metadata(keywords \\ []) do
{enabled?, _metadata} = __metadata__()
Process.put(@metadata, {enabled?, []})
metadata(keywords)
end
@doc """
Enables logging for the current process.
Currently the only accepted PID is `self()`.
"""
@spec enable(pid) :: :ok
def enable(pid) when pid == self() do
Process.put(@metadata, {true, metadata()})
:ok
end
@doc """
Disables logging for the current process.
Currently the only accepted PID is `self()`.
"""
@spec disable(pid) :: :ok
def disable(pid) when pid == self() do
Process.put(@metadata, {false, metadata()})
:ok
end
@doc """
Retrieves the `Logger` level.
The `Logger` level can be changed via `configure/1`.
"""
@spec level() :: level
def level() do
%{level: level} = Logger.Config.__data__()
level
end
@doc """
Compares log levels.
Receives two log levels and compares the `left` level
against the `right` level and returns
* `:lt` if `left` is less than `right`
* `:eq` if `left` and `right` are equal
* `:gt` if `left` is greater than `right`
## Examples
iex> Logger.compare_levels(:debug, :warn)
:lt
iex> Logger.compare_levels(:error, :info)
:gt
"""
@spec compare_levels(level, level) :: :lt | :eq | :gt
def compare_levels(level, level) do
:eq
end
def compare_levels(left, right) do
if level_to_number(left) > level_to_number(right), do: :gt, else: :lt
end
defp level_to_number(:debug), do: 0
defp level_to_number(:info), do: 1
defp level_to_number(:warn), do: 2
defp level_to_number(:error), do: 3
@doc """
Configures the logger.
See the "Runtime Configuration" section in the `Logger` module
documentation for the available options.
"""
@valid_options [
:compile_time_purge_level,
:compile_time_application,
:sync_threshold,
:truncate,
:level,
:utc_log
]
@spec configure(keyword) :: :ok
def configure(options) do
Logger.Config.configure(Keyword.take(options, @valid_options))
end
@doc """
Flushes the logger.
This guarantees all messages sent to `Logger` prior to this call will
be processed. This is useful for testing and it should not be called
in production code.
"""
@spec flush :: :ok
def flush do
_ = :gen_event.which_handlers(:error_logger)
:gen_event.sync_notify(Logger, :flush)
end
@doc """
Adds a new backend.
## Options
* `:flush` - when `true`, guarantees all messages currently sent
to both Logger and OTP's [`:error_logger`](http://erlang.org/doc/man/error_logger.html)
are processed before the backend is added
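## Examples
Logger.add_backend(:console)
With a custom backend module (`MyApp.CustomBackend` is a hypothetical example):
Logger.add_backend(MyApp.CustomBackend, flush: true)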
"""
@spec add_backend(atom, keyword) :: Supervisor.on_start_child()
def add_backend(backend, opts \\ []) do
_ = if opts[:flush], do: flush()
case Logger.WatcherSupervisor.watch(Logger, Logger.Config.translate_backend(backend), backend) do
{:ok, _} = ok ->
Logger.Config.add_backend(backend)
ok
{:error, {:already_started, _pid}} ->
{:error, :already_present}
{:error, _} = error ->
error
end
end
@doc """
Removes a backend.
## Options
* `:flush` - when `true`, guarantees all messages currently sent
to both Logger and OTP's [`:error_logger`](http://erlang.org/doc/man/error_logger.html)
are processed before the backend is removed
"""
@spec remove_backend(atom, keyword) :: :ok | {:error, term}
def remove_backend(backend, opts \\ []) do
_ = if opts[:flush], do: flush()
Logger.Config.remove_backend(backend)
Logger.WatcherSupervisor.unwatch(Logger, Logger.Config.translate_backend(backend))
end
@doc """
Adds a new translator.
"""
@spec add_translator({module, function :: atom}) :: :ok
def add_translator({mod, fun} = translator) when is_atom(mod) and is_atom(fun) do
Logger.Config.add_translator(translator)
end
@doc """
Removes a translator.
"""
@spec remove_translator({module, function :: atom}) :: :ok
def remove_translator({mod, fun} = translator) when is_atom(mod) and is_atom(fun) do
Logger.Config.remove_translator(translator)
end
@doc """
Configures the given backend.
The backend needs to be started and running in order to
be configured at runtime.
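## Examples
For example, to reconfigure the built-in console backend at runtime:
Logger.configure_backend(:console, level: :warn, metadata: [:request_id])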
"""
@spec configure_backend(backend, keyword) :: term
def configure_backend(backend, options) when is_list(options) do
:gen_event.call(Logger, Logger.Config.translate_backend(backend), {:configure, options})
end
@doc """
Logs a message dynamically.
Use this function only when there is a need to
explicitly avoid embedding metadata.
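## Examples
Logger.bare_log(:info, "hello from a dynamically built message")
Logger.bare_log(:error, fn -> {"expensive to calculate error", [additional: :metadata]} end)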
"""
@spec bare_log(level, message | (() -> message | {message, keyword}), keyword) ::
:ok | {:error, :noproc} | {:error, term}
def bare_log(level, chardata_or_fun, metadata \\ [])
when level in @levels and is_list(metadata) do
case __metadata__() do
{true, pdict} ->
%{mode: mode, truncate: truncate, level: min_level, utc_log: utc_log?} =
Logger.Config.__data__()
if compare_levels(level, min_level) != :lt and mode != :discard do
metadata = [pid: self()] ++ Keyword.merge(pdict, metadata)
{message, metadata} = normalize_message(chardata_or_fun, metadata)
truncated = truncate(message, truncate)
tuple = {Logger, truncated, Logger.Utils.timestamp(utc_log?), metadata}
try do
notify(mode, {level, Process.group_leader(), tuple})
:ok
rescue
ArgumentError -> {:error, :noproc}
catch
:exit, reason -> {:error, reason}
end
else
:ok
end
{false, _} ->
:ok
end
end
@doc """
Logs a warning message.
Returns `:ok` or an `{:error, reason}` tuple.
## Examples
Logger.warn "knob turned too far to the right"
Logger.warn fn -> "expensive to calculate warning" end
Logger.warn fn -> {"expensive to calculate warning", [additional: :metadata]} end
"""
defmacro warn(chardata_or_fun, metadata \\ []) do
maybe_log(:warn, chardata_or_fun, metadata, __CALLER__)
end
@doc """
Logs an info message.
Returns `:ok` or an `{:error, reason}` tuple.
## Examples
Logger.info "mission accomplished"
Logger.info fn -> "expensive to calculate info" end
Logger.info fn -> {"expensive to calculate info", [additional: :metadata]} end
"""
defmacro info(chardata_or_fun, metadata \\ []) do
maybe_log(:info, chardata_or_fun, metadata, __CALLER__)
end
@doc """
Logs an error message.
Returns `:ok` or an `{:error, reason}` tuple.
## Examples
Logger.error "oops"
Logger.error fn -> "expensive to calculate error" end
Logger.error fn -> {"expensive to calculate error", [additional: :metadata]} end
"""
defmacro error(chardata_or_fun, metadata \\ []) do
maybe_log(:error, chardata_or_fun, metadata, __CALLER__)
end
@doc """
Logs a debug message.
Returns `:ok` or an `{:error, reason}` tuple.
## Examples
Logger.debug "hello?"
Logger.debug fn -> "expensive to calculate debug" end
Logger.debug fn -> {"expensive to calculate debug", [additional: :metadata]} end
"""
defmacro debug(chardata_or_fun, metadata \\ []) do
maybe_log(:debug, chardata_or_fun, metadata, __CALLER__)
end
@doc """
Logs a message with the given `level`.
Returns `:ok` or an `{:error, reason}` tuple.
The macros `debug/2`, `warn/2`, `info/2`, and `error/2` are
preferred over this macro as they can automatically eliminate
the call to `Logger` altogether at compile time if desired
(see the documentation for the `Logger` module).
"""
defmacro log(level, chardata_or_fun, metadata \\ []) do
macro_log(level, chardata_or_fun, metadata, __CALLER__)
end
defp macro_log(level, data, metadata, caller) do
%{module: module, function: fun, file: file, line: line} = caller
caller =
compile_time_application_and_file(file) ++
[module: module, function: form_fa(fun), line: line]
metadata =
if Keyword.keyword?(metadata) do
Keyword.merge(caller, metadata)
else
quote do
Keyword.merge(unquote(caller), unquote(metadata))
end
end
quote do
Logger.bare_log(unquote(level), unquote(data), unquote(metadata))
end
end
defp compile_time_application_and_file(file) do
if app = Application.get_env(:logger, :compile_time_application) do
[application: app, file: Path.relative_to_cwd(file)]
else
[file: file]
end
end
defp maybe_log(level, data, metadata, caller) do
min_level = Application.get_env(:logger, :compile_time_purge_level, :debug)
if compare_levels(level, min_level) != :lt do
macro_log(level, data, metadata, caller)
else
# We wrap the contents in an anonymous function
# to avoid unused variable warnings.
quote do
_ = fn -> {unquote(data), unquote(metadata)} end
:ok
end
end
end
defp normalize_message(fun, metadata) when is_function(fun, 0) do
normalize_message(fun.(), metadata)
end
defp normalize_message({message, fun_metadata}, metadata) when is_list(fun_metadata) do
{message, Keyword.merge(metadata, fun_metadata)}
end
defp normalize_message(message, metadata), do: {message, metadata}
defp truncate(data, n) when is_list(data) or is_binary(data), do: Logger.Utils.truncate(data, n)
defp truncate(data, n), do: Logger.Utils.truncate(to_string(data), n)
defp form_fa({name, arity}) do
Atom.to_string(name) <> "/" <> Integer.to_string(arity)
end
defp form_fa(nil), do: nil
defp notify(:sync, msg), do: :gen_event.sync_notify(Logger, msg)
defp notify(:async, msg), do: :gen_event.notify(Logger, msg)
end
|
lib/logger/lib/logger.ex
| 0.907776 | 0.569942 |
logger.ex
|
starcoder
|
defmodule Membrane.FramerateConverter do
@moduledoc """
The element converts video to a target constant frame rate by dropping and duplicating frames as necessary.
The input video may have a constant or variable frame rate.
The element expects each frame to be received in a separate buffer.
Additionally, presentation timestamps must be passed in each buffer's `pts` field.
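A minimal configuration sketch (pipeline wiring is omitted; the exact child-spec syntax depends on the Membrane version in use):
%Membrane.FramerateConverter{framerate: {25, 1}}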
"""
use Bunch
use Membrane.Filter
alias Membrane.Caps.Video.Raw
require Membrane.Logger
def_options framerate: [
spec: {pos_integer(), pos_integer()},
default: {30, 1},
description: """
Target framerate.
"""
]
def_output_pad :output,
caps: {Raw, aligned: true}
def_input_pad :input,
caps: {Raw, aligned: true},
demand_unit: :buffers
@impl true
def handle_init(%__MODULE__{} = options) do
state =
options
|> Map.from_struct()
|> Map.merge(%{
last_buffer: nil,
input_framerate: {0, 1},
target_pts: 0,
exact_target_pts: 0
})
{:ok, state}
end
@impl true
def handle_demand(:output, size, :buffers, _ctx, state) do
{{:ok, demand: {:input, size}}, state}
end
@impl true
def handle_process(
:input,
buffer,
_ctx,
%{last_buffer: nil} = state
) do
state = put_first_buffer(buffer, state)
state = bump_target_pts(state)
{{:ok, buffer: {:output, buffer}, redemand: :output}, state}
end
@impl true
def handle_process(:input, buffer, _ctx, state) do
{buffers, state} = create_new_frames(buffer, state)
{{:ok, [buffer: {:output, buffers}, redemand: :output]}, state}
end
@impl true
def handle_caps(:input, caps, _context, %{framerate: framerate} = state) do
state = %{state | input_framerate: caps.framerate}
{{:ok, caps: {:output, %{caps | framerate: framerate}}}, state}
end
@impl true
def handle_end_of_stream(:input, _ctx, %{input_framerate: {0, _denom}} = state) do
{{:ok, end_of_stream: :output}, state}
end
def handle_end_of_stream(:input, _ctx, %{last_buffer: nil} = state) do
{{:ok, end_of_stream: :output}, state}
end
def handle_end_of_stream(
:input,
_ctx,
%{last_buffer: last_buffer} = state
) do
use Ratio
input_frame_duration = get_frame_duration(state.input_framerate)
output_frame_duration = get_frame_duration(state.framerate)
input_video_duration = last_buffer.pts + input_frame_duration
# calculate last target timestamp so that the output video duration is closest to original:
# ideal last timestamp would be `input_video_duration - output_frame_duration`.
# Target timestamps repeat every output_frame_duration.
# To be the closest to the ideal last timestamp, last target timestamp must fall between
# ideal_last_timestamp - output_frame_duration/2 and ideal_last_timestamp + output_frame_duration/2.
# That means that last timestamp must not be greater than `input_video_duration - output_frame_duration/2`
best_last_timestamp = Ratio.floor(input_video_duration - output_frame_duration / 2)
buffers = fill_to_last_timestamp(best_last_timestamp, state)
{{:ok, [buffer: {:output, buffers}, end_of_stream: :output]}, state}
end
defp get_frame_duration({num, denom}) do
Ratio.new(denom * Membrane.Time.second(), num)
end
defp fill_to_last_timestamp(last_timestamp, state, buffers \\ []) do
if state.target_pts > last_timestamp do
Enum.reverse(buffers)
else
new_buffer = %{state.last_buffer | pts: state.target_pts}
state = bump_target_pts(state)
fill_to_last_timestamp(last_timestamp, state, [new_buffer | buffers])
end
end
defp put_first_buffer(first_buffer, state) do
%{
state
| target_pts: first_buffer.pts,
exact_target_pts: first_buffer.pts,
last_buffer: first_buffer
}
end
defp bump_target_pts(%{exact_target_pts: exact_pts, framerate: framerate} = state) do
use Ratio
next_exact_pts = exact_pts + get_frame_duration(framerate)
next_target_pts = Ratio.floor(next_exact_pts)
%{state | target_pts: next_target_pts, exact_target_pts: next_exact_pts}
end
defp create_new_frames(input_buffer, state, buffers \\ []) do
if state.target_pts > input_buffer.pts do
state = %{state | last_buffer: input_buffer}
{Enum.reverse(buffers), state}
else
last_buffer = state.last_buffer
dist_right = input_buffer.pts - state.target_pts
dist_left = state.target_pts - last_buffer.pts
new_buffer =
if dist_left <= dist_right do
%{last_buffer | pts: state.target_pts}
else
%{input_buffer | pts: state.target_pts}
end
state = bump_target_pts(state)
create_new_frames(input_buffer, state, [new_buffer | buffers])
end
end
end
|
lib/membrane_framerate_converter.ex
| 0.898664 | 0.556339 |
membrane_framerate_converter.ex
|
starcoder
|
defmodule Priorityqueue do
@moduledoc"""
The priority queue is used by the shortest path algorithm of the `Graph` module. It keeps all the nodes that are to be evaluated and determines which node is to be evaluated next.
As the name suggests, it works as a queue, so the main functions are `push` for adding an entry and `pop` for retrieving the next one.
"""
defstruct entries: %{}
@type key :: atom
@type node_id :: atom
@type path_costs :: non_neg_integer
@type heuristic_costs :: non_neg_integer
@type to_costs :: non_neg_integer
@type t :: %__MODULE__{
entries: %{key => %{pcosts: path_costs, hcosts: heuristic_costs, tcosts: to_costs, from: node_id}}
}
@doc"""
Returns a new struct of `PriorityQueue` with zero entries.
## Example
iex(15)> Priorityqueue.new
%Priorityqueue {entries: %{}}
"""
@spec new :: t
def new, do: %__MODULE__{}
@doc"""
Adds a new entry to an existing priority queue. The entry must contain the path costs to the node, the costs of the latest hop to the node, the heuristic costs of the node and the node from which the added one is reached (needed for reconstructing the path later on).
## Example
iex> pq = Priorityqueue.new |> Priorityqueue.push(:a, %{costs_to: 15, costs_hop: 3, costs_heur: 4, from: :s})
%Priorityqueue {entries: %{a: %{costs_heur: 4, costs_hop: 3, costs_to: 15, from: :s}}}
"""
@spec push(t, key, %{costs_to: path_costs, costs_hop: path_costs, costs_heur: heuristic_costs, from: key}) :: t
def push(%__MODULE__{entries: e} = pq, node, %{costs_to: cto, costs_hop: _, costs_heur: _, from: _} = prop) do
case Map.get(e, node) do
nil ->
%__MODULE__{pq | entries: Map.put(e, node, prop)}
entry ->
if Map.get(entry, :costs_to) > cto do
%__MODULE__{pq | entries: Map.put(e, node, prop)}
else
pq
end
end
end
@doc"""
Returns the next item in the queue. As the shortest path algorithm follows the principle of the A* algorithm, the queue returns the element with the lowest combined costs. In detail, the function returns the updated priority queue, the key (the node) of the element to be evaluated next and the data of that element.
## Example
iex> Priorityqueue.new |> Priorityqueue.push(:a, %{costs_to: 15, costs_hop: 3, costs_heur: 4, from: :s}) |> Priorityqueue.push(:b, %{costs_to: 10, costs_hop: 4, costs_heur: 3, from: :s}) |>
...> Priorityqueue.pop
{%Priorityqueue {entries: %{a: %{costs_heur: 4, costs_hop: 3, costs_to: 15, from: :s}}}, :b, %{costs_heur: 3, costs_hop: 4, costs_to: 10, from: :s}}
"""
@spec pop(t) :: {t, key, %{costs_heur: heuristic_costs, costs_hop: path_costs, costs_to: path_costs, from: key}}
def pop(%__MODULE__{entries: e} = pq) do
case Map.to_list(e) do
[] -> nil
list ->
{skey, _} = Enum.min_by(list, fn(x) -> elem(x, 1)[:costs_to] + elem(x, 1)[:costs_heur] end)
pq = %__MODULE__{pq | entries: Map.delete(e, skey)}
{pq, skey, Map.get(e, skey)}
end
end
end
|
lib/priority_queue.ex
| 0.923558 | 0.707518 |
priority_queue.ex
|
starcoder
|
defmodule Radixir.System do
@moduledoc """
Provides high level interaction with the System API.
"""
alias Radixir.System.API
@type options :: keyword
@type error_message :: String.t()
@doc """
Gets system version.
## Parameters
- `options`: Keyword list that contains
- `api`: Keyword list that contains
- `url` (optional, string): If url is not in options then the url set in the configs will be used.
- any other options one may want to pass along to the http layer - for example `headers`
- `auth_index` (optional, integer): Index of the username + password combo to be used for endpoint authentication.
- `username`: (optional, string): Username to be used for endpoint authentication.
- `password`: (optional, string): Password to be used for endpoint authentication.
## Note
- Either `username` and `password` or `auth_index` must be provided.
- If all three are provided `auth_index` is used.
If the following usernames and passwords are exported as follows:
```
export USERNAMES='admin, superadmin, metrics'
export PASSWORDS='<PASSWORD>!, <PASSWORD>!'
```
then an `auth_index` of 0 would result in `admin` being used as the username and `<PASSWORD>!` being used as the password.
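## Example
A minimal call that overrides the configured node url (the url shown is a placeholder):
Radixir.System.get_version(api: [url: "https://node.example.com"])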
"""
@spec get_version(options) :: {:ok, map} | {:error, map | error_message}
def get_version(options \\ []), do: API.get_version(Keyword.get(options, :api, []))
@doc """
Gets system health.
## Parameters
- `options`: Keyword list that contains
- `api`: Keyword list that contains
- `url` (optional, string): If url is not in options then the url set in the configs will be used.
- any other options one may want to pass along to the http layer - for example `headers`
- `auth_index` (optional, integer): Index of the username + password combo to be used for endpoint authentication.
- `username`: (optional, string): Username to be used for endpoint authentication.
- `password`: (optional, string): Password to be used for endpoint authentication.
## Note
- Either `username` and `password` or `auth_index` must be provided.
- If all three are provided `auth_index` is used.
If the following usernames and passwords are exported as follows:
```
export USERNAMES='admin, superadmin, metrics'
export PASSWORDS='<PASSWORD>!, <PASSWORD>!'
```
then an `auth_index` of 0 would result in `admin` being used as the username and `funny cats very Jack 21!` being used as the password.
"""
@spec get_health(options) :: {:ok, map} | {:error, map | error_message}
def get_health(options \\ []), do: API.get_health(Keyword.get(options, :api, []))
@doc """
Gets system configuration.
## Parameters
- `options`: Keyword list that contains
- `api`: Keyword list that contains
- `url` (optional, string): If url is not in options then the url set in the configs will be used.
- any other options one may want to pass along to the http layer - for example `headers`
- `auth_index` (optional, integer): Index of the username + password combo to be used for endpoint authentication.
- `username`: (optional, string): Username to be used for endpoint authentication.
- `password`: (optional, string): Password to be used for endpoint authentication.
## Note
- Either `username` and `password` or `auth_index` must be provided.
- If all three are provided `auth_index` is used.
If the following usernames and passwords are exported as follows:
```
export USERNAMES='admin, superadmin, metrics'
export PASSWORDS='<PASSWORD>!, <PASSWORD>!'
```
then an `auth_index` of 0 would result in `admin` being used as the username and `funny cats very Jack 21!` being used as the password.
"""
@spec get_configuration(options) :: {:ok, map} | {:error, map | error_message}
def get_configuration(options \\ []), do: API.get_configuration(Keyword.get(options, :api, []))
@doc """
Gets system peers.
## Parameters
- `options`: Keyword list that contains
- `api`: Keyword list that contains
- `url` (optional, string): If url is not in options then the url set in the configs will be used.
- any other options one may want to pass along to the http layer - for example `headers`
- `auth_index` (optional, integer): Index of the username + password combo to be used for endpoint authentication.
- `username`: (optional, string): Username to be used for endpoint authentication.
- `password`: (optional, string): Password to be used for endpoint authentication.
## Note
- Either `username` and `password` or `auth_index` must be provided.
- If all three are provided `auth_index` is used.
If the following usernames and passwords are exported as follows:
```
export USERNAMES='admin, superadmin, metrics'
export PASSWORDS='<PASSWORD>!, <PASSWORD>, <PASSWORD>!'
```
then an `auth_index` of 0 would result in `admin` being used as the username and `fun<PASSWORD>!` being used as the password.
"""
@spec get_peers(options) :: {:ok, map} | {:error, map | error_message}
def get_peers(options \\ []), do: API.get_peers(Keyword.get(options, :api, []))
@doc """
Gets system addressbook.
## Parameters
- `options`: Keyword list that contains
- `api`: Keyword list that contains
- `url` (optional, string): If url is not in options then the url set in the configs will be used.
- any other options one may want to pass along to the http layer - for example `headers`
- `auth_index` (optional, integer): Index of the username + password combo to be used for endpoint authentication.
- `username`: (optional, string): Username to be used for endpoint authentication.
- `password`: (optional, string): Password to be used for endpoint authentication.
## Note
- Either `username` and `password` or `auth_index` must be provided.
- If all three are provided `auth_index` is used.
If the following usernames and passwords are exported as follows:
```
export USERNAMES='admin, superadmin, metrics'
export PASSWORDS='<PASSWORD>!, <PASSWORD>!'
```
then an `auth_index` of 0 would result in `admin` being used as the username and `funny cats very Jack 21!` being used as the password.
"""
@spec get_address_book(options) :: {:ok, map} | {:error, map | error_message}
def get_address_book(options \\ []), do: API.get_address_book(Keyword.get(options, :api, []))
@doc """
Gets system metrics.
## Parameters
- `options`: Keyword list that contains
- `api`: Keyword list that contains
- `url` (optional, string): If url is not in options then the url set in the configs will be used.
- any other options one may want to pass along to the http layer - for example `headers`
- `auth_index` (optional, integer): Index of the username + password combo to be used for endpoint authentication.
- `username`: (optional, string): Username to be used for endpoint authentication.
- `password`: (optional, string): Password to be used for endpoint authentication.
## Note
- Either `username` and `password` or `auth_index` must be provided.
- If all three are provided `auth_index` is used.
If the following usernames and passwords are exported as follows:
```
export USERNAMES='admin, superadmin, metrics'
export PASSWORDS='<PASSWORD>!, <PASSWORD>!'
```
then an `auth_index` of 0 would result in `admin` being used as the username and `funny cats very Jack 21!` being used as the password.
"""
@spec get_metrics(options) :: {:ok, map} | {:error, map | error_message}
def get_metrics(options \\ []), do: API.get_metrics(Keyword.get(options, :api, []))
@doc """
Gets prometheus metrics.
## Parameters
- `options`: Keyword list that contains
- `api`: Keyword list that contains
- `url` (optional, string): If url is not in options then the url set in the configs will be used.
- any other options one may want to pass along to the http layer - for example `headers`
- `auth_index` (optional, integer): Index of the username + password combo to be used for endpoint authentication.
- `username`: (optional, string): Username to be used for endpoint authentication.
- `password`: (optional, string): Password to be used for endpoint authentication.
## Note
- Either `username` and `password` or `auth_index` must be provided.
- If all three are provided `auth_index` is used.
If the following usernames and passwords are exported as follows:
```
export USERNAMES='admin, superadmin, metrics'
export PASSWORDS='<PASSWORD>!, <PASSWORD>, <PASSWORD>!'
```
then an `auth_index` of 0 would result in `admin` being used as the username and `fun<PASSWORD>!` being used as the password.
"""
@spec get_prometheus_metrics(options) :: {:ok, map} | {:error, map | error_message}
def get_prometheus_metrics(options \\ []),
do: API.get_prometheus_metrics(Keyword.get(options, :api, []))
end
|
lib/radixir/system.ex
| 0.917889 | 0.775307 |
system.ex
|
starcoder
|
defmodule Joken.Error do
@moduledoc """
Errors for the Joken API.
"""
defexception [:reason]
alias Joken.Signer
@doc false
def exception(reason), do: %__MODULE__{reason: reason}
def message(%__MODULE__{reason: :no_default_signer}),
do: """
Can't sign your token because a signer couldn't be created.
To create a signer we need a key in config.exs. You can define
a key in your config.exs in several ways:
1. For the default key, use `config :joken, default_signer: <key_params>`
2. For other keys, use `config :joken, <key_name>: <key_params>`
If you are using different than default keys, you can pass it as the second
argument to `generate_and_sign/2` or as a parameter for `use Joken.Config`,
example: `use Joken.Config, default_signer: <key_name>`
See configuration docs for possible values of <key_params>.
"""
def message(%__MODULE__{reason: [:bad_generate_and_sign, reason: result]}),
do: """
Error while calling `generate_and_sign!`. Reason: #{inspect(result)}.
"""
def message(%__MODULE__{reason: [:bad_verify_and_validate, reason: result]}),
do: """
Error while calling `verify_and_validate!`. Reason: #{inspect(result)}.
"""
def message(%__MODULE__{reason: :invalid_default_claims}),
do: """
Invalid argument to default claims. Verify the types of the arguments to
Joken.Config.default_claims/1.
"""
def message(%__MODULE__{reason: :unrecognized_algorithm}),
do: """
Couldn't recognize the signer algorithm.
Possible values are:
#{inspect(Signer.algorithms())}
"""
def message(%__MODULE__{reason: :claim_not_valid}),
do: """
Claim did not pass validation.
Set log level to debug for more information.
"""
def message(%__MODULE__{reason: :claim_configuration_not_valid}),
do: """
Claim configuration is not valid. You must have either a generation function or a
validation function.
If both are nil you don't need a Joken.Claim configuration. You can pass any map of values
to `Joken.Config.generate_and_sign/3`. Verify will only use claims that have a validation
function on your configuration. Example:
defmodule CustomClaimTest do
use Joken.Config
end
CustomClaimTest.generate_and_sign %{"a claim without configuration" => "any value"}
"""
def message(%__MODULE__{reason: :bad_validate_fun_arity}),
do: """
Claim validate function must have either arity 1 or 2.
When arity is 1, it receives the claim value in a given JWT.
When it is 2, besides the claim value, it receives a context map. You can pass dynamic
values on this context and pass it to the validate function.
See `Joken.Config.validate/3` for more information on Context
"""
def message(%__MODULE__{reason: :wrong_key_parameters}),
do: """
Couldn't create a signer because there are missing parameters.
Check the Joken.Signer.parse_config/2 documentation for the types of parameters needed
for each type of algorithm.
"""
end
|
lib/joken/error.ex
| 0.863334 | 0.460228 |
error.ex
|
starcoder
|
defmodule RDF.PrefixMap do
@moduledoc """
A mapping of prefix atoms to IRI namespaces.
`RDF.PrefixMap` implements the `Enumerable` protocol.
"""
alias RDF.IRI
@type prefix :: atom
@type namespace :: IRI.t
@type coercible_prefix :: atom | String.t
@type coercible_namespace :: atom | String.t | IRI.t
@type prefix_map :: %{prefix => namespace}
@type conflict_resolver ::
(coercible_prefix, coercible_namespace, coercible_namespace -> coercible_namespace)
@type t :: %__MODULE__{
map: prefix_map
}
defstruct map: %{}
@doc """
Creates an empty `RDF.PrefixMap`.
"""
@spec new :: t
def new, do: %__MODULE__{}
@doc """
Creates a new `RDF.PrefixMap`.
The prefix mappings can be passed as keyword lists or maps.
The keys for the prefixes can be given as atoms or strings and will be normalized to atoms.
The namespaces can be given as `RDF.IRI`s or strings and will be normalized to `RDF.IRI`s.
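## Examples
For example (the namespace IRIs shown are placeholders):
RDF.PrefixMap.new(ex: "http://example.com/ns#")
RDF.PrefixMap.new(%{"ex" => RDF.IRI.new("http://example.com/ns#")})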
"""
@spec new(t | map | keyword) :: t
def new(map)
def new(%__MODULE__{} = prefix_map), do: prefix_map
def new(map) when is_map(map) do
%__MODULE__{map: Map.new(map, &normalize/1)}
end
def new(map) when is_list(map) do
map |> Map.new() |> new()
end
defp normalize({prefix, namespace}) when is_atom(prefix),
do: {prefix, IRI.coerce_base(namespace)}
defp normalize({prefix, namespace}) when is_binary(prefix),
do: normalize({String.to_atom(prefix), namespace})
defp normalize({prefix, namespace}),
do:
raise(ArgumentError, "Invalid prefix mapping: #{inspect(prefix)} => #{inspect(namespace)}")
@doc """
Adds a prefix mapping to the given `RDF.PrefixMap`.
Unless a mapping of the given prefix to a different namespace already exists,
an `:ok` tuple is returned; otherwise, an `:error` tuple is returned.
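For example, starting from an empty prefix map (the namespace IRIs shown are placeholders):
{:ok, prefix_map} = RDF.PrefixMap.new() |> RDF.PrefixMap.add(:ex, "http://example.com/ns#")
RDF.PrefixMap.add(prefix_map, :ex, "http://other.example.com/ns#")
#=> {:error, "prefix :ex is already mapped to another namespace"}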
"""
@spec add(t, coercible_prefix, coercible_namespace) :: {:ok, t} | {:error, String.t}
def add(prefix_map, prefix, namespace)
def add(%__MODULE__{map: map}, prefix, %IRI{} = namespace) when is_atom(prefix) do
if conflicts?(map, prefix, namespace) do
{:error, "prefix #{inspect(prefix)} is already mapped to another namespace"}
else
{:ok, %__MODULE__{map: Map.put(map, prefix, namespace)}}
end
end
def add(%__MODULE__{} = prefix_map, prefix, namespace) do
with {prefix, namespace} = normalize({prefix, namespace}) do
add(prefix_map, prefix, namespace)
end
end
@doc """
Adds a prefix mapping to the given `RDF.PrefixMap` and raises an exception in error cases.
"""
@spec add!(t, coercible_prefix, coercible_namespace) :: t
def add!(prefix_map, prefix, namespace) do
with {:ok, new_prefix_map} <- add(prefix_map, prefix, namespace) do
new_prefix_map
else
{:error, error} -> raise error
end
end
@doc """
Merges two `RDF.PrefixMap`s.
The second prefix map can also be given as any structure which can be converted
to a `RDF.PrefixMap` via `new/1`.
If the prefix maps can be merged without conflicts, that is, there are no
prefixes mapped to different namespaces, an `:ok` tuple is returned.
Otherwise an `:error` tuple with the list of prefixes with conflicting
namespaces is returned.
See also `merge/3` which allows you to resolve conflicts with a function.
"""
@spec merge(t, t | map | keyword) :: {:ok, t} | {:error, [atom | String.t]}
def merge(prefix_map1, prefix_map2)
def merge(%__MODULE__{map: map1}, %__MODULE__{map: map2}) do
with [] <- merge_conflicts(map1, map2) do
{:ok, %__MODULE__{map: Map.merge(map1, map2)}}
else
conflicts -> {:error, conflicts}
end
end
def merge(%__MODULE__{} = prefix_map, other_prefixes) do
merge(prefix_map, new(other_prefixes))
rescue
FunctionClauseError ->
raise ArgumentError, "#{inspect(other_prefixes)} is not convertible to a RDF.PrefixMap"
end
@doc """
Merges two `RDF.PrefixMap`s, resolving conflicts through the given `conflict_resolver` function.
The second prefix map can also be given as any structure which can be converted
to a `RDF.PrefixMap` via `new/1`.
The given function will be invoked when there are conflicting mappings of
prefixes to different namespaces; its arguments are `prefix`, `namespace1`
(the namespace for the prefix in the first prefix map),
and `namespace2` (the namespace for the prefix in the second prefix map).
The value returned by the `conflict_resolver` function is used as the namespace
for the prefix in the resulting prefix map.
Non-`RDF.IRI` values will be implicitly converted to `RDF.IRI` via `RDF.IRI.new`.
If a conflict can't be resolved, the provided function can return `nil`.
This will result in an overall return of an `:error` tuple with the list of
prefixes for which the conflict couldn't be resolved.
If everything could be merged, an `:ok` tuple is returned.
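For example, a conflict resolver that always keeps the namespace from the first prefix map (`prefix_map1` and `prefix_map2` stand for any two existing prefix maps):
RDF.PrefixMap.merge(prefix_map1, prefix_map2, fn _prefix, namespace1, _namespace2 -> namespace1 end)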
"""
@spec merge(t, t | map | keyword, conflict_resolver | nil) :: {:ok, t} | {:error, [atom | String.t]}
def merge(prefix_map1, prefix_map2, conflict_resolver)
def merge(%__MODULE__{map: map1}, %__MODULE__{map: map2}, conflict_resolver)
when is_function(conflict_resolver) do
conflict_resolution = fn prefix, namespace1, namespace2 ->
case conflict_resolver.(prefix, namespace1, namespace2) do
nil -> :conflict
result -> IRI.new(result)
end
end
with resolved_merge = Map.merge(map1, map2, conflict_resolution),
[] <- resolved_merge_rest_conflicts(resolved_merge) do
{:ok, %__MODULE__{map: resolved_merge}}
else
conflicts -> {:error, conflicts}
end
end
def merge(%__MODULE__{} = prefix_map1, prefix_map2, conflict_resolver)
when is_function(conflict_resolver) do
merge(prefix_map1, new(prefix_map2), conflict_resolver)
end
def merge(prefix_map1, prefix_map2, nil), do: merge(prefix_map1, prefix_map2)
defp resolved_merge_rest_conflicts(map) do
Enum.reduce(map, [], fn
{prefix, :conflict}, conflicts -> [prefix | conflicts]
_, conflicts -> conflicts
end)
end
defp merge_conflicts(map1, map2) do
Enum.reduce(map1, [], fn {prefix, namespace}, conflicts ->
if conflicts?(map2, prefix, namespace) do
[prefix | conflicts]
else
conflicts
end
end)
end
defp conflicts?(map, prefix, namespace) do
(existing_namespace = Map.get(map, prefix)) && existing_namespace != namespace
end
@doc """
Merges two `RDF.PrefixMap`s and raises an exception in error cases.
See `merge/2` and `merge/3` for more information on merging prefix maps.
"""
@spec merge!(t, t | map | keyword, conflict_resolver | nil) :: t
def merge!(prefix_map1, prefix_map2, conflict_resolver \\ nil) do
with {:ok, new_prefix_map} <- merge(prefix_map1, prefix_map2, conflict_resolver) do
new_prefix_map
else
{:error, conflicts} ->
conflicts = conflicts |> Stream.map(&inspect/1) |> Enum.join(", ")
raise "conflicting prefix mappings: #{conflicts}"
end
end
@doc """
Deletes a prefix mapping from the given `RDF.PrefixMap`.
"""
@spec delete(t, coercible_prefix) :: t
def delete(prefix_map, prefix)
def delete(%__MODULE__{map: map}, prefix) when is_atom(prefix) do
%__MODULE__{map: Map.delete(map, prefix)}
end
def delete(prefix_map, prefix) when is_binary(prefix) do
delete(prefix_map, String.to_atom(prefix))
end
@doc """
Drops the given `prefixes` from the given `prefix_map`.
If `prefixes` contains prefixes that are not in `prefix_map`, they're simply ignored.
"""
@spec drop(t, [coercible_prefix]) :: t
def drop(prefix_map, prefixes)
def drop(%__MODULE__{map: map}, prefixes) do
%__MODULE__{
map:
Map.drop(
map,
Enum.map(prefixes, fn
prefix when is_binary(prefix) -> String.to_atom(prefix)
other -> other
end)
)
}
end
@doc """
Returns the namespace for the given prefix in the given `RDF.PrefixMap`.
Returns `nil`, when the given `prefix` is not present in `prefix_map`.
"""
@spec namespace(t, coercible_prefix) :: namespace | nil
def namespace(prefix_map, prefix)
def namespace(%__MODULE__{map: map}, prefix) when is_atom(prefix) do
Map.get(map, prefix)
end
def namespace(prefix_map, prefix) when is_binary(prefix) do
namespace(prefix_map, String.to_atom(prefix))
end
@doc """
Returns the prefix for the given namespace in the given `RDF.PrefixMap`.
Returns `nil`, when the given `namespace` is not present in `prefix_map`.
"""
@spec prefix(t, coercible_namespace) :: coercible_prefix | nil
def prefix(prefix_map, namespace)
def prefix(%__MODULE__{map: map}, %IRI{} = namespace) do
Enum.find_value(map, fn {prefix, ns} -> ns == namespace && prefix end)
end
def prefix(prefix_map, namespace) when is_binary(namespace) do
prefix(prefix_map, IRI.new(namespace))
end
@doc """
Returns whether the given prefix exists in the given `RDF.PrefixMap`.
"""
@spec has_prefix?(t, coercible_prefix) :: boolean
def has_prefix?(prefix_map, prefix)
def has_prefix?(%__MODULE__{map: map}, prefix) when is_atom(prefix) do
Map.has_key?(map, prefix)
end
def has_prefix?(prefix_map, prefix) when is_binary(prefix) do
has_prefix?(prefix_map, String.to_atom(prefix))
end
@doc """
Returns all prefixes from the given `RDF.PrefixMap`.
"""
@spec prefixes(t) :: [coercible_prefix]
def prefixes(%__MODULE__{map: map}) do
Map.keys(map)
end
@doc """
Returns all namespaces from the given `RDF.PrefixMap`.
"""
@spec namespaces(t) :: [coercible_namespace]
def namespaces(%__MODULE__{map: map}) do
Map.values(map)
end
defimpl Enumerable do
def reduce(%RDF.PrefixMap{map: map}, acc, fun), do: Enumerable.reduce(map, acc, fun)
def member?(%RDF.PrefixMap{map: map}, mapping), do: Enumerable.member?(map, mapping)
def count(%RDF.PrefixMap{map: map}), do: Enumerable.count(map)
def slice(_prefix_map), do: {:error, __MODULE__}
end
end
|
lib/rdf/prefix_map.ex
| 0.892516 | 0.619759 |
prefix_map.ex
|
starcoder
|
defmodule Mix do
@moduledoc """
Mix is a build tool that provides tasks for creating, compiling, testing
(and soon deploying) Elixir projects. Mix is inspired by the Leiningen
build tool for Clojure and was written by one of its contributors.
This module works as a facade for accessing the most common functionality
in Elixir, such as the shell and the current project configuration.
For getting started with Elixir, check out the guide available on
[Elixir's website](http://elixir-lang.org).
"""
use Application.Behaviour
# Used internally to start the mix application and its dependencies.
@doc false
def start do
:application.start(:elixir)
:application.start(:mix)
end
# Application behaviour callback
@doc false
def start(_, []) do
resp = Mix.Sup.start_link get_env
Mix.SCM.register_builtin
resp
end
@doc """
Returns the mix environment.
"""
def env do
Mix.Server.call(:env)
end
@doc """
Changes the current mix env. Project configuration loaded
per environment will not be reloaded.
"""
def env(env) when is_atom(env) do
Mix.Server.cast({ :env, env })
end
defp get_env do
if env = System.get_env("MIX_ENV") do
binary_to_atom env
else
:dev
end
end
@doc """
Starts mix and loads the project and dependencies in
one step. Useful when invoking mix from an external tool.
"""
def loadpaths do
Mix.start
Mix.Task.run "loadpaths"
end
@doc """
The shell is a wrapper for doing IO.
It contains conveniences for asking the user for information,
printing status and so forth. It is also swappable,
allowing developers to use a test shell that simply sends the
messages to the current process.
"""
def shell do
Mix.Server.call(:shell)
end
@doc """
Sets the current shell.
"""
def shell(shell) do
Mix.Server.cast({ :shell, shell })
end
@doc """
Retrieves the current project configuration, with the current
environment configuration applied.
If there is no project defined, it still returns a keyword
list with default values. This allows many mix tasks to work
without the need for an underlying project.
"""
def project do
Mix.Project.config
end
end
|
lib/mix/lib/mix.ex
| 0.724286 | 0.422832 |
mix.ex
|
starcoder
|
defmodule Timelapse do
use Evercam.Schema
@required_fields [:camera_id, :user_id, :title, :frequency, :status, :date_always, :time_always]
@optional_fields [:exid, :snapshot_count, :resolution, :from_datetime, :to_datetime, :watermark_logo, :watermark_position, :recreate_hls, :start_recreate_hls, :last_snapshot_at, :extra]
# @status %{active: 0, scheduled: 1, expired: 2, paused: 3, stopped: 4}
schema "timelapses" do
belongs_to :user, User, foreign_key: :user_id
belongs_to :camera, Camera, foreign_key: :camera_id
field :exid, :string
field :title, :string
field :frequency, :integer
field :snapshot_count, :integer
field :resolution, :string
field :status, :integer, default: 0
field :date_always, :boolean
field :from_datetime, :utc_datetime_usec, default: Calendar.DateTime.now_utc
field :time_always, :boolean
field :to_datetime, :utc_datetime_usec, default: Calendar.DateTime.now_utc
field :watermark_logo, :string
field :watermark_position, :string
field :recreate_hls, :boolean, default: false
field :start_recreate_hls, :boolean, default: false
field :hls_created, :boolean, default: false
field :last_snapshot_at, :utc_datetime_usec
field :extra, Evercam.Types.JSON
timestamps(type: :utc_datetime, default: Calendar.DateTime.now_utc)
end
def all do
Timelapse
|> preload(:camera)
|> preload([camera: :owner])
|> preload([camera: :vendor_model])
|> preload([camera: [vendor_model: :vendor]])
|> Repo.all
end
def by_camera_id(id) do
Timelapse
|> where(camera_id: ^id)
|> preload(:user)
|> preload(:camera)
|> preload([camera: :owner])
|> order_by([t], desc: t.inserted_at)
|> Repo.all
end
def by_user_id(user_id) do
Timelapse
|> where(user_id: ^user_id)
|> preload(:user)
|> preload(:camera)
|> preload([camera: :owner])
|> order_by([t], desc: t.inserted_at)
|> Repo.all
end
def by_exid(exid) do
Timelapse
|> where(exid: ^String.downcase(exid))
|> preload(:user)
|> preload(:camera)
|> preload([camera: :owner])
|> Repo.one
end
def delete_by_exid(exid) do
Timelapse
|> where(exid: ^exid)
|> Repo.delete_all
end
def delete_by_id(id) do
Timelapse
|> where(id: ^id)
|> Repo.delete_all
end
def delete_by_camera_id(camera_id) do
Timelapse
|> where(camera_id: ^camera_id)
|> Repo.delete_all
end
def scheduled_now?(timezone, from_date, to_date, date_always, time_always) do
current_time = Calendar.DateTime.now!(timezone)
from_date = Calendar.DateTime.shift_zone!(from_date, timezone)
to_date = Calendar.DateTime.shift_zone!(to_date, timezone)
is_scheduled_now?(timezone, current_time, from_date, to_date, date_always, time_always)
end
def is_scheduled_now?(_timezone, _current_time, _from_date, _to_date, true, true), do: {:ok, true}
def is_scheduled_now?(_timezone, current_time, from_date, to_date, false, false) do
between?(current_time, from_date, to_date)
end
def is_scheduled_now?(timezone, current_time, from_date, to_date, true, false) do
%{year: current_year, month: current_month, day: current_day} = current_time
%{hour: from_hour, minute: from_minute} = from_date
%{hour: to_hour, minute: to_minute} = to_date
start_date = {{current_year, current_month, current_day}, {from_hour, from_minute, 0}} |> Calendar.DateTime.from_erl!(timezone)
case Calendar.DateTime.diff(from_date, to_date) do
{:ok, _seconds, _, :after} ->
%{year: next_year, month: next_month, day: next_day} = current_time |> Calendar.DateTime.advance!(60 * 60 * 24)
end_date = {{next_year, next_month, next_day}, {to_hour, to_minute, 59}} |> Calendar.DateTime.from_erl!(timezone)
between?(current_time, start_date, end_date)
_ ->
end_date = {{current_year, current_month, current_day}, {to_hour, to_minute, 59}} |> Calendar.DateTime.from_erl!(timezone)
between?(current_time, start_date, end_date)
end
end
def is_scheduled_now?(timezone, current_time, from_date, to_date, false, true) do
%{year: from_year, month: from_month, day: from_day} = from_date
%{year: to_year, month: to_month, day: to_day} = to_date
start_date = {{from_year, from_month, from_day}, {0, 0, 0}} |> Calendar.DateTime.from_erl!(timezone)
end_date = {{to_year, to_month, to_day}, {23, 59, 59}} |> Calendar.DateTime.from_erl!(timezone)
between?(current_time, start_date, end_date)
end
defp between?(current_time, from_date, to_date) do
check = current_time |> Calendar.DateTime.Format.unix
start = from_date |> Calendar.DateTime.Format.unix
the_end = to_date |> Calendar.DateTime.Format.unix
case check >= start && check < the_end do
true ->
{:ok, true}
_ ->
{:ok, false}
end
end
def create_timelapse(timelapse_params) do
timelapse_changeset = create_changeset(%Timelapse{}, timelapse_params)
case Repo.insert(timelapse_changeset) do
{:ok, timelapse} ->
camera_timelapse =
timelapse
|> Repo.preload(:user)
|> Repo.preload(:camera)
|> Repo.preload([camera: :owner])
|> Repo.preload([camera: :vendor_model])
|> Repo.preload([camera: [vendor_model: :vendor]])
{:ok, camera_timelapse}
{:error, changeset} ->
{:error, changeset}
end
end
def update_timelapse(timelapse, params) do
timelapse_changeset = changeset(timelapse, params)
case Repo.update(timelapse_changeset) do
{:ok, timelapse} ->
camera_timelapse =
timelapse
|> Repo.preload(:user)
|> Repo.preload(:camera)
|> Repo.preload([camera: :owner])
|> Repo.preload([camera: :vendor_model])
|> Repo.preload([camera: [vendor_model: :vendor]])
{:ok, camera_timelapse}
{:error, changeset} ->
{:error, changeset}
end
end
defp validate_exid(changeset) do
case get_field(changeset, :exid) do
nil -> auto_generate_camera_id(changeset)
_exid -> changeset |> update_change(:exid, &String.downcase/1)
end
end
defp auto_generate_camera_id(changeset) do
case get_field(changeset, :title) do
nil ->
changeset
subject ->
camera_id =
subject
|> Util.slugify
|> String.replace(" ", "")
|> String.replace("-", "")
|> String.downcase
|> String.slice(0..4)
put_change(changeset, :exid, "#{camera_id}-#{Enum.take_random(?a..?z, 5)}")
end
end
defp validate_from_to_datetime(changeset) do
date_always = get_field(changeset, :date_always)
time_always = get_field(changeset, :time_always)
from_datetime = get_field(changeset, :from_datetime)
to_datetime = get_field(changeset, :to_datetime)
case validate_datetime_parameter(from_datetime, to_datetime, date_always, time_always) do
{:ok} -> changeset
{:invalid, message} -> add_error(changeset, :invalid_from_to_datetime, message)
end
end
defp validate_datetime_parameter(_from, _to, true, true), do: {:ok}
defp validate_datetime_parameter(from, to, date_always, time_always) do
cond do
!date_always && is_nil(from) || is_nil(to) ->
{:invalid, "From and To date can't be blank."}
!time_always && is_nil(from) || is_nil(to) ->
{:invalid, "From and To time can't be blank."}
is_valid_datetime?(from, to) ->
{:invalid, "From date cannot be greater than current time."}
is_valid_datetime?(Calendar.DateTime.now!("UTC"), to) ->
{:invalid, "To date cannot be less than current time."}
true -> {:ok}
end
end
def is_valid_datetime?(from, to) do
case Calendar.DateTime.diff(from, to) do
{:ok, _, _, :after} -> true
_ -> false
end
end
def create_changeset(model, params \\ :invalid) do
model
|> changeset(params)
|> validate_from_to_datetime
end
def changeset(model, params \\ :invalid) do
model
|> cast(params, @required_fields ++ @optional_fields)
|> validate_required(@required_fields)
|> validate_exid
end
end
|
lib/evercam_models/timelapse.ex
| 0.636353 | 0.4165 |
timelapse.ex
|
starcoder
|
defmodule Farmbot.CeleryScript.AST.Heap do
@moduledoc """
A heap-ish data structure required when converting canonical CeleryScript AST
nodes into the Flat IR form.
This data structure is useful because it addresses each node in the
CeleryScript tree via a unique numerical index, rather than using mutable
references.
MORE INFO: https://github.com/FarmBot-Labs/Celery-Slicer
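A small usage sketch (the node kind used here is only illustrative):
alias Farmbot.CeleryScript.AST
heap = AST.Heap.new() |> AST.Heap.alot(AST.Node.Nothing) |> AST.Heap.put(:arg, 42)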
"""
alias Farmbot.CeleryScript.AST
alias AST.Heap
defmodule Address do
@moduledoc "Address on the heap."
defstruct [:value]
@doc "New heap address."
def new(num) when is_integer(num) do
%__MODULE__{value: num}
end
@doc "Increment an address."
def inc(%__MODULE__{value: num}) do
%__MODULE__{value: num + 1}
end
@doc "Decrement an address."
def dec(%__MODULE__{value: num}) do
%__MODULE__{value: num - 1}
end
defimpl Inspect, for: __MODULE__ do
def inspect(%{value: val}, _), do: "HeapAddress(#{val})"
end
end
# Constants and key names.
@link "🔗"
@body String.to_atom(@link <> "body")
@next String.to_atom(@link <> "next")
@parent String.to_atom(@link <> "parent")
@kind String.to_atom(@link <> "kind")
@primary_fields [@parent, @body, @kind, @next]
@null Address.new(0)
@nothing %{
@kind => AST.Node.Nothing,
@parent => @null,
@body => @null,
@next => @null
}
def link, do: @link
def parent, do: @parent
def body, do: @body
def next, do: @next
def kind, do: @kind
def primary_fields, do: @primary_fields
def null, do: @null
defstruct [:entries, :here]
@doc "Initialize a new heap."
def new do
%{struct(Heap) | here: @null, entries: %{@null => @nothing}}
end
@doc "Alot a new kind on the heap. Increments `here` on the heap."
def alot(%Heap{} = heap, kind) do
here_plus_one = Address.inc(heap.here)
new_entries = Map.put(heap.entries, here_plus_one, %{@kind => kind})
%{heap | here: here_plus_one, entries: new_entries}
end
@doc "Puts a key/value pair at `here` on the heap."
def put(%Heap{here: addr} = heap, key, value) do
put(heap, addr, key, value)
end
@doc "Puts a key/value pair at an arbitrary address on the heap."
def put(%Heap{} = heap, %Address{} = addr, key, value) do
block = heap[addr] || raise "Bad node address: #{inspect addr}"
new_block = Map.put(block, String.to_atom(to_string(key)), value)
new_entries = Map.put(heap.entries, addr, new_block)
%{heap | entries: new_entries}
end
@doc "Gets the values of the heap entries."
def values(%Heap{entries: entries}), do: entries
# Access behaviour.
@doc false
def fetch(%Heap{} = heap, %Address{} = adr), do: Map.fetch(heap.entries, adr)
end
|
lib/farmbot/celery_script/ast/heap.ex
| 0.844601 | 0.440349 |
heap.ex
|
starcoder
|
defmodule AWS.WAFRegional do
@moduledoc """
This is **AWS WAF Classic Regional** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
This is the *AWS WAF Regional Classic API Reference* for using AWS WAF Classic
with the AWS resources, Elastic Load Balancing (ELB) Application Load Balancers
and API Gateway APIs. The AWS WAF Classic actions and data types listed in the
reference are available for protecting Elastic Load Balancing (ELB) Application
Load Balancers and API Gateway APIs. You can use these actions and data types by
means of the endpoints listed in [AWS Regions and Endpoints](https://docs.aws.amazon.com/general/latest/gr/rande.html#waf_region).
This guide is for developers who need detailed information about the AWS WAF
Classic API actions, data types, and errors. For detailed information about AWS
WAF Classic features and an overview of how to use the AWS WAF Classic API, see
the [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: "WAF Regional",
api_version: "2016-11-28",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "waf-regional",
global?: false,
protocol: "json",
service_id: "WAF Regional",
signature_version: "v4",
signing_name: "waf-regional",
target_prefix: "AWSWAF_Regional_20161128"
}
end
@doc """
This is **AWS WAF Classic Regional** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Associates a web ACL with a resource, either an application load balancer or
Amazon API Gateway stage.
"""
def associate_web_acl(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AssociateWebACL", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Creates a `ByteMatchSet`. You then use `UpdateByteMatchSet` to identify the part
of a web request that you want AWS WAF to inspect, such as the values of the
`User-Agent` header or the query string. For example, you can create a
`ByteMatchSet` that matches any requests with `User-Agent` headers that contain
the string `BadBot`. You can then configure AWS WAF to reject those requests.
To create and configure a `ByteMatchSet`, perform the following steps:
1. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateByteMatchSet` request.
2. Submit a `CreateByteMatchSet` request.
3. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateByteMatchSet` request.
4. Submit an `UpdateByteMatchSet` request to specify the part of the
request that you want AWS WAF to inspect (for example, the header or the URI)
and the value that you want AWS WAF to watch for.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_byte_match_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateByteMatchSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Creates an `GeoMatchSet`, which you use to specify which web requests you want
to allow or block based on the country that the requests originate from. For
example, if you're receiving a lot of requests from one or more countries and
you want to block the requests, you can create an `GeoMatchSet` that contains
those countries and then configure AWS WAF to block the requests.
To create and configure a `GeoMatchSet`, perform the following steps:
1. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateGeoMatchSet` request.
2. Submit a `CreateGeoMatchSet` request.
3. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateGeoMatchSet` request.
4. Submit an `UpdateGeoMatchSetSet` request to specify the countries
that you want AWS WAF to watch for.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_geo_match_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateGeoMatchSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Creates an `IPSet`, which you use to specify which web requests that you want to
allow or block based on the IP addresses that the requests originate from. For
example, if you're receiving a lot of requests from one or more individual IP
addresses or one or more ranges of IP addresses and you want to block the
requests, you can create an `IPSet` that contains those IP addresses and then
configure AWS WAF to block the requests.
To create and configure an `IPSet`, perform the following steps:
1. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateIPSet` request.
2. Submit a `CreateIPSet` request.
3. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateIPSet` request.
4. Submit an `UpdateIPSet` request to specify the IP addresses that
you want AWS WAF to watch for.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_ip_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateIPSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Creates a `RateBasedRule`. The `RateBasedRule` contains a `RateLimit`, which
specifies the maximum number of requests that AWS WAF allows from a specified IP
address in a five-minute period. The `RateBasedRule` also contains the `IPSet`
objects, `ByteMatchSet` objects, and other predicates that identify the requests
that you want to count or block if these requests exceed the `RateLimit`.
If you add more than one predicate to a `RateBasedRule`, a request not only must
exceed the `RateLimit`, but it also must match all the conditions to be counted
or blocked. For example, suppose you add the following to a `RateBasedRule`:
* An `IPSet` that matches the IP address `192.0.2.44/32`
* A `ByteMatchSet` that matches `BadBot` in the `User-Agent` header
Further, you specify a `RateLimit` of 1,000.
You then add the `RateBasedRule` to a `WebACL` and specify that you want to
block requests that meet the conditions in the rule. For a request to be
blocked, it must come from the IP address 192.0.2.44 *and* the `User-Agent`
header in the request must contain the value `BadBot`. Further, requests that
match these two conditions must be received at a rate of more than 1,000
requests every five minutes. If both conditions are met and the rate is
exceeded, AWS WAF blocks the requests. If the rate drops below 1,000 for a
five-minute period, AWS WAF no longer blocks the requests.
As a second example, suppose you want to limit requests to a particular page on
your site. To do this, you could add the following to a `RateBasedRule`:
* A `ByteMatchSet` with `FieldToMatch` of `URI`
* A `PositionalConstraint` of `STARTS_WITH`
* A `TargetString` of `login`
Further, you specify a `RateLimit` of 1,000.
By adding this `RateBasedRule` to a `WebACL`, you could limit requests to your
login page without affecting the rest of your site.
To create and configure a `RateBasedRule`, perform the following steps:
1. Create and update the predicates that you want to include in the
rule. For more information, see `CreateByteMatchSet`, `CreateIPSet`, and
`CreateSqlInjectionMatchSet`.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateRateBasedRule` request.
3. Submit a `CreateRateBasedRule` request.
4. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateRateBasedRule` request.
5. Submit an `UpdateRateBasedRule` request to specify the predicates
that you want to include in the rule.
6. Create and update a `WebACL` that contains the `RateBasedRule`.
For more information, see `CreateWebACL`.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
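
A minimal sketch of step 3, under the same assumptions as the other examples
in this module (a configured `%AWS.Client{}`, placeholder names, a change
token obtained from `get_change_token/2`, and field names per the AWS WAF
Classic API reference):

    input = %{
      "ChangeToken" => token,
      "Name" => "RateLimitLogin",
      "MetricName" => "RateLimitLogin",
      "RateKey" => "IP",
      "RateLimit" => 1000
    }

    {:ok, %{"Rule" => %{"RuleId" => rule_id}}, _http} =
      create_rate_based_rule(client, input)

    # rule_id can then be used with update_rate_based_rule/3 (steps 4 and 5)
    # and activated in a WebACL (step 6).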
"""
def create_rate_based_rule(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateRateBasedRule", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Creates a `RegexMatchSet`. You then use `UpdateRegexMatchSet` to identify the
part of a web request that you want AWS WAF to inspect, such as the values of
the `User-Agent` header or the query string. For example, you can create a
`RegexMatchSet` that contains a `RegexMatchTuple` that looks for any requests
with `User-Agent` headers that match a `RegexPatternSet` with pattern
`B[a@]dB[o0]t`. You can then configure AWS WAF to reject those requests.
To create and configure a `RegexMatchSet`, perform the following steps:
1. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateRegexMatchSet` request.
2. Submit a `CreateRegexMatchSet` request.
3. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateRegexMatchSet` request.
4. Submit an `UpdateRegexMatchSet` request to specify the part of
the request that you want AWS WAF to inspect (for example, the header or the
URI) and the value, using a `RegexPatternSet`, that you want AWS WAF to watch
for.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_regex_match_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateRegexMatchSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Creates a `RegexPatternSet`. You then use `UpdateRegexPatternSet` to specify the
regular expression (regex) pattern that you want AWS WAF to search for, such as
`B[a@]dB[o0]t`. You can then configure AWS WAF to reject those requests.
To create and configure a `RegexPatternSet`, perform the following steps:
1. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateRegexPatternSet` request.
2. Submit a `CreateRegexPatternSet` request.
3. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateRegexPatternSet` request.
4. Submit an `UpdateRegexPatternSet` request to specify the string
that you want AWS WAF to watch for.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_regex_pattern_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateRegexPatternSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Creates a `Rule`, which contains the `IPSet` objects, `ByteMatchSet` objects,
and other predicates that identify the requests that you want to block. If you
add more than one predicate to a `Rule`, a request must match all of the
specifications to be allowed or blocked. For example, suppose that you add the
following to a `Rule`:
* An `IPSet` that matches the IP address `192.0.2.44/32`
* A `ByteMatchSet` that matches `BadBot` in the `User-Agent` header
You then add the `Rule` to a `WebACL` and specify that you want to block
requests that satisfy the `Rule`. For a request to be blocked, it must come from
the IP address 192.0.2.44 *and* the `User-Agent` header in the request must
contain the value `BadBot`.
To create and configure a `Rule`, perform the following steps:
1. Create and update the predicates that you want to include in the
`Rule`. For more information, see `CreateByteMatchSet`, `CreateIPSet`, and
`CreateSqlInjectionMatchSet`.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateRule` request.
3. Submit a `CreateRule` request.
4. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateRule` request.
5. Submit an `UpdateRule` request to specify the predicates that you
want to include in the `Rule`.
6. Create and update a `WebACL` that contains the `Rule`. For more
information, see `CreateWebACL`.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
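
Sketching steps 3 and 5 (same assumptions as the other examples in this
module; `ip_set_id` is a placeholder for an existing `IPSet`, and each call
uses its own change token from `get_change_token/2`):

    {:ok, %{"Rule" => %{"RuleId" => rule_id}}, _http} =
      create_rule(client, %{
        "ChangeToken" => token,
        "Name" => "BadBotRule",
        "MetricName" => "BadBotRule"
      })

    update_rule(client, %{
      "ChangeToken" => fresh_token,
      "RuleId" => rule_id,
      "Updates" => [
        %{"Action" => "INSERT",
          "Predicate" => %{"Negated" => false, "Type" => "IPMatch", "DataId" => ip_set_id}}
      ]
    })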
"""
def create_rule(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateRule", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Creates a `RuleGroup`. A rule group is a collection of predefined rules that you
add to a web ACL. You use `UpdateRuleGroup` to add rules to the rule group.
Rule groups are subject to the following limits:
* Three rule groups per account. You can request an increase to this
limit by contacting customer support.
* One rule group per web ACL.
* Ten rules per rule group.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_rule_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateRuleGroup", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Creates a `SizeConstraintSet`. You then use `UpdateSizeConstraintSet` to
identify the part of a web request that you want AWS WAF to check for length,
such as the length of the `User-Agent` header or the length of the query string.
For example, you can create a `SizeConstraintSet` that matches any requests that
have a query string that is longer than 100 bytes. You can then configure AWS
WAF to reject those requests.
To create and configure a `SizeConstraintSet`, perform the following steps:
1. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateSizeConstraintSet` request.
2. Submit a `CreateSizeConstraintSet` request.
3. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateSizeConstraintSet` request.
4. Submit an `UpdateSizeConstraintSet` request to specify the part
of the request that you want AWS WAF to inspect (for example, the header or the
URI) and the value that you want AWS WAF to watch for.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_size_constraint_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateSizeConstraintSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Creates a `SqlInjectionMatchSet`, which you use to allow, block, or count
requests that contain snippets of SQL code in a specified part of web requests.
AWS WAF searches for character sequences that are likely to be malicious
strings.
To create and configure a `SqlInjectionMatchSet`, perform the following steps:
1. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateSqlInjectionMatchSet` request.
2. Submit a `CreateSqlInjectionMatchSet` request.
3. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateSqlInjectionMatchSet` request.
4. Submit an `UpdateSqlInjectionMatchSet` request to specify the
parts of web requests in which you want to allow, block, or count malicious SQL
code.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_sql_injection_match_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateSqlInjectionMatchSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Creates a `WebACL`, which contains the `Rules` that identify the CloudFront web
requests that you want to allow, block, or count. AWS WAF evaluates `Rules` in
order based on the value of `Priority` for each `Rule`.
You also specify a default action, either `ALLOW` or `BLOCK`. If a web request
doesn't match any of the `Rules` in a `WebACL`, AWS WAF responds to the request
with the default action.
To create and configure a `WebACL`, perform the following steps:
1. Create and update the `ByteMatchSet` objects and other predicates
that you want to include in `Rules`. For more information, see
`CreateByteMatchSet`, `UpdateByteMatchSet`, `CreateIPSet`, `UpdateIPSet`,
`CreateSqlInjectionMatchSet`, and `UpdateSqlInjectionMatchSet`.
2. Create and update the `Rules` that you want to include in the
`WebACL`. For more information, see `CreateRule` and `UpdateRule`.
3. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateWebACL` request.
4. Submit a `CreateWebACL` request.
5. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateWebACL` request.
6. Submit an `UpdateWebACL` request to specify the `Rules` that you
want to include in the `WebACL`, to specify the default action, and to associate
the `WebACL` with a CloudFront distribution.
For more information about how to use the AWS WAF API, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
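
Sketching steps 4 and 6 (same assumptions as the other examples in this
module; `rule_id` is a placeholder for an existing rule, and each call uses
its own change token from `get_change_token/2`):

    {:ok, %{"WebACL" => %{"WebACLId" => web_acl_id}}, _http} =
      create_web_acl(client, %{
        "ChangeToken" => token,
        "Name" => "MyWebACL",
        "MetricName" => "MyWebACL",
        "DefaultAction" => %{"Type" => "ALLOW"}
      })

    update_web_acl(client, %{
      "ChangeToken" => fresh_token,
      "WebACLId" => web_acl_id,
      "Updates" => [
        %{"Action" => "INSERT",
          "ActivatedRule" => %{
            "Priority" => 1,
            "RuleId" => rule_id,
            "Action" => %{"Type" => "BLOCK"}
          }}
      ]
    })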
"""
def create_web_acl(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateWebACL", input, options)
end
@doc """
Creates an AWS CloudFormation WAFV2 template for the specified web ACL in the
specified Amazon S3 bucket.
Then, in CloudFormation, you create a stack from the template, to create the web
ACL and its resources in AWS WAFV2. Use this to migrate your AWS WAF Classic web
ACL to the latest version of AWS WAF.
This is part of a larger migration procedure for web ACLs from AWS WAF Classic
to the latest version of AWS WAF. For the full procedure, including caveats and
manual steps to complete the migration and switch over to the new web ACL, see
[Migrating your AWS WAF Classic resources to AWS WAF](https://docs.aws.amazon.com/waf/latest/developerguide/waf-migrating-from-classic.html)
in the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
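
A hedged sketch of a single call (a configured `%AWS.Client{}` is assumed,
`web_acl_id` and the bucket name are placeholders, and the field names follow
the AWS WAF Classic API reference):

    create_web_acl_migration_stack(client, %{
      "WebACLId" => web_acl_id,
      "S3BucketName" => "my-waf-migration-bucket",
      # Skip entities that can't be migrated instead of failing the migration.
      "IgnoreUnsupportedType" => true
    })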
"""
def create_web_acl_migration_stack(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateWebACLMigrationStack", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Creates an `XssMatchSet`, which you use to allow, block, or count requests that
contain cross-site scripting attacks in the specified part of web requests. AWS
WAF searches for character sequences that are likely to be malicious strings.
To create and configure an `XssMatchSet`, perform the following steps:
1. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateXssMatchSet` request.
2. Submit a `CreateXssMatchSet` request.
3. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateXssMatchSet` request.
4. Submit an `UpdateXssMatchSet` request to specify the parts of web
requests in which you want to allow, block, or count cross-site scripting
attacks.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_xss_match_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateXssMatchSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Permanently deletes a `ByteMatchSet`. You can't delete a `ByteMatchSet` if it's
still used in any `Rules` or if it still includes any `ByteMatchTuple` objects
(any filters).
If you just want to remove a `ByteMatchSet` from a `Rule`, use `UpdateRule`.
To permanently delete a `ByteMatchSet`, perform the following steps:
1. Update the `ByteMatchSet` to remove filters, if any. For more
information, see `UpdateByteMatchSet`.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteByteMatchSet` request.
3. Submit a `DeleteByteMatchSet` request.
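
After the set has been emptied (step 1), the last two steps might look like
this sketch (same assumptions as the other examples in this module;
`byte_match_set_id` is a placeholder):

    {:ok, %{"ChangeToken" => token}, _http} = get_change_token(client, %{})

    delete_byte_match_set(client, %{
      "ChangeToken" => token,
      "ByteMatchSetId" => byte_match_set_id
    })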
"""
def delete_byte_match_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteByteMatchSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Permanently deletes a `GeoMatchSet`. You can't delete a `GeoMatchSet` if it's
still used in any `Rules` or if it still includes any countries.
If you just want to remove a `GeoMatchSet` from a `Rule`, use `UpdateRule`.
To permanently delete a `GeoMatchSet` from AWS WAF, perform the following steps:
1. Update the `GeoMatchSet` to remove any countries. For more
information, see `UpdateGeoMatchSet`.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteGeoMatchSet` request.
3. Submit a `DeleteGeoMatchSet` request.
"""
def delete_geo_match_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteGeoMatchSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Permanently deletes an `IPSet`. You can't delete an `IPSet` if it's still used
in any `Rules` or if it still includes any IP addresses.
If you just want to remove an `IPSet` from a `Rule`, use `UpdateRule`.
To permanently delete an `IPSet` from AWS WAF, perform the following steps:
1. Update the `IPSet` to remove IP address ranges, if any. For more
information, see `UpdateIPSet`.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteIPSet` request.
3. Submit a `DeleteIPSet` request.
"""
def delete_ip_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteIPSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Permanently deletes the `LoggingConfiguration` from the specified web ACL.
"""
def delete_logging_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteLoggingConfiguration", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Permanently deletes an IAM policy from the specified RuleGroup.
The user making the request must be the owner of the RuleGroup.
"""
def delete_permission_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeletePermissionPolicy", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Permanently deletes a `RateBasedRule`. You can't delete a rule if it's still
used in any `WebACL` objects or if it still includes any predicates, such as
`ByteMatchSet` objects.
If you just want to remove a rule from a `WebACL`, use `UpdateWebACL`.
To permanently delete a `RateBasedRule` from AWS WAF, perform the following
steps:
1. Update the `RateBasedRule` to remove predicates, if any. For more
information, see `UpdateRateBasedRule`.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteRateBasedRule` request.
3. Submit a `DeleteRateBasedRule` request.
"""
def delete_rate_based_rule(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteRateBasedRule", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Permanently deletes a `RegexMatchSet`. You can't delete a `RegexMatchSet` if
it's still used in any `Rules` or if it still includes any `RegexMatchTuples`
objects (any filters).
If you just want to remove a `RegexMatchSet` from a `Rule`, use `UpdateRule`.
To permanently delete a `RegexMatchSet`, perform the following steps:
1. Update the `RegexMatchSet` to remove filters, if any. For more
information, see `UpdateRegexMatchSet`.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteRegexMatchSet` request.
3. Submit a `DeleteRegexMatchSet` request.
"""
def delete_regex_match_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteRegexMatchSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Permanently deletes a `RegexPatternSet`. You can't delete a `RegexPatternSet` if
it's still used in any `RegexMatchSet` or if the `RegexPatternSet` is not empty.
"""
def delete_regex_pattern_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteRegexPatternSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Permanently deletes a `Rule`. You can't delete a `Rule` if it's still used in
any `WebACL` objects or if it still includes any predicates, such as
`ByteMatchSet` objects.
If you just want to remove a `Rule` from a `WebACL`, use `UpdateWebACL`.
To permanently delete a `Rule` from AWS WAF, perform the following steps:
1. Update the `Rule` to remove predicates, if any. For more
information, see `UpdateRule`.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteRule` request.
3. Submit a `DeleteRule` request.
"""
def delete_rule(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteRule", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Permanently deletes a `RuleGroup`. You can't delete a `RuleGroup` if it's still
used in any `WebACL` objects or if it still includes any rules.
If you just want to remove a `RuleGroup` from a `WebACL`, use `UpdateWebACL`.
To permanently delete a `RuleGroup` from AWS WAF, perform the following steps:
1. Update the `RuleGroup` to remove rules, if any. For more
information, see `UpdateRuleGroup`.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteRuleGroup` request.
3. Submit a `DeleteRuleGroup` request.
"""
def delete_rule_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteRuleGroup", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Permanently deletes a `SizeConstraintSet`. You can't delete a
`SizeConstraintSet` if it's still used in any `Rules` or if it still includes
any `SizeConstraint` objects (any filters).
If you just want to remove a `SizeConstraintSet` from a `Rule`, use
`UpdateRule`.
To permanently delete a `SizeConstraintSet`, perform the following steps:
1. Update the `SizeConstraintSet` to remove filters, if any. For
more information, see `UpdateSizeConstraintSet`.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteSizeConstraintSet` request.
3. Submit a `DeleteSizeConstraintSet` request.
"""
def delete_size_constraint_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteSizeConstraintSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Permanently deletes a `SqlInjectionMatchSet`. You can't delete a
`SqlInjectionMatchSet` if it's still used in any `Rules` or if it still contains
any `SqlInjectionMatchTuple` objects.
If you just want to remove a `SqlInjectionMatchSet` from a `Rule`, use
`UpdateRule`.
To permanently delete a `SqlInjectionMatchSet` from AWS WAF, perform the
following steps:
1. Update the `SqlInjectionMatchSet` to remove filters, if any. For
more information, see `UpdateSqlInjectionMatchSet`.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteSqlInjectionMatchSet` request.
3. Submit a `DeleteSqlInjectionMatchSet` request.
"""
def delete_sql_injection_match_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteSqlInjectionMatchSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Permanently deletes a `WebACL`. You can't delete a `WebACL` if it still contains
any `Rules`.
To delete a `WebACL`, perform the following steps:
1. Update the `WebACL` to remove `Rules`, if any. For more
information, see `UpdateWebACL`.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteWebACL` request.
3. Submit a `DeleteWebACL` request.
"""
def delete_web_acl(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteWebACL", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Permanently deletes an `XssMatchSet`. You can't delete an `XssMatchSet` if it's
still used in any `Rules` or if it still contains any `XssMatchTuple` objects.
If you just want to remove an `XssMatchSet` from a `Rule`, use `UpdateRule`.
To permanently delete an `XssMatchSet` from AWS WAF, perform the following
steps:
1. Update the `XssMatchSet` to remove filters, if any. For more
information, see `UpdateXssMatchSet`.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteXssMatchSet` request.
3. Submit a `DeleteXssMatchSet` request.
"""
def delete_xss_match_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteXssMatchSet", input, options)
end
@doc """
This is **AWS WAF Classic Regional** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Removes a web ACL from the specified resource, either an application load
balancer or Amazon API Gateway stage.
"""
def disassociate_web_acl(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisassociateWebACL", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns the `ByteMatchSet` specified by `ByteMatchSetId`.
"""
def get_byte_match_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetByteMatchSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
When you want to create, update, or delete AWS WAF objects, get a change token
and include the change token in the create, update, or delete request. Change
tokens ensure that your application doesn't submit conflicting requests to AWS
WAF.
Each create, update, or delete request must use a unique change token. If your
application submits a `GetChangeToken` request and then submits a second
`GetChangeToken` request before submitting a create, update, or delete request,
the second `GetChangeToken` request returns the same value as the first
`GetChangeToken` request.
When you use a change token in a create, update, or delete request, the status
of the change token changes to `PENDING`, which indicates that AWS WAF is
propagating the change to all AWS WAF servers. Use `GetChangeTokenStatus` to
determine the status of your change token.
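
For illustration, with a configured `%AWS.Client{}` (the `{:ok, result, _http}`
shape is an assumption about this library's request helpers):

    # GetChangeToken takes no parameters; pass an empty map.
    {:ok, %{"ChangeToken" => token}, _http} = get_change_token(client, %{})

    # Use the token in exactly one create, update, or delete request.
    {:ok, _result, _http} =
      create_ip_set(client, %{"ChangeToken" => token, "Name" => "Allowlist"})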
"""
def get_change_token(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetChangeToken", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns the status of a `ChangeToken` that you got by calling `GetChangeToken`.
`ChangeTokenStatus` is one of the following values:
* `PROVISIONED`: You requested the change token by calling
`GetChangeToken`, but you haven't used it yet in a call to create, update, or
delete an AWS WAF object.
* `PENDING`: AWS WAF is propagating the create, update, or delete
request to all AWS WAF servers.
* `INSYNC`: Propagation is complete.
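
The statuses above might be consumed like this sketch (assuming a configured
`%AWS.Client{}` and a `token` previously returned by `get_change_token/2`):

    {:ok, %{"ChangeTokenStatus" => status}, _http} =
      get_change_token_status(client, %{"ChangeToken" => token})

    case status do
      "INSYNC" -> :propagated
      "PENDING" -> :still_propagating
      "PROVISIONED" -> :not_yet_used
    end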
"""
def get_change_token_status(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetChangeTokenStatus", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns the `GeoMatchSet` that is specified by `GeoMatchSetId`.
"""
def get_geo_match_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetGeoMatchSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns the `IPSet` that is specified by `IPSetId`.
"""
def get_ip_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetIPSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns the `LoggingConfiguration` for the specified web ACL.
"""
def get_logging_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetLoggingConfiguration", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns the IAM policy attached to the RuleGroup.
"""
def get_permission_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetPermissionPolicy", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns the `RateBasedRule` that is specified by the `RuleId` that you included
in the `GetRateBasedRule` request.
"""
def get_rate_based_rule(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetRateBasedRule", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns an array of IP addresses currently being blocked by the `RateBasedRule`
that is specified by the `RuleId`. The maximum number of managed keys that will
be blocked is 10,000. If more than 10,000 addresses exceed the rate limit, the
10,000 addresses with the highest rates will be blocked.
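
A minimal sketch (assuming a configured `%AWS.Client{}`; `rule_id` is a
placeholder for the `RuleId` of an existing `RateBasedRule`):

    {:ok, %{"ManagedKeys" => blocked_addresses}, _http} =
      get_rate_based_rule_managed_keys(client, %{"RuleId" => rule_id})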
"""
def get_rate_based_rule_managed_keys(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetRateBasedRuleManagedKeys", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns the `RegexMatchSet` specified by `RegexMatchSetId`.
"""
def get_regex_match_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetRegexMatchSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns the `RegexPatternSet` specified by `RegexPatternSetId`.
"""
def get_regex_pattern_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetRegexPatternSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns the `Rule` that is specified by the `RuleId` that you included in the
`GetRule` request.
"""
def get_rule(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetRule", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns the `RuleGroup` that is specified by the `RuleGroupId` that you included
in the `GetRuleGroup` request.
To view the rules in a rule group, use `ListActivatedRulesInRuleGroup`.
"""
def get_rule_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetRuleGroup", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Gets detailed information about a specified number of requests--a sample--that
AWS WAF randomly selects from among the first 5,000 requests that your AWS
resource received during a time range that you choose. You can specify a sample
size of up to 500 requests, and you can specify any time range in the previous
three hours.
`GetSampledRequests` returns a time range, which is usually the time range that
you specified. However, if your resource (such as a CloudFront distribution)
received 5,000 requests before the specified time range elapsed,
`GetSampledRequests` returns an updated time range. This new time range
indicates the actual period during which AWS WAF selected the requests in the
sample.
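
An illustrative sketch that samples the last hour (assuming a configured
`%AWS.Client{}`, placeholder IDs, field names per the AWS WAF Classic API
reference, and Unix-epoch seconds for the time window, per the JSON protocol):

    now = DateTime.to_unix(DateTime.utc_now())

    {:ok, %{"SampledRequests" => sampled}, _http} =
      get_sampled_requests(client, %{
        "WebAclId" => web_acl_id,
        "RuleId" => rule_id,
        "MaxItems" => 100,
        "TimeWindow" => %{"StartTime" => now - 3600, "EndTime" => now}
      })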
"""
def get_sampled_requests(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetSampledRequests", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns the `SizeConstraintSet` specified by `SizeConstraintSetId`.
"""
def get_size_constraint_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetSizeConstraintSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns the `SqlInjectionMatchSet` that is specified by
`SqlInjectionMatchSetId`.
"""
def get_sql_injection_match_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetSqlInjectionMatchSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns the `WebACL` that is specified by `WebACLId`.
"""
def get_web_acl(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetWebACL", input, options)
end
@doc """
This is **AWS WAF Classic Regional** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns the web ACL for the specified resource, either an application load
balancer or Amazon API Gateway stage.
"""
def get_web_acl_for_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetWebACLForResource", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns the `XssMatchSet` that is specified by `XssMatchSetId`.
"""
def get_xss_match_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetXssMatchSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns an array of `ActivatedRule` objects.
"""
def list_activated_rules_in_rule_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListActivatedRulesInRuleGroup", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns an array of `ByteMatchSetSummary` objects.
"""
def list_byte_match_sets(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListByteMatchSets", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns an array of `GeoMatchSetSummary` objects in the response.
"""
def list_geo_match_sets(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListGeoMatchSets", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns an array of `IPSetSummary` objects in the response.
"""
def list_ip_sets(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListIPSets", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns an array of `LoggingConfiguration` objects.
"""
def list_logging_configurations(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListLoggingConfigurations", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns an array of `RuleSummary` objects.
"""
def list_rate_based_rules(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListRateBasedRules", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns an array of `RegexMatchSetSummary` objects.
"""
def list_regex_match_sets(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListRegexMatchSets", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns an array of `RegexPatternSetSummary` objects.
"""
def list_regex_pattern_sets(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListRegexPatternSets", input, options)
end
@doc """
This is **AWS WAF Classic Regional** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns an array of resources associated with the specified web ACL.
"""
def list_resources_for_web_acl(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListResourcesForWebACL", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns an array of `RuleGroup` objects.
"""
def list_rule_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListRuleGroups", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns an array of `RuleSummary` objects.
"""
def list_rules(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListRules", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns an array of `SizeConstraintSetSummary` objects.
"""
def list_size_constraint_sets(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListSizeConstraintSets", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns an array of `SqlInjectionMatchSet` objects.
"""
def list_sql_injection_match_sets(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListSqlInjectionMatchSets", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns an array of `RuleGroup` objects that you are subscribed to.
"""
def list_subscribed_rule_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListSubscribedRuleGroups", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Retrieves the tags associated with the specified AWS resource. Tags are
key:value pairs that you can use to categorize and manage your resources, for
purposes like billing. For example, you might set the tag key to "customer" and
the value to the customer name or ID. You can specify one or more tags to add to
each AWS resource, up to 50 tags for a resource.
Tagging is only available through the API, SDKs, and CLI. You can't manage or
view tags through the AWS WAF Classic console. You can tag the AWS resources
that you manage through AWS WAF Classic: web ACLs, rule groups, and rules.
"""
def list_tags_for_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsForResource", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns an array of `WebACLSummary` objects in the response.
"""
def list_web_acls(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListWebACLs", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Returns an array of `XssMatchSet` objects.
"""
def list_xss_match_sets(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListXssMatchSets", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Associates a `LoggingConfiguration` with a specified web ACL.
You can access information about all traffic that AWS WAF inspects using the
following steps:
1. Create an Amazon Kinesis Data Firehose.
Create the data firehose with a PUT source and in the region that you are
operating. However, if you are capturing logs for Amazon CloudFront, always
create the firehose in US East (N. Virginia).
Do not create the data firehose using a `Kinesis stream` as your source.
2. Associate that firehose to your web ACL using a
`PutLoggingConfiguration` request.
When you successfully enable logging using a `PutLoggingConfiguration` request,
AWS WAF will create a service linked role with the necessary permissions to
write logs to the Amazon Kinesis Data Firehose. For more information, see
[Logging Web ACL Traffic Information](https://docs.aws.amazon.com/waf/latest/developerguide/logging.html)
in the *AWS WAF Developer Guide*.
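A hedged example of the request input (ARNs are placeholders; the field names follow
the AWS WAF Classic `PutLoggingConfiguration` request shape):
    input = %{
      "LoggingConfiguration" => %{
        "ResourceArn" => "arn:aws:waf-regional:us-east-1:111122223333:webacl/example-web-acl-id",
        "LogDestinationConfigs" => [
          "arn:aws:firehose:us-east-1:111122223333:deliverystream/aws-waf-logs-example"
        ]
      }
    }
    put_logging_configuration(client, input)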
"""
def put_logging_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutLoggingConfiguration", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Attaches an IAM policy to the specified resource. The only supported use for
this action is to share a RuleGroup across accounts.
The `PutPermissionPolicy` is subject to the following restrictions:
* You can attach only one policy with each `PutPermissionPolicy`
request.
* The policy must include an `Effect`, `Action` and `Principal`.
* `Effect` must specify `Allow`.
* The `Action` in the policy must be `waf:UpdateWebACL`,
`waf-regional:UpdateWebACL`, `waf:GetRuleGroup` and `waf-regional:GetRuleGroup`.
Any extra or wildcard actions in the policy will be rejected.
* The policy cannot include a `Resource` parameter.
* The ARN in the request must be a valid WAF RuleGroup ARN and the
RuleGroup must exist in the same region.
* The user making the request must be the owner of the RuleGroup.
* Your policy must be composed using IAM Policy version 2012-10-17.
For more information, see [IAM Policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html).
An example of a valid policy parameter is shown in the Examples section below.
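The sketch below is illustrative only (account IDs, ARNs and the rule group ID are
placeholders; the field names follow the AWS WAF Classic request shape):
    policy = ~s({
      "Version": "2012-10-17",
      "Statement": [{
        "Effect": "Allow",
        "Principal": {"AWS": "arn:aws:iam::444455556666:root"},
        "Action": ["waf:UpdateWebACL", "waf-regional:UpdateWebACL",
                   "waf:GetRuleGroup", "waf-regional:GetRuleGroup"],
        "Resource": "arn:aws:waf-regional:us-east-1:111122223333:rulegroup/example-rule-group-id"
      }]
    })
    put_permission_policy(client, %{
      "ResourceArn" => "arn:aws:waf-regional:us-east-1:111122223333:rulegroup/example-rule-group-id",
      "Policy" => policy
    })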
"""
def put_permission_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutPermissionPolicy", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Associates tags with the specified AWS resource. Tags are key:value pairs that
you can use to categorize and manage your resources, for purposes like billing.
For example, you might set the tag key to "customer" and the value to the
customer name or ID. You can specify one or more tags to add to each AWS
resource, up to 50 tags for a resource.
Tagging is only available through the API, SDKs, and CLI. You can't manage or
view tags through the AWS WAF Classic console. You can use this action to tag
the AWS resources that you manage through AWS WAF Classic: web ACLs, rule
groups, and rules.
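A hedged example (the ARN and tag values are placeholders; the field names follow the
AWS WAF Classic `TagResource` request shape):
    tag_resource(client, %{
      "ResourceARN" => "arn:aws:waf-regional:us-east-1:111122223333:webacl/example-web-acl-id",
      "Tags" => [%{"Key" => "customer", "Value" => "example-customer"}]
    })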
"""
def tag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TagResource", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
"""
def untag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UntagResource", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Inserts or deletes `ByteMatchTuple` objects (filters) in a `ByteMatchSet`. For
each `ByteMatchTuple` object, you specify the following values:
* Whether to insert or delete the object from the array. If you want
to change a `ByteMatchSetUpdate` object, you delete the existing object and add
a new one.
* The part of a web request that you want AWS WAF to inspect, such
as a query string or the value of the `User-Agent` header.
* The bytes (typically a string that corresponds with ASCII
characters) that you want AWS WAF to look for. For more information, including
how you specify the values for the AWS WAF API and the AWS CLI or SDKs, see
`TargetString` in the `ByteMatchTuple` data type.
* Where to look, such as at the beginning or the end of a query
string.
* Whether to perform any conversions on the request, such as
converting it to lowercase, before inspecting it for the specified string.
For example, you can add a `ByteMatchSetUpdate` object that matches web requests
in which `User-Agent` headers contain the string `BadBot`. You can then
configure AWS WAF to block those requests.
To create and configure a `ByteMatchSet`, perform the following steps:
1. Create a `ByteMatchSet.` For more information, see
`CreateByteMatchSet`.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateByteMatchSet` request.
3. Submit an `UpdateByteMatchSet` request to specify the part of the
request that you want AWS WAF to inspect (for example, the header or the URI)
and the value that you want AWS WAF to watch for.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
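A hedged sketch of step 3 (identifiers are placeholders; `change_token` comes from a
prior `GetChangeToken` call, and `TargetString` encoding requirements are described in
the `ByteMatchTuple` documentation):
    input = %{
      "ByteMatchSetId" => "example-byte-match-set-id",
      "ChangeToken" => change_token,
      "Updates" => [
        %{
          "Action" => "INSERT",
          "ByteMatchTuple" => %{
            "FieldToMatch" => %{"Type" => "HEADER", "Data" => "User-Agent"},
            "TargetString" => "BadBot",
            "TextTransformation" => "NONE",
            "PositionalConstraint" => "CONTAINS"
          }
        }
      ]
    }
    update_byte_match_set(client, input)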
"""
def update_byte_match_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateByteMatchSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Inserts or deletes `GeoMatchConstraint` objects in an `GeoMatchSet`. For each
`GeoMatchConstraint` object, you specify the following values:
* Whether to insert or delete the object from the array. If you want
to change an `GeoMatchConstraint` object, you delete the existing object and add
a new one.
* The `Type`. The only valid value for `Type` is `Country`.
* The `Value`, which is a two character code for the country to add
to the `GeoMatchConstraint` object. Valid codes are listed in
`GeoMatchConstraint$Value`.
To create and configure an `GeoMatchSet`, perform the following steps:
1. Submit a `CreateGeoMatchSet` request.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateGeoMatchSet` request.
3. Submit an `UpdateGeoMatchSet` request to specify the country that
you want AWS WAF to watch for.
When you update an `GeoMatchSet`, you specify the country that you want to add
and/or the country that you want to delete. If you want to change a country, you
delete the existing country and add the new one.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_geo_match_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateGeoMatchSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Inserts or deletes `IPSetDescriptor` objects in an `IPSet`. For each
`IPSetDescriptor` object, you specify the following values:
* Whether to insert or delete the object from the array. If you want
to change an `IPSetDescriptor` object, you delete the existing object and add a
new one.
* The IP address version, `IPv4` or `IPv6`.
* The IP address in CIDR notation, for example, `192.0.2.0/24` (for
the range of IP addresses from `192.0.2.0` to `192.0.2.255`) or `192.0.2.44/32`
(for the individual IP address `192.0.2.44`).
AWS WAF supports IPv4 address ranges: /8 and any range between /16 through /32.
AWS WAF supports IPv6 address ranges: /24, /32, /48, /56, /64, and /128. For
more information about CIDR notation, see the Wikipedia entry [Classless Inter-Domain
Routing](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing).
IPv6 addresses can be represented using any of the following formats:
* fc00:db20:35b:7399::5/128
* fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/128
* fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/128
You use an `IPSet` to specify which web requests you want to allow or block
based on the IP addresses that the requests originated from. For example, if
you're receiving a lot of requests from one or a small number of IP addresses
and you want to block the requests, you can create an `IPSet` that specifies
those IP addresses, and then configure AWS WAF to block the requests.
To create and configure an `IPSet`, perform the following steps:
1. Submit a `CreateIPSet` request.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateIPSet` request.
3. Submit an `UpdateIPSet` request to specify the IP addresses that
you want AWS WAF to watch for.
When you update an `IPSet`, you specify the IP addresses that you want to add
and/or the IP addresses that you want to delete. If you want to change an IP
address, you delete the existing IP address and add the new one.
You can insert a maximum of 1000 addresses in a single request.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
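A hedged sketch (identifiers are placeholders; `change_token` comes from a prior
`GetChangeToken` call):
    input = %{
      "IPSetId" => "example-ip-set-id",
      "ChangeToken" => change_token,
      "Updates" => [
        %{
          "Action" => "INSERT",
          "IPSetDescriptor" => %{"Type" => "IPV4", "Value" => "192.0.2.0/24"}
        }
      ]
    }
    update_ip_set(client, input)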
"""
def update_ip_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateIPSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Inserts or deletes `Predicate` objects in a rule and updates the `RateLimit` in
the rule.
Each `Predicate` object identifies a predicate, such as a `ByteMatchSet` or an
`IPSet`, that specifies the web requests that you want to block or count. The
`RateLimit` specifies the number of requests every five minutes that triggers
the rule.
If you add more than one predicate to a `RateBasedRule`, a request must match
all the predicates and exceed the `RateLimit` to be counted or blocked. For
example, suppose you add the following to a `RateBasedRule`:
* An `IPSet` that matches the IP address `192.0.2.44/32`
* A `ByteMatchSet` that matches `BadBot` in the `User-Agent` header
Further, you specify a `RateLimit` of 1,000.
You then add the `RateBasedRule` to a `WebACL` and specify that you want to
block requests that satisfy the rule. For a request to be blocked, it must come
from the IP address 192.0.2.44 *and* the `User-Agent` header in the request must
contain the value `BadBot`. Further, requests that match these two conditions
must be received at a rate of more than 1,000 every five minutes. If the rate
drops below this limit, AWS WAF no longer blocks the requests.
As a second example, suppose you want to limit requests to a particular page on
your site. To do this, you could add the following to a `RateBasedRule`:
* A `ByteMatchSet` with `FieldToMatch` of `URI`
* A `PositionalConstraint` of `STARTS_WITH`
* A `TargetString` of `login`
Further, you specify a `RateLimit` of 1,000.
By adding this `RateBasedRule` to a `WebACL`, you could limit requests to your
login page without affecting the rest of your site.
"""
def update_rate_based_rule(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateRateBasedRule", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Inserts or deletes `RegexMatchTuple` objects (filters) in a `RegexMatchSet`. For
each `RegexMatchSetUpdate` object, you specify the following values:
* Whether to insert or delete the object from the array. If you want
to change a `RegexMatchSetUpdate` object, you delete the existing object and add
a new one.
* The part of a web request that you want AWS WAF to inspect,
such as a query string or the value of the `User-Agent` header.
* The identifier of the pattern (a regular expression) that you want
AWS WAF to look for. For more information, see `RegexPatternSet`.
* Whether to perform any conversions on the request, such as
converting it to lowercase, before inspecting it for the specified string.
For example, you can create a `RegexPatternSet` that matches any requests with
`User-Agent` headers that contain the string `B[a@]dB[o0]t`. You can then configure AWS WAF to reject those requests.
To create and configure a `RegexMatchSet`, perform the following steps:
1. Create a `RegexMatchSet.` For more information, see
`CreateRegexMatchSet`.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateRegexMatchSet` request.
3. Submit an `UpdateRegexMatchSet` request to specify the part of
the request that you want AWS WAF to inspect (for example, the header or the
URI) and the identifier of the `RegexPatternSet` that contains the regular
expression patterns you want AWS WAF to watch for.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_regex_match_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateRegexMatchSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Inserts or deletes `RegexPatternString` objects in a `RegexPatternSet`. For each
`RegexPatternString` object, you specify the following values:
* Whether to insert or delete the `RegexPatternString`.
* The regular expression pattern that you want to insert or delete.
For more information, see `RegexPatternSet`.
For example, you can create a `RegexPatternString` such as `B[a@]dB[o0]t`. AWS WAF will match this `RegexPatternString` to:
* BadBot
* BadB0t
* B@dBot
* B@dB0t
To create and configure a `RegexPatternSet`, perform the following steps:
1. Create a `RegexPatternSet.` For more information, see
`CreateRegexPatternSet`.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateRegexPatternSet` request.
3. Submit an `UpdateRegexPatternSet` request to specify the regular
expression pattern that you want AWS WAF to watch for.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
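A hedged sketch (the pattern set ID is a placeholder; `change_token` comes from a
prior `GetChangeToken` call):
    input = %{
      "RegexPatternSetId" => "example-regex-pattern-set-id",
      "ChangeToken" => change_token,
      "Updates" => [
        %{"Action" => "INSERT", "RegexPatternString" => "B[a@]dB[o0]t"}
      ]
    }
    update_regex_pattern_set(client, input)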
"""
def update_regex_pattern_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateRegexPatternSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Inserts or deletes `Predicate` objects in a `Rule`. Each `Predicate` object
identifies a predicate, such as a `ByteMatchSet` or an `IPSet`, that specifies
the web requests that you want to allow, block, or count. If you add more than
one predicate to a `Rule`, a request must match all of the specifications to be
allowed, blocked, or counted. For example, suppose that you add the following to
a `Rule`:
* A `ByteMatchSet` that matches the value `BadBot` in the
`User-Agent` header
* An `IPSet` that matches the IP address `192.0.2.44`
You then add the `Rule` to a `WebACL` and specify that you want to block
requests that satisfy the `Rule`. For a request to be blocked, the `User-Agent`
header in the request must contain the value `BadBot` *and* the request must
originate from the IP address 192.0.2.44.
To create and configure a `Rule`, perform the following steps:
1. Create and update the predicates that you want to include in the
`Rule`.
2. Create the `Rule`. See `CreateRule`.
3. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateRule` request.
4. Submit an `UpdateRule` request to add predicates to the `Rule`.
5. Create and update a `WebACL` that contains the `Rule`. See
`CreateWebACL`.
If you want to replace one `ByteMatchSet` or `IPSet` with another, you delete
the existing one and add the new one.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_rule(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateRule", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Inserts or deletes `ActivatedRule` objects in a `RuleGroup`.
You can only insert `REGULAR` rules into a rule group.
You can have a maximum of ten rules per rule group.
To create and configure a `RuleGroup`, perform the following steps:
1. Create and update the `Rules` that you want to include in the
`RuleGroup`. See `CreateRule`.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateRuleGroup` request.
3. Submit an `UpdateRuleGroup` request to add `Rules` to the
`RuleGroup`.
4. Create and update a `WebACL` that contains the `RuleGroup`. See
`CreateWebACL`.
If you want to replace one `Rule` with another, you delete the existing one and
add the new one.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_rule_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateRuleGroup", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Inserts or deletes `SizeConstraint` objects (filters) in a `SizeConstraintSet`.
For each `SizeConstraint` object, you specify the following values:
* Whether to insert or delete the object from the array. If you want
to change a `SizeConstraintSetUpdate` object, you delete the existing object and
add a new one.
* The part of a web request that you want AWS WAF to evaluate, such
as the length of a query string or the length of the `User-Agent` header.
* Whether to perform any transformations on the request, such as
converting it to lowercase, before checking its length. Note that
transformations of the request body are not supported because the AWS resource
forwards only the first `8192` bytes of your request to AWS WAF.
You can only specify a single type of TextTransformation.
* A `ComparisonOperator` used for evaluating the selected part of
the request against the specified `Size`, such as equals, greater than, less
than, and so on.
* The length, in bytes, that you want AWS WAF to watch for in
selected part of the request. The length is computed after applying the
transformation.
For example, you can add a `SizeConstraintSetUpdate` object that matches web
requests in which the length of the `User-Agent` header is greater than 100
bytes. You can then configure AWS WAF to block those requests.
To create and configure a `SizeConstraintSet`, perform the following steps:
1. Create a `SizeConstraintSet.` For more information, see
`CreateSizeConstraintSet`.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateSizeConstraintSet` request.
3. Submit an `UpdateSizeConstraintSet` request to specify the part
of the request that you want AWS WAF to inspect (for example, the header or the
URI) and the value that you want AWS WAF to watch for.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_size_constraint_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateSizeConstraintSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Inserts or deletes `SqlInjectionMatchTuple` objects (filters) in a
`SqlInjectionMatchSet`. For each `SqlInjectionMatchTuple` object, you specify
the following values:
* `Action`: Whether to insert the object into or delete the object
from the array. To change a `SqlInjectionMatchTuple`, you delete the existing
object and add a new one.
* `FieldToMatch`: The part of web requests that you want AWS WAF to
inspect and, if you want AWS WAF to inspect a header or custom query parameter,
the name of the header or parameter.
* `TextTransformation`: Which text transformation, if any, to
perform on the web request before inspecting the request for snippets of
malicious SQL code.
You can only specify a single type of TextTransformation.
You use `SqlInjectionMatchSet` objects to specify which CloudFront requests that
you want to allow, block, or count. For example, if you're receiving requests
that contain snippets of SQL code in the query string and you want to block the
requests, you can create a `SqlInjectionMatchSet` with the applicable settings,
and then configure AWS WAF to block the requests.
To create and configure a `SqlInjectionMatchSet`, perform the following steps:
1. Submit a `CreateSqlInjectionMatchSet` request.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateIPSet` request.
3. Submit an `UpdateSqlInjectionMatchSet` request to specify the
parts of web requests that you want AWS WAF to inspect for snippets of SQL code.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_sql_injection_match_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateSqlInjectionMatchSet", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Inserts or deletes `ActivatedRule` objects in a `WebACL`. Each `Rule` identifies
web requests that you want to allow, block, or count. When you update a
`WebACL`, you specify the following values:
* A default action for the `WebACL`, either `ALLOW` or `BLOCK`. AWS
WAF performs the default action if a request doesn't match the criteria in any
of the `Rules` in a `WebACL`.
* The `Rules` that you want to add or delete. If you want to replace
one `Rule` with another, you delete the existing `Rule` and add the new one.
* For each `Rule`, whether you want AWS WAF to allow requests, block
requests, or count requests that match the conditions in the `Rule`.
* The order in which you want AWS WAF to evaluate the `Rules` in a
`WebACL`. If you add more than one `Rule` to a `WebACL`, AWS WAF evaluates each
request against the `Rules` in order based on the value of `Priority`. (The
`Rule` that has the lowest value for `Priority` is evaluated first.) When a web
request matches all the predicates (such as `ByteMatchSets` and `IPSets`) in a
`Rule`, AWS WAF immediately takes the corresponding action, allow or block, and
doesn't evaluate the request against the remaining `Rules` in the `WebACL`, if
any.
To create and configure a `WebACL`, perform the following steps:
1. Create and update the predicates that you want to include in
`Rules`. For more information, see `CreateByteMatchSet`, `UpdateByteMatchSet`,
`CreateIPSet`, `UpdateIPSet`, `CreateSqlInjectionMatchSet`, and
`UpdateSqlInjectionMatchSet`.
2. Create and update the `Rules` that you want to include in the
`WebACL`. For more information, see `CreateRule` and `UpdateRule`.
3. Create a `WebACL`. See `CreateWebACL`.
4. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateWebACL` request.
5. Submit an `UpdateWebACL` request to specify the `Rules` that you
want to include in the `WebACL`, to specify the default action, and to associate
the `WebACL` with a CloudFront distribution.
The `ActivatedRule` can be a rule group. If you specify a rule group as your
`ActivatedRule`, you can exclude specific rules from that rule group.
If you already have a rule group associated with a web ACL and want to submit an
`UpdateWebACL` request to exclude certain rules from that rule group, you must
first remove the rule group from the web ACL, then re-insert it, specifying
the excluded rules. For details, see `ActivatedRule$ExcludedRules`.
Be aware that if you try to add a RATE_BASED rule to a web ACL without setting
the rule type when first creating the rule, the `UpdateWebACL` request will fail
because the request tries to add a REGULAR rule (the default rule type) with the
specified ID, which does not exist.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
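A hedged sketch of step 5 (identifiers are placeholders; `change_token` comes from a
prior `GetChangeToken` call):
    input = %{
      "WebACLId" => "example-web-acl-id",
      "ChangeToken" => change_token,
      "DefaultAction" => %{"Type" => "ALLOW"},
      "Updates" => [
        %{
          "Action" => "INSERT",
          "ActivatedRule" => %{
            "Priority" => 1,
            "RuleId" => "example-rule-id",
            "Action" => %{"Type" => "BLOCK"}
          }
        }
      ]
    }
    update_web_acl(client, input)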
"""
def update_web_acl(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateWebACL", input, options)
end
@doc """
This is **AWS WAF Classic** documentation.
For more information, see [AWS WAF Classic](https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
in the developer guide.
**For the latest version of AWS WAF**, use the AWS WAFV2 API and see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
With the latest version, AWS WAF has a single set of endpoints for regional and
global use.
Inserts or deletes `XssMatchTuple` objects (filters) in an `XssMatchSet`. For
each `XssMatchTuple` object, you specify the following values:
* `Action`: Whether to insert the object into or delete the object
from the array. To change an `XssMatchTuple`, you delete the existing object and
add a new one.
* `FieldToMatch`: The part of web requests that you want AWS WAF to
inspect and, if you want AWS WAF to inspect a header or custom query parameter,
the name of the header or parameter.
* `TextTransformation`: Which text transformation, if any, to
perform on the web request before inspecting the request for cross-site
scripting attacks.
You can only specify a single type of TextTransformation.
You use `XssMatchSet` objects to specify which CloudFront requests that you want
to allow, block, or count. For example, if you're receiving requests that
contain cross-site scripting attacks in the request body and you want to block
the requests, you can create an `XssMatchSet` with the applicable settings, and
then configure AWS WAF to block the requests.
To create and configure an `XssMatchSet`, perform the following steps:
1. Submit a `CreateXssMatchSet` request.
2. Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateIPSet` request.
3. Submit an `UpdateXssMatchSet` request to specify the parts of web
requests that you want AWS WAF to inspect for cross-site scripting attacks.
For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_xss_match_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateXssMatchSet", input, options)
end
end
|
lib/aws/generated/waf_regional.ex
| 0.827724 | 0.615492 |
waf_regional.ex
|
starcoder
|
defmodule Aino.Middleware do
@moduledoc """
Middleware functions for processing a request into a response
Included in Aino are common functions that deal with requests, such
as parsing the POST body for form data or parsing query/path params.
"""
require Logger
alias Aino.Token
@doc """
Common middleware that process low level request data
Processes the request:
- method
- path
- headers
- query parameters
- parses response body
- parses cookies
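A hand-rolled sketch of applying these middleware in order (an Aino handler normally
does this for you; the token shape is illustrative):
    token = %{request: request}
    Enum.reduce(Aino.Middleware.common(), token, fn middleware, token ->
      middleware.(token)
    end)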
"""
def common() do
[
&method/1,
&path/1,
&headers/1,
&query_params/1,
&request_body/1,
&adjust_method/1,
&cookies/1
]
end
@doc """
Processes request headers
Downcases all of the headers and stores in the key `:headers`
"""
def headers(%{request: request} = token) do
headers =
Enum.map(request.headers, fn {header, value} ->
{String.downcase(header), value}
end)
Map.put(token, :headers, headers)
end
@doc """
Processes the `Cookie` request header
Defaults to an empty map if no `Cookie` header is present.
Stores cookies as a map in the key `:cookies`
"""
def cookies(token) do
case Token.request_header(token, "cookie") do
[cookies] ->
cookies =
cookies
|> String.split(";")
|> Enum.map(fn cookie ->
[variable | cookie] =
cookie
|> String.split("=")
|> Enum.map(&String.trim/1)
{variable, Enum.join(cookie, "=")}
end)
|> Enum.into(%{})
Map.put(token, :cookies, cookies)
[] ->
Map.put(token, :cookies, %{})
end
end
@doc """
Stores the request method on the token
Downcases and converts to an atom on the key `:method`
iex> request = %Aino.Request{method: :GET}
iex> token = %{request: request}
iex> token = Middleware.method(token)
iex> token.method
:get
"""
def method(%{request: request} = token) do
method =
request.method
|> to_string()
|> String.downcase()
|> String.to_atom()
Map.put(token, :method, method)
end
@doc """
Stores the request path on the token on the key `:path`
iex> request = %Aino.Request{path: ["orders", "10"]}
iex> token = %{request: request}
iex> token = Middleware.path(token)
iex> token.path
["orders", "10"]
"""
def path(%{request: request} = token) do
Map.put(token, :path, request.path)
end
@doc """
Stores query parameters on the token
Converts map and stores on the key `:query_params`
iex> request = %Aino.Request{args: [{"key", "value"}]}
iex> token = %{request: request}
iex> token = Middleware.query_params(token)
iex> token.query_params
%{"key" => "value"}
"""
def query_params(%{request: request} = token) do
params = Enum.into(request.args, %{})
Map.put(token, :query_params, params)
end
@doc """
Processes the request body
Only if the request should have a body (e.g. POST requests)
Handles the following content types:
- `application/x-www-form-urlencoded`
- `application/json`
"""
def request_body(token) do
case token.method do
:post ->
[content_type | _] = Token.request_header(token, "content-type")
case content_type do
"application/x-www-form-urlencoded" ->
parse_form_urlencoded(token)
"application/json" ->
parse_json(token)
end
_ ->
token
end
end
defp parse_form_urlencoded(token) do
parsed_body =
token.request.body
|> String.split("&")
|> Enum.map(fn token ->
case String.split(token, "=") do
[token] -> {token, true}
[name, value] -> {name, URI.decode_www_form(value)}
end
end)
|> Enum.into(%{})
Map.put(token, :parsed_body, parsed_body)
end
defp parse_json(token) do
case Jason.decode(token.request.body) do
{:ok, json} ->
Map.put(token, :parsed_body, json)
# Jason.decode/1 returns {:error, %Jason.DecodeError{}} on failure, never a bare :error
{:error, _reason} ->
token
end
end
@doc """
Adjust the request's method based on a special post body parameter
Since browsers cannot perform DELETE/PUT/PATCH requests, allow overriding the method
based on the `_method` parameter.
POST body data *must* be parsed before being able to adjust the method.
iex> token = %{method: :post, parsed_body: %{"_method" => "delete"}}
iex> token = Middleware.adjust_method(token)
iex> token.method
:delete
iex> token = %{method: :post, parsed_body: %{"_method" => "patch"}}
iex> token = Middleware.adjust_method(token)
iex> token.method
:patch
iex> token = %{method: :post, parsed_body: %{"_method" => "put"}}
iex> token = Middleware.adjust_method(token)
iex> token.method
:put
Ignored adjustments
iex> token = %{method: :post, parsed_body: %{"_method" => "new"}}
iex> token = Middleware.adjust_method(token)
iex> token.method
:post
iex> token = %{method: :get}
iex> token = Middleware.adjust_method(token)
iex> token.method
:get
"""
def adjust_method(%{method: :post} = token) do
case token.parsed_body["_method"] do
"delete" ->
Map.put(token, :method, :delete)
"patch" ->
Map.put(token, :method, :patch)
"put" ->
Map.put(token, :method, :put)
_ ->
token
end
end
def adjust_method(token), do: token
@doc """
Merge params into a single map
Merges in the following order:
- Path params
- Query params
- POST body
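For example (values are illustrative):
iex> token = %{path_params: %{id: "1"}, query_params: %{"page" => "2"}, parsed_body: %{"name" => "aino"}}
iex> token = Middleware.params(token)
iex> token.params
%{"id" => "1", "name" => "aino", "page" => "2"}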
"""
def params(token) do
param_providers = [
token[:path_params],
token[:query_params],
token[:parsed_body]
]
params =
Enum.reduce(param_providers, %{}, fn provider, params ->
case is_map(provider) do
true ->
provider = stringify_keys(provider)
Map.merge(params, provider)
false ->
params
end
end)
Map.put(token, :params, params)
end
defp stringify_keys(map) do
Enum.into(map, %{}, fn {key, value} ->
{to_string(key), value}
end)
end
@doc """
Serve static assets
Loads static files out the `priv/static` folder for your OTP app. Looks for
the path to begin with `/assets` and everything afterwards is used as a file.
If the file exists, it is returned with a `200` status code.
Example: `/assets/js/app.js` will look for a file in `priv/static/js/app.js`
"""
def assets(token) do
case token.path do
["assets" | path] ->
path = Path.join(:code.priv_dir(token.otp_app), Enum.join(["static" | path], "/"))
case File.exists?(path) do
true ->
data = File.read!(path)
method = String.upcase(to_string(token.method))
path = "/" <> Enum.join(token.path, "/")
Logger.info("#{method} #{path}")
token
|> Map.put(:halt, true)
|> Token.response_status(200)
|> Token.response_header("Cache-Control", asset_cache_control(token))
|> Token.response_body(data)
false ->
token
|> Map.put(:halt, true)
|> Token.response_status(404)
|> Token.response_header("Content-Type", "text/plain")
|> Token.response_body("Not found")
end
_ ->
token
end
end
defp asset_cache_control(token) do
case token.environment do
"production" ->
"public, max-age=604800"
"development" ->
"no-cache"
end
end
def logging(token) do
method = String.upcase(to_string(token.method))
path = "/" <> Enum.join(token.path, "/")
case Map.keys(token.params) == [] do
true ->
Logger.info("#{method} #{path}")
false ->
Logger.info("#{method} #{path}\nParameters: #{inspect(token.params)}")
end
token
end
end
defmodule Aino.Middleware.Development do
@moduledoc """
Development only middleware
These should *not* be used in production.
"""
require Logger
@doc """
Recompiles the application
"""
def recompile(%{halt: true} = token), do: token
def recompile(token) do
IEx.Helpers.recompile()
token
end
@doc """
Debug log a key on the token
"""
def inspect(token, key) do
Logger.debug(inspect(token[key]))
token
end
end
|
lib/aino/middleware.ex
| 0.806319 | 0.483526 |
middleware.ex
|
starcoder
|
defmodule ExTwitter.Parser do
@moduledoc """
Provides parser logics for API results.
"""
alias ExTwitter.Model
@doc """
Parse tweet record from the API response JSON.
"""
@spec parse_tweet(map | nil) :: Model.Tweet.t() | nil
def parse_tweet(nil), do: nil
def parse_tweet(object) do
tweet = struct(Model.Tweet, object)
user = parse_user(tweet.user)
coordinates = parse_coordinates(tweet.coordinates)
place = parse_place(tweet.place)
entities = parse_entities(tweet.entities)
ex_entities = parse_extended_entities(tweet.extended_entities)
rules = parse_rules(tweet.matching_rules)
quoted_status = parse_tweet(tweet.quoted_status)
retweeted_status = parse_tweet(tweet.retweeted_status)
%{
tweet | user: user, coordinates: coordinates, place: place,
entities: entities, extended_entities: ex_entities,
matching_rules: rules, quoted_status: quoted_status,
retweeted_status: retweeted_status, raw_data: object
}
end
@doc """
Parse direct message record from the API response JSON.
"""
def parse_direct_message(object) do
direct_message = struct(Model.DirectMessage, object)
recipient = parse_user(direct_message.recipient)
sender = parse_user(direct_message.sender)
%{direct_message | recipient: recipient, sender: sender}
end
@spec parse_upload(map) :: Model.Upload.t()
def parse_upload(object) do
Model.Upload |> struct(object) |> Map.put(:raw_data, object)
end
@doc """
Parse user record from the API response JSON.
"""
@spec parse_user(map) :: Model.User.t()
def parse_user(object) do
user = struct(Model.User, object)
derived = if user.derived, do: parse_profile_geo(user.derived.locations), else: nil
%{user | derived: derived, raw_data: object}
end
@doc """
Parse profile geo record from the API response JSON.
"""
@spec parse_profile_geo(map | [map] | nil) :: Model.ProfileGeo.t() | [Model.ProfileGeo.t()] | nil
def parse_profile_geo(nil), do: nil
def parse_profile_geo(objects) when is_list(objects) do
Enum.map(objects, &parse_profile_geo/1)
end
def parse_profile_geo(object) do
profile_geo = struct(Model.ProfileGeo, object)
geo = parse_geo(profile_geo.geo)
%{profile_geo | geo: geo, raw_data: object}
end
@doc """
Parse trend record from the API response JSON.
"""
@spec parse_trend(map) :: Model.Trend.t()
def parse_trend(object) do
trend = struct(Model.Trend, object)
%{trend | query: (trend.query |> URI.decode), raw_data: object}
end
@doc """
Parse list record from the API response JSON.
"""
@spec parse_list(map) :: Model.List.t()
def parse_list(object) do
list = struct(Model.List, object)
user = parse_user(list.user)
%{list | user: user, raw_data: object}
end
@doc """
Parse place record from the API response JSON.
"""
@spec parse_place(map | nil) :: Model.Place.t() | nil
def parse_place(nil), do: nil
def parse_place(object) do
place = struct(Model.Place, object)
bounding_box = parse_bounding_box(place.bounding_box)
con = Enum.map((place.contained_within || []), &parse_place/1)
%{place | bounding_box: bounding_box, contained_within: con, raw_data: object}
end
@doc """
Parse bounding box record from the API response JSON.
"""
@spec parse_bounding_box(map | nil) :: Model.BoundingBox.t() | nil
def parse_bounding_box(nil), do: nil
def parse_bounding_box(object) do
Model.BoundingBox |> struct(object) |> Map.put(:raw_data, object)
end
@doc """
Parse coordinates record from the API response JSON.
"""
@spec parse_coordinates(map | nil) :: Model.Coordinates.t() | nil
def parse_coordinates(nil), do: nil
def parse_coordinates(object) do
Model.Coordinates |> struct(object) |> Map.put(:raw_data, object)
end
@doc """
Parse geo record from the API response JSON.
"""
@spec parse_geo(map | nil) :: Model.Geo.t() | nil
def parse_geo(nil), do: nil
def parse_geo(object) do
Model.Geo |> struct(object) |> Map.put(:raw_data, object)
end
@spec parse_rules(map | [map] | nil) :: Model.Rule.t() | [Model.Rule.t()]
def parse_rules(nil), do: []
def parse_rules(objects) when is_list(objects) do
Enum.map(objects, &parse_rules/1)
end
def parse_rules(object) do
Model.Rule |> struct(object) |> Map.put(:raw_data, object)
end
@spec parse_entities(map) :: Model.Entities.t()
def parse_entities(object) do
%Model.Entities{
hashtags: parse_hashtags(object[:hashtags]),
media: parse_media(object[:media]),
symbols: parse_symbols(object[:symbols]),
urls: parse_urls(object[:urls]),
user_mentions: parse_user_mentions(object[:user_mentions]),
polls: parse_polls(object[:polls]),
raw_data: object
}
end
@spec parse_extended_entities(map | nil) :: Model.ExtendedEntities.t() | nil
def parse_extended_entities(nil), do: nil
def parse_extended_entities(object) do
%Model.ExtendedEntities{
hashtags: parse_hashtags(object[:hashtags]),
media: parse_media(object[:media]),
symbols: parse_symbols(object[:symbols]),
urls: parse_urls(object[:urls]),
user_mentions: parse_user_mentions(object[:user_mentions]),
polls: parse_polls(object[:polls]),
raw_data: object
}
end
@doc """
Parse hashtags record from the API response JSON.
"""
@spec parse_hashtags(map | [map] | nil) :: Model.Hashtag.t() | [Model.Hashtag.t()]
def parse_hashtags(nil), do: []
def parse_hashtags(objects) when is_list(objects) do
Enum.map(objects, &parse_hashtags/1)
end
def parse_hashtags(object) do
Model.Hashtag |> struct(object) |> Map.put(:raw_data, object)
end
@doc """
Parse media record from the API response JSON.
"""
@spec parse_media(map | [map] | nil) :: Model.Media.t() | [Model.Media.t()]
def parse_media(nil), do: []
def parse_media(objects) when is_list(objects) do
Enum.map(objects, &parse_media/1)
end
def parse_media(object) do
media = struct(Model.Media, object)
sizes = media.sizes |> Stream.map(fn {key, val} -> {key, parse_size(val)} end)
|> Enum.into(%{})
%{media | sizes: sizes, raw_data: object}
end
@doc """
Parse size record from the API response JSON.
"""
@spec parse_size(map) :: Model.Size.t()
def parse_size(object) do
Model.Size |> struct(object) |> Map.put(:raw_data, object)
end
@doc """
Parse symbols record from the API response JSON.
"""
@spec parse_symbols(map | [map] | nil) :: Model.Symbol.t() | [Model.Symbol.t()]
def parse_symbols(nil), do: []
def parse_symbols(objects) when is_list(objects) do
Enum.map(objects, &parse_symbols/1)
end
def parse_symbols(object) do
Model.Symbol |> struct(object) |> Map.put(:raw_data, object)
end
@doc """
Parse urls record from the API response JSON.
"""
@spec parse_urls(map | [map] | nil) :: Model.URL.t() | [Model.URL.t()]
def parse_urls(nil), do: []
def parse_urls(objects) when is_list(objects) do
Enum.map(objects, &parse_urls/1)
end
def parse_urls(object) do
Model.URL |> struct(object) |> Map.put(:raw_data, object)
end
@doc """
Parse user mentions record from the API response JSON.
"""
@spec parse_user_mentions(map | [map] | nil) :: Model.UserMention.t() | [Model.UserMention.t()]
def parse_user_mentions(nil), do: []
def parse_user_mentions(objects) when is_list(objects) do
Enum.map(objects, &parse_user_mentions/1)
end
def parse_user_mentions(object) do
Model.UserMention |> struct(object) |> Map.put(:raw_data, object)
end
@doc """
Parse polls record from the API response JSON.
"""
@spec parse_polls(map | [map] | nil) :: Model.Poll.t() | [Model.Poll.t()]
def parse_polls(nil), do: []
def parse_polls(objects) when is_list(objects) do
Enum.map(objects, &parse_polls/1)
end
def parse_polls(object) do
Model.Poll |> struct(object) |> Map.put(:raw_data, object)
end
@doc """
Parse a list of ids from the API response JSON.
"""
@spec parse_ids(map) :: [pos_integer]
def parse_ids(object) do
Enum.find(object, fn({key, _value}) -> key == :ids end) |> elem(1)
end
@doc """
Parse cursored ids.
"""
@spec parse_ids_with_cursor(map) :: Model.Cursor.t()
def parse_ids_with_cursor(object) do
ids = object |> ExTwitter.JSON.get(:ids)
cursor = struct(Model.Cursor, object)
%{cursor | items: ids, raw_data: object}
end
@doc """
Parse cursored users.
"""
@spec parse_users_with_cursor(map) :: Model.Cursor.t()
def parse_users_with_cursor(object) do
users = object |> ExTwitter.JSON.get(:users)
|> Enum.map(&ExTwitter.Parser.parse_user/1)
cursor = struct(Model.Cursor, object)
%{cursor | items: users, raw_data: object}
end
@doc """
Parse request parameters for the API.
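A minimal illustration (argument values are arbitrary):
iex> ExTwitter.Parser.parse_request_params(count: 10, screen_name: ["a", "b"])
[{"count", "10"}, {"screen_name", "a,b"}]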
"""
@spec parse_request_params(keyword) :: [{String.t(), String.t()}]
def parse_request_params(options) do
Enum.map(options, &stringify_params/1)
end
@doc """
Parse batch user/lookup request parameters for the API.
"""
@spec parse_batch_user_lookup_params(keyword) :: [{String.t(), String.t()}]
def parse_batch_user_lookup_params(options) do
Enum.map(options, &stringify_params/1)
end
@doc """
Parse request_token response
"""
@spec parse_request_token(map) :: Model.RequestToken.t()
def parse_request_token(object) do
Model.RequestToken |> struct(object) |> Map.put(:raw_data, object)
end
@doc """
Parse access_token response.
"""
@spec parse_access_token(map) :: Model.AccessToken.t()
def parse_access_token(object) do
Model.AccessToken |> struct(object) |> Map.put(:raw_data, object)
end
@doc """
Parse user profile banner from the API response JSON.
"""
@spec parse_profile_banner(map) :: Model.ProfileBanner.t()
def parse_profile_banner(object) do
Model.ProfileBanner |> struct(object) |> Map.put(:raw_data, object)
end
@doc """
Parse follower relationship from the API response JSON.
"""
@spec parse_relationships(map) :: [Model.Relationship.t()]
def parse_relationships(object) do
Enum.map(object, fn relation ->
Model.Relationship |> struct(relation) |> Map.put(:raw_data, relation)
end)
end
@spec stringify_params({any, [any] | any}) :: {String.t(), String.t()}
defp stringify_params({key, values}) when is_list(values) do
{to_string(key), Enum.join(values, ",")}
end
defp stringify_params({key, value}) do
{to_string(key), to_string(value)}
end
end
|
lib/extwitter/parser.ex
| 0.861392 | 0.485661 |
parser.ex
|
starcoder
|
defmodule Surface do
@moduledoc """
Surface is component based library for **Phoenix LiveView**.
Built on top of the new `Phoenix.LiveComponent` API, Surface provides
a more declarative way to express and use components in Phoenix.
A work-in-progress live demo with more details can be found at [surface-demo.msaraiva.io](http://surface-demo.msaraiva.io)
This module defines the `~H` sigil that should be used to translate Surface
code into Phoenix templates.
In order to have `~H` available for any Phoenix view, add the following import to your web
file in `lib/my_app_web.ex`:
# lib/my_app_web.ex
...
def view do
quote do
...
import Surface
end
end
## Defining components
To create a component you need to define a module and `use` one of the available component types:
* `Surface.Component` - A functional (stateless) component.
* `Surface.LiveComponent` - A live (stateless or stateful) component. A wrapper around `Phoenix.LiveComponent`.
* `Surface.LiveView` - A wrapper component around `Phoenix.LiveView`.
* `Surface.DataComponent` - A component that serves as a customizable data holder for the parent component.
* `Surface.MacroComponent` - A low-level component which is responsible for translating its own content at compile time.
### Example
# A functional stateless component
defmodule Button do
use Surface.Component
property click, :event
property kind, :string, default: "is-info"
def render(assigns) do
~H"\""
<button class="button {{ @kind }}" phx-click={{ @click }}>
{{ @inner_content.() }}
</button>
"\""
end
end
You can visit the documentation of each type of component for further explanation and examples.
## Directives
Directives are built-in attributes that can modify the translated code of a component
at compile time. Currently, the following directives are supported:
* `:for` - Iterates over a list (generator) and renders the content of the tag (or component)
for each item in the list.
* `:if` - Conditionally render a tag (or component). The code will be rendered if the expression
is evaluated to a truthy value.
* `:bindings` - Defines the name of the variables (bindings) in the current scope that represent
the values passed internally by the component when calling the `@inner_content` function.
### Example
<div>
<div class="header" :if={{ @showHeader }}>
The Header
</div>
<ul>
<li :for={{ item <- @items }}>
{{ item }}
</li>
</ul>
</div>
"""
@doc """
Translates Surface code into Phoenix templates.
"""
defmacro sigil_H({:<<>>, _, [string]}, _) do
line_offset = __CALLER__.line + 1
string
|> Surface.Translator.run(line_offset, __CALLER__, __CALLER__.file)
|> EEx.compile_string(engine: Phoenix.LiveView.Engine, line: line_offset)
end
@doc false
def component(module, assigns) do
module.render(assigns)
end
@doc false
def component(module, assigns, []) do
module.render(assigns)
end
@doc false
def put_default_props(props, mod) do
Enum.reduce(mod.__props(), props, fn %{name: name, default: default}, acc ->
Map.put_new(acc, name, default)
end)
end
@doc false
def css_class(list) when is_list(list) do
Enum.reduce(list, [], fn item, classes ->
case item do
{class, true} ->
[to_kebab_case(class) | classes]
class when is_binary(class) or is_atom(class) ->
[to_kebab_case(class) | classes]
_ ->
classes
end
end) |> Enum.reverse() |> Enum.join(" ")
end
@doc false
def css_class(value) when is_binary(value) do
value
end
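# Illustrative: css_class(button: true, is_info: true, is_danger: false) returns
# "button is-info" - entries whose value is exactly `true` (or bare class names)
# are kept, and names are converted to kebab-case.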
# TODO: Find a better way to do this
defp to_kebab_case(value) do
value
|> to_string()
|> Macro.underscore()
|> String.replace("_", "-")
end
end
|
lib/surface.ex
| 0.816077 | 0.722135 |
surface.ex
|
starcoder
|
defmodule Typo do
@moduledoc """
Typo is an Elixir library for programatically generating PDF documents.
Functionality is split across a number of modules:
* `Typo.PDF.Annotation` - Annotations (links etc).
* `Typo.PDF.Canvas` - low-level graphics and text drawing functions.
* `Typo.PDF.Document` - document-level functions, such as loading images and
fonts.
* `Typo.PDF.Outline` - Outline tree handling.
* `Typo.PDF.Page` - page related functions, such as Bookmarks, page size
/ rotation and adding document pages.
* `Typo.PDF.Transform` - functions to generate transformation matrices.
* `Typo.PDF.Units` - unit conversions.
## PDF Imaging Model
The PDF format has a rich set of features which enable the drawing of
vector graphics, raster images and text.
### Vector Graphics
Vector graphics are drawn by means of creating a path object which is then
filled and/or stroked. Paths may consist of lines or Bézier curves (Typo
provides functions in `Typo.PDF.Canvas` to generate other shapes consisting
of these basic elements).
A path may be easily created by calling `Typo.PDF.Canvas.with_path/3` and
providing a function to draw the path. The first operation should be
normally to move the graphics cursor to the desired start position
using `Typo.PDF.Canvas.move_to/2`.
Alternatively, you may manually start a path by calling `Typo.PDF.Canvas.move_to/2`,
then call the required functions and finally complete the path by calling
either `Typo.PDF.Canvas.path_paint/2` or `Typo.PDF.Canvas.path_end/1`.
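A minimal sketch of the first approach (the `pdf` handle, the argument order of
`with_path/3` and the `line_to/2` call are assumptions made purely for illustration):

    alias Typo.PDF.Canvas

    pdf =
      Canvas.with_path(pdf, [stroke: true], fn pdf ->
        pdf
        |> Canvas.move_to({100, 100})
        |> Canvas.line_to({200, 100})   # hypothetical straight-line segment
        |> Canvas.line_to({150, 180})
      end)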
### Raster Images
Typo currently supports the embedding of JPEG and PNG images (although not all
PNG images are supported, e.g. interlaced PNGs).
Images can be loaded using `Typo.PDF.Document.load_image!/3`. If the image is
unsupported, then `Typo.ImageError` will be raised.
Once an image has been successfully loaded, it can be placed on a page using
`Typo.PDF.Canvas.image/4`, which offers numerous options including being able
to resize and rotate the image.
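A sketch of loading and placing an image (the handle, image id, file path and exact
argument order are assumptions; the option names are taken from `t:Typo.image_options/0`):

    pdf = Typo.PDF.Document.load_image!(pdf, :logo, "priv/logo.png")
    pdf = Typo.PDF.Canvas.image(pdf, :logo, {72, 600}, width: 200, rotate: 15)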
### Text Output
Text can be drawn by calling `Typo.PDF.Canvas.with_text/2` and providing a function
to output the desired text using `Typo.PDF.Canvas.text/3`.
Alternatively a text object can be manually defined by calling
`Typo.PDF.Canvas.begin_text/1`, `Typo.PDF.Canvas.text/3` to output the desired text and
`Typo.PDF.Canvas.end_text/1`.
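A sketch of the first form (the `pdf` handle and the exact argument shape of `text/3`
are assumptions for illustration):

    pdf =
      Typo.PDF.Canvas.with_text(pdf, fn pdf ->
        Typo.PDF.Canvas.text(pdf, {72, 720}, "Hello, Typo!")
      end)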
Calling `Typo.PDF.Canvas.set_text_render/2` within a text object allows the
configuration of the filling/stroking behaviour of the text object.
All PDF readers support a standard set of 14 core fonts which use
either WinAnsi or font-specific encodings.
Text should be supplied to `Typo.PDF.Canvas.text/3` in `UTF-8` format (the
default for Elixir strings) or as raw binaries for symbol fonts.
For non-symbol fonts the UTF-8 source is translated into WinAnsi encoding where
possible (not all UTF-8 characters are available with the standard 14 fonts).
Embedded TrueType font support is currently under development.
### Graphics State
An internal model of the PDF graphics state is maintained as the document is created
which contains the following items (the functions in brackets are getters and setters):
* Fill Colour (`Typo.PDF.Canvas.get_fill_colour/1` and `Typo.PDF.Canvas.set_fill_colour/2`).
* Font (`Typo.PDF.Canvas.get_font_attributes/1`, `Typo.PDF.Canvas.get_font_size/1`
and `Typo.PDF.Canvas.set_font!/5`).
* Graphics Mode (see below for more information).
* Graphics Position (`Typo.PDF.Canvas.get_position/1` and `Typo.PDF.Canvas.move_to/2`).
* Graphics Transform Matrix (`Typo.PDF.Canvas.get_transform/1` and
`Typo.PDF.Canvas.transform/2`).
* Line Cap Style (`Typo.PDF.Canvas.get_line_cap/1` and
`Typo.PDF.Canvas.set_line_cap/2`).
* Line Dash (`Typo.PDF.Canvas.get_line_dash/1` and `Typo.PDF.Canvas.set_line_dash/2`).
* Line Join Style (`Typo.PDF.Canvas.get_line_join/1` and `Typo.PDF.Canvas.set_line_join/2`).
* Line Width (`Typo.PDF.Canvas.get_line_width/1` and `Typo.PDF.Canvas.set_line_width/2`).
* Mitre Limit (`Typo.PDF.Canvas.get_mitre_limit/1` and `Typo.PDF.Canvas.set_mitre_limit/2`).
* Stroke Colour (`Typo.PDF.Canvas.get_stroke_colour/1` and
`Typo.PDF.Canvas.set_stroke_colour/2`).
* Text Character Spacing (`Typo.PDF.Canvas.get_char_spacing/1` and
`Typo.PDF.Canvas.set_char_spacing/2`).
* Text Horizontal Scale (`Typo.PDF.Canvas.get_horizontal_scale/1` and
`Typo.PDF.Canvas.set_horizontal_scale/2`).
* Text Leading (`Typo.PDF.Canvas.get_text_leading/1` and `Typo.PDF.Canvas.set_text_leading/2`).
* Text Position (`Typo.PDF.Canvas.get_text_position/1` and `Typo.PDF.Canvas.move_text_to/2`).
* Text Render Mode (`Typo.PDF.Canvas.get_text_render/1` and
`Typo.PDF.Canvas.set_text_render/2`).
* Text Rise (`Typo.PDF.Canvas.get_text_rise/1` and `Typo.PDF.Canvas.set_text_rise/2`).
* Text Transform Matrix (`Typo.PDF.Canvas.get_text_transform/1` and
`Typo.PDF.Canvas.text_transform/2`).
* Text Word Spacing (`Typo.PDF.Canvas.get_word_spacing/1` and
`Typo.PDF.Canvas.set_word_spacing/2`).
#### Graphics Mode
Graphics Mode is one of `:page`, `:path` or `:text` which denotes Page description,
Path object construction or Text object construction. The Graphics Mode controls
which PDF operators are valid at any one time; Typo will detect the use of
unavailable operators and raise `Typo.GraphicsStateError`.
Page 113 of `PDF 32000-1:2008` contains a diagram which shows the relationship
between graphics mode states and available operators.
#### Graphics State Stack
The graphics state stack can be used to save (push) and restore (pop) the
graphics state. The function `Typo.PDF.Canvas.with_state/2` saves the current
graphics state, calls a user supplied function then restores the saved graphics
state (alternatively `Typo.PDF.Canvas.save_state/1` and `Typo.PDF.Canvas.restore_state/1`
can be used).
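For example (a sketch; the `pdf` handle is a placeholder and it is assumed the supplied
function receives and returns the updated handle):

    pdf =
      Typo.PDF.Canvas.with_state(pdf, fn pdf ->
        # changes made here (line width, colours, transforms, ...) are
        # discarded when the saved state is restored
        Typo.PDF.Canvas.set_line_width(pdf, 2.5)
      end)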
## Coordinates
Functions which take coordinates expect an `{x, y}` tuple. The lower left hand corner
of the page is at position `{0, 0}`. The default PDF coordinate resolution is 72 dpi.
The coordinate system can be modified by the use of transform matrices (paths and text
each have their own separate transform matrices).
## Colour
PDF has 3 main colour types:
* Greyscale - single value between 0.0 (black) and 1.0 (white).
* RGB - tuple of `{red, green, blue}` with each component being a value
between 0.0 and 1.0.
* CMYK - tuple of `{cyan, magenta, yellow, black}` with each component
being a value between 0.0 and 1.0.
Typo can accept all 3 colour types for functions that require colours to be specified,
and additionally can also accept any of the following colour definitions as well:
* `#rgb` - HTML shorthand hex-triplet e.g. `#f00`.
* `#rrggbb` - HTML hex-triplet e.g. `#ff0000`.
* HTML colour name as a binary string, e.g. `"blue"`.
* HTML colour name as an atom, e.g. `:blue`.
Note that hex-triplets and binary strings are case insensitive, and binary colour name
strings have any spaces removed.
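As an illustrative sketch (the `pdf` handle is a placeholder; `set_fill_colour/2` is
listed under Graphics State above), the following calls all select the same red fill:

    alias Typo.PDF.Canvas

    Canvas.set_fill_colour(pdf, {1.0, 0.0, 0.0})   # RGB tuple
    Canvas.set_fill_colour(pdf, "#f00")            # HTML shorthand hex-triplet
    Canvas.set_fill_colour(pdf, "#FF0000")         # HTML hex-triplet (case insensitive)
    Canvas.set_fill_colour(pdf, :red)              # HTML colour name as an atom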
## General Notes
The library functions are protected by guards to ensure that errors are caught
as early as possible - if you are getting `FunctionClauseError` exceptions,
it is likely that you have called a function with an incorrect data type.
Additionally there are Dialyzer specs for almost all functions which makes
catching errors during development slightly easier (although Dialyzer's errors
are somewhat inscrutable at times). Running Dialyzer (for example, using
`dialyxir`) regularly as code is developed makes pinpointing of errors somewhat
easier.
"""
@type colour :: colour_greyscale() | colour_cmyk() | colour_rgb() | colour_name()
@type colour_cmyk :: {number(), number(), number(), number()}
@type colour_greyscale :: number()
@type colour_name :: atom() | String.t()
@type colour_rgb :: {number(), number(), number()}
@type destination :: {internal_page(), :fit | :fit_h | :fit_r | :fit_v | :xyz, Keyword.t()}
@type error :: {:error, any()}
@type filter_fun :: (integer() -> boolean())
@type filter_op_fun :: (Typo.PDF.t(), integer() -> Typo.PDF.t())
@type font_attrs :: [:bold | :italic]
@type font_type :: :standard | :truetype
@type form_number :: integer()
@type gid :: non_neg_integer()
@type graphics_mode :: :page | :path | :text
@type id :: atom() | binary() | integer() | tuple()
@type image_dimensions :: {number(), number()}
@type image_options :: [
{:height, number()}
| {:rotate, number()}
| {:rotate_about, :centre | :center | :corner}
| {:width, number()}
]
@type internal_page :: {:form, form_number()} | {:page, page_number()}
@type line_cap :: :butt | :round | :square
@type line_dash :: {[number()], integer()}
@type line_join :: :bevel | :mitre | :miter | :round
@type line_width :: number()
@type mitre_limit :: number()
@type oid :: pos_integer()
@type op_fun :: (Typo.PDF.t() -> Typo.PDF.t())
@type opacity :: number()
@type outline_walk_fun :: (id(), any() -> any())
@type page_dimensions :: {number(), number()}
@type page_geometry :: [number(), ...]
@type page_layout ::
:single_page
| :one_column
| :two_column_left
| :two_column_right
| :two_page_left
| :two_page_right
@type page_number :: integer()
@type page_orientation :: :portrait | :landscape
@type page_rotation :: 0 | 90 | 180 | 270
@type path_paint :: [
{:close, boolean()}
| {:end, boolean()}
| {:fill, boolean()}
| {:stroke, boolean()}
| {:winding, :even_odd | :nonzero}
]
@type rectangle :: {number(), number(), number(), number()}
@type text_render_mode :: [{:clip, boolean()} | {:fill, boolean()} | {:stroke, boolean()}]
@type transform_matrix :: {number(), number(), number(), number(), number(), number()}
@type xy :: {number(), number()}
@type xyz :: {number(), number(), number()}
@type winding_rule :: :even_odd | :nonzero
# useful integer types
@type int8 :: -0x80..0x7F
@type int16 :: -0x8000..0x7FFF
@type int32 :: -0x8000_0000..0x7FFF_FFFF
@type int64 :: -0x8000_0000_0000_0000..0x7FFF_FFFF_FFFF_FFFF
@type uint8 :: 0..0xFF
@type uint16 :: 0..0xFFFF
@type uint32 :: 0..0xFFFF_FFFF
@type uint64 :: 0..0xFFFF_FFFF_FFFF_FFFF
defmodule FontError do
@moduledoc "Raised when problem dealing with Fonts."
defexception [:message]
end
defmodule GraphicsStateError do
@moduledoc "Raised when PDF in wrong state for given operation."
defexception [:message]
end
defmodule ImageError do
@moduledoc "Raised when PNG/JPEG image appears to be corrupt or is unsupported."
defexception [:message]
end
@doc """
Returns library version as a string.
"""
@spec version :: String.t()
def version, do: Application.spec(:typo, :vsn) |> to_string()
end
|
lib/typo.ex
| 0.930502 | 0.687105 |
typo.ex
|
starcoder
|
defmodule Weaver.Loom.Prosumer do
@moduledoc """
Represents a worker that handles one `Weaver.Step` at a time.
Dispatched steps are passed to the `GenStage` level below after each call
to `Weaver.Step.process/1`. A step is passed again to `Weaver.Step.process/1`
as long as it returns a new `Weaver.Step` as `next`.
Otherwise, it sends demand to the `GenStage` level above.
Implements a `GenStage` `producer` that is also a `consumer` with manual
demand handling via `GenStage.ask/2` (see `handle_subscribe/4` and `handle_info/2`).
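A start-up sketch (the worker and producer names are illustrative; each subscription
follows the `{producer_name, opts}` form accepted by `init/1`):

    {:ok, _pid} =
      Weaver.Loom.Prosumer.start_link({:loom_worker, [{:loom_source, max_demand: 1}]})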
"""
use GenStage
@max_demand 1
alias __MODULE__.State
defmodule State do
@moduledoc false
defstruct [:name, :status, :retrieval, producers: %{}, demand: 0, queue: []]
end
def start_link(opts = {name, _subscriptions}) do
GenStage.start_link(__MODULE__, opts, name: name)
end
@impl GenStage
def init({name, subscriptions}) do
Enum.each(subscriptions, fn subscription ->
opts =
case subscription do
{name, opts} -> [{:to, name} | opts]
name -> [to: name]
end
|> Keyword.put_new(:max_demand, @max_demand)
GenStage.async_subscribe(self(), opts)
end)
{:producer, %State{name: name, status: :waiting_for_consumers}}
end
@impl GenStage
def handle_subscribe(:producer, opts, from, state) do
pending = opts[:max_demand] || @max_demand
state = put_in(state.producers[from], pending)
if state.status == :waiting_for_producers, do: GenStage.ask(from, pending)
# Returns manual as we want control over the demand
{:manual, state}
end
def handle_subscribe(:consumer, _opts, _from, state) do
{:automatic, state}
end
@impl GenStage
def handle_cancel(_, from, state) do
# Remove the producers from the map on unsubscribe
producers = Map.delete(state.producers, from)
{:noreply, [], %{state | producers: producers}}
end
@impl GenStage
def handle_events(events, from, state) when is_list(events) do
state =
update_in(state.producers[from], &(&1 + length(events)))
|> Map.update!(:queue, &(&1 ++ events))
noreply([], state)
end
@impl GenStage
def handle_demand(demand, state) do
noreply([], state, demand)
end
@impl GenStage
def handle_info(:tick, state = %{demand: 0}) do
{:noreply, [], %{state | status: :waiting_for_consumers}}
end
def handle_info(:tick, state = %{retrieval: event}) when event != nil do
state = %{state | status: :working}
case Weaver.Loom.Event.process(event) do
{:ok, dispatched, next} ->
noreply(dispatched, %{state | retrieval: next})
{:retry, event, delay} ->
Process.send_after(self(), :tick, delay)
{:noreply, [], %{state | retrieval: event, status: :paused}}
{:error, _} ->
noreply([], %{state | retrieval: nil})
end
end
def handle_info(:tick, state = %{queue: [event | queue]}) do
noreply([], %{state | retrieval: event, queue: queue})
end
def handle_info(:tick, state) do
producers =
Enum.into(state.producers, %{}, fn {from, pending} ->
# Ask for any pending events
GenStage.ask(from, pending)
# Reset pending events to 0
{from, 0}
end)
{:noreply, [], %{state | producers: producers, status: :waiting_for_producers}}
end
defp noreply(events, state, demand \\ 0) do
count = length(events)
new_demand = max(state.demand + demand - count, 0)
state = %{state | demand: new_demand}
if new_demand > 0, do: send(self(), :tick)
{:noreply, events, state}
end
end
|
lib/weaver/loom/prosumer.ex
| 0.859884 | 0.681601 |
prosumer.ex
|
starcoder
|
defmodule XDR.FixedArray do
@moduledoc """
This module manages the `Fixed-Length Array` type based on the RFC4506 XDR Standard.
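A round-trip sketch (it assumes the companion `XDR.Int` type module from this library;
decoded elements come back as that type's structs rather than plain integers):

    fixed_array = XDR.FixedArray.new([1, 2, 3], XDR.Int, 3)
    {:ok, binary} = XDR.FixedArray.encode_xdr(fixed_array)
    {:ok, {elements, ""}} = XDR.FixedArray.decode_xdr(binary, %{type: XDR.Int, length: 3})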
"""
@behaviour XDR.Declaration
alias XDR.Error.FixedArray, as: FixedArrayError
defstruct [:elements, :type, :length]
@typedoc """
`XDR.FixedArray` structure type specification.
"""
@type t :: %XDR.FixedArray{elements: list | nil, type: module, length: integer}
@doc """
Create a new `XDR.FixedArray` structure with the `elements`, `type` and `length` passed.
"""
@spec new(elements :: list | binary, type :: module, length :: integer) :: t
def new(elements, type, length),
do: %XDR.FixedArray{elements: elements, type: type, length: length}
@impl XDR.Declaration
@doc """
Encode a `XDR.FixedArray` structure into a XDR format.
"""
@spec encode_xdr(fixed_array :: t) ::
{:ok, binary} | {:error, :not_number | :invalid_length | :not_list | :invalid_type}
def encode_xdr(%{length: length}) when not is_integer(length), do: {:error, :not_number}
def encode_xdr(%{elements: elements, length: length}) when length(elements) !== length,
do: {:error, :invalid_length}
def encode_xdr(%{elements: elements}) when not is_list(elements), do: {:error, :not_list}
def encode_xdr(%{type: type}) when not is_atom(type), do: {:error, :invalid_type}
def encode_xdr(%{elements: elements, type: type}) do
binary =
Enum.reduce(elements, <<>>, fn element, bytes -> bytes <> encode_element(element, type) end)
{:ok, binary}
end
@impl XDR.Declaration
@doc """
Encode a `XDR.FixedArray` structure into a XDR format.
If the `fixed_array` is not valid, an exception is raised.
"""
@spec encode_xdr!(fixed_array :: t) :: binary()
def encode_xdr!(fixed_array) do
case encode_xdr(fixed_array) do
{:ok, result} -> result
{:error, reason} -> raise(FixedArrayError, reason)
end
end
@impl XDR.Declaration
@doc """
Decode the Fixed-Length Array in XDR format to a `XDR.FixedArray` structure.
"""
@spec decode_xdr(bytes :: binary, fixed_array :: t | map()) ::
{:ok, {list, binary}} | {:error, :not_number | :not_binary | :not_valid_binary | :invalid_type}
def decode_xdr(_bytes, %{length: length}) when not is_integer(length), do: {:error, :not_number}
def decode_xdr(bytes, _struct) when not is_binary(bytes), do: {:error, :not_binary}
def decode_xdr(bytes, _struct) when rem(byte_size(bytes), 4) != 0,
do: {:error, :not_valid_binary}
def decode_xdr(_bytes, %{type: type}) when not is_atom(type), do: {:error, :invalid_type}
def decode_xdr(bytes, %{type: type, length: length}) do
{:ok, decode_elements_from_fixed_array(type, [], bytes, length)}
end
@impl XDR.Declaration
@doc """
Decode the Fixed-Length Array in XDR format to a `XDR.FixedArray` structure.
If the binaries are not valid, an exception is raised.
"""
@spec decode_xdr!(bytes :: binary, fixed_array :: t | map()) :: {list, binary}
def decode_xdr!(bytes, fixed_array) do
case decode_xdr(bytes, fixed_array) do
{:ok, result} -> result
{:error, reason} -> raise(FixedArrayError, reason)
end
end
@spec encode_element(element :: any(), type :: module()) :: binary()
defp encode_element(element, type), do: element |> type.new() |> type.encode_xdr!()
@spec decode_elements_from_fixed_array(
type :: module,
acc :: list,
rest :: binary,
array_length :: integer
) :: {list, binary}
defp decode_elements_from_fixed_array(_type, acc, rest, 0), do: {Enum.reverse(acc), rest}
defp decode_elements_from_fixed_array(type, acc, bytes, array_length) do
{decoded, rest} = type.decode_xdr!(bytes)
decode_elements_from_fixed_array(type, [decoded | acc], rest, array_length - 1)
end
end
|
lib/xdr/fixed_array.ex
| 0.926844 | 0.581422 |
fixed_array.ex
|
starcoder
|
defmodule Surface.LiveView do
@moduledoc """
A wrapper component around `Phoenix.LiveView`.
Since this module is just a wrapper around `Phoenix.LiveView`, you
cannot define custom properties for it. Only `:id` and `:session`
are available. However, built-in directives like `:for` and `:if`
can be used normally.
## Example
defmodule Example do
use Surface.LiveView
def render(assigns) do
~H"\""
<Dialog title="Alert" id="dialog">
This <b>Dialog</b> is a stateful component. Cool!
</Dialog>
<Button click="show_dialog">Click to open the dialog</Button>
"\""
end
def handle_event("show_dialog", _, socket) do
Dialog.show("dialog")
{:noreply, socket}
end
end
"""
defmacro __using__(opts) do
quote do
use Surface.BaseComponent, type: unquote(__MODULE__)
use Surface.API, include: [:prop, :data]
import Phoenix.HTML
alias Surface.Constructs.{For, If}
alias Surface.Components.Context
@before_compile Surface.Renderer
@before_compile unquote(__MODULE__)
@doc "The id of the live view"
prop id, :string, required: true
@doc """
The request info necessary for the view, such as params, cookie session info, etc.
The session is signed and stored on the client, then provided back to the server
when the client connects, or reconnects to the stateful view.
"""
prop session, :map
use Phoenix.LiveView, unquote(opts)
end
end
defmacro __before_compile__(env) do
quoted_mount(env)
end
defp quoted_mount(env) do
defaults = env.module |> Surface.API.get_defaults() |> Macro.escape()
if Module.defines?(env.module, {:mount, 3}) do
quote do
defoverridable mount: 3
def mount(params, session, socket) do
socket =
socket
|> Surface.init()
|> assign(unquote(defaults))
super(params, session, socket)
end
end
else
quote do
def mount(_params, _session, socket) do
{:ok,
socket
|> Surface.init()
|> assign(unquote(defaults))}
end
end
end
end
end
|
lib/surface/live_view.ex
| 0.869493 | 0.457076 |
live_view.ex
|
starcoder
|
defmodule ElixirRigidPhysics.Geometry.Tetrahedron do
@moduledoc """
Module for handling queries related to tetrahedra.
"""
alias Graphmath.Vec3
require Record
Record.defrecord(:tetrahedron,
a: {0.0, 0.0, 0.0},
b: {1.0, 0.0, 0.0},
c: {0.0, 1.0, 0.0},
d: {0.0, 0.0, 1.0}
)
@type tetrahedron ::
record(:tetrahedron, a: Vec3.vec3(), b: Vec3.vec3(), c: Vec3.vec3(), d: Vec3.vec3())
require ElixirRigidPhysics.Geometry.LineSegment, as: LSeg
require ElixirRigidPhysics.Geometry.Triangle, as: Tri
require ElixirRigidPhysics.Geometry.Plane, as: Plane
@verysmol 1.0e-12
@doc """
Function to create a tetrahedron given four points.
## Examples
iex> # IO.puts "Check creating a tetrahedron."
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> a = {1.0, 1.0, 1.0}
iex> b = {2.0, 1.0, 1.0}
iex> c = {1.0, 2.0, 1.0}
iex> d = {1.0, 1.0, 2.0}
iex> Tetra.create(a,b,c,d)
{:tetrahedron, {1.0, 1.0, 1.0}, {2.0, 1.0, 1.0}, {1.0, 2.0, 1.0}, {1.0, 1.0, 2.0}}
"""
@spec create(Vec3.vec3(), Vec3.vec3(), Vec3.vec3(), Vec3.vec3()) :: tetrahedron
def create(a, b, c, d) do
tetrahedron(a: a, b: b, c: c, d: d)
end
@doc """
Calculates the volume of a tetrahedron.
iex> # IO.puts "Check creating a tetrahedron."
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> a = {1.0, 1.0, 1.0}
iex> b = {2.0, 1.0, 1.0}
iex> c = {1.0, 2.0, 1.0}
iex> d = {1.0, 1.0, 2.0}
iex> t = Tetra.create(a,b,c,d)
iex> 0.0001 > abs(Tetra.volume(t) - 0.16666666)
true
"""
@spec volume(tetrahedron) :: float()
def volume(tetrahedron(a: a, b: b, c: c, d: d)) do
v_bc = Vec3.subtract(b, c)
v_dc = Vec3.subtract(d, c)
v_ac = Vec3.subtract(a, c)
1.0 / 6.0 * Vec3.scalar_triple(v_bc, v_dc, v_ac)
end
@doc """
Calculates the Barycentric coordinates in tetrahedron `t` for a query point `p`.
## Examples
iex> # IO.puts "Check query point coincident with a."
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> a = {1.0, 1.0, 1.0}
iex> b = {2.0, 1.0, 1.0}
iex> c = {1.0, 2.0, 1.0}
iex> d = {1.0, 1.0, 2.0}
iex> t = Tetra.create(a,b,c,d)
iex> Tetra.to_barycentric(t, a)
{1.0, 0.0, 0.0, 0.0}
iex> # IO.puts "Check query point coincident with b."
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> a = {1.0, 1.0, 1.0}
iex> b = {2.0, 1.0, 1.0}
iex> c = {1.0, 2.0, 1.0}
iex> d = {1.0, 1.0, 2.0}
iex> t = Tetra.create(a,b,c,d)
iex> Tetra.to_barycentric(t, b)
{0.0, 1.0, 0.0, 0.0}
iex> # IO.puts "Check query point coincident with c."
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> a = {1.0, 1.0, 1.0}
iex> b = {2.0, 1.0, 1.0}
iex> c = {1.0, 2.0, 1.0}
iex> d = {1.0, 1.0, 2.0}
iex> t = Tetra.create(a,b,c,d)
iex> Tetra.to_barycentric(t, c)
{0.0, 0.0, 1.0, 0.0}
iex> # IO.puts "Check query point coincident with d."
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> a = {1.0, 1.0, 1.0}
iex> b = {2.0, 1.0, 1.0}
iex> c = {1.0, 2.0, 1.0}
iex> d = {1.0, 1.0, 2.0}
iex> t = Tetra.create(a,b,c,d)
iex> Tetra.to_barycentric(t, d)
{0.0, 0.0, 0.0, 1.0}
iex> # IO.puts "Check query point outside face BCD."
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> a = {1.0, 1.0, 1.0}
iex> b = {2.0, 1.0, 1.0}
iex> c = {1.0, 2.0, 1.0}
iex> d = {1.0, 1.0, 2.0}
iex> t = Tetra.create(a,b,c,d)
iex> Tetra.to_barycentric(t, {2.0, 2.0, 2.0})
{-2.0, 1.0, 1.0, 1.0}
"""
@spec to_barycentric(tetrahedron, Vec3.vec3()) :: {float, float, float, float}
def to_barycentric(tetrahedron(a: a, b: b, c: c, d: d) = t, q) do
# fun note...with an apex of q, each face must be wound the same way
# a
v_qbcd = create(q, b, c, d) |> volume()
# b
v_qacd = create(q, c, a, d) |> volume()
# c
v_qabd = create(q, a, b, d) |> volume()
# d
v_qcab = create(q, a, c, b) |> volume()
v_total = volume(t)
{v_qbcd / v_total, v_qacd / v_total, v_qabd / v_total, v_qcab / v_total}
end
@doc """
Converts from barycentric coords on a tetrahedron to global cartesian coordinates.
## Examples
iex> # IO.puts "Check barycentric coords for a"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> a = {1.0, 1.0, 1.0}
iex> b = {2.0, 1.0, 1.0}
iex> c = {1.0, 2.0, 1.0}
iex> d = {1.0, 1.0, 2.0}
iex> t = Tetra.create(a,b,c,d)
iex> Tetra.from_barycentric(t, {1.0, 0.0, 0.0, 0.0})
{1.0, 1.0, 1.0}
iex> # IO.puts "Check barycentric coords for b"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> a = {1.0, 1.0, 1.0}
iex> b = {2.0, 1.0, 1.0}
iex> c = {1.0, 2.0, 1.0}
iex> d = {1.0, 1.0, 2.0}
iex> t = Tetra.create(a,b,c,d)
iex> Tetra.from_barycentric(t, {0.0, 1.0, 0.0, 0.0})
{2.0, 1.0, 1.0}
iex> # IO.puts "Check barycentric coords for c"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> a = {1.0, 1.0, 1.0}
iex> b = {2.0, 1.0, 1.0}
iex> c = {1.0, 2.0, 1.0}
iex> d = {1.0, 1.0, 2.0}
iex> t = Tetra.create(a,b,c,d)
iex> Tetra.from_barycentric(t, {0.0, 0.0, 1.0, 0.0})
{1.0, 2.0, 1.0}
iex> # IO.puts "Check barycentric coords for d"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> a = {1.0, 1.0, 1.0}
iex> b = {2.0, 1.0, 1.0}
iex> c = {1.0, 2.0, 1.0}
iex> d = {1.0, 1.0, 2.0}
iex> t = Tetra.create(a,b,c,d)
iex> Tetra.from_barycentric(t, {0.0, 0.0, 0.0, 1.0})
{1.0, 1.0, 2.0}
"""
@spec from_barycentric(tetrahedron, {number, number, number, number}) :: Vec3.vec3()
def from_barycentric(tetrahedron(a: a, b: b, c: c, d: d), {b_a, b_b, b_c, b_d}) do
Vec3.add(Vec3.weighted_sum(b_a, a, b_b, b), Vec3.weighted_sum(b_c, c, b_d, d))
end
@type voronoi_vertex_region :: :region_a | :region_b | :region_c | :region_d
@type voronoi_edge_region ::
:region_ab | :region_ac | :region_ad | :region_bc | :region_bd | :region_cd
@type voronoi_face_region :: :region_abc | :region_abd | :region_cad | :region_bcd
@type voronoi_polyhedron_region :: :region_abcd
@type voronoi_region ::
voronoi_vertex_region
| voronoi_edge_region
| voronoi_face_region
| voronoi_polyhedron_region
@doc """
Find the nearest point on or in a tetrahedron to a query point, and also return its voronoi region.
## Examples
iex> #IO.puts "Classify internal voronoi region abcd"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {0.1, 0.1, 0.1}
iex> Tetra.get_nearest_point( tetra, q)
{{0.1, 0.1, 0.1}, :region_abcd}
iex> #IO.puts "Classify vertex voronoi region a, near"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {0.0, 0.0, 0.0}
iex> Tetra.get_nearest_point( tetra, q)
{ {0.0, 0.0, 0.0}, :region_a}
iex> #IO.puts "Classify vertex voronoi region a, far"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {-1.0, -1.0, -1.0}
iex> Tetra.get_nearest_point( tetra, q)
{ {0.0, 0.0, 0.0}, :region_a}
iex> #IO.puts "Classify vertex voronoi region b, near"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {0.0, 0.0, 1.0}
iex> Tetra.get_nearest_point( tetra, q)
{{ 0.0, 0.0, 1.0}, :region_b}
iex> #IO.puts "Classify vertex voronoi region b, far"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {-0.1, -0.1, 1.5}
iex> Tetra.get_nearest_point( tetra, q)
{{ 0.0, 0.0, 1.0}, :region_b}
iex> #IO.puts "Classify vertex voronoi region c, near"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {1.0, 0.0, 0.0}
iex> Tetra.get_nearest_point( tetra, q)
{{ 1.0, 0.0, 0.0}, :region_c}
iex> #IO.puts "Classify vertex voronoi region c, far"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {1.5, -0.1, -0.1 }
iex> Tetra.get_nearest_point( tetra, q)
{{ 1.0, 0.0, 0.0}, :region_c}
iex> #IO.puts "Classify vertex voronoi region d, near"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {0.0, 1.0, 0.0}
iex> Tetra.get_nearest_point( tetra, q)
{{ 0.0, 1.0, 0.0}, :region_d}
iex> #IO.puts "Classify vertex voronoi region d, far"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {-0.1, 1.5, -0.1 }
iex> Tetra.get_nearest_point( tetra, q)
{{ 0.0, 1.0, 0.0}, :region_d}
iex> #IO.puts "Classify edge voronoi region ab, near"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {0.0, 0.0, 0.5}
iex> Tetra.get_nearest_point( tetra, q)
{{ 0.0, 0.0, 0.5}, :region_ab}
iex> #IO.puts "Classify edge voronoi region ab, far"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {-0.1, -0.1, 0.5 }
iex> Tetra.get_nearest_point( tetra, q)
{{ 0.0, 0.0, 0.5}, :region_ab}
iex> #IO.puts "Classify edge voronoi region ac, near"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {0.5, 0.0, 0.0}
iex> Tetra.get_nearest_point( tetra, q)
{{ 0.5, 0.0, 0.0}, :region_ac}
iex> #IO.puts "Classify edge voronoi region ac, far"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {0.5, -0.1, -0.1 }
iex> Tetra.get_nearest_point( tetra, q)
{{ 0.5, 0.0, 0.0}, :region_ac}
iex> #IO.puts "Classify edge voronoi region ad, near"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {0.0, 0.5, 0.0}
iex> Tetra.get_nearest_point( tetra, q)
{{ 0.0, 0.5, 0.0}, :region_ad}
iex> #IO.puts "Classify edge voronoi region ad, far"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {-0.1, 0.5, -0.1 }
iex> Tetra.get_nearest_point( tetra, q)
{{ 0.0, 0.5, 0.0}, :region_ad}
iex> #IO.puts "Classify edge voronoi region bc, near"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {0.5, 0.0, 0.5}
iex> {nearest_point, region} = Tetra.get_nearest_point( tetra, q)
iex> { Graphmath.Vec3.equal( nearest_point, {0.5, 0.0, 0.5}, 0.0001), region}
{true, :region_bc}
iex> #IO.puts "Classify edge voronoi region bc, far"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {0.5, -0.1, 0.5 }
iex> {nearest_point, region} = Tetra.get_nearest_point( tetra, q)
iex> { Graphmath.Vec3.equal( nearest_point, {0.5, 0.0, 0.5}, 0.0001), region}
{true, :region_bc}
iex> #IO.puts "Classify edge voronoi region bd, near"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {0.0, 0.5, 0.5}
iex> {nearest_point, region} = Tetra.get_nearest_point( tetra, q)
iex> { Graphmath.Vec3.equal( nearest_point, {0.0, 0.5, 0.5}, 0.0001), region}
{true, :region_bd}
iex> #IO.puts "Classify edge voronoi region bd, far"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {-0.1, 0.5, 0.5 }
iex> {nearest_point, region} = Tetra.get_nearest_point( tetra, q)
iex> { Graphmath.Vec3.equal( nearest_point, {0.0, 0.5, 0.5}, 0.0001), region}
{true, :region_bd}
iex> #IO.puts "Classify edge voronoi region cd, near"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {0.5, 0.5, 0.0}
iex> {nearest_point, region} = Tetra.get_nearest_point( tetra, q)
iex> {Graphmath.Vec3.equal( nearest_point, {0.5, 0.5, 0.0}, 0.0001), region}
{true, :region_cd}
iex> #IO.puts "Classify edge voronoi region cd, far"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {0.5, 0.5, -1.0 }
iex> {nearest_point, region} = Tetra.get_nearest_point( tetra, q)
iex> {Graphmath.Vec3.equal( nearest_point, {0.5, 0.5, 0.0}, 0.0001), region}
{true, :region_cd}
iex> #IO.puts "Classify face voronoi region abc, near"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {1/3, 0, 1/3}
iex> {nearest_point, region} = Tetra.get_nearest_point( tetra, q)
iex> {Graphmath.Vec3.equal( nearest_point, {1/3, 0, 1/3}, 0.0001), region}
{true, :region_abc}
iex> #IO.puts "Classify face voronoi region abc, far"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {1/3, -1, 1/3}
iex> {nearest_point, region} = Tetra.get_nearest_point( tetra, q)
iex> {Graphmath.Vec3.equal( nearest_point, {1/3, 0, 1/3}, 0.0001), region}
{true, :region_abc}
iex> #IO.puts "Classify face voronoi region abd, near"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {0, 1/3, 1/3}
iex> {nearest_point, region} = Tetra.get_nearest_point( tetra, q)
iex> {Graphmath.Vec3.equal( nearest_point, {0, 1/3, 1/3}, 0.0001), region}
{true, :region_abd}
iex> #IO.puts "Classify face voronoi region abd, far"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {-1, 1/3, 1/3}
iex> {nearest_point, region} = Tetra.get_nearest_point( tetra, q)
iex> {Graphmath.Vec3.equal( nearest_point, {0, 1/3, 1/3}, 0.0001), region}
{true, :region_abd}
iex> #IO.puts "Classify face voronoi region bcd, near"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {1/3, 1/3, 1/3}
iex> {nearest_point, region} = Tetra.get_nearest_point( tetra, q)
iex> {Graphmath.Vec3.equal( nearest_point, {1/3, 1/3, 1/3}, 0.0001), region}
{true, :region_bcd}
iex> #IO.puts "Classify face voronoi region bcd, far"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {4/3, 4/3, 4/3}
iex> {nearest_point, region} = Tetra.get_nearest_point( tetra, q)
iex> {Graphmath.Vec3.equal( nearest_point, {1/3, 1/3, 1/3}, 0.0001), region}
{true, :region_bcd}
iex> #IO.puts "Classify face voronoi region cad, near"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {1/3, 1/3, 0}
iex> {nearest_point, region} = Tetra.get_nearest_point( tetra, q)
iex> {Graphmath.Vec3.equal( nearest_point, {1/3, 1/3, 0.0}, 0.0001), region}
{true, :region_cad}
iex> #IO.puts "Classify face voronoi region cad, far"
iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
iex> q = {1/3, 1/3, -2}
iex> {nearest_point, region} = Tetra.get_nearest_point( tetra, q)
iex> {Graphmath.Vec3.equal( nearest_point, {1/3, 1/3, 0.0}, 0.0001), region}
{true, :region_cad}
"""
@spec get_nearest_point(tetrahedron, Vec3.vec3()) :: {Vec3.vec3(), voronoi_region}
def get_nearest_point(tetrahedron(a: a, b: b, c: c, d: d) = tetra, q) do
# I hope you like barycenters, 'cause we're about to calculate a lot of them.
{qa, qb, qc, qd} = to_barycentric(tetra, q)
tri_acb = Tri.create_from_points(a, c, b)
acb_plane = Tri.to_plane(tri_acb)
acb_q = Plane.project_point_to_plane(acb_plane, q)
{tri_acb_u, tri_acb_v, tri_acb_w} = Tri.to_barycentric(tri_acb, acb_q)
tri_abd = Tri.create_from_points(a, b, d)
abd_plane = Tri.to_plane(tri_abd)
abd_q = Plane.project_point_to_plane(abd_plane, q)
{tri_abd_u, tri_abd_v, tri_abd_w} = Tri.to_barycentric(tri_abd, abd_q)
tri_bcd = Tri.create_from_points(b, c, d)
bcd_plane = Tri.to_plane(tri_bcd)
bcd_q = Plane.project_point_to_plane(bcd_plane, q)
{tri_bcd_u, tri_bcd_v, tri_bcd_w} = Tri.to_barycentric(tri_bcd, bcd_q)
tri_cad = Tri.create_from_points(c, a, d)
cad_plane = Tri.to_plane(tri_cad)
cad_q = Plane.project_point_to_plane(cad_plane, q)
{tri_cad_u, tri_cad_v, tri_cad_w} = Tri.to_barycentric(tri_cad, cad_q)
l_ab = LSeg.create(a, b)
{l_ab_u, l_ab_v} = LSeg.to_barycentric(l_ab, q)
l_ac = LSeg.create(a, c)
{l_ac_u, l_ac_v} = LSeg.to_barycentric(l_ac, q)
l_ad = LSeg.create(a, d)
{l_ad_u, l_ad_v} = LSeg.to_barycentric(l_ad, q)
l_bc = LSeg.create(b, c)
{l_bc_u, l_bc_v} = LSeg.to_barycentric(l_bc, q)
l_bd = LSeg.create(b, d)
{l_bd_u, l_bd_v} = LSeg.to_barycentric(l_bd, q)
l_cd = LSeg.create(c, d)
{l_cd_u, l_cd_v} = LSeg.to_barycentric(l_cd, q)
# okay, remember:
# test verts, then edges, then faces...lowest dimension first!
cond do
# a
l_ab_v <= @verysmol and l_ac_v <= @verysmol and l_ad_v <= @verysmol ->
{a, :region_a}
# b
l_ab_u <= @verysmol and l_bc_v <= @verysmol and l_bd_v <= @verysmol ->
{b, :region_b}
# c
l_ac_u <= @verysmol and l_bc_u <= @verysmol and l_cd_v <= @verysmol ->
{c, :region_c}
# d
l_ad_u <= @verysmol and l_bd_u <= @verysmol and l_cd_u <= @verysmol ->
{d, :region_d}
# ab
l_ab_u > @verysmol and l_ab_v > @verysmol and tri_abd_w <= @verysmol and
tri_acb_v <= @verysmol ->
{LSeg.from_barycentric(l_ab, {l_ab_u, l_ab_v}), :region_ab}
# # ac
l_ac_u > @verysmol and l_ac_v > @verysmol and tri_acb_w <= @verysmol and
tri_cad_w <= @verysmol ->
{LSeg.from_barycentric(l_ac, {l_ac_u, l_ac_v}), :region_ac}
# # ad
l_ad_u > @verysmol and l_ad_v > @verysmol and tri_abd_v <= @verysmol and
tri_cad_u <= @verysmol ->
{LSeg.from_barycentric(l_ad, {l_ad_u, l_ad_v}), :region_ad}
# bc
l_bc_u > @verysmol and l_bc_v > @verysmol and tri_acb_u <= @verysmol and
tri_bcd_w <= @verysmol ->
{LSeg.from_barycentric(l_bc, {l_bc_u, l_bc_v}), :region_bc}
# bd
l_bd_u > @verysmol and l_bd_v > @verysmol and tri_abd_u <= @verysmol and
tri_bcd_v <= @verysmol ->
{LSeg.from_barycentric(l_bd, {l_bd_u, l_bd_v}), :region_bd}
# cd
l_cd_u > @verysmol and l_cd_v > @verysmol and tri_cad_v <= @verysmol and
tri_bcd_u <= @verysmol ->
{LSeg.from_barycentric(l_cd, {l_cd_u, l_cd_v}), :region_cd}
# abc
tri_acb_u > @verysmol and tri_acb_v > @verysmol and tri_acb_w > @verysmol and
qd <= @verysmol ->
{Tri.from_barycentric(tri_acb, {tri_acb_u, tri_acb_v, tri_acb_w}), :region_abc}
# abd
tri_abd_u > @verysmol and tri_abd_v > @verysmol and tri_abd_w > @verysmol and
qc <= @verysmol ->
{Tri.from_barycentric(tri_abd, {tri_abd_u, tri_abd_v, tri_abd_w}), :region_abd}
# bcd
tri_bcd_u > @verysmol and tri_bcd_v > @verysmol and tri_bcd_w > @verysmol and
qa <= @verysmol ->
{Tri.from_barycentric(tri_bcd, {tri_bcd_u, tri_bcd_v, tri_bcd_w}), :region_bcd}
# cad
tri_cad_u > @verysmol and tri_cad_v > @verysmol and tri_cad_w > @verysmol and
qb <= @verysmol ->
{Tri.from_barycentric(tri_cad, {tri_cad_u, tri_cad_v, tri_cad_w}), :region_cad}
# abcd
qa > 0 and qb > 0 and qc > 0 and qd > 0 ->
{q, :region_abcd}
true ->
IO.puts("""
Something's wrong.
Q: #{inspect(q)}
AB: #{inspect({l_ab_u, l_ab_v})}
AC: #{inspect({l_ac_u, l_ac_v})}
AD: #{inspect({l_ad_u, l_ad_v})}
BC: #{inspect({l_bc_u, l_bc_v})}
BD: #{inspect({l_bd_u, l_bd_v})}
CD: #{inspect({l_cd_u, l_cd_v})}
ACB: #{inspect({tri_acb_u, tri_acb_v, tri_acb_w})}
ABD: #{inspect({tri_abd_u, tri_abd_v, tri_abd_w})}
BCD: #{inspect({tri_bcd_u, tri_bcd_v, tri_bcd_w})}
CAD: #{inspect({tri_cad_u, tri_cad_v, tri_cad_w})}
ABCD: #{inspect({qa, qb, qc, qd})}
""")
end
end
# iex> #IO.puts "Classify face voronoi region abd, near"
# iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
# iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
# iex> Tetra.classify_point_for_tetrahedron( tetra, {-0.0, 0.1, 0.1})
# :region_abd
# iex> #IO.puts "Classify face voronoi region abd, far"
# iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
# iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
# iex> Tetra.classify_point_for_tetrahedron( tetra, {-2.0, 0.1, 0.1})
# :region_abd
# iex> #IO.puts "Classify face voronoi region bcd, near"
# iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
# iex> sqrtthree = :math.sqrt(3.0)
# iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
# iex> Tetra.classify_point_for_tetrahedron( tetra, {sqrtthree,sqrtthree,sqrtthree})
# :region_bcd
# iex> #IO.puts "Classify face voronoi region bcd, far"
# iex> require ElixirRigidPhysics.Geometry.Tetrahedron, as: Tetra
# iex> tetra = Tetra.create( {0.0, 0.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
# iex> Tetra.classify_point_for_tetrahedron( tetra, {1.0, 1.0, 1.0})
# :region_bcd
end
|
lib/geometry/tetrahedron.ex
| 0.916035 | 0.586345 |
tetrahedron.ex
|
starcoder
|
defmodule Operate do
@moduledoc """
Load and run Operate programs (known as "tapes") encoded in Bitcoin SV
transactions.
Operate is a toolset to help developers build applications, games and services
on top of Bitcoin (SV). It lets you write functions, called "Ops", and enables
transactions to become small but powerful programs, capable of delivering new
classes of services layered over Bitcoin.
## Installation
The package is bundled with `libsecp256k1` NIF bindings. `libtool`, `automake`
and `autogen` are required in order for the package to compile.
The package can be installed by adding `operate` to your list of dependencies
in `mix.exs`.
**The most recent `luerl` package published on `hex.pm` is based on Lua 5.2
which may not be compatible with all Ops. It is recommended to override the
`luerl` dependency with the latest development version to benefit from Lua 5.3.**
def deps do
[
{:operate, "~> #{ Mix.Project.config[:version] }"},
{:luerl, github: "rvirding/luerl", branch: "develop", override: true}
]
end
## Quick start
The agent can be used straight away without starting any processes. This will
run without caching so should only be used for testing and kicking the tyres.
{:ok, tape} = Operate.load_tape(txid)
{:ok, tape} = Operate.run_tape(tape)
tape.result
See `load_tape/2` and `run_tape/2`.
## Process supervision
To enable caching the agent should be started as part of your applications
process supervision tree.
children = [
{Operate, [
cache: Operate.Cache.ConCache,
]},
{ConCache, [
name: :operate,
ttl_check_interval: :timer.minutes(1),
global_ttl: :timer.minutes(10),
touch_on_read: true
]}
]
Supervisor.start_link(children, strategy: :one_for_one)
## Configuration
Operate can be configured with the following options. Additionally, any of
these options can be passed to `load_tape/2` and `run_tape/2` to override
the configuration.
* `:tape_adapter` - The adapter module used to fetch the tape transaction.
* `:op_adapter` - The adapter module used to fetch the tape's Ops.
* `:cache` - The cache module used for caching tapes and Ops.
* `:extensions` - A list of extension modules to extend the VM state.
* `:aliases` - A map of references to alias functions to alternative references.
* `:strict` - Set `false` to disable strict mode and ignore missing and/or erring functions.
The default configuration:
tape_adapter: Operate.Adapter.Bob,
op_adapter: Operate.Adapter.OpApi,
cache: Operate.Cache.NoCache,
extensions: [],
aliases: %{},
strict: true
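Any of these options can also be passed per call to override the configuration, for
example (the txid is a placeholder):

    {:ok, tape} = Operate.load_tape(txid, strict: false, cache: Operate.Cache.NoCache)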
"""
use Agent
alias Operate.BPU.Transaction
alias Operate.{Tape, VM}
@default_config %{
tape_adapter: Operate.Adapter.Bob,
op_adapter: Operate.Adapter.OpApi,
cache: Operate.Cache.NoCache,
extensions: [],
aliases: %{},
strict: true
}
@version Mix.Project.config[:version]
@doc """
Starts an Operate agent process with the given options merged with the default
config.
## Options
Refer to the list of accepted [configuration options](#module-configuration).
"""
@spec start_link(keyword) :: {:ok, pid}
def start_link(options \\ []) do
name = Keyword.get(options, :name, __MODULE__)
config = Enum.into(options, @default_config)
vm = VM.init(extensions: config.extensions)
Agent.start_link(fn -> {vm, config} end, name: name)
end
@doc """
Gets Operate's current VM state and config.
If a process has already been started then the existing VM and config
is returned. Alternatively a new VM state is initiated and the default config
returned. In either case any configuration option can be overridden.
Returns a tuple pair containing the VM state and a configuration map.
"""
@spec get_state(keyword) :: {VM.t, map}
def get_state(options \\ []) do
name = Keyword.get(options, :name, __MODULE__)
case Process.whereis(name) do
p when is_pid(p) ->
Agent.get(name, fn {vm, config} ->
config = Enum.into(options, config)
vm = Keyword.get(options, :vm, vm)
|> VM.extend(config.extensions)
{vm, config}
end)
nil ->
config = Enum.into(options, @default_config)
vm = VM.init(extensions: config.extensions)
{vm, config}
end
end
@doc """
Loads a tape from the given txid.
Fetches the tape transaction output as well as all of the required functions,
and returns a `t:Operate.Tape.t/0` ready for execution in an `:ok` / `:error`
tuple pair.
If an Operate agent process has already been started the existing config will
be used. Otherwise a default config will be used. Any configuration option can
be overridden.
## Options
Refer to the list of accepted [configuration options](#module-configuration).
"""
@spec load_tape(String.t, keyword) :: {:ok, Tape.t} | {:error, String.t}
def load_tape(txid, options \\ []) do
[txid, index] = case String.split(txid, "/") do
[_txid, _index] = pair -> pair
[txid] -> [txid, nil]
end
{_vm, config} = get_state(options)
tape_adapter = adapter_with_opts(config.tape_adapter)
{cache, cache_opts} = adapter_with_opts(config.cache)
with {:ok, tx} <- cache.fetch_tx(txid, cache_opts, tape_adapter),
{:ok, tape} <- prep_tape(tx, index, config)
do
{:ok, tape}
else
error -> error
end
end
@doc """
As `load_tape/2`, but returns the tape or raises an exception.
"""
@spec load_tape!(String.t, keyword) :: Tape.t
def load_tape!(txid, options \\ []) do
case load_tape(txid, options) do
{:ok, tape} -> tape
{:error, error} -> raise error
end
end
@doc """
Loads a tape from the given query.
The expected format of the query will depend on the `Operate.Adapter` in use.
The transactions as well as all required functions are loaded and a list of
`t:Operate.Tape.t/0` are returned in an `:ok` / `:error` tuple pair.
If an Operate agent process has already been started the existing config will
be used. Otherwise a default config will be used. Any configuration option can
be overridden.
## Options
Refer to the list of accepted [configuration options](#module-configuration).
## Examples
For example, if using the default `Operate.Adapter.Bob` adapter, a Bitquery
can be provided. The `project` attribute cannot be used and unless otherwise
specified, `limit` defaults to `10`.
Operate.load_tapes_by(%{
"find" => %{
"out.tape.cell" => %{
"$elemMatch" => %{
"i" => 0,
"s" => "1PuQa7K62MiKCtssSLKy1kh56WWU7MtUR5"
}
}
}
})
"""
@spec load_tapes_by(map, keyword) :: {:ok, [Tape.t, ...]} | {:error, String.t}
def load_tapes_by(query, options \\ []) when is_map(query) do
{_vm, config} = get_state(options)
tape_adapter = adapter_with_opts(config.tape_adapter)
{cache, cache_opts} = adapter_with_opts(config.cache)
with {:ok, txns} <- cache.fetch_tx_by(query, cache_opts, tape_adapter),
{:ok, tapes} <- prep_tapes(txns, config)
do
{:ok, tapes}
else
error -> error
end
end
@doc """
As `load_tapes_by/2`, but returns the tapes or raises an exception.
"""
@spec load_tapes_by!(map, keyword) :: [Tape.t, ...]
def load_tapes_by!(query, options \\ []) do
case load_tapes_by(query, options) do
{:ok, tapes} -> tapes
{:error, error} -> raise error
end
end
@doc """
Runs the given tape executing each of the tape's cells and returns the
modified and complete `t:Operate.Tape.t/0` in an `:ok` / `:error` tuple pair.
If an Operate agent process has already been started the existing VM state and
config will be used. Otherwise a new state and default config will be used.
Any configuration option can be overridden.
## Options
The accepted options are:
* `:extensions` - A list of extension modules to extend the VM state.
* `:strict` - Strict mode (defaults to `true`). Disable to force the tape to ignore missing and/or erroring cells.
* `:state` - Specify a state which the tape begins execution with (defaults to `nil`).
* `:vm` - Pass an already initiated VM state in which to run the tape.
"""
@spec run_tape(Tape.t, keyword) :: {:ok, Tape.t} | {:error, Tape.t}
def run_tape(%Tape{} = tape, options \\ []) do
{vm, config} = get_state(options)
state = Map.get(config, :state, nil)
exec_opts = [state: state, strict: config.strict]
with {:ok, tape} <- Tape.run(tape, vm, exec_opts) do
{:ok, tape}
else
error -> error
end
end
@doc """
As `run_tape/2`, but returns the tape or raises an exception.
"""
@spec run_tape!(Tape.t, keyword) :: Tape.t
def run_tape!(%Tape{} = tape, options \\ []) do
case run_tape(tape, options) do
{:ok, tape} -> tape
{:error, tape} -> raise tape.error
end
end
@doc """
Prepare the tape from the given transaction. Optionally specify the output
index of the tape.
"""
@spec prep_tape(Transaction.t, integer | nil, map | keyword) ::
{:ok, Tape.t} |
{:error, String.t}
def prep_tape(tx, index \\ nil, options \\ [])
def prep_tape(%Transaction{} = tx, index, options) when is_list(options) do
{_vm, config} = get_state(options)
prep_tape(tx, index, config)
end
def prep_tape(%Transaction{} = tx, index, config) when is_map(config) do
op_adapter = adapter_with_opts(config.op_adapter)
{cache, cache_opts} = adapter_with_opts(config.cache)
aliases = Map.get(config, :aliases, %{})
with {:ok, tape} <- Tape.from_bpu(tx, index),
refs <- Tape.get_op_refs(tape, aliases),
{:ok, ops} <- cache.fetch_ops(refs, cache_opts, op_adapter),
tape <- Tape.set_cell_ops(tape, ops, aliases)
do
{:ok, tape}
else
error -> error
end
end
@doc """
As `prep_tape/3`, but returns the tape or raises an exception.
"""
@spec prep_tape!(Transaction.t, integer | nil, keyword) :: Tape.t
def prep_tape!(%Transaction{} = tx, index \\ nil, options \\ []) do
case prep_tape(tx, index, options) do
{:ok, tape} -> tape
{:error, error} -> raise error
end
end
@doc """
Prepare the tapes from the given list of transactions.
"""
@spec prep_tapes([Transaction.t, ...], map | keyword, list) ::
{:ok, [Tape.t, ...]} |
{:error, String.t}
def prep_tapes(txns, config \\ [], tapes \\ [])
def prep_tapes([], _config, tapes),
do: {:ok, Enum.reverse(tapes)}
def prep_tapes(txns, options, tapes) when is_list(options) do
{_vm, config} = get_state(options)
prep_tapes(txns, config, tapes)
end
def prep_tapes([%Transaction{} = tx | txns], config, tapes)
when is_map(config)
do
case prep_tape(tx, nil, config) do
{:ok, tape} -> prep_tapes(txns, config, [tape | tapes])
error ->
if config.strict, do: error, else: prep_tapes(txns, config, tapes)
end
end
@doc """
Returns the current version number.
"""
@spec version() :: String.t
def version, do: @version
# Private: Returns the adapter and options in a tuple pair
defp adapter_with_opts(mod) when is_atom(mod), do: {mod, []}
defp adapter_with_opts({mod, opts} = pair)
when is_atom(mod) and is_list(opts),
do: pair
defp adapter_with_opts([mod, opts])
when is_binary(mod) and is_list(opts),
do: {String.to_atom("Elixir." <> mod), opts}
defp adapter_with_opts([mod]) when is_binary(mod),
do: {String.to_atom("Elixir." <> mod), []}
end
|
lib/operate.ex
| 0.853501 | 0.664758 |
operate.ex
|
starcoder
|
defmodule Verk.Queue do
@moduledoc """
This module interacts with a queue
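A brief usage sketch (assumes the `Verk.Redis` connection is running; the queue name
is illustrative):

    {:ok, total} = Verk.Queue.count("default")
    {:ok, jobs} = Verk.Queue.range("default", 0, 9)

    Enum.each(jobs, fn job ->
      {:ok, _deleted?} = Verk.Queue.delete_job("default", job)
    end)

    {:ok, _cleared?} = Verk.Queue.clear("default")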
"""
alias Verk.Job
import Verk.Dsl
@doc """
Counts how many jobs are enqueued on a queue
"""
@spec count(binary) :: {:ok, integer} | {:error, atom | Redix.Error.t}
def count(queue) do
Redix.command(Verk.Redis, ["LLEN", queue_name(queue)])
end
@doc """
Counts how many jobs are enqueued on a queue, raising if there's an error
"""
@spec count!(binary) :: integer
def count!(queue) do
bangify(count(queue))
end
@doc """
Clears the `queue`
It will return `{:ok, true}` if the `queue` was cleared and `{:ok, false}` otherwise
An error tuple may be returned if Redis failed
"""
@spec clear(binary) :: {:ok, boolean} | {:error, Redix.Error.t}
def clear(queue) do
case Redix.command(Verk.Redis, ["DEL", queue_name(queue)]) do
{:ok, 0} -> {:ok, false}
{:ok, 1} -> {:ok, true}
{:error, error} -> {:error, error}
end
end
@doc """
Clears the `queue`, raising if there's an error
It will return `true` if the `queue` was cleared and `false` otherwise
"""
@spec clear!(binary) :: boolean
def clear!(queue) do
bangify(clear(queue))
end
@doc """
Lists enqueued jobs from `start` to `stop`
"""
@spec range(binary, integer, integer) :: {:ok, [Verk.Job.t]} | {:error, Redix.Error.t}
def range(queue, start \\ 0, stop \\ -1) do
case Redix.command(Verk.Redis, ["LRANGE", queue_name(queue), start, stop]) do
{:ok, jobs} -> {:ok, (for job <- jobs, do: Job.decode!(job))}
{:error, error} -> {:error, error}
end
end
@doc """
Lists enqueued jobs from `start` to `stop`, raising if there's an error
"""
@spec range!(binary, integer, integer) :: [Verk.Job.t]
def range!(queue, start \\ 0, stop \\ -1) do
bangify(range(queue, start, stop))
end
@doc """
Deletes the job from the `queue`
It returns `{:ok, true}` if the job was found and deleted
Otherwise it returns `{:ok, false}`
An error tuple may be returned if Redis failed
"""
@spec delete_job(binary, %Job{} | binary) :: {:ok, boolean} | {:error, Redix.Error.t}
def delete_job(queue, %Job{original_json: original_json}) do
delete_job(queue, original_json)
end
def delete_job(queue, original_json) do
case Redix.command(Verk.Redis, ["LREM", queue_name(queue), 1, original_json]) do
{:ok, 0} -> {:ok, false}
{:ok, 1} -> {:ok, true}
{:error, error} -> {:error, error}
end
end
@doc """
Delete job from the `queue`, raising if there's an error
It returns `true` if the job was found and deleted
Otherwise it returns `false`
An error will be raised if Redis failed
"""
@spec delete_job!(binary, %Job{} | binary) :: boolean
def delete_job!(queue, %Job{original_json: original_json}) do
delete_job!(queue, original_json)
end
def delete_job!(queue, original_json) do
bangify(delete_job(queue, original_json))
end
defp queue_name(queue), do: "queue:#{queue}"
end
|
lib/verk/queue.ex
| 0.844313 | 0.493531 |
queue.ex
|
starcoder
|