# ==== begin oauth2client/_pycrypto_crypt.py ====
"""pyCrypto Crypto-related routines for oauth2client."""
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Util.asn1 import DerSequence
from oauth2client import _helpers
class PyCryptoVerifier(object):
"""Verifies the signature on a message."""
def __init__(self, pubkey):
"""Constructor.
Args:
pubkey: OpenSSL.crypto.PKey (or equiv), The public key to verify
with.
"""
self._pubkey = pubkey
def verify(self, message, signature):
"""Verifies a message against a signature.
Args:
message: string or bytes, The message to verify. If string, will be
encoded to bytes as utf-8.
signature: string or bytes, The signature on the message.
Returns:
True if message was signed by the private key associated with the
public key that this object was constructed with.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
return PKCS1_v1_5.new(self._pubkey).verify(
SHA256.new(message), signature)
@staticmethod
def from_string(key_pem, is_x509_cert):
"""Construct a Verified instance from a string.
Args:
key_pem: string, public key in PEM format.
is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it
is expected to be an RSA key in PEM format.
Returns:
Verifier instance.
"""
if is_x509_cert:
key_pem = _helpers._to_bytes(key_pem)
pemLines = key_pem.replace(b' ', b'').split()
certDer = _helpers._urlsafe_b64decode(b''.join(pemLines[1:-1]))
certSeq = DerSequence()
certSeq.decode(certDer)
tbsSeq = DerSequence()
tbsSeq.decode(certSeq[0])
pubkey = RSA.importKey(tbsSeq[6])
else:
pubkey = RSA.importKey(key_pem)
return PyCryptoVerifier(pubkey)
class PyCryptoSigner(object):
"""Signs messages with a private key."""
def __init__(self, pkey):
"""Constructor.
Args:
            pkey: OpenSSL.crypto.PKey (or equiv), The private key to sign with.
"""
self._key = pkey
def sign(self, message):
"""Signs a message.
Args:
message: string, Message to be signed.
Returns:
string, The signature of the message for the given key.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
return PKCS1_v1_5.new(self._key).sign(SHA256.new(message))
@staticmethod
def from_string(key, password='notasecret'):
"""Construct a Signer instance from a string.
Args:
key: string, private key in PEM format.
password: string, password for private key file. Unused for PEM
files.
Returns:
Signer instance.
Raises:
NotImplementedError if the key isn't in PEM format.
"""
parsed_pem_key = _helpers._parse_pem_key(_helpers._to_bytes(key))
if parsed_pem_key:
pkey = RSA.importKey(parsed_pem_key)
else:
raise NotImplementedError(
'No key in PEM format was detected. This implementation '
'can only use the PyCrypto library for keys in PEM '
'format.')
return PyCryptoSigner(pkey)
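# Example: a minimal sign/verify round trip with the two classes above
# (an illustrative sketch, assuming PyCrypto is installed; the key is
# generated ad hoc here rather than loaded from PEM).
def _example_pycrypto_sign_and_verify():
    key = RSA.generate(2048)  # ad-hoc demo key
    signer = PyCryptoSigner(key)
    verifier = PyCryptoVerifier(key.publickey())
    signature = signer.sign('a message to sign')
    return verifier.verify('a message to sign', signature)  # True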
# ==== end of oauth2client/_pycrypto_crypt.py ====
# ==== begin oauth2client/_helpers.py ====
import base64
import functools
import inspect
import json
import logging
import os
import warnings
import six
from six.moves import urllib
logger = logging.getLogger(__name__)
POSITIONAL_WARNING = 'WARNING'
POSITIONAL_EXCEPTION = 'EXCEPTION'
POSITIONAL_IGNORE = 'IGNORE'
POSITIONAL_SET = frozenset([POSITIONAL_WARNING, POSITIONAL_EXCEPTION,
POSITIONAL_IGNORE])
positional_parameters_enforcement = POSITIONAL_WARNING
_SYM_LINK_MESSAGE = 'File: {0}: Is a symbolic link.'
_IS_DIR_MESSAGE = '{0}: Is a directory'
_MISSING_FILE_MESSAGE = 'Cannot access {0}: No such file or directory'
def positional(max_positional_args):
"""A decorator to declare that only the first N arguments my be positional.
This decorator makes it easy to support Python 3 style keyword-only
parameters. For example, in Python 3 it is possible to write::
        def fn(pos1, *, kwonly1=None, kwonly2=None):
...
    All named parameters after ``*`` must be passed as keyword arguments::
fn(10, 'kw1', 'kw2') # Raises exception.
fn(10, kwonly1='kw1') # Ok.
Example
^^^^^^^
To define a function like above, do::
@positional(1)
def fn(pos1, kwonly1=None, kwonly2=None):
...
If no default value is provided to a keyword argument, it becomes a
required keyword argument::
@positional(0)
def fn(required_kw):
...
This must be called with the keyword parameter::
fn() # Raises exception.
fn(10) # Raises exception.
fn(required_kw=10) # Ok.
When defining instance or class methods always remember to account for
``self`` and ``cls``::
class MyClass(object):
@positional(2)
def my_method(self, pos1, kwonly1=None):
...
@classmethod
@positional(2)
def my_method(cls, pos1, kwonly1=None):
...
The positional decorator behavior is controlled by
``_helpers.positional_parameters_enforcement``, which may be set to
``POSITIONAL_EXCEPTION``, ``POSITIONAL_WARNING`` or
``POSITIONAL_IGNORE`` to raise an exception, log a warning, or do
nothing, respectively, if a declaration is violated.
Args:
        max_positional_args: Maximum number of positional arguments. All
                             parameters after this index must be
                             keyword only.
Returns:
A decorator that prevents using arguments after max_positional_args
from being used as positional parameters.
Raises:
TypeError: if a key-word only argument is provided as a positional
parameter, but only if
_helpers.positional_parameters_enforcement is set to
POSITIONAL_EXCEPTION.
"""
def positional_decorator(wrapped):
@functools.wraps(wrapped)
def positional_wrapper(*args, **kwargs):
if len(args) > max_positional_args:
plural_s = ''
if max_positional_args != 1:
plural_s = 's'
message = ('{function}() takes at most {args_max} positional '
'argument{plural} ({args_given} given)'.format(
function=wrapped.__name__,
args_max=max_positional_args,
args_given=len(args),
plural=plural_s))
if positional_parameters_enforcement == POSITIONAL_EXCEPTION:
raise TypeError(message)
elif positional_parameters_enforcement == POSITIONAL_WARNING:
logger.warning(message)
return wrapped(*args, **kwargs)
return positional_wrapper
if isinstance(max_positional_args, six.integer_types):
return positional_decorator
else:
args, _, _, defaults = inspect.getargspec(max_positional_args)
return positional(len(args) - len(defaults))(max_positional_args)
def scopes_to_string(scopes):
"""Converts scope value to a string.
If scopes is a string then it is simply passed through. If scopes is an
iterable then a string is returned that is all the individual scopes
concatenated with spaces.
Args:
scopes: string or iterable of strings, the scopes.
Returns:
The scopes formatted as a single string.
"""
if isinstance(scopes, six.string_types):
return scopes
else:
return ' '.join(scopes)
def string_to_scopes(scopes):
"""Converts stringifed scope value to a list.
If scopes is a list then it is simply passed through. If scopes is an
string then a list of each individual scope is returned.
Args:
scopes: a string or iterable of strings, the scopes.
Returns:
The scopes in a list.
"""
if not scopes:
return []
elif isinstance(scopes, six.string_types):
return scopes.split(' ')
else:
return scopes
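# Example: the two scope helpers above are inverses for the common cases
# (illustrative sketch; pure Python, no external dependencies).
def _example_scope_round_trip():
    assert scopes_to_string(['email', 'profile']) == 'email profile'
    assert string_to_scopes('email profile') == ['email', 'profile']
    assert string_to_scopes('') == []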
def parse_unique_urlencoded(content):
"""Parses unique key-value parameters from urlencoded content.
Args:
content: string, URL-encoded key-value pairs.
Returns:
dict, The key-value pairs from ``content``.
Raises:
ValueError: if one of the keys is repeated.
"""
urlencoded_params = urllib.parse.parse_qs(content)
params = {}
for key, value in six.iteritems(urlencoded_params):
if len(value) != 1:
            msg = ('URL-encoded content contains a repeated value: '
                   '%s -> %s' % (key, ', '.join(value)))
raise ValueError(msg)
params[key] = value[0]
return params
def update_query_params(uri, params):
"""Updates a URI with new query parameters.
If a given key from ``params`` is repeated in the ``uri``, then
the URI will be considered invalid and an error will occur.
If the URI is valid, then each value from ``params`` will
replace the corresponding value in the query parameters (if
it exists).
Args:
uri: string, A valid URI, with potential existing query parameters.
params: dict, A dictionary of query parameters.
Returns:
The same URI but with the new query parameters added.
"""
parts = urllib.parse.urlparse(uri)
query_params = parse_unique_urlencoded(parts.query)
query_params.update(params)
new_query = urllib.parse.urlencode(query_params)
new_parts = parts._replace(query=new_query)
return urllib.parse.urlunparse(new_parts)
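# Example: adding and replacing query parameters on a URI (illustrative
# sketch; note that urlencode() does not guarantee parameter order).
def _example_update_query_params():
    uri = 'https://example.com/path?foo=bar'
    # Yields e.g. 'https://example.com/path?foo=baz&page=2'.
    return update_query_params(uri, {'foo': 'baz', 'page': '2'})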
def _add_query_parameter(url, name, value):
"""Adds a query parameter to a url.
Replaces the current value if it already exists in the URL.
Args:
url: string, url to add the query parameter to.
name: string, query parameter name.
value: string, query parameter value.
Returns:
        The url updated with the query parameter. The url is returned
        unchanged if value is None.
"""
if value is None:
return url
else:
return update_query_params(url, {name: value})
def validate_file(filename):
if os.path.islink(filename):
raise IOError(_SYM_LINK_MESSAGE.format(filename))
elif os.path.isdir(filename):
raise IOError(_IS_DIR_MESSAGE.format(filename))
elif not os.path.isfile(filename):
warnings.warn(_MISSING_FILE_MESSAGE.format(filename))
def _parse_pem_key(raw_key_input):
"""Identify and extract PEM keys.
Determines whether the given key is in the format of PEM key, and extracts
the relevant part of the key if it is.
Args:
raw_key_input: The contents of a private key file (either PEM or
PKCS12).
Returns:
string, The actual key if the contents are from a PEM file, or
else None.
"""
offset = raw_key_input.find(b'-----BEGIN ')
if offset != -1:
return raw_key_input[offset:]
def _json_encode(data):
return json.dumps(data, separators=(',', ':'))
def _to_bytes(value, encoding='ascii'):
"""Converts a string value to bytes, if necessary.
Unfortunately, ``six.b`` is insufficient for this task since in
Python2 it does not modify ``unicode`` objects.
Args:
value: The string/bytes value to be converted.
encoding: The encoding to use to convert unicode to bytes. Defaults
to "ascii", which will not allow any characters from ordinals
            larger than 127. Other useful values are "latin-1", which
            will only allow byte ordinals (up to 255), and "utf-8", which
            will encode any unicode character that needs to be.
Returns:
The original value converted to bytes (if unicode) or as passed in
if it started out as bytes.
Raises:
ValueError if the value could not be converted to bytes.
"""
result = (value.encode(encoding)
if isinstance(value, six.text_type) else value)
if isinstance(result, six.binary_type):
return result
else:
raise ValueError('{0!r} could not be converted to bytes'.format(value))
def _from_bytes(value):
"""Converts bytes to a string value, if necessary.
Args:
value: The string/bytes value to be converted.
Returns:
The original value converted to unicode (if bytes) or as passed in
if it started out as unicode.
Raises:
ValueError if the value could not be converted to unicode.
"""
result = (value.decode('utf-8')
if isinstance(value, six.binary_type) else value)
if isinstance(result, six.text_type):
return result
else:
raise ValueError(
'{0!r} could not be converted to unicode'.format(value))
def _urlsafe_b64encode(raw_bytes):
raw_bytes = _to_bytes(raw_bytes, encoding='utf-8')
return base64.urlsafe_b64encode(raw_bytes).rstrip(b'=')
def _urlsafe_b64decode(b64string):
# Guard against unicode strings, which base64 can't handle.
b64string = _to_bytes(b64string)
padded = b64string + b'=' * (4 - len(b64string) % 4)
return base64.urlsafe_b64decode(padded)
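# Example: the base64 helpers strip '=' padding on encode and restore it
# on decode (illustrative sketch).
def _example_b64_round_trip():
    encoded = _urlsafe_b64encode(b'data!')
    assert not encoded.endswith(b'=')
    assert _urlsafe_b64decode(encoded) == b'data!'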
# ==== end of oauth2client/_helpers.py ====
# ==== begin oauth2client/_pure_python_crypt.py ====
from pyasn1.codec.der import decoder
from pyasn1_modules import pem
from pyasn1_modules.rfc2459 import Certificate
from pyasn1_modules.rfc5208 import PrivateKeyInfo
import rsa
import six
from oauth2client import _helpers
_PKCS12_ERROR = r"""\
PKCS12 format is not supported by the RSA library.
Either install PyOpenSSL, or please convert .p12 format
to .pem format:
$ cat key.p12 | \
> openssl pkcs12 -nodes -nocerts -passin pass:notasecret | \
> openssl rsa > key.pem
"""
_POW2 = (128, 64, 32, 16, 8, 4, 2, 1)
_PKCS1_MARKER = ('-----BEGIN RSA PRIVATE KEY-----',
'-----END RSA PRIVATE KEY-----')
_PKCS8_MARKER = ('-----BEGIN PRIVATE KEY-----',
'-----END PRIVATE KEY-----')
_PKCS8_SPEC = PrivateKeyInfo()
def _bit_list_to_bytes(bit_list):
"""Converts an iterable of 1's and 0's to bytes.
Combines the list 8 at a time, treating each group of 8 bits
as a single byte.
"""
num_bits = len(bit_list)
byte_vals = bytearray()
for start in six.moves.xrange(0, num_bits, 8):
curr_bits = bit_list[start:start + 8]
char_val = sum(val * digit
for val, digit in zip(_POW2, curr_bits))
byte_vals.append(char_val)
return bytes(byte_vals)
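# Example: eight bits per byte, most significant bit first (illustrative
# sketch).
def _example_bit_list_to_bytes():
    assert _bit_list_to_bytes([0, 1, 0, 0, 0, 0, 0, 1]) == b'A'  # 0x41
    assert _bit_list_to_bytes([0, 1, 0, 0, 0, 0, 1, 0]) == b'B'  # 0x42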
class RsaVerifier(object):
"""Verifies the signature on a message.
Args:
pubkey: rsa.key.PublicKey (or equiv), The public key to verify with.
"""
def __init__(self, pubkey):
self._pubkey = pubkey
def verify(self, message, signature):
"""Verifies a message against a signature.
Args:
message: string or bytes, The message to verify. If string, will be
encoded to bytes as utf-8.
signature: string or bytes, The signature on the message. If
string, will be encoded to bytes as utf-8.
Returns:
True if message was signed by the private key associated with the
public key that this object was constructed with.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
try:
return rsa.pkcs1.verify(message, signature, self._pubkey)
except (ValueError, rsa.pkcs1.VerificationError):
return False
@classmethod
def from_string(cls, key_pem, is_x509_cert):
"""Construct an RsaVerifier instance from a string.
Args:
key_pem: string, public key in PEM format.
is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it
is expected to be an RSA key in PEM format.
Returns:
RsaVerifier instance.
Raises:
            ValueError: if the key_pem can't be parsed. In either case, the
                        error message will begin with 'No PEM start marker'.
                        If ``is_x509_cert`` is True, the missing marker is
                        "-----BEGIN CERTIFICATE-----", otherwise it is
                        "-----BEGIN RSA PUBLIC KEY-----".
"""
key_pem = _helpers._to_bytes(key_pem)
if is_x509_cert:
der = rsa.pem.load_pem(key_pem, 'CERTIFICATE')
asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
if remaining != b'':
raise ValueError('Unused bytes', remaining)
cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo']
key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey'])
pubkey = rsa.PublicKey.load_pkcs1(key_bytes, 'DER')
else:
pubkey = rsa.PublicKey.load_pkcs1(key_pem, 'PEM')
return cls(pubkey)
class RsaSigner(object):
"""Signs messages with a private key.
Args:
pkey: rsa.key.PrivateKey (or equiv), The private key to sign with.
"""
def __init__(self, pkey):
self._key = pkey
def sign(self, message):
"""Signs a message.
Args:
message: bytes, Message to be signed.
Returns:
string, The signature of the message for the given key.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
return rsa.pkcs1.sign(message, self._key, 'SHA-256')
@classmethod
def from_string(cls, key, password='notasecret'):
"""Construct an RsaSigner instance from a string.
Args:
key: string, private key in PEM format.
password: string, password for private key file. Unused for PEM
files.
Returns:
RsaSigner instance.
Raises:
ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in
PEM format.
"""
key = _helpers._from_bytes(key) # pem expects str in Py3
marker_id, key_bytes = pem.readPemBlocksFromFile(
six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)
if marker_id == 0:
pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes,
format='DER')
elif marker_id == 1:
key_info, remaining = decoder.decode(
key_bytes, asn1Spec=_PKCS8_SPEC)
if remaining != b'':
raise ValueError('Unused bytes', remaining)
pkey_info = key_info.getComponentByName('privateKey')
pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(),
format='DER')
else:
raise ValueError('No key could be detected.')
return cls(pkey)
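# Example: a sign/verify round trip through the pure-python classes above
# (illustrative sketch, assuming the ``rsa`` package is installed; the
# 512-bit key is for demo speed only and is too small for real use).
def _example_rsa_sign_and_verify():
    pub, priv = rsa.newkeys(512)
    signer = RsaSigner.from_string(priv.save_pkcs1())
    verifier = RsaVerifier.from_string(pub.save_pkcs1(), is_x509_cert=False)
    signature = signer.sign('a message to sign')
    return verifier.verify('a message to sign', signature)  # True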
# ==== end of oauth2client/_pure_python_crypt.py ====
# ==== begin oauth2client/service_account.py ====
import base64
import copy
import datetime
import json
import time
import oauth2client
from oauth2client import _helpers
from oauth2client import client
from oauth2client import crypt
from oauth2client import transport
_PASSWORD_DEFAULT = 'notasecret'
_PKCS12_KEY = '_private_key_pkcs12'
_PKCS12_ERROR = r"""
This library only implements PKCS#12 support via the pyOpenSSL library.
Either install pyOpenSSL, or please convert the .p12 file
to .pem format:
$ cat key.p12 | \
> openssl pkcs12 -nodes -nocerts -passin pass:notasecret | \
> openssl rsa > key.pem
"""
class ServiceAccountCredentials(client.AssertionCredentials):
"""Service Account credential for OAuth 2.0 signed JWT grants.
Supports
* JSON keyfile (typically contains a PKCS8 key stored as
PEM text)
* ``.p12`` key (stores PKCS12 key and certificate)
Makes an assertion to server using a signed JWT assertion in exchange
for an access token.
This credential does not require a flow to instantiate because it
represents a two legged flow, and therefore has all of the required
information to generate and refresh its own access tokens.
Args:
service_account_email: string, The email associated with the
service account.
signer: ``crypt.Signer``, A signer which can be used to sign content.
scopes: List or string, (Optional) Scopes to use when acquiring
an access token.
private_key_id: string, (Optional) Private key identifier. Typically
only used with a JSON keyfile. Can be sent in the
header of a JWT token assertion.
client_id: string, (Optional) Client ID for the project that owns the
service account.
user_agent: string, (Optional) User agent to use when sending
request.
token_uri: string, URI for token endpoint. For convenience defaults
to Google's endpoints but any OAuth 2.0 provider can be
used.
revoke_uri: string, URI for revoke endpoint. For convenience defaults
to Google's endpoints but any OAuth 2.0 provider can be
used.
kwargs: dict, Extra key-value pairs (both strings) to send in the
payload body when making an assertion.
"""
MAX_TOKEN_LIFETIME_SECS = 3600
"""Max lifetime of the token (one hour, in seconds)."""
NON_SERIALIZED_MEMBERS = (
frozenset(['_signer']) |
client.AssertionCredentials.NON_SERIALIZED_MEMBERS)
"""Members that aren't serialized when object is converted to JSON."""
# Can be over-ridden by factory constructors. Used for
# serialization/deserialization purposes.
_private_key_pkcs8_pem = None
_private_key_pkcs12 = None
_private_key_password = None
def __init__(self,
service_account_email,
signer,
scopes='',
private_key_id=None,
client_id=None,
user_agent=None,
token_uri=oauth2client.GOOGLE_TOKEN_URI,
revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
**kwargs):
super(ServiceAccountCredentials, self).__init__(
None, user_agent=user_agent, token_uri=token_uri,
revoke_uri=revoke_uri)
self._service_account_email = service_account_email
self._signer = signer
self._scopes = _helpers.scopes_to_string(scopes)
self._private_key_id = private_key_id
self.client_id = client_id
self._user_agent = user_agent
self._kwargs = kwargs
def _to_json(self, strip, to_serialize=None):
"""Utility function that creates JSON repr. of a credentials object.
Over-ride is needed since PKCS#12 keys will not in general be JSON
serializable.
Args:
strip: array, An array of names of members to exclude from the
JSON.
to_serialize: dict, (Optional) The properties for this object
that will be serialized. This allows callers to
modify before serializing.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
if to_serialize is None:
to_serialize = copy.copy(self.__dict__)
pkcs12_val = to_serialize.get(_PKCS12_KEY)
if pkcs12_val is not None:
to_serialize[_PKCS12_KEY] = base64.b64encode(pkcs12_val)
return super(ServiceAccountCredentials, self)._to_json(
strip, to_serialize=to_serialize)
@classmethod
def _from_parsed_json_keyfile(cls, keyfile_dict, scopes,
token_uri=None, revoke_uri=None):
"""Helper for factory constructors from JSON keyfile.
Args:
keyfile_dict: dict-like object, The parsed dictionary-like object
containing the contents of the JSON keyfile.
scopes: List or string, Scopes to use when acquiring an
access token.
token_uri: string, URI for OAuth 2.0 provider token endpoint.
If unset and not present in keyfile_dict, defaults
to Google's endpoints.
revoke_uri: string, URI for OAuth 2.0 provider revoke endpoint.
If unset and not present in keyfile_dict, defaults
to Google's endpoints.
Returns:
ServiceAccountCredentials, a credentials object created from
the keyfile contents.
Raises:
ValueError, if the credential type is not :data:`SERVICE_ACCOUNT`.
KeyError, if one of the expected keys is not present in
the keyfile.
"""
creds_type = keyfile_dict.get('type')
if creds_type != client.SERVICE_ACCOUNT:
raise ValueError('Unexpected credentials type', creds_type,
'Expected', client.SERVICE_ACCOUNT)
service_account_email = keyfile_dict['client_email']
private_key_pkcs8_pem = keyfile_dict['private_key']
private_key_id = keyfile_dict['private_key_id']
client_id = keyfile_dict['client_id']
if not token_uri:
token_uri = keyfile_dict.get('token_uri',
oauth2client.GOOGLE_TOKEN_URI)
if not revoke_uri:
revoke_uri = keyfile_dict.get('revoke_uri',
oauth2client.GOOGLE_REVOKE_URI)
signer = crypt.Signer.from_string(private_key_pkcs8_pem)
credentials = cls(service_account_email, signer, scopes=scopes,
private_key_id=private_key_id,
client_id=client_id, token_uri=token_uri,
revoke_uri=revoke_uri)
credentials._private_key_pkcs8_pem = private_key_pkcs8_pem
return credentials
@classmethod
def from_json_keyfile_name(cls, filename, scopes='',
token_uri=None, revoke_uri=None):
"""Factory constructor from JSON keyfile by name.
Args:
filename: string, The location of the keyfile.
scopes: List or string, (Optional) Scopes to use when acquiring an
access token.
token_uri: string, URI for OAuth 2.0 provider token endpoint.
If unset and not present in the key file, defaults
to Google's endpoints.
revoke_uri: string, URI for OAuth 2.0 provider revoke endpoint.
If unset and not present in the key file, defaults
to Google's endpoints.
Returns:
ServiceAccountCredentials, a credentials object created from
the keyfile.
Raises:
ValueError, if the credential type is not :data:`SERVICE_ACCOUNT`.
KeyError, if one of the expected keys is not present in
the keyfile.
"""
with open(filename, 'r') as file_obj:
client_credentials = json.load(file_obj)
return cls._from_parsed_json_keyfile(client_credentials, scopes,
token_uri=token_uri,
revoke_uri=revoke_uri)
@classmethod
def from_json_keyfile_dict(cls, keyfile_dict, scopes='',
token_uri=None, revoke_uri=None):
"""Factory constructor from parsed JSON keyfile.
Args:
keyfile_dict: dict-like object, The parsed dictionary-like object
containing the contents of the JSON keyfile.
scopes: List or string, (Optional) Scopes to use when acquiring an
access token.
token_uri: string, URI for OAuth 2.0 provider token endpoint.
If unset and not present in keyfile_dict, defaults
to Google's endpoints.
revoke_uri: string, URI for OAuth 2.0 provider revoke endpoint.
If unset and not present in keyfile_dict, defaults
to Google's endpoints.
Returns:
ServiceAccountCredentials, a credentials object created from
the keyfile.
Raises:
ValueError, if the credential type is not :data:`SERVICE_ACCOUNT`.
KeyError, if one of the expected keys is not present in
the keyfile.
"""
return cls._from_parsed_json_keyfile(keyfile_dict, scopes,
token_uri=token_uri,
revoke_uri=revoke_uri)
@classmethod
def _from_p12_keyfile_contents(cls, service_account_email,
private_key_pkcs12,
private_key_password=None, scopes='',
token_uri=oauth2client.GOOGLE_TOKEN_URI,
revoke_uri=oauth2client.GOOGLE_REVOKE_URI):
"""Factory constructor from JSON keyfile.
Args:
service_account_email: string, The email associated with the
service account.
private_key_pkcs12: string, The contents of a PKCS#12 keyfile.
private_key_password: string, (Optional) Password for PKCS#12
private key. Defaults to ``notasecret``.
scopes: List or string, (Optional) Scopes to use when acquiring an
access token.
token_uri: string, URI for token endpoint. For convenience defaults
to Google's endpoints but any OAuth 2.0 provider can be
used.
revoke_uri: string, URI for revoke endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0
provider can be used.
Returns:
ServiceAccountCredentials, a credentials object created from
the keyfile.
Raises:
NotImplementedError if pyOpenSSL is not installed / not the
active crypto library.
"""
if private_key_password is None:
private_key_password = _PASSWORD_DEFAULT
if crypt.Signer is not crypt.OpenSSLSigner:
raise NotImplementedError(_PKCS12_ERROR)
signer = crypt.Signer.from_string(private_key_pkcs12,
private_key_password)
credentials = cls(service_account_email, signer, scopes=scopes,
token_uri=token_uri, revoke_uri=revoke_uri)
credentials._private_key_pkcs12 = private_key_pkcs12
credentials._private_key_password = private_key_password
return credentials
@classmethod
def from_p12_keyfile(cls, service_account_email, filename,
private_key_password=None, scopes='',
token_uri=oauth2client.GOOGLE_TOKEN_URI,
revoke_uri=oauth2client.GOOGLE_REVOKE_URI):
"""Factory constructor from JSON keyfile.
Args:
service_account_email: string, The email associated with the
service account.
filename: string, The location of the PKCS#12 keyfile.
private_key_password: string, (Optional) Password for PKCS#12
private key. Defaults to ``notasecret``.
scopes: List or string, (Optional) Scopes to use when acquiring an
access token.
token_uri: string, URI for token endpoint. For convenience defaults
to Google's endpoints but any OAuth 2.0 provider can be
used.
revoke_uri: string, URI for revoke endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0
provider can be used.
Returns:
ServiceAccountCredentials, a credentials object created from
the keyfile.
Raises:
NotImplementedError if pyOpenSSL is not installed / not the
active crypto library.
"""
with open(filename, 'rb') as file_obj:
private_key_pkcs12 = file_obj.read()
return cls._from_p12_keyfile_contents(
service_account_email, private_key_pkcs12,
private_key_password=private_key_password, scopes=scopes,
token_uri=token_uri, revoke_uri=revoke_uri)
@classmethod
def from_p12_keyfile_buffer(cls, service_account_email, file_buffer,
private_key_password=None, scopes='',
token_uri=oauth2client.GOOGLE_TOKEN_URI,
revoke_uri=oauth2client.GOOGLE_REVOKE_URI):
"""Factory constructor from JSON keyfile.
Args:
service_account_email: string, The email associated with the
service account.
file_buffer: stream, A buffer that implements ``read()``
and contains the PKCS#12 key contents.
private_key_password: string, (Optional) Password for PKCS#12
private key. Defaults to ``notasecret``.
scopes: List or string, (Optional) Scopes to use when acquiring an
access token.
token_uri: string, URI for token endpoint. For convenience defaults
to Google's endpoints but any OAuth 2.0 provider can be
used.
revoke_uri: string, URI for revoke endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0
provider can be used.
Returns:
ServiceAccountCredentials, a credentials object created from
the keyfile.
Raises:
NotImplementedError if pyOpenSSL is not installed / not the
active crypto library.
"""
private_key_pkcs12 = file_buffer.read()
return cls._from_p12_keyfile_contents(
service_account_email, private_key_pkcs12,
private_key_password=private_key_password, scopes=scopes,
token_uri=token_uri, revoke_uri=revoke_uri)
def _generate_assertion(self):
"""Generate the assertion that will be used in the request."""
now = int(time.time())
payload = {
'aud': self.token_uri,
'scope': self._scopes,
'iat': now,
'exp': now + self.MAX_TOKEN_LIFETIME_SECS,
'iss': self._service_account_email,
}
payload.update(self._kwargs)
return crypt.make_signed_jwt(self._signer, payload,
key_id=self._private_key_id)
def sign_blob(self, blob):
"""Cryptographically sign a blob (of bytes).
Implements abstract method
:meth:`oauth2client.client.AssertionCredentials.sign_blob`.
Args:
blob: bytes, Message to be signed.
Returns:
tuple, A pair of the private key ID used to sign the blob and
the signed contents.
"""
return self._private_key_id, self._signer.sign(blob)
@property
def service_account_email(self):
"""Get the email for the current service account.
Returns:
string, The email associated with the service account.
"""
return self._service_account_email
@property
def serialization_data(self):
# NOTE: This is only useful for JSON keyfile.
return {
'type': 'service_account',
'client_email': self._service_account_email,
'private_key_id': self._private_key_id,
'private_key': self._private_key_pkcs8_pem,
'client_id': self.client_id,
}
@classmethod
def from_json(cls, json_data):
"""Deserialize a JSON-serialized instance.
Inverse to :meth:`to_json`.
Args:
json_data: dict or string, Serialized JSON (as a string or an
already parsed dictionary) representing a credential.
Returns:
ServiceAccountCredentials from the serialized data.
"""
if not isinstance(json_data, dict):
json_data = json.loads(_helpers._from_bytes(json_data))
private_key_pkcs8_pem = None
pkcs12_val = json_data.get(_PKCS12_KEY)
password = None
if pkcs12_val is None:
private_key_pkcs8_pem = json_data['_private_key_pkcs8_pem']
signer = crypt.Signer.from_string(private_key_pkcs8_pem)
else:
            # NOTE: This assumes that private_key_pkcs8_pem is not also
            #       in the serialized data; having both present would be
            #       an inconsistent state.
pkcs12_val = base64.b64decode(pkcs12_val)
password = json_data['_private_key_password']
signer = crypt.Signer.from_string(pkcs12_val, password)
credentials = cls(
json_data['_service_account_email'],
signer,
scopes=json_data['_scopes'],
private_key_id=json_data['_private_key_id'],
client_id=json_data['client_id'],
user_agent=json_data['_user_agent'],
**json_data['_kwargs']
)
if private_key_pkcs8_pem is not None:
credentials._private_key_pkcs8_pem = private_key_pkcs8_pem
if pkcs12_val is not None:
credentials._private_key_pkcs12 = pkcs12_val
if password is not None:
credentials._private_key_password = password
credentials.invalid = json_data['invalid']
credentials.access_token = json_data['access_token']
credentials.token_uri = json_data['token_uri']
credentials.revoke_uri = json_data['revoke_uri']
token_expiry = json_data.get('token_expiry', None)
if token_expiry is not None:
credentials.token_expiry = datetime.datetime.strptime(
token_expiry, client.EXPIRY_FORMAT)
return credentials
def create_scoped_required(self):
return not self._scopes
def create_scoped(self, scopes):
result = self.__class__(self._service_account_email,
self._signer,
scopes=scopes,
private_key_id=self._private_key_id,
client_id=self.client_id,
user_agent=self._user_agent,
**self._kwargs)
result.token_uri = self.token_uri
result.revoke_uri = self.revoke_uri
result._private_key_pkcs8_pem = self._private_key_pkcs8_pem
result._private_key_pkcs12 = self._private_key_pkcs12
result._private_key_password = self._private_key_password
return result
def create_with_claims(self, claims):
"""Create credentials that specify additional claims.
Args:
claims: dict, key-value pairs for claims.
Returns:
ServiceAccountCredentials, a copy of the current service account
credentials with updated claims to use when obtaining access
tokens.
"""
new_kwargs = dict(self._kwargs)
new_kwargs.update(claims)
result = self.__class__(self._service_account_email,
self._signer,
scopes=self._scopes,
private_key_id=self._private_key_id,
client_id=self.client_id,
user_agent=self._user_agent,
**new_kwargs)
result.token_uri = self.token_uri
result.revoke_uri = self.revoke_uri
result._private_key_pkcs8_pem = self._private_key_pkcs8_pem
result._private_key_pkcs12 = self._private_key_pkcs12
result._private_key_password = self._private_key_password
return result
def create_delegated(self, sub):
"""Create credentials that act as domain-wide delegation of authority.
Use the ``sub`` parameter as the subject to delegate on behalf of
that user.
For example::
>>> account_sub = 'foo@email.com'
>>> delegate_creds = creds.create_delegated(account_sub)
Args:
sub: string, An email address that this service account will
act on behalf of (via domain-wide delegation).
Returns:
ServiceAccountCredentials, a copy of the current service account
updated to act on behalf of ``sub``.
"""
return self.create_with_claims({'sub': sub})
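# Example: the typical JSON-keyfile flow (illustrative sketch; the
# filename, scope and delegated subject below are placeholders).
def _example_service_account_usage():
    creds = ServiceAccountCredentials.from_json_keyfile_name(
        'service-account.json',  # placeholder path
        scopes=['https://www.googleapis.com/auth/devstorage.read_only'])
    return creds.create_delegated('user@example.com')  # placeholder subject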
def _datetime_to_secs(utc_time):
# TODO(issue 298): use time_delta.total_seconds()
# time_delta.total_seconds() not supported in Python 2.6
epoch = datetime.datetime(1970, 1, 1)
time_delta = utc_time - epoch
return time_delta.days * 86400 + time_delta.seconds
class _JWTAccessCredentials(ServiceAccountCredentials):
"""Self signed JWT credentials.
Makes an assertion to server using a self signed JWT from service account
credentials. These credentials do NOT use OAuth 2.0 and instead
authenticate directly.
"""
_MAX_TOKEN_LIFETIME_SECS = 3600
"""Max lifetime of the token (one hour, in seconds)."""
def __init__(self,
service_account_email,
signer,
scopes=None,
private_key_id=None,
client_id=None,
user_agent=None,
token_uri=oauth2client.GOOGLE_TOKEN_URI,
revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
additional_claims=None):
if additional_claims is None:
additional_claims = {}
super(_JWTAccessCredentials, self).__init__(
service_account_email,
signer,
private_key_id=private_key_id,
client_id=client_id,
user_agent=user_agent,
token_uri=token_uri,
revoke_uri=revoke_uri,
**additional_claims)
def authorize(self, http):
"""Authorize an httplib2.Http instance with a JWT assertion.
Unless specified, the 'aud' of the assertion will be the base
uri of the request.
Args:
http: An instance of ``httplib2.Http`` or something that acts
like it.
Returns:
A modified instance of http that was passed in.
Example::
h = httplib2.Http()
h = credentials.authorize(h)
"""
transport.wrap_http_for_jwt_access(self, http)
return http
def get_access_token(self, http=None, additional_claims=None):
"""Create a signed jwt.
Args:
http: unused
additional_claims: dict, additional claims to add to
the payload of the JWT.
Returns:
An AccessTokenInfo with the signed jwt
"""
if additional_claims is None:
if self.access_token is None or self.access_token_expired:
self.refresh(None)
return client.AccessTokenInfo(
access_token=self.access_token, expires_in=self._expires_in())
else:
            # Create a one-time token
token, unused_expiry = self._create_token(additional_claims)
return client.AccessTokenInfo(
access_token=token, expires_in=self._MAX_TOKEN_LIFETIME_SECS)
def revoke(self, http):
"""Cannot revoke JWTAccessCredentials tokens."""
pass
def create_scoped_required(self):
# JWTAccessCredentials are unscoped by definition
return True
def create_scoped(self, scopes, token_uri=oauth2client.GOOGLE_TOKEN_URI,
revoke_uri=oauth2client.GOOGLE_REVOKE_URI):
# Returns an OAuth2 credentials with the given scope
result = ServiceAccountCredentials(self._service_account_email,
self._signer,
scopes=scopes,
private_key_id=self._private_key_id,
client_id=self.client_id,
user_agent=self._user_agent,
token_uri=token_uri,
revoke_uri=revoke_uri,
**self._kwargs)
if self._private_key_pkcs8_pem is not None:
result._private_key_pkcs8_pem = self._private_key_pkcs8_pem
if self._private_key_pkcs12 is not None:
result._private_key_pkcs12 = self._private_key_pkcs12
if self._private_key_password is not None:
result._private_key_password = self._private_key_password
return result
def refresh(self, http):
"""Refreshes the access_token.
        The HTTP object is unused since no request needs to be made to
        get a new token; it can be generated locally.
Args:
http: unused HTTP object
"""
self._refresh(None)
def _refresh(self, http):
"""Refreshes the access_token.
Args:
http: unused HTTP object
"""
self.access_token, self.token_expiry = self._create_token()
def _create_token(self, additional_claims=None):
now = client._UTCNOW()
lifetime = datetime.timedelta(seconds=self._MAX_TOKEN_LIFETIME_SECS)
expiry = now + lifetime
payload = {
'iat': _datetime_to_secs(now),
'exp': _datetime_to_secs(expiry),
'iss': self._service_account_email,
'sub': self._service_account_email
}
payload.update(self._kwargs)
if additional_claims is not None:
payload.update(additional_claims)
jwt = crypt.make_signed_jwt(self._signer, payload,
key_id=self._private_key_id)
return jwt.decode('ascii'), expiry
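# Example: _JWTAccessCredentials mints self-signed JWTs locally, so no
# HTTP round trip is needed (illustrative sketch; ``creds`` is assumed to
# be a _JWTAccessCredentials instance).
def _example_jwt_access_token(creds):
    token_info = creds.get_access_token()  # refreshes locally if stale
    return token_info.access_token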
# ==== end of oauth2client/service_account.py ====
# ==== begin oauth2client/_openssl_crypt.py ====
"""OpenSSL Crypto-related routines for oauth2client."""
from OpenSSL import crypto
from oauth2client import _helpers
class OpenSSLVerifier(object):
"""Verifies the signature on a message."""
def __init__(self, pubkey):
"""Constructor.
Args:
pubkey: OpenSSL.crypto.PKey, The public key to verify with.
"""
self._pubkey = pubkey
def verify(self, message, signature):
"""Verifies a message against a signature.
Args:
message: string or bytes, The message to verify. If string, will be
encoded to bytes as utf-8.
signature: string or bytes, The signature on the message. If string,
will be encoded to bytes as utf-8.
Returns:
True if message was signed by the private key associated with the
public key that this object was constructed with.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
signature = _helpers._to_bytes(signature, encoding='utf-8')
try:
crypto.verify(self._pubkey, signature, message, 'sha256')
return True
except crypto.Error:
return False
@staticmethod
def from_string(key_pem, is_x509_cert):
"""Construct a Verified instance from a string.
Args:
key_pem: string, public key in PEM format.
is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it
is expected to be an RSA key in PEM format.
Returns:
Verifier instance.
Raises:
OpenSSL.crypto.Error: if the key_pem can't be parsed.
"""
key_pem = _helpers._to_bytes(key_pem)
if is_x509_cert:
pubkey = crypto.load_certificate(crypto.FILETYPE_PEM, key_pem)
else:
pubkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key_pem)
return OpenSSLVerifier(pubkey)
class OpenSSLSigner(object):
"""Signs messages with a private key."""
def __init__(self, pkey):
"""Constructor.
Args:
pkey: OpenSSL.crypto.PKey (or equiv), The private key to sign with.
"""
self._key = pkey
def sign(self, message):
"""Signs a message.
Args:
message: bytes, Message to be signed.
Returns:
string, The signature of the message for the given key.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
return crypto.sign(self._key, message, 'sha256')
@staticmethod
def from_string(key, password=b'notasecret'):
"""Construct a Signer instance from a string.
Args:
key: string, private key in PKCS12 or PEM format.
password: string, password for the private key file.
Returns:
Signer instance.
Raises:
OpenSSL.crypto.Error if the key can't be parsed.
"""
key = _helpers._to_bytes(key)
parsed_pem_key = _helpers._parse_pem_key(key)
if parsed_pem_key:
pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, parsed_pem_key)
else:
password = _helpers._to_bytes(password, encoding='utf-8')
pkey = crypto.load_pkcs12(key, password).get_privatekey()
return OpenSSLSigner(pkey)
def pkcs12_key_as_pem(private_key_bytes, private_key_password):
"""Convert the contents of a PKCS#12 key to PEM using pyOpenSSL.
Args:
private_key_bytes: Bytes. PKCS#12 key in DER format.
private_key_password: String. Password for PKCS#12 key.
Returns:
String. PEM contents of ``private_key_bytes``.
"""
private_key_password = _helpers._to_bytes(private_key_password)
pkcs12 = crypto.load_pkcs12(private_key_bytes, private_key_password)
return crypto.dump_privatekey(crypto.FILETYPE_PEM,
pkcs12.get_privatekey())
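# Example: signing with an ad-hoc generated key (illustrative sketch,
# assuming pyOpenSSL is installed).
def _example_openssl_sign():
    pkey = crypto.PKey()
    pkey.generate_key(crypto.TYPE_RSA, 2048)  # ad-hoc demo key
    return OpenSSLSigner(pkey).sign('a message to sign')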
# ==== end of oauth2client/_openssl_crypt.py ====
# ==== begin oauth2client/contrib/keyring_storage.py ====
import threading
import keyring
from oauth2client import client
class Storage(client.Storage):
"""Store and retrieve a single credential to and from the keyring.
To use this module you must have the keyring module installed. See
<http://pypi.python.org/pypi/keyring/>. This is an optional module and is
not installed with oauth2client by default because it does not work on all
the platforms that oauth2client supports, such as Google App Engine.
The keyring module <http://pypi.python.org/pypi/keyring/> is a
    cross-platform library for accessing the keyring capabilities of the local
system. The user will be prompted for their keyring password when this
module is used, and the manner in which the user is prompted will vary per
platform.
Usage::
from oauth2client import keyring_storage
s = keyring_storage.Storage('name_of_application', 'user1')
credentials = s.get()
"""
def __init__(self, service_name, user_name):
"""Constructor.
Args:
service_name: string, The name of the service under which the
credentials are stored.
user_name: string, The name of the user to store credentials for.
"""
super(Storage, self).__init__(lock=threading.Lock())
self._service_name = service_name
self._user_name = user_name
def locked_get(self):
"""Retrieve Credential from file.
Returns:
oauth2client.client.Credentials
"""
credentials = None
content = keyring.get_password(self._service_name, self._user_name)
if content is not None:
try:
credentials = client.Credentials.new_from_json(content)
credentials.set_store(self)
except ValueError:
pass
return credentials
def locked_put(self, credentials):
"""Write Credentials to file.
Args:
credentials: Credentials, the credentials to store.
"""
keyring.set_password(self._service_name, self._user_name,
credentials.to_json())
def locked_delete(self):
"""Delete Credentials file.
Args:
credentials: Credentials, the credentials to store.
"""
keyring.set_password(self._service_name, self._user_name, '')
# ==== end of oauth2client/contrib/keyring_storage.py ====
# ==== begin oauth2client/contrib/gce.py ====
import logging
import warnings
from six.moves import http_client
from oauth2client import client
from oauth2client.contrib import _metadata
logger = logging.getLogger(__name__)
_SCOPES_WARNING = """\
You have requested explicit scopes to be used with a GCE service account.
Using this argument will have no effect on the actual scopes for tokens
requested. These scopes are set at VM instance creation time and
can't be overridden in the request.
"""
class AppAssertionCredentials(client.AssertionCredentials):
"""Credentials object for Compute Engine Assertion Grants
This object will allow a Compute Engine instance to identify itself to
Google and other OAuth 2.0 servers that can verify assertions. It can be
used for the purpose of accessing data stored under an account assigned to
the Compute Engine instance itself.
This credential does not require a flow to instantiate because it
represents a two legged flow, and therefore has all of the required
information to generate and refresh its own access tokens.
Note that :attr:`service_account_email` and :attr:`scopes`
will both return None until the credentials have been refreshed.
To check whether credentials have previously been refreshed use
:attr:`invalid`.
"""
def __init__(self, email=None, *args, **kwargs):
"""Constructor for AppAssertionCredentials
Args:
email: an email that specifies the service account to use.
Only necessary if using custom service accounts
(see https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances#createdefaultserviceaccount).
"""
if 'scopes' in kwargs:
warnings.warn(_SCOPES_WARNING)
kwargs['scopes'] = None
# Assertion type is no longer used, but still in the
# parent class signature.
super(AppAssertionCredentials, self).__init__(None, *args, **kwargs)
self.service_account_email = email
self.scopes = None
self.invalid = True
@classmethod
def from_json(cls, json_data):
raise NotImplementedError(
'Cannot serialize credentials for GCE service accounts.')
def to_json(self):
raise NotImplementedError(
'Cannot serialize credentials for GCE service accounts.')
def retrieve_scopes(self, http):
"""Retrieves the canonical list of scopes for this access token.
Overrides client.Credentials.retrieve_scopes. Fetches scopes info
from the metadata server.
Args:
http: httplib2.Http, an http object to be used to make the refresh
request.
Returns:
A set of strings containing the canonical list of scopes.
"""
self._retrieve_info(http)
return self.scopes
def _retrieve_info(self, http):
"""Retrieves service account info for invalid credentials.
Args:
http: an object to be used to make HTTP requests.
"""
if self.invalid:
info = _metadata.get_service_account_info(
http,
service_account=self.service_account_email or 'default')
self.invalid = False
self.service_account_email = info['email']
self.scopes = info['scopes']
def _refresh(self, http):
"""Refreshes the access token.
Skip all the storage hoops and just refresh using the API.
Args:
http: an object to be used to make HTTP requests.
Raises:
HttpAccessTokenRefreshError: When the refresh fails.
"""
try:
self._retrieve_info(http)
self.access_token, self.token_expiry = _metadata.get_token(
http, service_account=self.service_account_email)
except http_client.HTTPException as err:
raise client.HttpAccessTokenRefreshError(str(err))
@property
def serialization_data(self):
raise NotImplementedError(
'Cannot serialize credentials for GCE service accounts.')
def create_scoped_required(self):
return False
def sign_blob(self, blob):
"""Cryptographically sign a blob (of bytes).
This method is provided to support a common interface, but
the actual key used for a Google Compute Engine service account
is not available, so it can't be used to sign content.
Args:
blob: bytes, Message to be signed.
Raises:
NotImplementedError, always.
"""
raise NotImplementedError(
'Compute Engine service accounts cannot sign blobs')
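# Example: on a Compute Engine VM the credentials refresh themselves from
# the metadata server (illustrative sketch; only works when run on GCE).
def _example_gce_credentials():
    import httplib2
    creds = AppAssertionCredentials()  # default service account
    return creds.authorize(httplib2.Http())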
# ==== end of oauth2client/contrib/gce.py ====
# ==== begin oauth2client/contrib/_appengine_ndb.py ====
import logging
from google.appengine.ext import ndb
from oauth2client import client
NDB_KEY = ndb.Key
"""Key constant used by :mod:`oauth2client.contrib.appengine`."""
NDB_MODEL = ndb.Model
"""Model constant used by :mod:`oauth2client.contrib.appengine`."""
_LOGGER = logging.getLogger(__name__)
class SiteXsrfSecretKeyNDB(ndb.Model):
"""NDB Model for storage for the sites XSRF secret key.
Since this model uses the same kind as SiteXsrfSecretKey, it can be
used interchangeably. This simply provides an NDB model for interacting
with the same data the DB model interacts with.
There should only be one instance stored of this model, the one used
for the site.
"""
secret = ndb.StringProperty()
@classmethod
def _get_kind(cls):
"""Return the kind name for this class."""
return 'SiteXsrfSecretKey'
class FlowNDBProperty(ndb.PickleProperty):
"""App Engine NDB datastore Property for Flow.
Serves the same purpose as the DB FlowProperty, but for NDB models.
Since PickleProperty inherits from BlobProperty, the underlying
representation of the data in the datastore will be the same as in the
DB case.
Utility property that allows easy storage and retrieval of an
oauth2client.Flow
"""
def _validate(self, value):
"""Validates a value as a proper Flow object.
Args:
value: A value to be set on the property.
Raises:
TypeError if the value is not an instance of Flow.
"""
_LOGGER.info('validate: Got type %s', type(value))
if value is not None and not isinstance(value, client.Flow):
raise TypeError(
'Property {0} must be convertible to a flow '
'instance; received: {1}.'.format(self._name, value))
class CredentialsNDBProperty(ndb.BlobProperty):
"""App Engine NDB datastore Property for Credentials.
Serves the same purpose as the DB CredentialsProperty, but for NDB
models. Since CredentialsProperty stores data as a blob and this
inherits from BlobProperty, the data in the datastore will be the same
as in the DB case.
Utility property that allows easy storage and retrieval of Credentials
and subclasses.
"""
def _validate(self, value):
"""Validates a value as a proper credentials object.
Args:
value: A value to be set on the property.
Raises:
TypeError if the value is not an instance of Credentials.
"""
_LOGGER.info('validate: Got type %s', type(value))
if value is not None and not isinstance(value, client.Credentials):
raise TypeError(
'Property {0} must be convertible to a credentials '
'instance; received: {1}.'.format(self._name, value))
def _to_base_type(self, value):
"""Converts our validated value to a JSON serialized string.
Args:
value: A value to be set in the datastore.
Returns:
A JSON serialized version of the credential, else '' if value
is None.
"""
if value is None:
return ''
else:
return value.to_json()
def _from_base_type(self, value):
"""Converts our stored JSON string back to the desired type.
Args:
value: A value from the datastore to be converted to the
desired type.
Returns:
A deserialized Credentials (or subclass) object, else None if
the value can't be parsed.
"""
if not value:
return None
try:
# Uses the from_json method of the implied class of value
credentials = client.Credentials.new_from_json(value)
except ValueError:
credentials = None
return credentials
class CredentialsNDBModel(ndb.Model):
"""NDB Model for storage of OAuth 2.0 Credentials
Since this model uses the same kind as CredentialsModel and has a
property which can serialize and deserialize Credentials correctly, it
can be used interchangeably with a CredentialsModel to access, insert
and delete the same entities. This simply provides an NDB model for
interacting with the same data the DB model interacts with.
Storage of the model is keyed by the user.user_id().
"""
credentials = CredentialsNDBProperty()
@classmethod
def _get_kind(cls):
"""Return the kind name for this class."""
return 'CredentialsModel'
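# Example: storing credentials keyed by the App Engine user id
# (illustrative sketch; only meaningful inside an App Engine runtime with
# an active NDB context, and ``user_id`` is assumed to be a string).
def _example_store_credentials(user_id, credentials):
    entity = CredentialsNDBModel.get_or_insert(user_id)
    entity.credentials = credentials
    entity.put()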
# ==== end of oauth2client/contrib/_appengine_ndb.py ====
# ==== begin oauth2client/contrib/xsrfutil.py ====
import base64
import binascii
import hmac
import time
from oauth2client import _helpers
# Delimiter character
DELIMITER = b':'
# 1 hour in seconds
DEFAULT_TIMEOUT_SECS = 60 * 60
@_helpers.positional(2)
def generate_token(key, user_id, action_id='', when=None):
"""Generates a URL-safe token for the given user, action, time tuple.
Args:
key: secret key to use.
user_id: the user ID of the authenticated user.
action_id: a string identifier of the action they requested
authorization for.
when: the time in seconds since the epoch at which the user was
authorized for this action. If not set the current time is used.
Returns:
A string XSRF protection token.
"""
digester = hmac.new(_helpers._to_bytes(key, encoding='utf-8'))
digester.update(_helpers._to_bytes(str(user_id), encoding='utf-8'))
digester.update(DELIMITER)
digester.update(_helpers._to_bytes(action_id, encoding='utf-8'))
digester.update(DELIMITER)
when = _helpers._to_bytes(str(when or int(time.time())), encoding='utf-8')
digester.update(when)
digest = digester.digest()
token = base64.urlsafe_b64encode(digest + DELIMITER + when)
return token
@_helpers.positional(3)
def validate_token(key, token, user_id, action_id="", current_time=None):
"""Validates that the given token authorizes the user for the action.
Tokens are invalid if the time of issue is too old or if the token
    does not match what generate_token outputs (i.e. the token was forged).
    Args:
        key: secret key to use.
        token: a string of the token generated by generate_token.
        user_id: the user ID of the authenticated user.
        action_id: a string identifier of the action they requested
                   authorization for.
        current_time: the time in seconds since the epoch at which to
                      validate the token; defaults to the current time.
Returns:
A boolean - True if the user is authorized for the action, False
otherwise.
"""
if not token:
return False
try:
decoded = base64.urlsafe_b64decode(token)
token_time = int(decoded.split(DELIMITER)[-1])
except (TypeError, ValueError, binascii.Error):
return False
if current_time is None:
current_time = time.time()
# If the token is too old it's not valid.
if current_time - token_time > DEFAULT_TIMEOUT_SECS:
return False
# The given token should match the generated one with the same time.
expected_token = generate_token(key, user_id, action_id=action_id,
when=token_time)
if len(token) != len(expected_token):
return False
# Perform constant time comparison to avoid timing attacks
different = 0
for x, y in zip(bytearray(token), bytearray(expected_token)):
different |= x ^ y
return not different
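# Example: a token round trip (illustrative sketch; note that hmac.new()
# above relies on the Python 2 default digestmod of MD5, which Python 3.8
# and later no longer provide).
def _example_xsrf_round_trip():
    token = generate_token(b'secret-key', 42, action_id='delete')
    assert validate_token(b'secret-key', token, 42, action_id='delete')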
# ==== end of oauth2client/contrib/xsrfutil.py ====
# ==== begin oauth2client/contrib/sqlalchemy.py ====
from __future__ import absolute_import
import sqlalchemy.types
from oauth2client import client
class CredentialsType(sqlalchemy.types.PickleType):
"""Type representing credentials.
Alias for :class:`sqlalchemy.types.PickleType`.
"""
class Storage(client.Storage):
"""Store and retrieve a single credential to and from SQLAlchemy.
This helper presumes the Credentials
have been stored as a Credentials column
on a db model class.
"""
def __init__(self, session, model_class, key_name,
key_value, property_name):
"""Constructor for Storage.
Args:
session: An instance of :class:`sqlalchemy.orm.Session`.
model_class: SQLAlchemy declarative mapping.
key_name: string, key name for the entity that has the credentials
key_value: key value for the entity that has the credentials
property_name: A string indicating which property on the
``model_class`` to store the credentials.
This property must be a
:class:`CredentialsType` column.
"""
super(Storage, self).__init__()
self.session = session
self.model_class = model_class
self.key_name = key_name
self.key_value = key_value
self.property_name = property_name
def locked_get(self):
"""Retrieve stored credential.
Returns:
A :class:`oauth2client.Credentials` instance or `None`.
"""
filters = {self.key_name: self.key_value}
query = self.session.query(self.model_class).filter_by(**filters)
entity = query.first()
if entity:
credential = getattr(entity, self.property_name)
if credential and hasattr(credential, 'set_store'):
credential.set_store(self)
return credential
else:
return None
def locked_put(self, credentials):
"""Write a credentials to the SQLAlchemy datastore.
Args:
credentials: :class:`oauth2client.Credentials`
"""
filters = {self.key_name: self.key_value}
query = self.session.query(self.model_class).filter_by(**filters)
entity = query.first()
if not entity:
entity = self.model_class(**filters)
setattr(entity, self.property_name, credentials)
self.session.add(entity)
def locked_delete(self):
"""Delete credentials from the SQLAlchemy datastore."""
filters = {self.key_name: self.key_value}
self.session.query(self.model_class).filter_by(**filters).delete()
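# Example: a minimal declarative model wired to Storage (illustrative
# sketch; the model, table and key names below are placeholders).
def _example_sqlalchemy_storage(session):
    import sqlalchemy
    from sqlalchemy.ext.declarative import declarative_base
    Base = declarative_base()

    class Credential(Base):
        __tablename__ = 'credentials'  # placeholder table
        user_id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
        credentials = sqlalchemy.Column(CredentialsType)

    return Storage(session, Credential, key_name='user_id',
                   key_value=1, property_name='credentials')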
# ==== end of oauth2client/contrib/sqlalchemy.py ====
# ==== begin oauth2client/contrib/django_util/storage.py ====
from oauth2client import client
class DjangoORMStorage(client.Storage):
"""Store and retrieve a single credential to and from the Django datastore.
This Storage helper presumes the Credentials
have been stored as a CredentialsField
on a db model class.
"""
def __init__(self, model_class, key_name, key_value, property_name):
"""Constructor for Storage.
Args:
            model_class: db.Model subclass, the model class that has the
                credentials.
key_name: string, key name for the entity that has the credentials
key_value: string, key value for the entity that has the
credentials.
property_name: string, name of the property that is an
CredentialsProperty.
"""
super(DjangoORMStorage, self).__init__()
self.model_class = model_class
self.key_name = key_name
self.key_value = key_value
self.property_name = property_name
def locked_get(self):
"""Retrieve stored credential from the Django ORM.
Returns:
oauth2client.Credentials retrieved from the Django ORM, associated
with the ``model``, ``key_value``->``key_name`` pair used to query
for the model, and ``property_name`` identifying the
``CredentialsProperty`` field, all of which are defined in the
constructor for this Storage object.
"""
query = {self.key_name: self.key_value}
entities = self.model_class.objects.filter(**query)
if len(entities) > 0:
credential = getattr(entities[0], self.property_name)
if getattr(credential, 'set_store', None) is not None:
credential.set_store(self)
return credential
else:
return None
def locked_put(self, credentials):
"""Write a Credentials to the Django datastore.
Args:
credentials: Credentials, the credentials to store.
"""
entity, _ = self.model_class.objects.get_or_create(
**{self.key_name: self.key_value})
setattr(entity, self.property_name, credentials)
entity.save()
def locked_delete(self):
"""Delete Credentials from the datastore."""
query = {self.key_name: self.key_value}
self.model_class.objects.filter(**query).delete()
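# Illustrative usage sketch (not part of the original source).
# ``CredentialsModel`` is a hypothetical Django model with an integer
# ``user_id`` field and a ``CredentialsField`` named ``credential``:
#
#     storage = DjangoORMStorage(CredentialsModel, 'user_id',
#                                request.user.id, 'credential')
#     credentials = storage.get()
#     if credentials is None or credentials.invalid:
#         # ... run the OAuth2 flow, then persist the outcome:
#         storage.put(new_credentials)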
# ---- end of vendored file: oauth2client/contrib/django_util/storage.py ----
import os
import boto
from boto.compat import json
from boto.exception import BotoClientError
from boto.endpoints import BotoEndpointResolver
from boto.endpoints import StaticEndpointBuilder
_endpoints_cache = {}
def load_endpoint_json(path):
"""
Loads a given JSON file & returns it.
:param path: The path to the JSON file
:type path: string
:returns: The loaded data
"""
return _load_json_file(path)
def _load_json_file(path):
"""
Loads a given JSON file & returns it.
:param path: The path to the JSON file
:type path: string
:returns: The loaded data
"""
with open(path, 'r') as endpoints_file:
return json.load(endpoints_file)
def merge_endpoints(defaults, additions):
"""
Given an existing set of endpoint data, this will deep-update it with
any similarly structured data in the additions.
:param defaults: The existing endpoints data
:type defaults: dict
    :param additions: The additional endpoints data
:type defaults: dict
:returns: The modified endpoints data
:rtype: dict
"""
# We can't just do an ``defaults.update(...)`` here, as that could
# *overwrite* regions if present in both.
# We'll iterate instead, essentially doing a deeper merge.
for service, region_info in additions.items():
# Set the default, if not present, to an empty dict.
defaults.setdefault(service, {})
defaults[service].update(region_info)
return defaults
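# A minimal sketch of the merge semantics (hostnames are illustrative):
#
#     defaults  = {'ec2': {'us-east-1': 'ec2.us-east-1.amazonaws.com'}}
#     additions = {'ec2': {'eu-west-1': 'ec2.eu-west-1.amazonaws.com'}}
#     merge_endpoints(defaults, additions)
#     # -> {'ec2': {'us-east-1': 'ec2.us-east-1.amazonaws.com',
#     #             'eu-west-1': 'ec2.eu-west-1.amazonaws.com'}}
#
# A plain ``defaults.update(additions)`` would instead have replaced the whole
# 'ec2' mapping, dropping the existing us-east-1 entry.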
def load_regions():
"""
Actually load the region/endpoint information from the JSON files.
By default, this loads from the default included ``boto/endpoints.json``
file.
Users can override/extend this by supplying either a ``BOTO_ENDPOINTS``
environment variable or a ``endpoints_path`` config variable, either of
which should be an absolute path to the user's JSON file.
:returns: The endpoints data
:rtype: dict
"""
# Load the defaults first.
endpoints = _load_builtin_endpoints()
additional_path = None
# Try the ENV var. If not, check the config file.
if os.environ.get('BOTO_ENDPOINTS'):
additional_path = os.environ['BOTO_ENDPOINTS']
elif boto.config.get('Boto', 'endpoints_path'):
additional_path = boto.config.get('Boto', 'endpoints_path')
# If there's a file provided, we'll load it & additively merge it into
# the endpoints.
if additional_path:
additional = load_endpoint_json(additional_path)
endpoints = merge_endpoints(endpoints, additional)
return endpoints
def _load_builtin_endpoints(_cache=_endpoints_cache):
"""Loads the builtin endpoints in the legacy format."""
# If there's a cached response, return it
if _cache:
return _cache
# Load the endpoints file
endpoints = _load_json_file(boto.ENDPOINTS_PATH)
# Build the endpoints into the legacy format
resolver = BotoEndpointResolver(endpoints)
builder = StaticEndpointBuilder(resolver)
endpoints = builder.build_static_endpoints()
# Cache the endpoints and then return them
_cache.update(endpoints)
return _cache
def get_regions(service_name, region_cls=None, connection_cls=None):
"""
Given a service name (like ``ec2``), returns a list of ``RegionInfo``
objects for that service.
This leverages the ``endpoints.json`` file (+ optional user overrides) to
configure/construct all the objects.
:param service_name: The name of the service to construct the ``RegionInfo``
objects for. Ex: ``ec2``, ``s3``, ``sns``, etc.
:type service_name: string
:param region_cls: (Optional) The class to use when constructing. By
default, this is ``RegionInfo``.
:type region_cls: class
:param connection_cls: (Optional) The connection class for the
``RegionInfo`` object. Providing this allows the ``connect`` method on
the ``RegionInfo`` to work. Default is ``None`` (no connection).
:type connection_cls: class
:returns: A list of configured ``RegionInfo`` objects
:rtype: list
"""
endpoints = load_regions()
if service_name not in endpoints:
raise BotoClientError(
"Service '%s' not found in endpoints." % service_name
)
if region_cls is None:
region_cls = RegionInfo
region_objs = []
for region_name, endpoint in endpoints.get(service_name, {}).items():
region_objs.append(
region_cls(
name=region_name,
endpoint=endpoint,
connection_cls=connection_cls
)
)
return region_objs
def connect(service_name, region_name, region_cls=None,
connection_cls=None, **kw_params):
"""Create a connection class for a given service in a given region.
:param service_name: The name of the service to construct the
``RegionInfo`` object for, e.g. ``ec2``, ``s3``, etc.
:type service_name: str
:param region_name: The name of the region to connect to, e.g.
``us-west-2``, ``eu-central-1``, etc.
:type region_name: str
:param region_cls: (Optional) The class to use when constructing. By
default, this is ``RegionInfo``.
:type region_cls: class
:param connection_cls: (Optional) The connection class for the
``RegionInfo`` object. Providing this allows the ``connect`` method on
the ``RegionInfo`` to work. Default is ``None`` (no connection).
:type connection_cls: class
:returns: A configured connection class.
"""
if region_cls is None:
region_cls = RegionInfo
region = _get_region(service_name, region_name, region_cls, connection_cls)
if region is None and _use_endpoint_heuristics():
region = _get_region_with_heuristics(
service_name, region_name, region_cls, connection_cls
)
if region is None:
return None
return region.connect(**kw_params)
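# Illustrative usage sketch (not part of the original source); it assumes
# valid AWS credentials are configured in the environment:
#
#     from boto.ec2.connection import EC2Connection
#     conn = connect('ec2', 'us-west-2', connection_cls=EC2Connection)
#     if conn is None:
#         # The region is unknown and endpoint heuristics are disabled.
#         raise RuntimeError('no endpoint for ec2 in us-west-2')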
def _get_region(service_name, region_name, region_cls=None,
connection_cls=None):
"""Finds the region by searching through the known regions."""
for region in get_regions(service_name, region_cls, connection_cls):
if region.name == region_name:
return region
return None
def _get_region_with_heuristics(service_name, region_name, region_cls=None,
connection_cls=None):
"""Finds the region using known regions and heuristics."""
endpoints = load_endpoint_json(boto.ENDPOINTS_PATH)
resolver = BotoEndpointResolver(endpoints)
hostname = resolver.resolve_hostname(service_name, region_name)
return region_cls(
name=region_name,
endpoint=hostname,
connection_cls=connection_cls
)
def _use_endpoint_heuristics():
env_var = os.environ.get('BOTO_USE_ENDPOINT_HEURISTICS', 'false').lower()
config_var = boto.config.getbool('Boto', 'use_endpoint_heuristics', False)
return env_var == 'true' or config_var
class RegionInfo(object):
"""
Represents an AWS Region
"""
def __init__(self, connection=None, name=None, endpoint=None,
connection_cls=None):
self.connection = connection
self.name = name
self.endpoint = endpoint
self.connection_cls = connection_cls
def __repr__(self):
return 'RegionInfo:%s' % self.name
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'regionName':
self.name = value
elif name == 'regionEndpoint':
self.endpoint = value
else:
setattr(self, name, value)
def connect(self, **kw_params):
"""
        Connect to this Region's endpoint. Returns a connection
        object pointing to the endpoint associated with this region.
You may pass any of the arguments accepted by the connection
class's constructor as keyword arguments and they will be
passed along to the connection object.
:rtype: Connection object
        :return: The connection to this region's endpoint
"""
if self.connection_cls:
return self.connection_cls(region=self, **kw_params)
# ---- end of vendored file: boto/regioninfo.py ----
from boto.s3.user import User
class ResultSet(list):
"""
The ResultSet is used to pass results back from the Amazon services
to the client. It is light wrapper around Python's :py:class:`list` class,
with some additional methods for parsing XML results from AWS.
Because I don't really want any dependencies on external libraries,
I'm using the standard SAX parser that comes with Python. The good news is
that it's quite fast and efficient but it makes some things rather
difficult.
You can pass in, as the marker_elem parameter, a list of tuples.
Each tuple contains a string as the first element which represents
the XML element that the resultset needs to be on the lookout for
and a Python class as the second element of the tuple. Each time the
specified element is found in the XML, a new instance of the class
    will be created and pushed onto the stack.
:ivar str next_token: A hash used to assist in paging through very long
result sets. In most cases, passing this value to certain methods
will give you another 'page' of results.
"""
def __init__(self, marker_elem=None):
list.__init__(self)
if isinstance(marker_elem, list):
self.markers = marker_elem
else:
self.markers = []
self.marker = None
self.key_marker = None
self.next_marker = None # avail when delimiter used
self.next_key_marker = None
self.next_upload_id_marker = None
self.next_version_id_marker = None
self.next_generation_marker = None
self.version_id_marker = None
self.is_truncated = False
self.next_token = None
self.status = True
def startElement(self, name, attrs, connection):
for t in self.markers:
if name == t[0]:
obj = t[1](connection)
self.append(obj)
return obj
if name == 'Owner':
# Makes owner available for get_service and
# perhaps other lists where not handled by
# another element.
self.owner = User()
return self.owner
return None
def to_boolean(self, value, true_value='true'):
if value == true_value:
return True
else:
return False
def endElement(self, name, value, connection):
if name == 'IsTruncated':
self.is_truncated = self.to_boolean(value)
elif name == 'Marker':
self.marker = value
elif name == 'KeyMarker':
self.key_marker = value
elif name == 'NextMarker':
self.next_marker = value
elif name == 'NextKeyMarker':
self.next_key_marker = value
elif name == 'VersionIdMarker':
self.version_id_marker = value
elif name == 'NextVersionIdMarker':
self.next_version_id_marker = value
elif name == 'NextGenerationMarker':
self.next_generation_marker = value
elif name == 'UploadIdMarker':
self.upload_id_marker = value
elif name == 'NextUploadIdMarker':
self.next_upload_id_marker = value
elif name == 'Bucket':
self.bucket = value
elif name == 'MaxUploads':
self.max_uploads = int(value)
elif name == 'MaxItems':
self.max_items = int(value)
elif name == 'Prefix':
self.prefix = value
elif name == 'return':
self.status = self.to_boolean(value)
elif name == 'StatusCode':
self.status = self.to_boolean(value, 'Success')
elif name == 'ItemName':
self.append(value)
elif name == 'NextToken':
self.next_token = value
elif name == 'nextToken':
self.next_token = value
# Code exists which expects nextToken to be available, so we
            # set it here to remain backwards-compatible.
self.nextToken = value
elif name == 'BoxUsage':
try:
connection.box_usage += float(value)
            except (AttributeError, TypeError, ValueError):
                # The connection may not track box_usage, or the value may
                # not parse as a float; ignore either case.
                pass
elif name == 'IsValid':
self.status = self.to_boolean(value, 'True')
else:
setattr(self, name, value)
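# Illustrative sketch of the marker mechanism (not part of the original
# source). Callers register (element_name, class) tuples; each time that
# element is seen during SAX parsing, an instance is appended to the list:
#
#     from boto.s3.key import Key
#     rs = ResultSet([('Contents', Key)])
#     # Feeding an S3 bucket-listing response through a SAX parser with
#     # ``rs`` as the handler target then yields Key objects in ``rs``,
#     # with paging fields such as rs.is_truncated and rs.next_token set.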
class BooleanResult(object):
def __init__(self, marker_elem=None):
self.status = True
self.request_id = None
self.box_usage = None
def __repr__(self):
if self.status:
return 'True'
else:
return 'False'
    def __nonzero__(self):
        return self.status
    __bool__ = __nonzero__  # Python 3 truthiness hook
def startElement(self, name, attrs, connection):
return None
def to_boolean(self, value, true_value='true'):
if value == true_value:
return True
else:
return False
def endElement(self, name, value, connection):
if name == 'return':
self.status = self.to_boolean(value)
elif name == 'StatusCode':
self.status = self.to_boolean(value, 'Success')
elif name == 'IsValid':
self.status = self.to_boolean(value, 'True')
elif name == 'RequestId':
self.request_id = value
elif name == 'requestId':
self.request_id = value
elif name == 'BoxUsage':
            self.box_usage = value
else:
setattr(self, name, value)
# ---- end of vendored file: boto/resultset.py ----
import boto.vendored.regions.regions as _regions
class _CompatEndpointResolver(_regions.EndpointResolver):
"""Endpoint resolver which handles boto2 compatibility concerns.
This is NOT intended for external use whatsoever.
"""
_DEFAULT_SERVICE_RENAMES = {
# The botocore resolver is based on endpoint prefix.
# These don't always sync up to the name that boto2 uses.
# A mapping can be provided that handles the mapping between
# "service names" and endpoint prefixes.
'awslambda': 'lambda',
'cloudwatch': 'monitoring',
'ses': 'email',
'ec2containerservice': 'ecs',
'configservice': 'config',
}
def __init__(self, endpoint_data, service_rename_map=None):
"""
:type endpoint_data: dict
:param endpoint_data: Regions and endpoints data in the same format
as is used by botocore / boto3.
:type service_rename_map: dict
:param service_rename_map: A mapping of boto2 service name to
endpoint prefix.
"""
super(_CompatEndpointResolver, self).__init__(endpoint_data)
if service_rename_map is None:
service_rename_map = self._DEFAULT_SERVICE_RENAMES
# Mapping of boto2 service name to endpoint prefix
self._endpoint_prefix_map = service_rename_map
# Mapping of endpoint prefix to boto2 service name
self._service_name_map = dict(
(v, k) for k, v in service_rename_map.items())
def get_available_endpoints(self, service_name, partition_name='aws',
allow_non_regional=False):
endpoint_prefix = self._endpoint_prefix(service_name)
return super(_CompatEndpointResolver, self).get_available_endpoints(
endpoint_prefix, partition_name, allow_non_regional)
def get_all_available_regions(self, service_name):
"""Retrieve every region across partitions for a service."""
regions = set()
endpoint_prefix = self._endpoint_prefix(service_name)
# Get every region for every partition in the new endpoint format
for partition_name in self.get_available_partitions():
if self._is_global_service(service_name, partition_name):
# Global services are available in every region in the
# partition in which they are considered global.
partition = self._get_partition_data(partition_name)
regions.update(partition['regions'].keys())
else:
regions.update(
self.get_available_endpoints(
endpoint_prefix, partition_name)
)
return list(regions)
def construct_endpoint(self, service_name, region_name=None):
endpoint_prefix = self._endpoint_prefix(service_name)
return super(_CompatEndpointResolver, self).construct_endpoint(
endpoint_prefix, region_name)
def get_available_services(self):
"""Get a list of all the available services in the endpoints file(s)"""
services = set()
for partition in self._endpoint_data['partitions']:
services.update(partition['services'].keys())
return [self._service_name(s) for s in services]
def _is_global_service(self, service_name, partition_name='aws'):
"""Determines whether a service uses a global endpoint.
In theory a service can be 'global' in one partition but regional in
another. In practice, each service is all global or all regional.
"""
endpoint_prefix = self._endpoint_prefix(service_name)
partition = self._get_partition_data(partition_name)
service = partition['services'].get(endpoint_prefix, {})
return 'partitionEndpoint' in service
def _get_partition_data(self, partition_name):
"""Get partition information for a particular partition.
This should NOT be used to get service endpoint data because it only
loads from the new endpoint format. It should only be used for
partition metadata and partition specific service metadata.
:type partition_name: str
:param partition_name: The name of the partition to search for.
:returns: Partition info from the new endpoints format.
:rtype: dict or None
"""
for partition in self._endpoint_data['partitions']:
if partition['partition'] == partition_name:
return partition
raise ValueError(
"Could not find partition data for: %s" % partition_name)
def _endpoint_prefix(self, service_name):
"""Given a boto2 service name, get the endpoint prefix."""
return self._endpoint_prefix_map.get(service_name, service_name)
def _service_name(self, endpoint_prefix):
"""Given an endpoint prefix, get the boto2 service name."""
return self._service_name_map.get(endpoint_prefix, endpoint_prefix)
class BotoEndpointResolver(object):
"""Resolves endpoint hostnames for AWS services.
This is NOT intended for external use.
"""
def __init__(self, endpoint_data, service_rename_map=None):
"""
:type endpoint_data: dict
:param endpoint_data: Regions and endpoints data in the same format
as is used by botocore / boto3.
:type service_rename_map: dict
:param service_rename_map: A mapping of boto2 service name to
endpoint prefix.
"""
self._resolver = _CompatEndpointResolver(
endpoint_data, service_rename_map)
def resolve_hostname(self, service_name, region_name):
"""Resolve the hostname for a service in a particular region.
:type service_name: str
:param service_name: The service to look up.
:type region_name: str
:param region_name: The region to find the endpoint for.
:return: The hostname for the given service in the given region.
"""
endpoint = self._resolver.construct_endpoint(service_name, region_name)
if endpoint is None:
return None
return endpoint.get('sslCommonName', endpoint['hostname'])
def get_all_available_regions(self, service_name):
"""Get all the regions a service is available in.
:type service_name: str
:param service_name: The service to look up.
:rtype: list of str
:return: A list of all the regions the given service is available in.
"""
return self._resolver.get_all_available_regions(service_name)
def get_available_services(self):
"""Get all the services supported by the endpoint data.
:rtype: list of str
:return: A list of all the services explicitly contained within the
endpoint data provided during instantiation.
"""
return self._resolver.get_available_services()
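# Illustrative usage sketch (not part of the original source); the resolved
# hostname depends on the endpoint data supplied:
#
#     import boto
#     from boto.compat import json
#     with open(boto.ENDPOINTS_PATH) as f:
#         resolver = BotoEndpointResolver(json.load(f))
#     resolver.resolve_hostname('ec2', 'us-east-1')
#     # -> 'ec2.us-east-1.amazonaws.com'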
class StaticEndpointBuilder(object):
"""Builds a static mapping of endpoints in the legacy format."""
def __init__(self, resolver):
"""
:type resolver: BotoEndpointResolver
:param resolver: An endpoint resolver.
"""
self._resolver = resolver
def build_static_endpoints(self, service_names=None):
"""Build a set of static endpoints in the legacy boto2 format.
:param service_names: The names of the services to build. They must
use the names that boto2 uses, not boto3, e.g "ec2containerservice"
and not "ecs". If no service names are provided, all available
services will be built.
:return: A dict consisting of::
{"service": {"region": "full.host.name"}}
"""
if service_names is None:
service_names = self._resolver.get_available_services()
static_endpoints = {}
for name in service_names:
endpoints_for_service = self._build_endpoints_for_service(name)
if endpoints_for_service:
# It's possible that when we try to build endpoints for
# services we get an empty hash. In that case we don't
# bother adding it to the final list of static endpoints.
static_endpoints[name] = endpoints_for_service
self._handle_special_cases(static_endpoints)
return static_endpoints
def _build_endpoints_for_service(self, service_name):
# Given a service name, 'ec2', build a dict of
# 'region' -> 'hostname'
endpoints = {}
regions = self._resolver.get_all_available_regions(service_name)
for region_name in regions:
endpoints[region_name] = self._resolver.resolve_hostname(
service_name, region_name)
return endpoints
def _handle_special_cases(self, static_endpoints):
# cloudsearchdomain endpoints use the exact same set of endpoints as
# cloudsearch.
if 'cloudsearch' in static_endpoints:
cloudsearch_endpoints = static_endpoints['cloudsearch']
static_endpoints['cloudsearchdomain'] = cloudsearch_endpoints
# ---- end of vendored file: boto/endpoints.py ----
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.kms import exceptions
from boto.compat import six
import base64
class KMSConnection(AWSQueryConnection):
"""
AWS Key Management Service
AWS Key Management Service (KMS) is an encryption and key
management web service. This guide describes the KMS actions that
you can call programmatically. For general information about KMS,
see (need an address here). For the KMS developer guide, see (need
address here).
AWS provides SDKs that consist of libraries and sample code for
various programming languages and platforms (Java, Ruby, .Net,
iOS, Android, etc.). The SDKs provide a convenient way to create
programmatic access to KMS and AWS. For example, the SDKs take
care of tasks such as signing requests (see below), managing
errors, and retrying requests automatically. For more information
about the AWS SDKs, including how to download and install them,
see `Tools for Amazon Web Services`_.
We recommend that you use the AWS SDKs to make programmatic API
    calls to KMS. However, you can also use the KMS Query API to make
    direct calls to the KMS web service.
**Signing Requests**
Requests must be signed by using an access key ID and a secret
access key. We strongly recommend that you do not use your AWS
account access key ID and secret key for everyday work with KMS.
Instead, use the access key ID and secret access key for an IAM
user, or you can use the AWS Security Token Service to generate
temporary security credentials that you can use to sign requests.
All KMS operations require `Signature Version 4`_.
**Recording API Requests**
KMS supports AWS CloudTrail, a service that records AWS API calls
and related events for your AWS account and delivers them to an
Amazon S3 bucket that you specify. By using the information
collected by CloudTrail, you can determine what requests were made
to KMS, who made the request, when it was made, and so on. To
learn more about CloudTrail, including how to turn it on and find
your log files, see the `AWS CloudTrail User Guide`_
**Additional Resources**
For more information about credentials and request signing, see
the following:
+ `AWS Security Credentials`_. This topic provides general
information about the types of credentials used for accessing AWS.
+ `AWS Security Token Service`_. This guide describes how to
create and use temporary security credentials.
+ `Signing AWS API Requests`_. This set of topics walks you
through the process of signing a request using an access key ID
and a secret access key.
"""
APIVersion = "2014-11-01"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "kms.us-east-1.amazonaws.com"
ServiceName = "KMS"
TargetPrefix = "TrentService"
ResponseError = JSONResponseError
_faults = {
"InvalidGrantTokenException": exceptions.InvalidGrantTokenException,
"DisabledException": exceptions.DisabledException,
"LimitExceededException": exceptions.LimitExceededException,
"DependencyTimeoutException": exceptions.DependencyTimeoutException,
"InvalidMarkerException": exceptions.InvalidMarkerException,
"AlreadyExistsException": exceptions.AlreadyExistsException,
"InvalidCiphertextException": exceptions.InvalidCiphertextException,
"KeyUnavailableException": exceptions.KeyUnavailableException,
"InvalidAliasNameException": exceptions.InvalidAliasNameException,
"UnsupportedOperationException": exceptions.UnsupportedOperationException,
"InvalidArnException": exceptions.InvalidArnException,
"KMSInternalException": exceptions.KMSInternalException,
"InvalidKeyUsageException": exceptions.InvalidKeyUsageException,
"MalformedPolicyDocumentException": exceptions.MalformedPolicyDocumentException,
"NotFoundException": exceptions.NotFoundException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(KMSConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def create_alias(self, alias_name, target_key_id):
"""
Creates a display name for a customer master key. An alias can
be used to identify a key and should be unique. The console
enforces a one-to-one mapping between the alias and a key. An
alias name can contain only alphanumeric characters, forward
slashes (/), underscores (_), and dashes (-). An alias must
start with the word "alias" followed by a forward slash
(alias/). An alias that begins with "aws" after the forward
slash (alias/aws...) is reserved by Amazon Web Services (AWS).
:type alias_name: string
:param alias_name: String that contains the display name. Aliases that
begin with AWS are reserved.
:type target_key_id: string
:param target_key_id: An identifier of the key for which you are
creating the alias. This value cannot be another alias.
"""
params = {
'AliasName': alias_name,
'TargetKeyId': target_key_id,
}
return self.make_request(action='CreateAlias',
body=json.dumps(params))
def create_grant(self, key_id, grantee_principal,
retiring_principal=None, operations=None,
constraints=None, grant_tokens=None):
"""
Adds a grant to a key to specify who can access the key and
under what conditions. Grants are alternate permission
mechanisms to key policies. If absent, access to the key is
evaluated based on IAM policies attached to the user. By
default, grants do not expire. Grants can be listed, retired,
or revoked as indicated by the following APIs. Typically, when
you are finished using a grant, you retire it. When you want
to end a grant immediately, revoke it. For more information
about grants, see `Grants`_.
#. ListGrants
#. RetireGrant
#. RevokeGrant
:type key_id: string
:param key_id: A unique key identifier for a customer master key. This
value can be a globally unique identifier, an ARN, or an alias.
:type grantee_principal: string
:param grantee_principal: Principal given permission by the grant to
use the key identified by the `keyId` parameter.
:type retiring_principal: string
:param retiring_principal: Principal given permission to retire the
grant. For more information, see RetireGrant.
:type operations: list
:param operations: List of operations permitted by the grant. This can
be any combination of one or more of the following values:
#. Decrypt
#. Encrypt
#. GenerateDataKey
#. GenerateDataKeyWithoutPlaintext
#. ReEncryptFrom
#. ReEncryptTo
#. CreateGrant
:type constraints: dict
:param constraints: Specifies the conditions under which the actions
specified by the `Operations` parameter are allowed.
:type grant_tokens: list
:param grant_tokens: List of grant tokens.
"""
params = {
'KeyId': key_id,
'GranteePrincipal': grantee_principal,
}
if retiring_principal is not None:
params['RetiringPrincipal'] = retiring_principal
if operations is not None:
params['Operations'] = operations
if constraints is not None:
params['Constraints'] = constraints
if grant_tokens is not None:
params['GrantTokens'] = grant_tokens
return self.make_request(action='CreateGrant',
body=json.dumps(params))
def create_key(self, policy=None, description=None, key_usage=None):
"""
Creates a customer master key. Customer master keys can be
used to encrypt small amounts of data (less than 4K) directly,
but they are most commonly used to encrypt or envelope data
keys that are then used to encrypt customer data. For more
information about data keys, see GenerateDataKey and
GenerateDataKeyWithoutPlaintext.
:type policy: string
:param policy: Policy to be attached to the key. This is required and
delegates back to the account. The key is the root of trust.
:type description: string
:param description: Description of the key. We recommend that you
choose a description that helps your customer decide whether the
key is appropriate for a task.
:type key_usage: string
:param key_usage: Specifies the intended use of the key. Currently this
defaults to ENCRYPT/DECRYPT, and only symmetric encryption and
decryption are supported.
"""
params = {}
if policy is not None:
params['Policy'] = policy
if description is not None:
params['Description'] = description
if key_usage is not None:
params['KeyUsage'] = key_usage
return self.make_request(action='CreateKey',
body=json.dumps(params))
def decrypt(self, ciphertext_blob, encryption_context=None,
grant_tokens=None):
"""
Decrypts ciphertext. Ciphertext is plaintext that has been
previously encrypted by using the Encrypt function.
:type ciphertext_blob: blob
:param ciphertext_blob: Ciphertext including metadata.
:type encryption_context: map
:param encryption_context: The encryption context. If this was
specified in the Encrypt function, it must be specified here or the
decryption operation will fail. For more information, see
`Encryption Context`_.
:type grant_tokens: list
:param grant_tokens: A list of grant tokens that represent grants which
can be used to provide long term permissions to perform decryption.
"""
if not isinstance(ciphertext_blob, six.binary_type):
raise TypeError(
"Value of argument ``ciphertext_blob`` "
"must be of type %s." % six.binary_type)
ciphertext_blob = base64.b64encode(ciphertext_blob)
params = {'CiphertextBlob': ciphertext_blob.decode('utf-8'), }
if encryption_context is not None:
params['EncryptionContext'] = encryption_context
if grant_tokens is not None:
params['GrantTokens'] = grant_tokens
response = self.make_request(action='Decrypt',
body=json.dumps(params))
if response.get('Plaintext') is not None:
response['Plaintext'] = base64.b64decode(
response['Plaintext'].encode('utf-8'))
return response
def delete_alias(self, alias_name):
"""
Deletes the specified alias.
:type alias_name: string
:param alias_name: The alias to be deleted.
"""
params = {'AliasName': alias_name, }
return self.make_request(action='DeleteAlias',
body=json.dumps(params))
def describe_key(self, key_id):
"""
Provides detailed information about the specified customer
master key.
:type key_id: string
:param key_id: Unique identifier of the customer master key to be
described. This can be an ARN, an alias, or a globally unique
identifier.
"""
params = {'KeyId': key_id, }
return self.make_request(action='DescribeKey',
body=json.dumps(params))
def disable_key(self, key_id):
"""
Marks a key as disabled, thereby preventing its use.
:type key_id: string
:param key_id: Unique identifier of the customer master key to be
disabled. This can be an ARN, an alias, or a globally unique
identifier.
"""
params = {'KeyId': key_id, }
return self.make_request(action='DisableKey',
body=json.dumps(params))
def disable_key_rotation(self, key_id):
"""
Disables rotation of the specified key.
:type key_id: string
:param key_id: Unique identifier of the customer master key for which
rotation is to be disabled. This can be an ARN, an alias, or a
globally unique identifier.
"""
params = {'KeyId': key_id, }
return self.make_request(action='DisableKeyRotation',
body=json.dumps(params))
def enable_key(self, key_id):
"""
Marks a key as enabled, thereby permitting its use. You can
have up to 25 enabled keys at one time.
:type key_id: string
:param key_id: Unique identifier of the customer master key to be
enabled. This can be an ARN, an alias, or a globally unique
identifier.
"""
params = {'KeyId': key_id, }
return self.make_request(action='EnableKey',
body=json.dumps(params))
def enable_key_rotation(self, key_id):
"""
Enables rotation of the specified customer master key.
:type key_id: string
:param key_id: Unique identifier of the customer master key for which
rotation is to be enabled. This can be an ARN, an alias, or a
globally unique identifier.
"""
params = {'KeyId': key_id, }
return self.make_request(action='EnableKeyRotation',
body=json.dumps(params))
def encrypt(self, key_id, plaintext, encryption_context=None,
grant_tokens=None):
"""
Encrypts plaintext into ciphertext by using a customer master
key.
:type key_id: string
:param key_id: Unique identifier of the customer master. This can be an
ARN, an alias, or the Key ID.
:type plaintext: blob
:param plaintext: Data to be encrypted.
:type encryption_context: map
:param encryption_context: Name:value pair that specifies the
encryption context to be used for authenticated encryption. For
more information, see `Authenticated Encryption`_.
:type grant_tokens: list
:param grant_tokens: A list of grant tokens that represent grants which
can be used to provide long term permissions to perform encryption.
"""
if not isinstance(plaintext, six.binary_type):
raise TypeError(
"Value of argument ``plaintext`` "
"must be of type %s." % six.binary_type)
plaintext = base64.b64encode(plaintext)
params = {'KeyId': key_id, 'Plaintext': plaintext.decode('utf-8'), }
if encryption_context is not None:
params['EncryptionContext'] = encryption_context
if grant_tokens is not None:
params['GrantTokens'] = grant_tokens
response = self.make_request(action='Encrypt',
body=json.dumps(params))
if response.get('CiphertextBlob') is not None:
response['CiphertextBlob'] = base64.b64decode(
response['CiphertextBlob'].encode('utf-8'))
return response
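    # Illustrative encrypt/decrypt round trip (not part of the original
    # source); it assumes valid AWS credentials and an existing key alias.
    # Both methods handle the base64 transport encoding internally:
    #
    #     kms = KMSConnection()
    #     enc = kms.encrypt('alias/my-key', b'secret data')
    #     dec = kms.decrypt(enc['CiphertextBlob'])
    #     assert dec['Plaintext'] == b'secret data'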
def generate_data_key(self, key_id, encryption_context=None,
number_of_bytes=None, key_spec=None,
grant_tokens=None):
"""
Generates a secure data key. Data keys are used to encrypt and
decrypt data. They are wrapped by customer master keys.
:type key_id: string
:param key_id: Unique identifier of the key. This can be an ARN, an
alias, or a globally unique identifier.
:type encryption_context: map
:param encryption_context: Name/value pair that contains additional
data to be authenticated during the encryption and decryption
processes that use the key. This value is logged by AWS CloudTrail
to provide context around the data encrypted by the key.
:type number_of_bytes: integer
:param number_of_bytes: Integer that contains the number of bytes to
generate. Common values are 128, 256, 512, 1024 and so on. 1024 is
the current limit.
:type key_spec: string
:param key_spec: Value that identifies the encryption algorithm and key
size to generate a data key for. Currently this can be AES_128 or
AES_256.
:type grant_tokens: list
:param grant_tokens: A list of grant tokens that represent grants which
can be used to provide long term permissions to generate a key.
"""
params = {'KeyId': key_id, }
if encryption_context is not None:
params['EncryptionContext'] = encryption_context
if number_of_bytes is not None:
params['NumberOfBytes'] = number_of_bytes
if key_spec is not None:
params['KeySpec'] = key_spec
if grant_tokens is not None:
params['GrantTokens'] = grant_tokens
response = self.make_request(action='GenerateDataKey',
body=json.dumps(params))
if response.get('CiphertextBlob') is not None:
response['CiphertextBlob'] = base64.b64decode(
response['CiphertextBlob'].encode('utf-8'))
if response.get('Plaintext') is not None:
response['Plaintext'] = base64.b64decode(
response['Plaintext'].encode('utf-8'))
return response
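    # Typical envelope-encryption pattern (a sketch, not from the original
    # source): encrypt bulk data locally with the returned plaintext key,
    # persist only the wrapped key, and unwrap it later via decrypt():
    #
    #     resp = kms.generate_data_key('alias/my-key', key_spec='AES_256')
    #     data_key = resp['Plaintext']      # use locally, then discard
    #     wrapped = resp['CiphertextBlob']  # store alongside the ciphertext
    #     data_key_again = kms.decrypt(wrapped)['Plaintext']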
def generate_data_key_without_plaintext(self, key_id,
encryption_context=None,
key_spec=None,
number_of_bytes=None,
grant_tokens=None):
"""
Returns a key wrapped by a customer master key without the
plaintext copy of that key. To retrieve the plaintext, see
GenerateDataKey.
:type key_id: string
:param key_id: Unique identifier of the key. This can be an ARN, an
alias, or a globally unique identifier.
:type encryption_context: map
:param encryption_context: Name:value pair that contains additional
data to be authenticated during the encryption and decryption
processes.
:type key_spec: string
:param key_spec: Value that identifies the encryption algorithm and key
size. Currently this can be AES_128 or AES_256.
:type number_of_bytes: integer
:param number_of_bytes: Integer that contains the number of bytes to
generate. Common values are 128, 256, 512, 1024 and so on.
:type grant_tokens: list
:param grant_tokens: A list of grant tokens that represent grants which
can be used to provide long term permissions to generate a key.
"""
params = {'KeyId': key_id, }
if encryption_context is not None:
params['EncryptionContext'] = encryption_context
if key_spec is not None:
params['KeySpec'] = key_spec
if number_of_bytes is not None:
params['NumberOfBytes'] = number_of_bytes
if grant_tokens is not None:
params['GrantTokens'] = grant_tokens
response = self.make_request(action='GenerateDataKeyWithoutPlaintext',
body=json.dumps(params))
if response.get('CiphertextBlob') is not None:
response['CiphertextBlob'] = base64.b64decode(
response['CiphertextBlob'].encode('utf-8'))
return response
def generate_random(self, number_of_bytes=None):
"""
Generates an unpredictable byte string.
:type number_of_bytes: integer
:param number_of_bytes: Integer that contains the number of bytes to
generate. Common values are 128, 256, 512, 1024 and so on. The
current limit is 1024 bytes.
"""
params = {}
if number_of_bytes is not None:
params['NumberOfBytes'] = number_of_bytes
response = self.make_request(action='GenerateRandom',
body=json.dumps(params))
if response.get('Plaintext') is not None:
response['Plaintext'] = base64.b64decode(
response['Plaintext'].encode('utf-8'))
return response
def get_key_policy(self, key_id, policy_name):
"""
Retrieves a policy attached to the specified key.
:type key_id: string
:param key_id: Unique identifier of the key. This can be an ARN, an
alias, or a globally unique identifier.
:type policy_name: string
:param policy_name: String that contains the name of the policy.
Currently, this must be "default". Policy names can be discovered
by calling ListKeyPolicies.
"""
params = {'KeyId': key_id, 'PolicyName': policy_name, }
return self.make_request(action='GetKeyPolicy',
body=json.dumps(params))
def get_key_rotation_status(self, key_id):
"""
Retrieves a Boolean value that indicates whether key rotation
is enabled for the specified key.
:type key_id: string
:param key_id: Unique identifier of the key. This can be an ARN, an
alias, or a globally unique identifier.
"""
params = {'KeyId': key_id, }
return self.make_request(action='GetKeyRotationStatus',
body=json.dumps(params))
def list_aliases(self, limit=None, marker=None):
"""
Lists all of the key aliases in the account.
:type limit: integer
:param limit: Specify this parameter when paginating results to
indicate the maximum number of aliases you want in each response.
If there are additional aliases beyond the maximum you specify, the
`Truncated` response element will be set to `true.`
:type marker: string
:param marker: Use this parameter when paginating results, and only in
a subsequent request after you've received a response where the
results are truncated. Set it to the value of the `NextMarker`
element in the response you just received.
"""
params = {}
if limit is not None:
params['Limit'] = limit
if marker is not None:
params['Marker'] = marker
return self.make_request(action='ListAliases',
body=json.dumps(params))
def list_grants(self, key_id, limit=None, marker=None):
"""
List the grants for a specified key.
:type key_id: string
:param key_id: Unique identifier of the key. This can be an ARN, an
alias, or a globally unique identifier.
:type limit: integer
:param limit: Specify this parameter only when paginating results to
indicate the maximum number of grants you want listed in the
response. If there are additional grants beyond the maximum you
specify, the `Truncated` response element will be set to `true.`
:type marker: string
:param marker: Use this parameter only when paginating results, and
only in a subsequent request after you've received a response where
the results are truncated. Set it to the value of the `NextMarker`
in the response you just received.
"""
params = {'KeyId': key_id, }
if limit is not None:
params['Limit'] = limit
if marker is not None:
params['Marker'] = marker
return self.make_request(action='ListGrants',
body=json.dumps(params))
def list_key_policies(self, key_id, limit=None, marker=None):
"""
Retrieves a list of policies attached to a key.
:type key_id: string
:param key_id: Unique identifier of the key. This can be an ARN, an
alias, or a globally unique identifier.
:type limit: integer
:param limit: Specify this parameter only when paginating results to
indicate the maximum number of policies you want listed in the
response. If there are additional policies beyond the maximum you
specify, the `Truncated` response element will be set to `true.`
:type marker: string
:param marker: Use this parameter only when paginating results, and
only in a subsequent request after you've received a response where
the results are truncated. Set it to the value of the `NextMarker`
in the response you just received.
"""
params = {'KeyId': key_id, }
if limit is not None:
params['Limit'] = limit
if marker is not None:
params['Marker'] = marker
return self.make_request(action='ListKeyPolicies',
body=json.dumps(params))
def list_keys(self, limit=None, marker=None):
"""
Lists the customer master keys.
:type limit: integer
:param limit: Specify this parameter only when paginating results to
indicate the maximum number of keys you want listed in the
response. If there are additional keys beyond the maximum you
specify, the `Truncated` response element will be set to `true.`
:type marker: string
:param marker: Use this parameter only when paginating results, and
only in a subsequent request after you've received a response where
the results are truncated. Set it to the value of the `NextMarker`
in the response you just received.
"""
params = {}
if limit is not None:
params['Limit'] = limit
if marker is not None:
params['Marker'] = marker
return self.make_request(action='ListKeys',
body=json.dumps(params))
def put_key_policy(self, key_id, policy_name, policy):
"""
Attaches a policy to the specified key.
:type key_id: string
:param key_id: Unique identifier of the key. This can be an ARN, an
alias, or a globally unique identifier.
:type policy_name: string
:param policy_name: Name of the policy to be attached. Currently, the
only supported name is "default".
:type policy: string
:param policy: The policy, in JSON format, to be attached to the key.
"""
params = {
'KeyId': key_id,
'PolicyName': policy_name,
'Policy': policy,
}
return self.make_request(action='PutKeyPolicy',
body=json.dumps(params))
def re_encrypt(self, ciphertext_blob, destination_key_id,
source_encryption_context=None,
destination_encryption_context=None, grant_tokens=None):
"""
Encrypts data on the server side with a new customer master
key without exposing the plaintext of the data on the client
side. The data is first decrypted and then encrypted. This
operation can also be used to change the encryption context of
a ciphertext.
:type ciphertext_blob: blob
:param ciphertext_blob: Ciphertext of the data to re-encrypt.
:type source_encryption_context: map
:param source_encryption_context: Encryption context used to encrypt
and decrypt the data specified in the `CiphertextBlob` parameter.
:type destination_key_id: string
:param destination_key_id: Key identifier of the key used to re-encrypt
the data.
:type destination_encryption_context: map
:param destination_encryption_context: Encryption context to be used
when the data is re-encrypted.
:type grant_tokens: list
:param grant_tokens: Grant tokens that identify the grants that have
permissions for the encryption and decryption process.
"""
if not isinstance(ciphertext_blob, six.binary_type):
raise TypeError(
"Value of argument ``ciphertext_blob`` "
"must be of type %s." % six.binary_type)
ciphertext_blob = base64.b64encode(ciphertext_blob)
params = {
            'CiphertextBlob': ciphertext_blob.decode('utf-8'),
'DestinationKeyId': destination_key_id,
}
if source_encryption_context is not None:
params['SourceEncryptionContext'] = source_encryption_context
if destination_encryption_context is not None:
params['DestinationEncryptionContext'] = destination_encryption_context
if grant_tokens is not None:
params['GrantTokens'] = grant_tokens
response = self.make_request(action='ReEncrypt',
body=json.dumps(params))
if response.get('CiphertextBlob') is not None:
response['CiphertextBlob'] = base64.b64decode(
response['CiphertextBlob'].encode('utf-8'))
return response
def retire_grant(self, grant_token):
"""
Retires a grant. You can retire a grant when you're done using
it to clean up. You should revoke a grant when you intend to
actively deny operations that depend on it.
:type grant_token: string
:param grant_token: Token that identifies the grant to be retired.
"""
params = {'GrantToken': grant_token, }
return self.make_request(action='RetireGrant',
body=json.dumps(params))
def revoke_grant(self, key_id, grant_id):
"""
Revokes a grant. You can revoke a grant to actively deny
operations that depend on it.
:type key_id: string
:param key_id: Unique identifier of the key associated with the grant.
:type grant_id: string
:param grant_id: Identifier of the grant to be revoked.
"""
params = {'KeyId': key_id, 'GrantId': grant_id, }
return self.make_request(action='RevokeGrant',
body=json.dumps(params))
def update_key_description(self, key_id, description):
"""
:type key_id: string
:param key_id:
:type description: string
:param description:
"""
params = {'KeyId': key_id, 'Description': description, }
return self.make_request(action='UpdateKeyDescription',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
# ---- end of vendored file: boto/kms/layer1.py ----
import uuid
from boto.compat import urllib
from boto.resultset import ResultSet
class InvalidationBatch(object):
"""A simple invalidation request.
:see: http://docs.amazonwebservices.com/AmazonCloudFront/2010-08-01/APIReference/index.html?InvalidationBatchDatatype.html
"""
def __init__(self, paths=None, connection=None, distribution=None, caller_reference=''):
"""Create a new invalidation request:
:paths: An array of paths to invalidate
"""
self.paths = paths or []
self.distribution = distribution
self.caller_reference = caller_reference
if not self.caller_reference:
self.caller_reference = str(uuid.uuid4())
# If we passed in a distribution,
# then we use that as the connection object
if distribution:
self.connection = distribution
else:
self.connection = connection
def __repr__(self):
return '<InvalidationBatch: %s>' % self.id
def add(self, path):
"""Add another path to this invalidation request"""
return self.paths.append(path)
def remove(self, path):
"""Remove a path from this invalidation request"""
return self.paths.remove(path)
def __iter__(self):
return iter(self.paths)
def __getitem__(self, i):
return self.paths[i]
def __setitem__(self, k, v):
self.paths[k] = v
def escape(self, p):
"""Escape a path, make sure it begins with a slash and contains no invalid characters. Retain literal wildcard characters."""
if not p[0] == "/":
p = "/%s" % p
return urllib.parse.quote(p, safe = "/*")
def to_xml(self):
"""Get this batch as XML"""
assert self.connection is not None
s = '<?xml version="1.0" encoding="UTF-8"?>\n'
s += '<InvalidationBatch xmlns="http://cloudfront.amazonaws.com/doc/%s/">\n' % self.connection.Version
for p in self.paths:
s += ' <Path>%s</Path>\n' % self.escape(p)
s += ' <CallerReference>%s</CallerReference>\n' % self.caller_reference
s += '</InvalidationBatch>\n'
return s
def startElement(self, name, attrs, connection):
if name == "InvalidationBatch":
self.paths = []
return None
def endElement(self, name, value, connection):
if name == 'Path':
self.paths.append(value)
elif name == "Status":
self.status = value
elif name == "Id":
self.id = value
elif name == "CreateTime":
self.create_time = value
elif name == "CallerReference":
self.caller_reference = value
return None
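# Illustrative usage sketch (not part of the original source);
# ``cloudfront_connection`` is a hypothetical CloudFrontConnection:
#
#     batch = InvalidationBatch(paths=['index.html', '/images/*'],
#                               connection=cloudfront_connection)
#     batch.add('/css/site.css')
#     print(batch.to_xml())
#
# escape() normalizes 'index.html' to '/index.html' and keeps '*' literal.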
class InvalidationListResultSet(object):
"""
A resultset for listing invalidations on a given CloudFront distribution.
Implements the iterator interface and transparently handles paging results
from CF so even if you have many thousands of invalidations on the
distribution you can iterate over all invalidations in a reasonably
efficient manner.
"""
def __init__(self, markers=None, connection=None, distribution_id=None,
invalidations=None, marker='', next_marker=None,
max_items=None, is_truncated=False):
self.markers = markers or []
self.connection = connection
self.distribution_id = distribution_id
self.marker = marker
self.next_marker = next_marker
self.max_items = max_items
self.auto_paginate = max_items is None
self.is_truncated = is_truncated
self._inval_cache = invalidations or []
def __iter__(self):
"""
A generator function for listing invalidation requests for a given
CloudFront distribution.
"""
conn = self.connection
distribution_id = self.distribution_id
result_set = self
for inval in result_set._inval_cache:
yield inval
if not self.auto_paginate:
return
while result_set.is_truncated:
result_set = conn.get_invalidation_requests(distribution_id,
marker=result_set.next_marker,
max_items=result_set.max_items)
for i in result_set._inval_cache:
yield i
def startElement(self, name, attrs, connection):
for root_elem, handler in self.markers:
if name == root_elem:
obj = handler(connection, distribution_id=self.distribution_id)
self._inval_cache.append(obj)
return obj
def endElement(self, name, value, connection):
if name == 'IsTruncated':
self.is_truncated = self.to_boolean(value)
elif name == 'Marker':
self.marker = value
elif name == 'NextMarker':
self.next_marker = value
elif name == 'MaxItems':
self.max_items = int(value)
def to_boolean(self, value, true_value='true'):
if value == true_value:
return True
else:
return False
class InvalidationSummary(object):
"""
Represents InvalidationSummary complex type in CloudFront API that lists
the id and status of a given invalidation request.
"""
def __init__(self, connection=None, distribution_id=None, id='',
status=''):
self.connection = connection
self.distribution_id = distribution_id
self.id = id
self.status = status
def __repr__(self):
return '<InvalidationSummary: %s>' % self.id
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Id':
self.id = value
elif name == 'Status':
self.status = value
def get_distribution(self):
"""
Returns a Distribution object representing the parent CloudFront
distribution of the invalidation request listed in the
InvalidationSummary.
:rtype: :class:`boto.cloudfront.distribution.Distribution`
:returns: A Distribution object representing the parent CloudFront
distribution of the invalidation request listed in the
InvalidationSummary
"""
return self.connection.get_distribution_info(self.distribution_id)
def get_invalidation_request(self):
"""
Returns an InvalidationBatch object representing the invalidation
request referred to in the InvalidationSummary.
:rtype: :class:`boto.cloudfront.invalidation.InvalidationBatch`
:returns: An InvalidationBatch object representing the invalidation
request referred to by the InvalidationSummary
"""
return self.connection.invalidation_request_status(
self.distribution_id, self.id)
# ---- end of vendored file: boto/cloudfront/invalidation.py ----
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.redshift import exceptions
class RedshiftConnection(AWSQueryConnection):
"""
Amazon Redshift **Overview**
This is an interface reference for Amazon Redshift. It contains
documentation for one of the programming or command line
interfaces you can use to manage Amazon Redshift clusters. Note
that Amazon Redshift is asynchronous, which means that some
interfaces may require techniques, such as polling or asynchronous
callback handlers, to determine when a command has been applied.
In this reference, the parameter descriptions indicate whether a
change is applied immediately, on the next instance reboot, or
during the next maintenance window. For a summary of the Amazon
Redshift cluster management interfaces, go to `Using the Amazon
    Redshift Management Interfaces`_.
Amazon Redshift manages all the work of setting up, operating, and
scaling a data warehouse: provisioning capacity, monitoring and
backing up the cluster, and applying patches and upgrades to the
Amazon Redshift engine. You can focus on using your data to
acquire new insights for your business and customers.
If you are a first-time user of Amazon Redshift, we recommend that
    you begin by reading the `Amazon Redshift Getting Started
    Guide`_.
If you are a database developer, the `Amazon Redshift Database
Developer Guide`_ explains how to design, build, query, and
maintain the databases that make up your data warehouse.
"""
APIVersion = "2012-12-01"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "redshift.us-east-1.amazonaws.com"
ResponseError = JSONResponseError
_faults = {
"SnapshotCopyAlreadyDisabled": exceptions.SnapshotCopyAlreadyDisabled,
"ClusterNotFound": exceptions.ClusterNotFound,
"UnknownSnapshotCopyRegion": exceptions.UnknownSnapshotCopyRegion,
"InvalidClusterSubnetState": exceptions.InvalidClusterSubnetState,
"InvalidSubnet": exceptions.InvalidSubnet,
"ReservedNodeQuotaExceeded": exceptions.ReservedNodeQuotaExceeded,
"InvalidClusterState": exceptions.InvalidClusterState,
"HsmClientCertificateQuotaExceeded": exceptions.HsmClientCertificateQuotaExceeded,
"SubscriptionCategoryNotFound": exceptions.SubscriptionCategoryNotFound,
"HsmClientCertificateNotFound": exceptions.HsmClientCertificateNotFound,
"SubscriptionEventIdNotFound": exceptions.SubscriptionEventIdNotFound,
"ClusterSecurityGroupAlreadyExists": exceptions.ClusterSecurityGroupAlreadyExists,
"HsmConfigurationAlreadyExists": exceptions.HsmConfigurationAlreadyExists,
"NumberOfNodesQuotaExceeded": exceptions.NumberOfNodesQuotaExceeded,
"ReservedNodeOfferingNotFound": exceptions.ReservedNodeOfferingNotFound,
"BucketNotFound": exceptions.BucketNotFound,
"InsufficientClusterCapacity": exceptions.InsufficientClusterCapacity,
"InvalidRestore": exceptions.InvalidRestore,
"UnauthorizedOperation": exceptions.UnauthorizedOperation,
"ClusterQuotaExceeded": exceptions.ClusterQuotaExceeded,
"InvalidVPCNetworkState": exceptions.InvalidVPCNetworkState,
"ClusterSnapshotNotFound": exceptions.ClusterSnapshotNotFound,
"AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceeded,
"InvalidHsmClientCertificateState": exceptions.InvalidHsmClientCertificateState,
"SNSTopicArnNotFound": exceptions.SNSTopicArnNotFound,
"ResizeNotFound": exceptions.ResizeNotFound,
"ClusterSubnetGroupNotFound": exceptions.ClusterSubnetGroupNotFound,
"SNSNoAuthorization": exceptions.SNSNoAuthorization,
"ClusterSnapshotQuotaExceeded": exceptions.ClusterSnapshotQuotaExceeded,
"AccessToSnapshotDenied": exceptions.AccessToSnapshotDenied,
"InvalidClusterSecurityGroupState": exceptions.InvalidClusterSecurityGroupState,
"NumberOfNodesPerClusterLimitExceeded": exceptions.NumberOfNodesPerClusterLimitExceeded,
"ClusterSubnetQuotaExceeded": exceptions.ClusterSubnetQuotaExceeded,
"SNSInvalidTopic": exceptions.SNSInvalidTopic,
"ClusterSecurityGroupNotFound": exceptions.ClusterSecurityGroupNotFound,
"InvalidElasticIp": exceptions.InvalidElasticIp,
"InvalidClusterParameterGroupState": exceptions.InvalidClusterParameterGroupState,
"InvalidHsmConfigurationState": exceptions.InvalidHsmConfigurationState,
"ClusterAlreadyExists": exceptions.ClusterAlreadyExists,
"HsmConfigurationQuotaExceeded": exceptions.HsmConfigurationQuotaExceeded,
"ClusterSnapshotAlreadyExists": exceptions.ClusterSnapshotAlreadyExists,
"SubscriptionSeverityNotFound": exceptions.SubscriptionSeverityNotFound,
"SourceNotFound": exceptions.SourceNotFound,
"ReservedNodeAlreadyExists": exceptions.ReservedNodeAlreadyExists,
"ClusterSubnetGroupQuotaExceeded": exceptions.ClusterSubnetGroupQuotaExceeded,
"ClusterParameterGroupNotFound": exceptions.ClusterParameterGroupNotFound,
"InvalidS3BucketName": exceptions.InvalidS3BucketName,
"InvalidS3KeyPrefix": exceptions.InvalidS3KeyPrefix,
"SubscriptionAlreadyExist": exceptions.SubscriptionAlreadyExist,
"HsmConfigurationNotFound": exceptions.HsmConfigurationNotFound,
"InvalidSubscriptionState": exceptions.InvalidSubscriptionState,
"AuthorizationNotFound": exceptions.AuthorizationNotFound,
"ClusterSecurityGroupQuotaExceeded": exceptions.ClusterSecurityGroupQuotaExceeded,
"SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse,
"EventSubscriptionQuotaExceeded": exceptions.EventSubscriptionQuotaExceeded,
"AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExists,
"InvalidClusterSnapshotState": exceptions.InvalidClusterSnapshotState,
"ClusterParameterGroupQuotaExceeded": exceptions.ClusterParameterGroupQuotaExceeded,
"SnapshotCopyDisabled": exceptions.SnapshotCopyDisabled,
"ClusterSubnetGroupAlreadyExists": exceptions.ClusterSubnetGroupAlreadyExists,
"ReservedNodeNotFound": exceptions.ReservedNodeNotFound,
"HsmClientCertificateAlreadyExists": exceptions.HsmClientCertificateAlreadyExists,
"InvalidClusterSubnetGroupState": exceptions.InvalidClusterSubnetGroupState,
"SubscriptionNotFound": exceptions.SubscriptionNotFound,
"InsufficientS3BucketPolicy": exceptions.InsufficientS3BucketPolicy,
"ClusterParameterGroupAlreadyExists": exceptions.ClusterParameterGroupAlreadyExists,
"UnsupportedOption": exceptions.UnsupportedOption,
"CopyToRegionDisabled": exceptions.CopyToRegionDisabled,
"SnapshotCopyAlreadyEnabled": exceptions.SnapshotCopyAlreadyEnabled,
"IncompatibleOrderableOptions": exceptions.IncompatibleOrderableOptions,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(RedshiftConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def authorize_cluster_security_group_ingress(self,
cluster_security_group_name,
cidrip=None,
ec2_security_group_name=None,
ec2_security_group_owner_id=None):
"""
Adds an inbound (ingress) rule to an Amazon Redshift security
group. Depending on whether the application accessing your
cluster is running on the Internet or an EC2 instance, you can
authorize inbound access to either a Classless Interdomain
Routing (CIDR) IP address range or an EC2 security group. You
can add as many as 20 ingress rules to an Amazon Redshift
security group.
For an overview of CIDR blocks, see the Wikipedia article on
`Classless Inter-Domain Routing`_.
You must also associate the security group with a cluster so
that clients running on these IP addresses or the EC2 instance
are authorized to connect to the cluster. For information
about managing security groups, go to `Working with Security
Groups`_ in the Amazon Redshift Management Guide .
:type cluster_security_group_name: string
:param cluster_security_group_name: The name of the security group to
which the ingress rule is added.
:type cidrip: string
:param cidrip: The IP range to be added to the Amazon Redshift
security group.
:type ec2_security_group_name: string
:param ec2_security_group_name: The EC2 security group to be added
to the Amazon Redshift security group.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: The AWS account number of the owner
of the security group specified by the EC2SecurityGroupName
parameter. The AWS Access Key ID is not an acceptable value.
Example: `111122223333`
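
A minimal usage sketch (illustrative; the region and all
identifiers below are hypothetical)::

    import boto.redshift
    conn = boto.redshift.connect_to_region('us-east-1')
    # Authorize a CIDR range. Alternatively, pass
    # ec2_security_group_name and ec2_security_group_owner_id
    # instead of cidrip.
    conn.authorize_cluster_security_group_ingress(
        'examplesecuritygroup', cidrip='192.0.2.0/24')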
"""
params = {
'ClusterSecurityGroupName': cluster_security_group_name,
}
if cidrip is not None:
params['CIDRIP'] = cidrip
if ec2_security_group_name is not None:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_owner_id is not None:
params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
return self._make_request(
action='AuthorizeClusterSecurityGroupIngress',
verb='POST',
path='/', params=params)
def authorize_snapshot_access(self, snapshot_identifier,
account_with_restore_access,
snapshot_cluster_identifier=None):
"""
Authorizes the specified AWS customer account to restore the
specified snapshot.
For more information about working with snapshots, go to
`Amazon Redshift Snapshots`_ in the Amazon Redshift Management
Guide .
:type snapshot_identifier: string
:param snapshot_identifier: The identifier of the snapshot the account
is authorized to restore.
:type snapshot_cluster_identifier: string
:param snapshot_cluster_identifier: The identifier of the cluster the
snapshot was created from. This parameter is required if your IAM
user has a policy containing a snapshot resource element that
specifies anything other than * for the cluster name.
:type account_with_restore_access: string
:param account_with_restore_access: The identifier of the AWS customer
account authorized to restore the specified snapshot.
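
A minimal usage sketch (illustrative; `conn` is an existing
`RedshiftConnection` and the identifiers are hypothetical)::

    # Grant account 111122223333 permission to restore the snapshot.
    conn.authorize_snapshot_access('my-snapshot-id', '111122223333')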
"""
params = {
'SnapshotIdentifier': snapshot_identifier,
'AccountWithRestoreAccess': account_with_restore_access,
}
if snapshot_cluster_identifier is not None:
params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier
return self._make_request(
action='AuthorizeSnapshotAccess',
verb='POST',
path='/', params=params)
def copy_cluster_snapshot(self, source_snapshot_identifier,
target_snapshot_identifier,
source_snapshot_cluster_identifier=None):
"""
Copies the specified automated cluster snapshot to a new
manual cluster snapshot. The source must be an automated
snapshot and it must be in the available state.
When you delete a cluster, Amazon Redshift deletes any
automated snapshots of the cluster. Also, when the retention
period of the snapshot expires, Amazon Redshift automatically
deletes it. If you want to keep an automated snapshot for a
longer period, you can make a manual copy of the snapshot.
Manual snapshots are retained until you delete them.
For more information about working with snapshots, go to
`Amazon Redshift Snapshots`_ in the Amazon Redshift Management
Guide .
:type source_snapshot_identifier: string
:param source_snapshot_identifier:
The identifier for the source snapshot.
Constraints:
+ Must be the identifier for a valid automated snapshot whose state is
`available`.
:type source_snapshot_cluster_identifier: string
:param source_snapshot_cluster_identifier:
The identifier of the cluster the source snapshot was created from.
This parameter is required if your IAM user has a policy containing
a snapshot resource element that specifies anything other than *
for the cluster name.
Constraints:
+ Must be the identifier for a valid cluster.
:type target_snapshot_identifier: string
:param target_snapshot_identifier:
The identifier given to the new manual snapshot.
Constraints:
+ Cannot be null, empty, or blank.
+ Must contain from 1 to 255 alphanumeric characters or hyphens.
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
+ Must be unique for the AWS account that is making the request.
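
A minimal usage sketch (illustrative; the snapshot identifiers are
hypothetical)::

    # Keep an automated snapshot around by copying it to a manual one.
    conn.copy_cluster_snapshot(
        'rs:myexamplecluster-2013-01-22-19-27-58', 'my-manual-copy')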
"""
params = {
'SourceSnapshotIdentifier': source_snapshot_identifier,
'TargetSnapshotIdentifier': target_snapshot_identifier,
}
if source_snapshot_cluster_identifier is not None:
params['SourceSnapshotClusterIdentifier'] = source_snapshot_cluster_identifier
return self._make_request(
action='CopyClusterSnapshot',
verb='POST',
path='/', params=params)
def create_cluster(self, cluster_identifier, node_type, master_username,
master_user_password, db_name=None, cluster_type=None,
cluster_security_groups=None,
vpc_security_group_ids=None,
cluster_subnet_group_name=None,
availability_zone=None,
preferred_maintenance_window=None,
cluster_parameter_group_name=None,
automated_snapshot_retention_period=None, port=None,
cluster_version=None, allow_version_upgrade=None,
number_of_nodes=None, publicly_accessible=None,
encrypted=None,
hsm_client_certificate_identifier=None,
hsm_configuration_identifier=None, elastic_ip=None):
"""
Creates a new cluster. To create the cluster in a virtual
private cloud (VPC), you must provide a cluster subnet group
name. If you don't provide a cluster subnet group name or the
cluster security group parameter, Amazon Redshift creates a
non-VPC cluster and associates the default cluster security
group with the cluster. For more information about managing
clusters, go to `Amazon Redshift Clusters`_ in the Amazon
Redshift Management Guide .
:type db_name: string
:param db_name:
The name of the first database to be created when the cluster is
created.
To create additional databases after the cluster is created, connect to
the cluster with a SQL client and use SQL commands to create a
database. For more information, go to `Create a Database`_ in the
Amazon Redshift Database Developer Guide.
Default: `dev`
Constraints:
+ Must contain 1 to 64 alphanumeric characters.
+ Must contain only lowercase letters.
+ Cannot be a word that is reserved by the service. A list of reserved
words can be found in `Reserved Words`_ in the Amazon Redshift
Database Developer Guide.
:type cluster_identifier: string
:param cluster_identifier: A unique identifier for the cluster. You use
this identifier to refer to the cluster for any subsequent cluster
operations such as deleting or modifying. The identifier also
appears in the Amazon Redshift console.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens.
+ Alphabetic characters must be lowercase.
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
+ Must be unique for all clusters within an AWS account.
Example: `myexamplecluster`
:type cluster_type: string
:param cluster_type: The type of the cluster. When cluster type is
specified as
+ `single-node`, the **NumberOfNodes** parameter is not required.
+ `multi-node`, the **NumberOfNodes** parameter is required.
Valid Values: `multi-node` | `single-node`
Default: `multi-node`
:type node_type: string
:param node_type: The node type to be provisioned for the cluster. For
information about node types, go to `Working with Clusters`_ in
the Amazon Redshift Management Guide .
Valid Values: `dw1.xlarge` | `dw1.8xlarge` | `dw2.large` |
`dw2.8xlarge`.
:type master_username: string
:param master_username:
The user name associated with the master user account for the cluster
that is being created.
Constraints:
+ Must be 1 - 128 alphanumeric characters.
+ First character must be a letter.
+ Cannot be a reserved word. A list of reserved words can be found in
`Reserved Words`_ in the Amazon Redshift Database Developer Guide.
:type master_user_password: string
:param master_user_password:
The password associated with the master user account for the cluster
that is being created.
Constraints:
+ Must be between 8 and 64 characters in length.
+ Must contain at least one uppercase letter.
+ Must contain at least one lowercase letter.
+ Must contain one number.
+ Can be any printable ASCII character (ASCII code 33 to 126) except '
(single quote), " (double quote), \, /, @, or space.
:type cluster_security_groups: list
:param cluster_security_groups: A list of security groups to be
associated with this cluster.
Default: The default cluster security group for Amazon Redshift.
:type vpc_security_group_ids: list
:param vpc_security_group_ids: A list of Virtual Private Cloud (VPC)
security groups to be associated with the cluster.
Default: The default VPC security group is associated with the cluster.
:type cluster_subnet_group_name: string
:param cluster_subnet_group_name: The name of a cluster subnet group to
be associated with this cluster.
If this parameter is not provided, the resulting cluster will be
deployed outside a virtual private cloud (VPC).
:type availability_zone: string
:param availability_zone: The EC2 Availability Zone (AZ) in which you
want Amazon Redshift to provision the cluster. For example, if you
have several EC2 instances running in a specific Availability Zone,
then you might want the cluster to be provisioned in the same zone
in order to decrease network latency.
Default: A random, system-chosen Availability Zone in the region that
is specified by the endpoint.
Example: `us-east-1d`
Constraint: The specified Availability Zone must be in the same region
as the current endpoint.
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which automated cluster maintenance can occur.
Format: `ddd:hh24:mi-ddd:hh24:mi`
Default: A 30-minute window selected at random from an 8-hour block of
time per region, occurring on a random day of the week. The
following list shows the time blocks for each region from which the
default maintenance windows are assigned.
+ **US-East (Northern Virginia) Region:** 03:00-11:00 UTC
+ **US-West (Oregon) Region** 06:00-14:00 UTC
+ **EU (Ireland) Region** 22:00-06:00 UTC
+ **Asia Pacific (Singapore) Region** 14:00-22:00 UTC
+ **Asia Pacific (Sydney) Region** 12:00-20:00 UTC
+ **Asia Pacific (Tokyo) Region** 17:00-03:00 UTC
Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
Constraints: Minimum 30-minute window.
:type cluster_parameter_group_name: string
:param cluster_parameter_group_name:
The name of the parameter group to be associated with this cluster.
Default: The default Amazon Redshift cluster parameter group. For
information about the default parameter group, go to `Working with
Amazon Redshift Parameter Groups`_
Constraints:
+ Must be 1 to 255 alphanumeric characters or hyphens.
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
:type automated_snapshot_retention_period: integer
:param automated_snapshot_retention_period: The number of days that
automated snapshots are retained. If the value is 0, automated
snapshots are disabled. Even if automated snapshots are disabled,
you can still create manual snapshots when you want with
CreateClusterSnapshot.
Default: `1`
Constraints: Must be a value from 0 to 35.
:type port: integer
:param port: The port number on which the cluster accepts incoming
connections.
The cluster is accessible only via the JDBC and ODBC connection
strings. Part of the connection string requires the port on which
the cluster will listen for incoming connections.
Default: `5439`
Valid Values: `1150-65535`
:type cluster_version: string
:param cluster_version: The version of the Amazon Redshift engine
software that you want to deploy on the cluster.
The version selected runs on all the nodes in the cluster.
Constraints: Only version 1.0 is currently available.
Example: `1.0`
:type allow_version_upgrade: boolean
:param allow_version_upgrade: If `True`, upgrades can be applied during
the maintenance window to the Amazon Redshift engine that is
running on the cluster.
When a new version of the Amazon Redshift engine is released, you can
request that the service automatically apply upgrades during the
maintenance window to the Amazon Redshift engine that is running on
your cluster.
Default: `True`
:type number_of_nodes: integer
:param number_of_nodes: The number of compute nodes in the cluster.
This parameter is required when the **ClusterType** parameter is
specified as `multi-node`.
For information about determining how many nodes you need, go to
`Working with Clusters`_ in the Amazon Redshift Management Guide .
If you don't specify this parameter, you get a single-node cluster.
When requesting a multi-node cluster, you must specify the number
of nodes that you want in the cluster.
Default: `1`
Constraints: Value must be at least 1 and no more than 100.
:type publicly_accessible: boolean
:param publicly_accessible: If `True`, the cluster can be accessed from
a public network.
:type encrypted: boolean
:param encrypted: If `True`, the data in the cluster is encrypted at
rest.
Default: false
:type hsm_client_certificate_identifier: string
:param hsm_client_certificate_identifier: Specifies the name of the HSM
client certificate the Amazon Redshift cluster uses to retrieve the
data encryption keys stored in an HSM.
:type hsm_configuration_identifier: string
:param hsm_configuration_identifier: Specifies the name of the HSM
configuration that contains the information the Amazon Redshift
cluster can use to retrieve and store keys in an HSM.
:type elastic_ip: string
:param elastic_ip: The Elastic IP (EIP) address for the cluster.
Constraints: The cluster must be provisioned in EC2-VPC and publicly-
accessible through an Internet gateway. For more information about
provisioning clusters in EC2-VPC, go to `Supported Platforms to
Launch Your Cluster`_ in the Amazon Redshift Management Guide.
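
A minimal usage sketch (illustrative; the identifiers and the
password are hypothetical, and only a few of the parameters above
are shown)::

    conn.create_cluster(
        'myexamplecluster', 'dw2.large', 'masteruser',
        'ExamplePassword1', cluster_type='single-node')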
"""
params = {
'ClusterIdentifier': cluster_identifier,
'NodeType': node_type,
'MasterUsername': master_username,
'MasterUserPassword': master_user_password,
}
if db_name is not None:
params['DBName'] = db_name
if cluster_type is not None:
params['ClusterType'] = cluster_type
if cluster_security_groups is not None:
self.build_list_params(params,
cluster_security_groups,
'ClusterSecurityGroups.member')
if vpc_security_group_ids is not None:
self.build_list_params(params,
vpc_security_group_ids,
'VpcSecurityGroupIds.member')
if cluster_subnet_group_name is not None:
params['ClusterSubnetGroupName'] = cluster_subnet_group_name
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if cluster_parameter_group_name is not None:
params['ClusterParameterGroupName'] = cluster_parameter_group_name
if automated_snapshot_retention_period is not None:
params['AutomatedSnapshotRetentionPeriod'] = automated_snapshot_retention_period
if port is not None:
params['Port'] = port
if cluster_version is not None:
params['ClusterVersion'] = cluster_version
if allow_version_upgrade is not None:
params['AllowVersionUpgrade'] = str(
allow_version_upgrade).lower()
if number_of_nodes is not None:
params['NumberOfNodes'] = number_of_nodes
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if encrypted is not None:
params['Encrypted'] = str(
encrypted).lower()
if hsm_client_certificate_identifier is not None:
params['HsmClientCertificateIdentifier'] = hsm_client_certificate_identifier
if hsm_configuration_identifier is not None:
params['HsmConfigurationIdentifier'] = hsm_configuration_identifier
if elastic_ip is not None:
params['ElasticIp'] = elastic_ip
return self._make_request(
action='CreateCluster',
verb='POST',
path='/', params=params)
def create_cluster_parameter_group(self, parameter_group_name,
parameter_group_family, description):
"""
Creates an Amazon Redshift parameter group.
Creating parameter groups is independent of creating clusters.
You can associate a cluster with a parameter group when you
create the cluster. You can also associate an existing cluster
with a parameter group after the cluster is created by using
ModifyCluster.
Parameters in the parameter group define specific behavior
that applies to the databases you create on the cluster. For
more information about managing parameter groups, go to
`Amazon Redshift Parameter Groups`_ in the Amazon Redshift
Management Guide .
:type parameter_group_name: string
:param parameter_group_name:
The name of the cluster parameter group.
Constraints:
+ Must be 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
+ Must be unique within your AWS account.
This value is stored as a lower-case string.
:type parameter_group_family: string
:param parameter_group_family: The Amazon Redshift engine version to
which the cluster parameter group applies. The cluster engine
version determines the set of parameters.
To get a list of valid parameter group family names, you can call
DescribeClusterParameterGroups. By default, Amazon Redshift returns
a list of all the parameter groups that are owned by your AWS
account, including the default parameter groups for each Amazon
Redshift engine version. The parameter group family names
associated with the default parameter groups provide you the valid
values. For example, a valid family name is "redshift-1.0".
:type description: string
:param description: A description of the parameter group.
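
A minimal usage sketch (illustrative; the group name is
hypothetical)::

    conn.create_cluster_parameter_group(
        'myparametergroup', 'redshift-1.0',
        'Parameter group for my clusters')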
"""
params = {
'ParameterGroupName': parameter_group_name,
'ParameterGroupFamily': parameter_group_family,
'Description': description,
}
return self._make_request(
action='CreateClusterParameterGroup',
verb='POST',
path='/', params=params)
def create_cluster_security_group(self, cluster_security_group_name,
description):
"""
Creates a new Amazon Redshift security group. You use security
groups to control access to non-VPC clusters.
For information about managing security groups, go to `Amazon
Redshift Cluster Security Groups`_ in the Amazon Redshift
Management Guide .
:type cluster_security_group_name: string
:param cluster_security_group_name: The name for the security group.
Amazon Redshift stores the value as a lowercase string.
Constraints:
+ Must contain no more than 255 alphanumeric characters or hyphens.
+ Must not be "Default".
+ Must be unique for all security groups that are created by your AWS
account.
Example: `examplesecuritygroup`
:type description: string
:param description: A description for the security group.
"""
params = {
'ClusterSecurityGroupName': cluster_security_group_name,
'Description': description,
}
return self._make_request(
action='CreateClusterSecurityGroup',
verb='POST',
path='/', params=params)
def create_cluster_snapshot(self, snapshot_identifier,
cluster_identifier):
"""
Creates a manual snapshot of the specified cluster. The
cluster must be in the `available` state.
For more information about working with snapshots, go to
`Amazon Redshift Snapshots`_ in the Amazon Redshift Management
Guide .
:type snapshot_identifier: string
:param snapshot_identifier: A unique identifier for the snapshot that
you are requesting. This identifier must be unique for all
snapshots within the AWS account.
Constraints:
+ Cannot be null, empty, or blank
+ Must contain from 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `my-snapshot-id`
:type cluster_identifier: string
:param cluster_identifier: The cluster identifier for which you want a
snapshot.
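
A minimal usage sketch (illustrative; the identifiers are
hypothetical)::

    conn.create_cluster_snapshot('my-snapshot-id', 'myexamplecluster')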
"""
params = {
'SnapshotIdentifier': snapshot_identifier,
'ClusterIdentifier': cluster_identifier,
}
return self._make_request(
action='CreateClusterSnapshot',
verb='POST',
path='/', params=params)
def create_cluster_subnet_group(self, cluster_subnet_group_name,
description, subnet_ids):
"""
Creates a new Amazon Redshift subnet group. You must provide a
list of one or more subnets in your existing Amazon Virtual
Private Cloud (Amazon VPC) when creating an Amazon Redshift
subnet group.
For information about subnet groups, go to `Amazon Redshift
Cluster Subnet Groups`_ in the Amazon Redshift Management
Guide .
:type cluster_subnet_group_name: string
:param cluster_subnet_group_name: The name for the subnet group. Amazon
Redshift stores the value as a lowercase string.
Constraints:
+ Must contain no more than 255 alphanumeric characters or hyphens.
+ Must not be "Default".
+ Must be unique for all subnet groups that are created by your AWS
account.
Example: `examplesubnetgroup`
:type description: string
:param description: A description for the subnet group.
:type subnet_ids: list
:param subnet_ids: An array of VPC subnet IDs. A maximum of 20 subnets
can be modified in a single request.
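
A minimal usage sketch (illustrative; the subnet IDs are
hypothetical)::

    conn.create_cluster_subnet_group(
        'examplesubnetgroup', 'My subnet group',
        ['subnet-12345678', 'subnet-87654321'])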
"""
params = {
'ClusterSubnetGroupName': cluster_subnet_group_name,
'Description': description,
}
self.build_list_params(params,
subnet_ids,
'SubnetIds.member')
return self._make_request(
action='CreateClusterSubnetGroup',
verb='POST',
path='/', params=params)
def create_event_subscription(self, subscription_name, sns_topic_arn,
source_type=None, source_ids=None,
event_categories=None, severity=None,
enabled=None):
"""
Creates an Amazon Redshift event notification subscription.
This action requires an ARN (Amazon Resource Name) of an
Amazon SNS topic created by either the Amazon Redshift
console, the Amazon SNS console, or the Amazon SNS API. To
obtain an ARN with Amazon SNS, you must create a topic in
Amazon SNS and subscribe to the topic. The ARN is displayed in
the SNS console.
You can specify the source type, and lists of Amazon Redshift
source IDs, event categories, and event severities.
Notifications will be sent for all events that match those
criteria. For example, you can specify source type = cluster,
source ID = my-cluster-1 and my-cluster-2, event categories =
Availability, Backup, and severity = ERROR. The
subscription will only send notifications for those ERROR
events in the Availability and Backup categories for the
specified clusters.
If you specify both the source type and source IDs, such as
source type = cluster and source identifier = my-cluster-1,
notifications will be sent for all the cluster events for my-
cluster-1. If you specify a source type but do not specify a
source identifier, you will receive notice of the events for
the objects of that type in your AWS account. If you specify
neither the SourceType nor the SourceIdentifier, you
will be notified of events generated from all Amazon Redshift
sources belonging to your AWS account. You must specify a
source type if you specify a source ID.
:type subscription_name: string
:param subscription_name:
The name of the event subscription to be created.
Constraints:
+ Cannot be null, empty, or blank.
+ Must contain from 1 to 255 alphanumeric characters or hyphens.
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
:type sns_topic_arn: string
:param sns_topic_arn: The Amazon Resource Name (ARN) of the Amazon SNS
topic used to transmit the event notifications. The ARN is created
by Amazon SNS when you create a topic and subscribe to it.
:type source_type: string
:param source_type: The type of source that will be generating the
events. For example, if you want to be notified of events generated
by a cluster, you would set this parameter to cluster. If this
value is not specified, events are returned for all Amazon Redshift
objects in your AWS account. You must specify a source type in
order to specify source IDs.
Valid values: cluster, cluster-parameter-group, cluster-security-group,
and cluster-snapshot.
:type source_ids: list
:param source_ids: A list of one or more identifiers of Amazon Redshift
source objects. All of the objects must be of the same type as was
specified in the source type parameter. The event subscription will
return only events generated by the specified objects. If not
specified, then events are returned for all objects within the
source type specified.
Example: my-cluster-1, my-cluster-2
Example: my-snapshot-20131010
:type event_categories: list
:param event_categories: Specifies the Amazon Redshift event categories
to be published by the event notification subscription.
Values: Configuration, Management, Monitoring, Security
:type severity: string
:param severity: Specifies the Amazon Redshift event severity to be
published by the event notification subscription.
Values: ERROR, INFO
:type enabled: boolean
:param enabled: A Boolean value; set to `True` to activate the
subscription, set to `False` to create the subscription but not
activate it.
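
A minimal usage sketch (illustrative; the subscription name and
topic ARN are hypothetical)::

    conn.create_event_subscription(
        'my-event-subscription',
        'arn:aws:sns:us-east-1:111122223333:my-topic',
        source_type='cluster',
        source_ids=['my-cluster-1', 'my-cluster-2'],
        event_categories=['Availability', 'Backup'],
        severity='ERROR', enabled=True)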
"""
params = {
'SubscriptionName': subscription_name,
'SnsTopicArn': sns_topic_arn,
}
if source_type is not None:
params['SourceType'] = source_type
if source_ids is not None:
self.build_list_params(params,
source_ids,
'SourceIds.member')
if event_categories is not None:
self.build_list_params(params,
event_categories,
'EventCategories.member')
if severity is not None:
params['Severity'] = severity
if enabled is not None:
params['Enabled'] = str(
enabled).lower()
return self._make_request(
action='CreateEventSubscription',
verb='POST',
path='/', params=params)
def create_hsm_client_certificate(self,
hsm_client_certificate_identifier):
"""
Creates an HSM client certificate that an Amazon Redshift
cluster will use to connect to the client's HSM in order to
store and retrieve the keys used to encrypt the cluster
databases.
The command returns a public key, which you must store in the
HSM. In addition to creating the HSM certificate, you must
create an Amazon Redshift HSM configuration that provides a
cluster the information needed to store and use encryption
keys in the HSM. For more information, go to `Hardware
Security Modules`_ in the Amazon Redshift Management Guide.
:type hsm_client_certificate_identifier: string
:param hsm_client_certificate_identifier: The identifier to be assigned
to the new HSM client certificate that the cluster will use to
connect to the HSM to use the database encryption keys.
"""
params = {
'HsmClientCertificateIdentifier': hsm_client_certificate_identifier,
}
return self._make_request(
action='CreateHsmClientCertificate',
verb='POST',
path='/', params=params)
def create_hsm_configuration(self, hsm_configuration_identifier,
description, hsm_ip_address,
hsm_partition_name, hsm_partition_password,
hsm_server_public_certificate):
"""
Creates an HSM configuration that contains the information
required by an Amazon Redshift cluster to store and use
database encryption keys in a Hardware Security Module (HSM).
After creating the HSM configuration, you can specify it as a
parameter when creating a cluster. The cluster will then store
its encryption keys in the HSM.
In addition to creating an HSM configuration, you must also
create an HSM client certificate. For more information, go to
`Hardware Security Modules`_ in the Amazon Redshift Management
Guide.
:type hsm_configuration_identifier: string
:param hsm_configuration_identifier: The identifier to be assigned to
the new Amazon Redshift HSM configuration.
:type description: string
:param description: A text description of the HSM configuration to be
created.
:type hsm_ip_address: string
:param hsm_ip_address: The IP address that the Amazon Redshift cluster
must use to access the HSM.
:type hsm_partition_name: string
:param hsm_partition_name: The name of the partition in the HSM where
the Amazon Redshift clusters will store their database encryption
keys.
:type hsm_partition_password: string
:param hsm_partition_password: The password required to access the HSM
partition.
:type hsm_server_public_certificate: string
:param hsm_server_public_certificate: The HSM's public certificate
file. When using Cloud HSM, the file name is server.pem.
"""
params = {
'HsmConfigurationIdentifier': hsm_configuration_identifier,
'Description': description,
'HsmIpAddress': hsm_ip_address,
'HsmPartitionName': hsm_partition_name,
'HsmPartitionPassword': hsm_partition_password,
'HsmServerPublicCertificate': hsm_server_public_certificate,
}
return self._make_request(
action='CreateHsmConfiguration',
verb='POST',
path='/', params=params)
def delete_cluster(self, cluster_identifier,
skip_final_cluster_snapshot=None,
final_cluster_snapshot_identifier=None):
"""
Deletes a previously provisioned cluster. A successful
response from the web service indicates that the request was
received correctly. If a final cluster snapshot is requested,
the status of the cluster will be "final-snapshot" while the
snapshot is being taken, then it's "deleting" once Amazon
Redshift begins deleting the cluster. Use DescribeClusters to
monitor the status of the deletion. The delete operation
cannot be canceled or reverted once submitted. For more
information about managing clusters, go to `Amazon Redshift
Clusters`_ in the Amazon Redshift Management Guide .
:type cluster_identifier: string
:param cluster_identifier:
The identifier of the cluster to be deleted.
Constraints:
+ Must contain lowercase characters.
+ Must contain from 1 to 63 alphanumeric characters or hyphens.
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
:type skip_final_cluster_snapshot: boolean
:param skip_final_cluster_snapshot: Determines whether a final snapshot
of the cluster is created before Amazon Redshift deletes the
cluster. If `True`, a final cluster snapshot is not created. If
`False`, a final cluster snapshot is created before the cluster is
deleted.
Default: `False`
:type final_cluster_snapshot_identifier: string
:param final_cluster_snapshot_identifier:
The identifier of the final snapshot that is to be created immediately
before deleting the cluster. If this parameter is provided,
SkipFinalClusterSnapshot must be `False`.
Constraints:
+ Must be 1 to 255 alphanumeric characters.
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
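
A minimal usage sketch (illustrative; the identifiers are
hypothetical)::

    # Take a final snapshot before the cluster is deleted.
    conn.delete_cluster(
        'myexamplecluster',
        skip_final_cluster_snapshot=False,
        final_cluster_snapshot_identifier='myexamplecluster-final')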
"""
params = {'ClusterIdentifier': cluster_identifier, }
if skip_final_cluster_snapshot is not None:
params['SkipFinalClusterSnapshot'] = str(
skip_final_cluster_snapshot).lower()
if final_cluster_snapshot_identifier is not None:
params['FinalClusterSnapshotIdentifier'] = final_cluster_snapshot_identifier
return self._make_request(
action='DeleteCluster',
verb='POST',
path='/', params=params)
def delete_cluster_parameter_group(self, parameter_group_name):
"""
Deletes a specified Amazon Redshift parameter group.
:type parameter_group_name: string
:param parameter_group_name:
The name of the parameter group to be deleted.
Constraints:
+ Must be the name of an existing cluster parameter group.
+ Cannot delete a default cluster parameter group.
"""
params = {'ParameterGroupName': parameter_group_name, }
return self._make_request(
action='DeleteClusterParameterGroup',
verb='POST',
path='/', params=params)
def delete_cluster_security_group(self, cluster_security_group_name):
"""
Deletes an Amazon Redshift security group.
For information about managing security groups, go to `Amazon
Redshift Cluster Security Groups`_ in the Amazon Redshift
Management Guide .
:type cluster_security_group_name: string
:param cluster_security_group_name: The name of the cluster security
group to be deleted.
"""
params = {
'ClusterSecurityGroupName': cluster_security_group_name,
}
return self._make_request(
action='DeleteClusterSecurityGroup',
verb='POST',
path='/', params=params)
def delete_cluster_snapshot(self, snapshot_identifier,
snapshot_cluster_identifier=None):
"""
Deletes the specified manual snapshot. The snapshot must be in
the `available` state, with no other users authorized to
access the snapshot.
Unlike automated snapshots, manual snapshots are retained even
after you delete your cluster. Amazon Redshift does not delete
your manual snapshots. You must delete manual snapshots
explicitly to avoid being charged. If other accounts are
authorized to access the snapshot, you must revoke all of the
authorizations before you can delete the snapshot.
:type snapshot_identifier: string
:param snapshot_identifier: The unique identifier of the manual
snapshot to be deleted.
Constraints: Must be the name of an existing snapshot that is in the
`available` state.
:type snapshot_cluster_identifier: string
:param snapshot_cluster_identifier: The unique identifier of the
cluster the snapshot was created from. This parameter is required
if your IAM user has a policy containing a snapshot resource
element that specifies anything other than * for the cluster name.
Constraints: Must be the name of a valid cluster.
"""
params = {'SnapshotIdentifier': snapshot_identifier, }
if snapshot_cluster_identifier is not None:
params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier
return self._make_request(
action='DeleteClusterSnapshot',
verb='POST',
path='/', params=params)
def delete_cluster_subnet_group(self, cluster_subnet_group_name):
"""
Deletes the specified cluster subnet group.
:type cluster_subnet_group_name: string
:param cluster_subnet_group_name: The name of the cluster subnet
group to be deleted.
"""
params = {
'ClusterSubnetGroupName': cluster_subnet_group_name,
}
return self._make_request(
action='DeleteClusterSubnetGroup',
verb='POST',
path='/', params=params)
def delete_event_subscription(self, subscription_name):
"""
Deletes an Amazon Redshift event notification subscription.
:type subscription_name: string
:param subscription_name: The name of the Amazon Redshift event
notification subscription to be deleted.
"""
params = {'SubscriptionName': subscription_name, }
return self._make_request(
action='DeleteEventSubscription',
verb='POST',
path='/', params=params)
def delete_hsm_client_certificate(self,
hsm_client_certificate_identifier):
"""
Deletes the specified HSM client certificate.
:type hsm_client_certificate_identifier: string
:param hsm_client_certificate_identifier: The identifier of the HSM
client certificate to be deleted.
"""
params = {
'HsmClientCertificateIdentifier': hsm_client_certificate_identifier,
}
return self._make_request(
action='DeleteHsmClientCertificate',
verb='POST',
path='/', params=params)
def delete_hsm_configuration(self, hsm_configuration_identifier):
"""
Deletes the specified Amazon Redshift HSM configuration.
:type hsm_configuration_identifier: string
:param hsm_configuration_identifier: The identifier of the Amazon
Redshift HSM configuration to be deleted.
"""
params = {
'HsmConfigurationIdentifier': hsm_configuration_identifier,
}
return self._make_request(
action='DeleteHsmConfiguration',
verb='POST',
path='/', params=params)
def describe_cluster_parameter_groups(self, parameter_group_name=None,
max_records=None, marker=None):
"""
Returns a list of Amazon Redshift parameter groups, including
parameter groups you created and the default parameter group.
For each parameter group, the response includes the parameter
group name, description, and parameter group family name. You
can optionally specify a name to retrieve the description of a
specific parameter group.
For more information about managing parameter groups, go to
`Amazon Redshift Parameter Groups`_ in the Amazon Redshift
Management Guide .
:type parameter_group_name: string
:param parameter_group_name: The name of a specific parameter group for
which to return details. By default, details about all parameter
groups and the default parameter group are returned.
:type max_records: integer
:param max_records: The maximum number of response records to return in
each call. If the number of remaining response records exceeds the
specified `MaxRecords` value, a value is returned in a `marker`
field of the response. You can retrieve the next set of records by
retrying the command with the returned marker value.
Default: `100`
Constraints: minimum 20, maximum 100.
:type marker: string
:param marker: An optional parameter that specifies the starting point
to return a set of response records. When the results of a
DescribeClusterParameterGroups request exceed the value specified
in `MaxRecords`, AWS returns a value in the `Marker` field of the
response. You can retrieve the next set of response records by
providing the returned marker value in the `Marker` parameter and
retrying the request.
"""
params = {}
if parameter_group_name is not None:
params['ParameterGroupName'] = parameter_group_name
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeClusterParameterGroups',
verb='POST',
path='/', params=params)
def describe_cluster_parameters(self, parameter_group_name, source=None,
max_records=None, marker=None):
"""
Returns a detailed list of parameters contained within the
specified Amazon Redshift parameter group. For each parameter
the response includes information such as parameter name,
description, data type, value, whether the parameter value is
modifiable, and so on.
You can specify source filter to retrieve parameters of only
specific type. For example, to retrieve parameters that were
modified by a user action such as from
ModifyClusterParameterGroup, you can specify source equal to
user .
For more information about managing parameter groups, go to
`Amazon Redshift Parameter Groups`_ in the Amazon Redshift
Management Guide .
:type parameter_group_name: string
:param parameter_group_name: The name of a cluster parameter group for
which to return details.
:type source: string
:param source: The parameter types to return. Specify `user` to show
parameters that are different from the default. Similarly, specify
`engine-default` to show parameters that are the same as the
default parameter group.
Default: All parameter types returned.
Valid Values: `user` | `engine-default`
:type max_records: integer
:param max_records: The maximum number of response records to return in
each call. If the number of remaining response records exceeds the
specified `MaxRecords` value, a value is returned in a `marker`
field of the response. You can retrieve the next set of records by
retrying the command with the returned marker value.
Default: `100`
Constraints: minimum 20, maximum 100.
:type marker: string
:param marker: An optional parameter that specifies the starting point
to return a set of response records. When the results of a
DescribeClusterParameters request exceed the value specified in
`MaxRecords`, AWS returns a value in the `Marker` field of the
response. You can retrieve the next set of response records by
providing the returned marker value in the `Marker` parameter and
retrying the request.
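
A minimal usage sketch (illustrative; the group name is
hypothetical)::

    # Return only parameters changed by user actions.
    response = conn.describe_cluster_parameters(
        'myparametergroup', source='user')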
"""
params = {'ParameterGroupName': parameter_group_name, }
if source is not None:
params['Source'] = source
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeClusterParameters',
verb='POST',
path='/', params=params)
def describe_cluster_security_groups(self,
cluster_security_group_name=None,
max_records=None, marker=None):
"""
Returns information about Amazon Redshift security groups. If
the name of a security group is specified, the response will
contain information about only that security group.
For information about managing security groups, go to `Amazon
Redshift Cluster Security Groups`_ in the Amazon Redshift
Management Guide .
:type cluster_security_group_name: string
:param cluster_security_group_name: The name of a cluster security
group for which you are requesting details. You can specify either
the **Marker** parameter or a **ClusterSecurityGroupName**
parameter, but not both.
Example: `securitygroup1`
:type max_records: integer
:param max_records: The maximum number of response records to return in
each call. If the number of remaining response records exceeds the
specified `MaxRecords` value, a value is returned in a `marker`
field of the response. You can retrieve the next set of records by
retrying the command with the returned marker value.
Default: `100`
Constraints: minimum 20, maximum 100.
:type marker: string
:param marker: An optional parameter that specifies the starting point
to return a set of response records. When the results of a
DescribeClusterSecurityGroups request exceed the value specified in
`MaxRecords`, AWS returns a value in the `Marker` field of the
response. You can retrieve the next set of response records by
providing the returned marker value in the `Marker` parameter and
retrying the request.
Constraints: You can specify either the **ClusterSecurityGroupName**
parameter or the **Marker** parameter, but not both.
"""
params = {}
if cluster_security_group_name is not None:
params['ClusterSecurityGroupName'] = cluster_security_group_name
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeClusterSecurityGroups',
verb='POST',
path='/', params=params)
def describe_cluster_snapshots(self, cluster_identifier=None,
snapshot_identifier=None,
snapshot_type=None, start_time=None,
end_time=None, max_records=None,
marker=None, owner_account=None):
"""
Returns one or more snapshot objects, which contain metadata
about your cluster snapshots. By default, this operation
returns information about all snapshots of all clusters that
are owned by your AWS customer account. No information is
returned for snapshots owned by inactive AWS customer
accounts.
:type cluster_identifier: string
:param cluster_identifier: The identifier of the cluster for which
information about snapshots is requested.
:type snapshot_identifier: string
:param snapshot_identifier: The snapshot identifier of the snapshot
about which to return information.
:type snapshot_type: string
:param snapshot_type: The type of snapshots for which you are
requesting information. By default, snapshots of all types are
returned.
Valid Values: `automated` | `manual`
:type start_time: timestamp
:param start_time: A value that requests only snapshots created at or
after the specified time. The time value is specified in ISO 8601
format. For more information about ISO 8601, go to the `ISO8601
Wikipedia page.`_
Example: `2012-07-16T18:00:00Z`
:type end_time: timestamp
:param end_time: A time value that requests only snapshots created at
or before the specified time. The time value is specified in ISO
8601 format. For more information about ISO 8601, go to the
`ISO8601 Wikipedia page.`_
Example: `2012-07-16T18:00:00Z`
:type max_records: integer
:param max_records: The maximum number of response records to return in
each call. If the number of remaining response records exceeds the
specified `MaxRecords` value, a value is returned in a `marker`
field of the response. You can retrieve the next set of records by
retrying the command with the returned marker value.
Default: `100`
Constraints: minimum 20, maximum 100.
:type marker: string
:param marker: An optional parameter that specifies the starting point
to return a set of response records. When the results of a
DescribeClusterSnapshots request exceed the value specified in
`MaxRecords`, AWS returns a value in the `Marker` field of the
response. You can retrieve the next set of response records by
providing the returned marker value in the `Marker` parameter and
retrying the request.
:type owner_account: string
:param owner_account: The AWS customer account used to create or copy
the snapshot. Use this field to filter the results to snapshots
owned by a particular account. To describe snapshots you own,
either specify your AWS customer account, or do not specify the
parameter.
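
A minimal usage sketch (illustrative; the identifier and dates are
hypothetical)::

    # Manual snapshots of one cluster taken during July 2012.
    response = conn.describe_cluster_snapshots(
        cluster_identifier='myexamplecluster',
        snapshot_type='manual',
        start_time='2012-07-01T00:00:00Z',
        end_time='2012-07-31T23:59:59Z')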
"""
params = {}
if cluster_identifier is not None:
params['ClusterIdentifier'] = cluster_identifier
if snapshot_identifier is not None:
params['SnapshotIdentifier'] = snapshot_identifier
if snapshot_type is not None:
params['SnapshotType'] = snapshot_type
if start_time is not None:
params['StartTime'] = start_time
if end_time is not None:
params['EndTime'] = end_time
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
if owner_account is not None:
params['OwnerAccount'] = owner_account
return self._make_request(
action='DescribeClusterSnapshots',
verb='POST',
path='/', params=params)
def describe_cluster_subnet_groups(self, cluster_subnet_group_name=None,
max_records=None, marker=None):
"""
Returns one or more cluster subnet group objects, which
contain metadata about your cluster subnet groups. By default,
this operation returns information about all cluster subnet
groups that are defined in your AWS account.
:type cluster_subnet_group_name: string
:param cluster_subnet_group_name: The name of the cluster subnet group
for which information is requested.
:type max_records: integer
:param max_records: The maximum number of response records to return in
each call. If the number of remaining response records exceeds the
specified `MaxRecords` value, a value is returned in a `marker`
field of the response. You can retrieve the next set of records by
retrying the command with the returned marker value.
Default: `100`
Constraints: minimum 20, maximum 100.
:type marker: string
:param marker: An optional parameter that specifies the starting point
to return a set of response records. When the results of a
DescribeClusterSubnetGroups request exceed the value specified in
`MaxRecords`, AWS returns a value in the `Marker` field of the
response. You can retrieve the next set of response records by
providing the returned marker value in the `Marker` parameter and
retrying the request.
"""
params = {}
if cluster_subnet_group_name is not None:
params['ClusterSubnetGroupName'] = cluster_subnet_group_name
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeClusterSubnetGroups',
verb='POST',
path='/', params=params)
def describe_cluster_versions(self, cluster_version=None,
cluster_parameter_group_family=None,
max_records=None, marker=None):
"""
Returns descriptions of the available Amazon Redshift cluster
versions. You can call this operation even before creating any
clusters to learn more about the Amazon Redshift versions. For
more information about managing clusters, go to `Amazon
Redshift Clusters`_ in the Amazon Redshift Management Guide .
:type cluster_version: string
:param cluster_version: The specific cluster version to return.
Example: `1.0`
:type cluster_parameter_group_family: string
:param cluster_parameter_group_family:
The name of a specific cluster parameter group family to return details
for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type max_records: integer
:param max_records: The maximum number of response records to return in
each call. If the number of remaining response records exceeds the
specified `MaxRecords` value, a value is returned in a `marker`
field of the response. You can retrieve the next set of records by
retrying the command with the returned marker value.
Default: `100`
Constraints: minimum 20, maximum 100.
:type marker: string
:param marker: An optional parameter that specifies the starting point
to return a set of response records. When the results of a
DescribeClusterVersions request exceed the value specified in
`MaxRecords`, AWS returns a value in the `Marker` field of the
response. You can retrieve the next set of response records by
providing the returned marker value in the `Marker` parameter and
retrying the request.
"""
params = {}
if cluster_version is not None:
params['ClusterVersion'] = cluster_version
if cluster_parameter_group_family is not None:
params['ClusterParameterGroupFamily'] = cluster_parameter_group_family
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeClusterVersions',
verb='POST',
path='/', params=params)
def describe_clusters(self, cluster_identifier=None, max_records=None,
marker=None):
"""
Returns properties of provisioned clusters including general
cluster properties, cluster database properties, maintenance
and backup properties, and security and access properties.
This operation supports pagination. For more information about
managing clusters, go to `Amazon Redshift Clusters`_ in the
Amazon Redshift Management Guide .
:type cluster_identifier: string
:param cluster_identifier: The unique identifier of a cluster whose
properties you are requesting. This parameter is case sensitive.
The default is that all clusters defined for an account are returned.
:type max_records: integer
:param max_records: The maximum number of response records to return in
each call. If the number of remaining response records exceeds the
specified `MaxRecords` value, a value is returned in a `marker`
field of the response. You can retrieve the next set of records by
retrying the command with the returned marker value.
Default: `100`
Constraints: minimum 20, maximum 100.
:type marker: string
:param marker: An optional parameter that specifies the starting point
to return a set of response records. When the results of a
DescribeClusters request exceed the value specified in
`MaxRecords`, AWS returns a value in the `Marker` field of the
response. You can retrieve the next set of response records by
providing the returned marker value in the `Marker` parameter and
retrying the request.
Constraints: You can specify either the **ClusterIdentifier** parameter
or the **Marker** parameter, but not both.
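
A minimal pagination sketch (illustrative; the traversal assumes
the standard boto JSON response shape for this action)::

    marker = None
    while True:
        response = conn.describe_clusters(max_records=20,
                                          marker=marker)
        result = response['DescribeClustersResponse'][
            'DescribeClustersResult']
        for cluster in result['Clusters']:
            print(cluster['ClusterIdentifier'])
        marker = result.get('Marker')
        if not marker:
            break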
"""
params = {}
if cluster_identifier is not None:
params['ClusterIdentifier'] = cluster_identifier
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeClusters',
verb='POST',
path='/', params=params)
def describe_default_cluster_parameters(self, parameter_group_family,
max_records=None, marker=None):
"""
Returns a list of parameter settings for the specified
parameter group family.
For more information about managing parameter groups, go to
`Amazon Redshift Parameter Groups`_ in the Amazon Redshift
Management Guide .
:type parameter_group_family: string
:param parameter_group_family: The name of the cluster parameter group
family.
:type max_records: integer
:param max_records: The maximum number of response records to return in
each call. If the number of remaining response records exceeds the
specified `MaxRecords` value, a value is returned in a `marker`
field of the response. You can retrieve the next set of records by
retrying the command with the returned marker value.
Default: `100`
Constraints: minimum 20, maximum 100.
:type marker: string
:param marker: An optional parameter that specifies the starting point
to return a set of response records. When the results of a
DescribeDefaultClusterParameters request exceed the value specified
in `MaxRecords`, AWS returns a value in the `Marker` field of the
response. You can retrieve the next set of response records by
providing the returned marker value in the `Marker` parameter and
retrying the request.
"""
params = {'ParameterGroupFamily': parameter_group_family, }
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDefaultClusterParameters',
verb='POST',
path='/', params=params)
def describe_event_categories(self, source_type=None):
"""
Displays a list of event categories for all event source
types, or for a specified source type. For a list of the event
categories and source types, go to `Amazon Redshift Event
Notifications`_.
:type source_type: string
:param source_type: The source type, such as cluster or parameter
group, to which the described event categories apply.
Valid values: cluster, snapshot, parameter group, and security group.
"""
params = {}
if source_type is not None:
params['SourceType'] = source_type
return self._make_request(
action='DescribeEventCategories',
verb='POST',
path='/', params=params)
def describe_event_subscriptions(self, subscription_name=None,
max_records=None, marker=None):
"""
Lists descriptions of all the Amazon Redshift event
notification subscriptions for a customer account. If you
specify a subscription name, lists the description for that
subscription.
:type subscription_name: string
:param subscription_name: The name of the Amazon Redshift event
notification subscription to be described.
:type max_records: integer
:param max_records: The maximum number of response records to return in
each call. If the number of remaining response records exceeds the
specified `MaxRecords` value, a value is returned in a `marker`
field of the response. You can retrieve the next set of records by
retrying the command with the returned marker value.
Default: `100`
Constraints: minimum 20, maximum 100.
:type marker: string
:param marker: An optional parameter that specifies the starting point
to return a set of response records. When the results of a
DescribeEventSubscriptions request exceed the value specified in
`MaxRecords`, AWS returns a value in the `Marker` field of the
response. You can retrieve the next set of response records by
providing the returned marker value in the `Marker` parameter and
retrying the request.
"""
params = {}
if subscription_name is not None:
params['SubscriptionName'] = subscription_name
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEventSubscriptions',
verb='POST',
path='/', params=params)
def describe_events(self, source_identifier=None, source_type=None,
start_time=None, end_time=None, duration=None,
max_records=None, marker=None):
"""
Returns events related to clusters, security groups,
snapshots, and parameter groups for the past 14 days. Events
specific to a particular cluster, security group, snapshot or
parameter group can be obtained by providing the name as a
parameter. By default, the past hour of events are returned.
:type source_identifier: string
:param source_identifier:
The identifier of the event source for which events will be returned.
If this parameter is not specified, then all sources are included
in the response.
Constraints:
If SourceIdentifier is supplied, SourceType must also be provided.
+ Specify a cluster identifier when SourceType is `cluster`.
+ Specify a cluster security group name when SourceType is `cluster-
security-group`.
+ Specify a cluster parameter group name when SourceType is `cluster-
parameter-group`.
+ Specify a cluster snapshot identifier when SourceType is `cluster-
snapshot`.
:type source_type: string
:param source_type:
The event source to retrieve events for. If no value is specified, all
events are returned.
Constraints:
If SourceType is supplied, SourceIdentifier must also be provided.
+ Specify `cluster` when SourceIdentifier is a cluster identifier.
+ Specify `cluster-security-group` when SourceIdentifier is a cluster
security group name.
+ Specify `cluster-parameter-group` when SourceIdentifier is a cluster
parameter group name.
+ Specify `cluster-snapshot` when SourceIdentifier is a cluster
snapshot identifier.
:type start_time: timestamp
:param start_time: The beginning of the time interval to retrieve
events for, specified in ISO 8601 format. For more information
about ISO 8601, go to the `ISO8601 Wikipedia page.`_
Example: `2009-07-08T18:00Z`
:type end_time: timestamp
:param end_time: The end of the time interval for which to retrieve
events, specified in ISO 8601 format. For more information about
ISO 8601, go to the `ISO8601 Wikipedia page.`_
Example: `2009-07-08T18:00Z`
:type duration: integer
:param duration: The number of minutes prior to the time of the request
for which to retrieve events. For example, if the request is sent
at 18:00 and you specify a duration of 60, then only events which
have occurred after 17:00 will be returned.
Default: `60`
:type max_records: integer
:param max_records: The maximum number of response records to return in
each call. If the number of remaining response records exceeds the
specified `MaxRecords` value, a value is returned in a `marker`
field of the response. You can retrieve the next set of records by
retrying the command with the returned marker value.
Default: `100`
Constraints: minimum 20, maximum 100.
:type marker: string
:param marker: An optional parameter that specifies the starting point
to return a set of response records. When the results of a
DescribeEvents request exceed the value specified in `MaxRecords`,
AWS returns a value in the `Marker` field of the response. You can
retrieve the next set of response records by providing the returned
marker value in the `Marker` parameter and retrying the request.
"""
params = {}
if source_identifier is not None:
params['SourceIdentifier'] = source_identifier
if source_type is not None:
params['SourceType'] = source_type
if start_time is not None:
params['StartTime'] = start_time
if end_time is not None:
params['EndTime'] = end_time
if duration is not None:
params['Duration'] = duration
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEvents',
verb='POST',
path='/', params=params)
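    # Pagination sketch (not part of the original module): `conn` stands for
    # an instance of this connection class, and the response-dict layout
    # below assumes boto's usual <Action>Response -> <Action>Result JSON
    # wrapping; both are assumptions, not guarantees.
    #
    #   marker = None
    #   while True:
    #       page = conn.describe_events(max_records=100, marker=marker)
    #       result = page['DescribeEventsResponse']['DescribeEventsResult']
    #       for event in result['Events']:
    #           print(event)
    #       marker = result.get('Marker')
    #       if not marker:
    #           break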
def describe_hsm_client_certificates(self,
hsm_client_certificate_identifier=None,
max_records=None, marker=None):
"""
Returns information about the specified HSM client
certificate. If no certificate ID is specified, returns
information about all the HSM certificates owned by your AWS
customer account.
:type hsm_client_certificate_identifier: string
:param hsm_client_certificate_identifier: The identifier of a specific
HSM client certificate for which you want information. If no
identifier is specified, information is returned for all HSM client
certificates owned by your AWS customer account.
:type max_records: integer
:param max_records: The maximum number of response records to return in
each call. If the number of remaining response records exceeds the
specified `MaxRecords` value, a value is returned in a `marker`
field of the response. You can retrieve the next set of records by
retrying the command with the returned marker value.
Default: `100`
Constraints: minimum 20, maximum 100.
:type marker: string
:param marker: An optional parameter that specifies the starting point
to return a set of response records. When the results of a
DescribeHsmClientCertificates request exceed the value specified in
`MaxRecords`, AWS returns a value in the `Marker` field of the
response. You can retrieve the next set of response records by
providing the returned marker value in the `Marker` parameter and
retrying the request.
"""
params = {}
if hsm_client_certificate_identifier is not None:
params['HsmClientCertificateIdentifier'] = hsm_client_certificate_identifier
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeHsmClientCertificates',
verb='POST',
path='/', params=params)
def describe_hsm_configurations(self, hsm_configuration_identifier=None,
max_records=None, marker=None):
"""
Returns information about the specified Amazon Redshift HSM
configuration. If no configuration ID is specified, returns
information about all the HSM configurations owned by your AWS
customer account.
:type hsm_configuration_identifier: string
:param hsm_configuration_identifier: The identifier of a specific
Amazon Redshift HSM configuration to be described. If no identifier
is specified, information is returned for all HSM configurations
owned by your AWS customer account.
:type max_records: integer
:param max_records: The maximum number of response records to return in
each call. If the number of remaining response records exceeds the
specified `MaxRecords` value, a value is returned in a `marker`
field of the response. You can retrieve the next set of records by
retrying the command with the returned marker value.
Default: `100`
Constraints: minimum 20, maximum 100.
:type marker: string
:param marker: An optional parameter that specifies the starting point
to return a set of response records. When the results of a
DescribeHsmConfigurations request exceed the value specified in
`MaxRecords`, AWS returns a value in the `Marker` field of the
response. You can retrieve the next set of response records by
providing the returned marker value in the `Marker` parameter and
retrying the request.
"""
params = {}
if hsm_configuration_identifier is not None:
params['HsmConfigurationIdentifier'] = hsm_configuration_identifier
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeHsmConfigurations',
verb='POST',
path='/', params=params)
def describe_logging_status(self, cluster_identifier):
"""
Describes whether information, such as queries and connection
attempts, is being logged for the specified Amazon Redshift
cluster.
:type cluster_identifier: string
:param cluster_identifier: The identifier of the cluster to get the
logging status from.
Example: `examplecluster`
"""
params = {'ClusterIdentifier': cluster_identifier, }
return self._make_request(
action='DescribeLoggingStatus',
verb='POST',
path='/', params=params)
def describe_orderable_cluster_options(self, cluster_version=None,
node_type=None, max_records=None,
marker=None):
"""
Returns a list of orderable cluster options. Before you create
a new cluster you can use this operation to find what options
are available, such as the EC2 Availability Zones (AZ) in the
specific AWS region that you can specify, and the node types
you can request. The node types differ by available storage,
memory, CPU and price. With the cost involved you might want
to obtain a list of cluster options in the specific region and
specify values when creating a cluster. For more information
about managing clusters, go to `Amazon Redshift Clusters`_ in
        the Amazon Redshift Management Guide.
:type cluster_version: string
:param cluster_version: The version filter value. Specify this
parameter to show only the available offerings matching the
specified version.
Default: All versions.
        Constraints: Must be one of the versions returned from
DescribeClusterVersions.
:type node_type: string
:param node_type: The node type filter value. Specify this parameter to
show only the available offerings matching the specified node type.
:type max_records: integer
:param max_records: The maximum number of response records to return in
each call. If the number of remaining response records exceeds the
specified `MaxRecords` value, a value is returned in a `marker`
field of the response. You can retrieve the next set of records by
retrying the command with the returned marker value.
Default: `100`
Constraints: minimum 20, maximum 100.
:type marker: string
:param marker: An optional parameter that specifies the starting point
to return a set of response records. When the results of a
DescribeOrderableClusterOptions request exceed the value specified
in `MaxRecords`, AWS returns a value in the `Marker` field of the
response. You can retrieve the next set of response records by
providing the returned marker value in the `Marker` parameter and
retrying the request.
"""
params = {}
if cluster_version is not None:
params['ClusterVersion'] = cluster_version
if node_type is not None:
params['NodeType'] = node_type
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeOrderableClusterOptions',
verb='POST',
path='/', params=params)
def describe_reserved_node_offerings(self,
reserved_node_offering_id=None,
max_records=None, marker=None):
"""
Returns a list of the available reserved node offerings by
Amazon Redshift with their descriptions including the node
        type, the fixed and recurring costs of reserving the node, and
        the duration the node will be reserved for you. These descriptions
        help you determine which reserved node offering you want to
        purchase. You then use the unique offering ID in your call to
        PurchaseReservedNodeOffering to reserve one or more nodes for
        your Amazon Redshift cluster.
For more information about managing parameter groups, go to
`Purchasing Reserved Nodes`_ in the Amazon Redshift Management
        Guide.
:type reserved_node_offering_id: string
:param reserved_node_offering_id: The unique identifier for the
offering.
:type max_records: integer
:param max_records: The maximum number of response records to return in
each call. If the number of remaining response records exceeds the
specified `MaxRecords` value, a value is returned in a `marker`
field of the response. You can retrieve the next set of records by
retrying the command with the returned marker value.
Default: `100`
Constraints: minimum 20, maximum 100.
:type marker: string
:param marker: An optional parameter that specifies the starting point
to return a set of response records. When the results of a
DescribeReservedNodeOfferings request exceed the value specified in
`MaxRecords`, AWS returns a value in the `Marker` field of the
response. You can retrieve the next set of response records by
providing the returned marker value in the `Marker` parameter and
retrying the request.
"""
params = {}
if reserved_node_offering_id is not None:
params['ReservedNodeOfferingId'] = reserved_node_offering_id
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedNodeOfferings',
verb='POST',
path='/', params=params)
def describe_reserved_nodes(self, reserved_node_id=None,
max_records=None, marker=None):
"""
Returns the descriptions of the reserved nodes.
:type reserved_node_id: string
:param reserved_node_id: Identifier for the node reservation.
:type max_records: integer
:param max_records: The maximum number of response records to return in
each call. If the number of remaining response records exceeds the
specified `MaxRecords` value, a value is returned in a `marker`
field of the response. You can retrieve the next set of records by
retrying the command with the returned marker value.
Default: `100`
Constraints: minimum 20, maximum 100.
:type marker: string
:param marker: An optional parameter that specifies the starting point
to return a set of response records. When the results of a
DescribeReservedNodes request exceed the value specified in
`MaxRecords`, AWS returns a value in the `Marker` field of the
response. You can retrieve the next set of response records by
providing the returned marker value in the `Marker` parameter and
retrying the request.
"""
params = {}
if reserved_node_id is not None:
params['ReservedNodeId'] = reserved_node_id
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedNodes',
verb='POST',
path='/', params=params)
def describe_resize(self, cluster_identifier):
"""
Returns information about the last resize operation for the
specified cluster. If no resize operation has ever been
        initiated for the specified cluster, an `HTTP 404` error is
returned. If a resize operation was initiated and completed,
the status of the resize remains as `SUCCEEDED` until the next
resize.
A resize operation can be requested using ModifyCluster and
specifying a different number or type of nodes for the
cluster.
:type cluster_identifier: string
:param cluster_identifier: The unique identifier of a cluster whose
resize progress you are requesting. This parameter isn't case-
sensitive.
By default, resize operations for all clusters defined for an AWS
account are returned.
"""
params = {'ClusterIdentifier': cluster_identifier, }
return self._make_request(
action='DescribeResize',
verb='POST',
path='/', params=params)
def disable_logging(self, cluster_identifier):
"""
Stops logging information, such as queries and connection
attempts, for the specified Amazon Redshift cluster.
:type cluster_identifier: string
:param cluster_identifier: The identifier of the cluster on which
logging is to be stopped.
Example: `examplecluster`
"""
params = {'ClusterIdentifier': cluster_identifier, }
return self._make_request(
action='DisableLogging',
verb='POST',
path='/', params=params)
def disable_snapshot_copy(self, cluster_identifier):
"""
Disables the automatic copying of snapshots from one region to
another region for a specified cluster.
:type cluster_identifier: string
:param cluster_identifier: The unique identifier of the source cluster
that you want to disable copying of snapshots to a destination
region.
Constraints: Must be the valid name of an existing cluster that has
cross-region snapshot copy enabled.
"""
params = {'ClusterIdentifier': cluster_identifier, }
return self._make_request(
action='DisableSnapshotCopy',
verb='POST',
path='/', params=params)
def enable_logging(self, cluster_identifier, bucket_name,
s3_key_prefix=None):
"""
Starts logging information, such as queries and connection
attempts, for the specified Amazon Redshift cluster.
:type cluster_identifier: string
:param cluster_identifier: The identifier of the cluster on which
logging is to be started.
Example: `examplecluster`
:type bucket_name: string
:param bucket_name:
The name of an existing S3 bucket where the log files are to be stored.
Constraints:
+ Must be in the same region as the cluster
+ The cluster must have read bucket and put object permissions
:type s3_key_prefix: string
:param s3_key_prefix:
The prefix applied to the log file names.
Constraints:
+ Cannot exceed 512 characters
        + Cannot contain spaces ( ), double quotes ("), single quotes ('), a
backslash (\), or control characters. The hexadecimal codes for
invalid characters are:
+ x00 to x20
+ x22
+ x27
+ x5c
+ x7f or larger
"""
params = {
'ClusterIdentifier': cluster_identifier,
'BucketName': bucket_name,
}
if s3_key_prefix is not None:
params['S3KeyPrefix'] = s3_key_prefix
return self._make_request(
action='EnableLogging',
verb='POST',
path='/', params=params)
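    # Usage sketch (assumptions: `conn` is an instance of this class, and the
    # bucket already grants Redshift the read-bucket/put-object permissions
    # noted above):
    #
    #   conn.enable_logging('examplecluster', 'my-log-bucket',
    #                       s3_key_prefix='redshift/')
    #   status = conn.describe_logging_status('examplecluster')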
def enable_snapshot_copy(self, cluster_identifier, destination_region,
retention_period=None):
"""
Enables the automatic copy of snapshots from one region to
another region for a specified cluster.
:type cluster_identifier: string
:param cluster_identifier: The unique identifier of the source cluster
to copy snapshots from.
Constraints: Must be the valid name of an existing cluster that does
not already have cross-region snapshot copy enabled.
:type destination_region: string
:param destination_region: The destination region that you want to copy
snapshots to.
Constraints: Must be the name of a valid region. For more information,
see `Regions and Endpoints`_ in the Amazon Web Services General
Reference.
:type retention_period: integer
:param retention_period: The number of days to retain automated
snapshots in the destination region after they are copied from the
source region.
Default: 7.
Constraints: Must be at least 1 and no more than 35.
"""
params = {
'ClusterIdentifier': cluster_identifier,
'DestinationRegion': destination_region,
}
if retention_period is not None:
params['RetentionPeriod'] = retention_period
return self._make_request(
action='EnableSnapshotCopy',
verb='POST',
path='/', params=params)
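    # Cross-region copy sketch (`conn` is assumed to be an instance of this
    # class; the destination region is illustrative):
    #
    #   conn.enable_snapshot_copy('examplecluster', 'us-west-2',
    #                             retention_period=14)
    #   # Later: shorten the retention window, or stop copying altogether.
    #   conn.modify_snapshot_copy_retention_period('examplecluster', 7)
    #   conn.disable_snapshot_copy('examplecluster')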
def modify_cluster(self, cluster_identifier, cluster_type=None,
node_type=None, number_of_nodes=None,
cluster_security_groups=None,
vpc_security_group_ids=None,
master_user_password=None,
cluster_parameter_group_name=None,
automated_snapshot_retention_period=None,
preferred_maintenance_window=None,
cluster_version=None, allow_version_upgrade=None,
hsm_client_certificate_identifier=None,
hsm_configuration_identifier=None,
new_cluster_identifier=None):
"""
Modifies the settings for a cluster. For example, you can add
another security or parameter group, update the preferred
maintenance window, or change the master user password.
Resetting a cluster password or modifying the security groups
associated with a cluster do not need a reboot. However,
modifying a parameter group requires a reboot for parameters
to take effect. For more information about managing clusters,
go to `Amazon Redshift Clusters`_ in the Amazon Redshift
        Management Guide.
You can also change node type and the number of nodes to scale
up or down the cluster. When resizing a cluster, you must
specify both the number of nodes and the node type even if one
of the parameters does not change. If you specify the same
number of nodes and node type that are already configured for
the cluster, an error is returned.
:type cluster_identifier: string
:param cluster_identifier: The unique identifier of the cluster to be
modified.
Example: `examplecluster`
:type cluster_type: string
:param cluster_type: The new cluster type.
When you submit your cluster resize request, your existing cluster goes
into a read-only mode. After Amazon Redshift provisions a new
            cluster based on your resize requirements, there will be an
            outage for
a period while the old cluster is deleted and your connection is
switched to the new cluster. You can use DescribeResize to track
the progress of the resize request.
        Valid Values: `multi-node` | `single-node`
:type node_type: string
:param node_type: The new node type of the cluster. If you specify a
            new node type, you must also specify the number of nodes
            parameter.
When you submit your request to resize a cluster, Amazon Redshift sets
access permissions for the cluster to read-only. After Amazon
Redshift provisions a new cluster according to your resize
requirements, there will be a temporary outage while the old
cluster is deleted and your connection is switched to the new
cluster. When the new connection is complete, the original access
permissions for the cluster are restored. You can use the
DescribeResize to track the progress of the resize request.
        Valid Values: `dw1.xlarge` | `dw1.8xlarge` | `dw2.large` |
            `dw2.8xlarge`.
:type number_of_nodes: integer
:param number_of_nodes: The new number of nodes of the cluster. If you
            specify a new number of nodes, you must also specify the node
            type parameter.
When you submit your request to resize a cluster, Amazon Redshift sets
access permissions for the cluster to read-only. After Amazon
Redshift provisions a new cluster according to your resize
requirements, there will be a temporary outage while the old
cluster is deleted and your connection is switched to the new
cluster. When the new connection is complete, the original access
permissions for the cluster are restored. You can use
DescribeResize to track the progress of the resize request.
Valid Values: Integer greater than `0`.
:type cluster_security_groups: list
:param cluster_security_groups:
A list of cluster security groups to be authorized on this cluster.
This change is asynchronously applied as soon as possible.
Security groups currently associated with the cluster, and not in the
list of groups to apply, will be revoked from the cluster.
Constraints:
+ Must be 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type vpc_security_group_ids: list
:param vpc_security_group_ids: A list of virtual private cloud (VPC)
security groups to be associated with the cluster.
:type master_user_password: string
:param master_user_password:
The new password for the cluster master user. This change is
asynchronously applied as soon as possible. Between the time of the
request and the completion of the request, the `MasterUserPassword`
element exists in the `PendingModifiedValues` element of the
operation response.
Default: Uses existing setting.
Constraints:
+ Must be between 8 and 64 characters in length.
+ Must contain at least one uppercase letter.
+ Must contain at least one lowercase letter.
+ Must contain one number.
+ Can be any printable ASCII character (ASCII code 33 to 126) except '
(single quote), " (double quote), \, /, @, or space.
:type cluster_parameter_group_name: string
:param cluster_parameter_group_name: The name of the cluster parameter
group to apply to this cluster. This change is applied only after
the cluster is rebooted. To reboot a cluster use RebootCluster.
Default: Uses existing setting.
Constraints: The cluster parameter group must be in the same parameter
group family that matches the cluster version.
:type automated_snapshot_retention_period: integer
:param automated_snapshot_retention_period: The number of days that
automated snapshots are retained. If the value is 0, automated
snapshots are disabled. Even if automated snapshots are disabled,
you can still create manual snapshots when you want with
CreateClusterSnapshot.
If you decrease the automated snapshot retention period from its
current value, existing automated snapshots that fall outside of
the new retention period will be immediately deleted.
Default: Uses existing setting.
Constraints: Must be a value from 0 to 35.
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which system maintenance can occur, if necessary. If system
maintenance is necessary during the window, it may result in an
outage.
This maintenance window change is made immediately. If the new
maintenance window indicates the current time, there must be at
least 120 minutes between the current time and end of the window in
order to ensure that pending changes are applied.
Default: Uses existing setting.
Format: ddd:hh24:mi-ddd:hh24:mi, for example `wed:07:30-wed:08:00`.
Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
Constraints: Must be at least 30 minutes.
:type cluster_version: string
:param cluster_version: The new version number of the Amazon Redshift
engine to upgrade to.
For major version upgrades, if a non-default cluster parameter group is
currently in use, a new cluster parameter group in the cluster
parameter group family for the new version must be specified. The
new cluster parameter group can be the default for that cluster
parameter group family. For more information about managing
parameter groups, go to `Amazon Redshift Parameter Groups`_ in the
            Amazon Redshift Management Guide.
Example: `1.0`
:type allow_version_upgrade: boolean
:param allow_version_upgrade: If `True`, upgrades will be applied
automatically to the cluster during the maintenance window.
Default: `False`
:type hsm_client_certificate_identifier: string
:param hsm_client_certificate_identifier: Specifies the name of the HSM
client certificate the Amazon Redshift cluster uses to retrieve the
data encryption keys stored in an HSM.
:type hsm_configuration_identifier: string
:param hsm_configuration_identifier: Specifies the name of the HSM
configuration that contains the information the Amazon Redshift
cluster can use to retrieve and store keys in an HSM.
:type new_cluster_identifier: string
:param new_cluster_identifier: The new identifier for the cluster.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens.
+ Alphabetic characters must be lowercase.
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
+ Must be unique for all clusters within an AWS account.
Example: `examplecluster`
"""
params = {'ClusterIdentifier': cluster_identifier, }
if cluster_type is not None:
params['ClusterType'] = cluster_type
if node_type is not None:
params['NodeType'] = node_type
if number_of_nodes is not None:
params['NumberOfNodes'] = number_of_nodes
if cluster_security_groups is not None:
self.build_list_params(params,
cluster_security_groups,
'ClusterSecurityGroups.member')
if vpc_security_group_ids is not None:
self.build_list_params(params,
vpc_security_group_ids,
'VpcSecurityGroupIds.member')
if master_user_password is not None:
params['MasterUserPassword'] = master_user_password
if cluster_parameter_group_name is not None:
params['ClusterParameterGroupName'] = cluster_parameter_group_name
if automated_snapshot_retention_period is not None:
params['AutomatedSnapshotRetentionPeriod'] = automated_snapshot_retention_period
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if cluster_version is not None:
params['ClusterVersion'] = cluster_version
if allow_version_upgrade is not None:
params['AllowVersionUpgrade'] = str(
allow_version_upgrade).lower()
if hsm_client_certificate_identifier is not None:
params['HsmClientCertificateIdentifier'] = hsm_client_certificate_identifier
if hsm_configuration_identifier is not None:
params['HsmConfigurationIdentifier'] = hsm_configuration_identifier
if new_cluster_identifier is not None:
params['NewClusterIdentifier'] = new_cluster_identifier
return self._make_request(
action='ModifyCluster',
verb='POST',
path='/', params=params)
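    # Resize sketch (`conn` is assumed to be an instance of this class; the
    # response-dict layout is an assumption following boto's usual JSON
    # wrapping). Node type and node count must both be supplied even when
    # only one of them changes:
    #
    #   conn.modify_cluster('examplecluster', cluster_type='multi-node',
    #                       node_type='dw2.large', number_of_nodes=4)
    #   resize = conn.describe_resize('examplecluster')
    #   status = resize['DescribeResizeResponse']['DescribeResizeResult']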
def modify_cluster_parameter_group(self, parameter_group_name,
parameters):
"""
Modifies the parameters of a parameter group.
For more information about managing parameter groups, go to
`Amazon Redshift Parameter Groups`_ in the Amazon Redshift
        Management Guide.
:type parameter_group_name: string
:param parameter_group_name: The name of the parameter group to be
modified.
:type parameters: list
:param parameters: An array of parameters to be modified. A maximum of
20 parameters can be modified in a single request.
For each parameter to be modified, you must supply at least the
parameter name and parameter value; other name-value pairs of the
parameter are optional.
For the workload management (WLM) configuration, you must supply all
the name-value pairs in the wlm_json_configuration parameter.
"""
params = {'ParameterGroupName': parameter_group_name, }
self.build_complex_list_params(
params, parameters,
'Parameters.member',
('ParameterName', 'ParameterValue', 'Description', 'Source', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion'))
return self._make_request(
action='ModifyClusterParameterGroup',
verb='POST',
path='/', params=params)
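    # Parameter sketch: build_complex_list_params zips each tuple against the
    # member names above, so a (name, value) pair per parameter is enough;
    # the parameter name used here is illustrative:
    #
    #   conn.modify_cluster_parameter_group(
    #       'my-parameter-group',
    #       [('statement_timeout', '20000')])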
def modify_cluster_subnet_group(self, cluster_subnet_group_name,
subnet_ids, description=None):
"""
Modifies a cluster subnet group to include the specified list
of VPC subnets. The operation replaces the existing list of
subnets with the new list of subnets.
:type cluster_subnet_group_name: string
:param cluster_subnet_group_name: The name of the subnet group to be
modified.
:type description: string
:param description: A text description of the subnet group to be
modified.
:type subnet_ids: list
:param subnet_ids: An array of VPC subnet IDs. A maximum of 20 subnets
can be modified in a single request.
"""
params = {
'ClusterSubnetGroupName': cluster_subnet_group_name,
}
self.build_list_params(params,
subnet_ids,
'SubnetIds.member')
if description is not None:
params['Description'] = description
return self._make_request(
action='ModifyClusterSubnetGroup',
verb='POST',
path='/', params=params)
def modify_event_subscription(self, subscription_name,
sns_topic_arn=None, source_type=None,
source_ids=None, event_categories=None,
severity=None, enabled=None):
"""
Modifies an existing Amazon Redshift event notification
subscription.
:type subscription_name: string
:param subscription_name: The name of the modified Amazon Redshift
event notification subscription.
:type sns_topic_arn: string
:param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic
to be used by the event notification subscription.
:type source_type: string
:param source_type: The type of source that will be generating the
events. For example, if you want to be notified of events generated
by a cluster, you would set this parameter to cluster. If this
value is not specified, events are returned for all Amazon Redshift
objects in your AWS account. You must specify a source type in
order to specify source IDs.
Valid values: cluster, cluster-parameter-group, cluster-security-group,
and cluster-snapshot.
:type source_ids: list
:param source_ids: A list of one or more identifiers of Amazon Redshift
source objects. All of the objects must be of the same type as was
specified in the source type parameter. The event subscription will
return only events generated by the specified objects. If not
specified, then events are returned for all objects within the
source type specified.
Example: my-cluster-1, my-cluster-2
Example: my-snapshot-20131010
:type event_categories: list
:param event_categories: Specifies the Amazon Redshift event categories
to be published by the event notification subscription.
Values: Configuration, Management, Monitoring, Security
:type severity: string
:param severity: Specifies the Amazon Redshift event severity to be
published by the event notification subscription.
Values: ERROR, INFO
:type enabled: boolean
:param enabled: A Boolean value indicating if the subscription is
            enabled. `True` indicates that the subscription is enabled.
"""
params = {'SubscriptionName': subscription_name, }
if sns_topic_arn is not None:
params['SnsTopicArn'] = sns_topic_arn
if source_type is not None:
params['SourceType'] = source_type
if source_ids is not None:
self.build_list_params(params,
source_ids,
'SourceIds.member')
if event_categories is not None:
self.build_list_params(params,
event_categories,
'EventCategories.member')
if severity is not None:
params['Severity'] = severity
if enabled is not None:
params['Enabled'] = str(
enabled).lower()
return self._make_request(
action='ModifyEventSubscription',
verb='POST',
path='/', params=params)
def modify_snapshot_copy_retention_period(self, cluster_identifier,
retention_period):
"""
Modifies the number of days to retain automated snapshots in
the destination region after they are copied from the source
region.
:type cluster_identifier: string
:param cluster_identifier: The unique identifier of the cluster for
which you want to change the retention period for automated
snapshots that are copied to a destination region.
Constraints: Must be the valid name of an existing cluster that has
cross-region snapshot copy enabled.
:type retention_period: integer
:param retention_period: The number of days to retain automated
snapshots in the destination region after they are copied from the
source region.
If you decrease the retention period for automated snapshots that are
copied to a destination region, Amazon Redshift will delete any
existing automated snapshots that were copied to the destination
region and that fall outside of the new retention period.
Constraints: Must be at least 1 and no more than 35.
"""
params = {
'ClusterIdentifier': cluster_identifier,
'RetentionPeriod': retention_period,
}
return self._make_request(
action='ModifySnapshotCopyRetentionPeriod',
verb='POST',
path='/', params=params)
def purchase_reserved_node_offering(self, reserved_node_offering_id,
node_count=None):
"""
Allows you to purchase reserved nodes. Amazon Redshift offers
a predefined set of reserved node offerings. You can purchase
one of the offerings. You can call the
DescribeReservedNodeOfferings API to obtain the available
reserved node offerings. You can call this API by providing a
specific reserved node offering and the number of nodes you
want to reserve.
For more information about managing parameter groups, go to
`Purchasing Reserved Nodes`_ in the Amazon Redshift Management
        Guide.
:type reserved_node_offering_id: string
:param reserved_node_offering_id: The unique identifier of the reserved
node offering you want to purchase.
:type node_count: integer
:param node_count: The number of reserved nodes you want to purchase.
Default: `1`
"""
params = {
'ReservedNodeOfferingId': reserved_node_offering_id,
}
if node_count is not None:
params['NodeCount'] = node_count
return self._make_request(
action='PurchaseReservedNodeOffering',
verb='POST',
path='/', params=params)
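    # Purchase sketch (`conn` is assumed to be an instance of this class; the
    # response keys follow boto's usual JSON wrapping and are assumptions):
    #
    #   resp = conn.describe_reserved_node_offerings()
    #   result = resp['DescribeReservedNodeOfferingsResponse'][
    #       'DescribeReservedNodeOfferingsResult']
    #   offering_id = result['ReservedNodeOfferings'][0][
    #       'ReservedNodeOfferingId']
    #   conn.purchase_reserved_node_offering(offering_id, node_count=2)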
def reboot_cluster(self, cluster_identifier):
"""
Reboots a cluster. This action is taken as soon as possible.
It results in a momentary outage to the cluster, during which
the cluster status is set to `rebooting`. A cluster event is
created when the reboot is completed. Any pending cluster
modifications (see ModifyCluster) are applied at this reboot.
For more information about managing clusters, go to `Amazon
        Redshift Clusters`_ in the Amazon Redshift Management Guide.
:type cluster_identifier: string
:param cluster_identifier: The cluster identifier.
"""
params = {'ClusterIdentifier': cluster_identifier, }
return self._make_request(
action='RebootCluster',
verb='POST',
path='/', params=params)
def reset_cluster_parameter_group(self, parameter_group_name,
reset_all_parameters=None,
parameters=None):
"""
Sets one or more parameters of the specified parameter group
to their default values and sets the source values of the
parameters to "engine-default". To reset the entire parameter
group specify the ResetAllParameters parameter. For parameter
changes to take effect you must reboot any associated
clusters.
:type parameter_group_name: string
:param parameter_group_name: The name of the cluster parameter group to
be reset.
:type reset_all_parameters: boolean
:param reset_all_parameters: If `True`, all parameters in the specified
parameter group will be reset to their default values.
Default: `True`
:type parameters: list
:param parameters: An array of names of parameters to be reset. If
ResetAllParameters option is not used, then at least one parameter
name must be supplied.
Constraints: A maximum of 20 parameters can be reset in a single
request.
"""
params = {'ParameterGroupName': parameter_group_name, }
if reset_all_parameters is not None:
params['ResetAllParameters'] = str(
reset_all_parameters).lower()
if parameters is not None:
self.build_complex_list_params(
params, parameters,
'Parameters.member',
('ParameterName', 'ParameterValue', 'Description', 'Source', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion'))
return self._make_request(
action='ResetClusterParameterGroup',
verb='POST',
path='/', params=params)
def restore_from_cluster_snapshot(self, cluster_identifier,
snapshot_identifier,
snapshot_cluster_identifier=None,
port=None, availability_zone=None,
allow_version_upgrade=None,
cluster_subnet_group_name=None,
publicly_accessible=None,
owner_account=None,
hsm_client_certificate_identifier=None,
hsm_configuration_identifier=None,
elastic_ip=None,
cluster_parameter_group_name=None,
cluster_security_groups=None,
vpc_security_group_ids=None,
preferred_maintenance_window=None,
automated_snapshot_retention_period=None):
"""
Creates a new cluster from a snapshot. Amazon Redshift creates
the resulting cluster with the same configuration as the
original cluster from which the snapshot was created, except
that the new cluster is created with the default cluster
security and parameter group. After Amazon Redshift creates
the cluster you can use the ModifyCluster API to associate a
different security group and different parameter group with
the restored cluster.
If you restore a cluster into a VPC, you must provide a
cluster subnet group where you want the cluster restored.
For more information about working with snapshots, go to
`Amazon Redshift Snapshots`_ in the Amazon Redshift Management
        Guide.
:type cluster_identifier: string
:param cluster_identifier: The identifier of the cluster that will be
created from restoring the snapshot.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens.
+ Alphabetic characters must be lowercase.
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
+ Must be unique for all clusters within an AWS account.
:type snapshot_identifier: string
:param snapshot_identifier: The name of the snapshot from which to
create the new cluster. This parameter isn't case sensitive.
Example: `my-snapshot-id`
:type snapshot_cluster_identifier: string
:param snapshot_cluster_identifier: The name of the cluster the source
snapshot was created from. This parameter is required if your IAM
user has a policy containing a snapshot resource element that
specifies anything other than * for the cluster name.
:type port: integer
:param port: The port number on which the cluster accepts connections.
Default: The same port as the original cluster.
Constraints: Must be between `1115` and `65535`.
:type availability_zone: string
:param availability_zone: The Amazon EC2 Availability Zone in which to
restore the cluster.
Default: A random, system-chosen Availability Zone.
Example: `us-east-1a`
:type allow_version_upgrade: boolean
:param allow_version_upgrade: If `True`, upgrades can be applied during
the maintenance window to the Amazon Redshift engine that is
running on the cluster.
Default: `True`
:type cluster_subnet_group_name: string
        :param cluster_subnet_group_name: The name of the subnet group where
            you want the cluster restored.
        A snapshot of a cluster in a VPC can be restored only in a VPC.
            Therefore, you must provide the subnet group name where you want
            the cluster restored.
:type publicly_accessible: boolean
:param publicly_accessible: If `True`, the cluster can be accessed from
a public network.
:type owner_account: string
:param owner_account: The AWS customer account used to create or copy
the snapshot. Required if you are restoring a snapshot you do not
own, optional if you own the snapshot.
:type hsm_client_certificate_identifier: string
:param hsm_client_certificate_identifier: Specifies the name of the HSM
client certificate the Amazon Redshift cluster uses to retrieve the
data encryption keys stored in an HSM.
:type hsm_configuration_identifier: string
:param hsm_configuration_identifier: Specifies the name of the HSM
configuration that contains the information the Amazon Redshift
cluster can use to retrieve and store keys in an HSM.
:type elastic_ip: string
:param elastic_ip: The elastic IP (EIP) address for the cluster.
:type cluster_parameter_group_name: string
:param cluster_parameter_group_name:
The name of the parameter group to be associated with this cluster.
Default: The default Amazon Redshift cluster parameter group. For
information about the default parameter group, go to `Working with
Amazon Redshift Parameter Groups`_.
Constraints:
+ Must be 1 to 255 alphanumeric characters or hyphens.
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
:type cluster_security_groups: list
:param cluster_security_groups: A list of security groups to be
associated with this cluster.
Default: The default cluster security group for Amazon Redshift.
Cluster security groups only apply to clusters outside of VPCs.
:type vpc_security_group_ids: list
:param vpc_security_group_ids: A list of Virtual Private Cloud (VPC)
security groups to be associated with the cluster.
Default: The default VPC security group is associated with the cluster.
VPC security groups only apply to clusters in VPCs.
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which automated cluster maintenance can occur.
Format: `ddd:hh24:mi-ddd:hh24:mi`
Default: The value selected for the cluster from which the snapshot was
taken. The following list shows the time blocks for each region
from which the default maintenance windows are assigned.
+ **US-East (Northern Virginia) Region:** 03:00-11:00 UTC
+ **US-West (Oregon) Region** 06:00-14:00 UTC
+ **EU (Ireland) Region** 22:00-06:00 UTC
+ **Asia Pacific (Singapore) Region** 14:00-22:00 UTC
+ **Asia Pacific (Sydney) Region** 12:00-20:00 UTC
+ **Asia Pacific (Tokyo) Region** 17:00-03:00 UTC
Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
Constraints: Minimum 30-minute window.
:type automated_snapshot_retention_period: integer
:param automated_snapshot_retention_period: The number of days that
automated snapshots are retained. If the value is 0, automated
snapshots are disabled. Even if automated snapshots are disabled,
you can still create manual snapshots when you want with
CreateClusterSnapshot.
Default: The value selected for the cluster from which the snapshot was
taken.
Constraints: Must be a value from 0 to 35.
"""
params = {
'ClusterIdentifier': cluster_identifier,
'SnapshotIdentifier': snapshot_identifier,
}
if snapshot_cluster_identifier is not None:
params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier
if port is not None:
params['Port'] = port
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if allow_version_upgrade is not None:
params['AllowVersionUpgrade'] = str(
allow_version_upgrade).lower()
if cluster_subnet_group_name is not None:
params['ClusterSubnetGroupName'] = cluster_subnet_group_name
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if owner_account is not None:
params['OwnerAccount'] = owner_account
if hsm_client_certificate_identifier is not None:
params['HsmClientCertificateIdentifier'] = hsm_client_certificate_identifier
if hsm_configuration_identifier is not None:
params['HsmConfigurationIdentifier'] = hsm_configuration_identifier
if elastic_ip is not None:
params['ElasticIp'] = elastic_ip
if cluster_parameter_group_name is not None:
params['ClusterParameterGroupName'] = cluster_parameter_group_name
if cluster_security_groups is not None:
self.build_list_params(params,
cluster_security_groups,
'ClusterSecurityGroups.member')
if vpc_security_group_ids is not None:
self.build_list_params(params,
vpc_security_group_ids,
'VpcSecurityGroupIds.member')
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if automated_snapshot_retention_period is not None:
params['AutomatedSnapshotRetentionPeriod'] = automated_snapshot_retention_period
return self._make_request(
action='RestoreFromClusterSnapshot',
verb='POST',
path='/', params=params)
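    # Restore sketch (`conn` is assumed to be an instance of this class;
    # identifiers are illustrative). A cluster restored into a VPC needs a
    # subnet group, as noted above:
    #
    #   conn.restore_from_cluster_snapshot(
    #       'restored-cluster', 'my-snapshot-id',
    #       cluster_subnet_group_name='my-subnet-group',
    #       publicly_accessible=False)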
def revoke_cluster_security_group_ingress(self,
cluster_security_group_name,
cidrip=None,
ec2_security_group_name=None,
ec2_security_group_owner_id=None):
"""
Revokes an ingress rule in an Amazon Redshift security group
for a previously authorized IP range or Amazon EC2 security
group. To add an ingress rule, see
AuthorizeClusterSecurityGroupIngress. For information about
managing security groups, go to `Amazon Redshift Cluster
        Security Groups`_ in the Amazon Redshift Management Guide.
:type cluster_security_group_name: string
        :param cluster_security_group_name: The name of the security group from
which to revoke the ingress rule.
:type cidrip: string
:param cidrip: The IP range for which to revoke access. This range must
be a valid Classless Inter-Domain Routing (CIDR) block of IP
addresses. If `CIDRIP` is specified, `EC2SecurityGroupName` and
`EC2SecurityGroupOwnerId` cannot be provided.
:type ec2_security_group_name: string
:param ec2_security_group_name: The name of the EC2 Security Group
whose access is to be revoked. If `EC2SecurityGroupName` is
specified, `EC2SecurityGroupOwnerId` must also be provided and
`CIDRIP` cannot be provided.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: The AWS account number of the owner
of the security group specified in the `EC2SecurityGroupName`
parameter. The AWS access key ID is not an acceptable value. If
            `EC2SecurityGroupOwnerId` is specified, `EC2SecurityGroupName` must
            also be provided, and `CIDRIP` cannot be provided.
Example: `111122223333`
"""
params = {
'ClusterSecurityGroupName': cluster_security_group_name,
}
if cidrip is not None:
params['CIDRIP'] = cidrip
if ec2_security_group_name is not None:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_owner_id is not None:
params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
return self._make_request(
action='RevokeClusterSecurityGroupIngress',
verb='POST',
path='/', params=params)
def revoke_snapshot_access(self, snapshot_identifier,
account_with_restore_access,
snapshot_cluster_identifier=None):
"""
Removes the ability of the specified AWS customer account to
restore the specified snapshot. If the account is currently
restoring the snapshot, the restore will run to completion.
For more information about working with snapshots, go to
`Amazon Redshift Snapshots`_ in the Amazon Redshift Management
        Guide.
:type snapshot_identifier: string
:param snapshot_identifier: The identifier of the snapshot that the
account can no longer access.
:type snapshot_cluster_identifier: string
:param snapshot_cluster_identifier: The identifier of the cluster the
snapshot was created from. This parameter is required if your IAM
user has a policy containing a snapshot resource element that
specifies anything other than * for the cluster name.
:type account_with_restore_access: string
:param account_with_restore_access: The identifier of the AWS customer
account that can no longer restore the specified snapshot.
"""
params = {
'SnapshotIdentifier': snapshot_identifier,
'AccountWithRestoreAccess': account_with_restore_access,
}
if snapshot_cluster_identifier is not None:
params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier
return self._make_request(
action='RevokeSnapshotAccess',
verb='POST',
path='/', params=params)
def rotate_encryption_key(self, cluster_identifier):
"""
Rotates the encryption keys for a cluster.
:type cluster_identifier: string
:param cluster_identifier: The unique identifier of the cluster that
you want to rotate the encryption keys for.
        Constraints: Must be the name of a valid cluster that has encryption
enabled.
"""
params = {'ClusterIdentifier': cluster_identifier, }
return self._make_request(
action='RotateEncryptionKey',
verb='POST',
path='/', params=params)
def _make_request(self, action, verb, path, params):
params['ContentType'] = 'JSON'
response = self.make_request(action=action, verb='POST',
path='/', params=params)
body = response.read().decode('utf-8')
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
json_body = json.loads(body)
fault_name = json_body.get('Error', {}).get('Code', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
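    # Error-handling sketch: on a non-200 response this method raises one of
    # the fault classes registered in self._faults, falling back to
    # self.ResponseError (typically boto.exception.JSONResponseError for
    # JSON services such as this one). `conn` is assumed to be an instance
    # of this class:
    #
    #   from boto.exception import JSONResponseError
    #   try:
    #       conn.describe_resize('no-such-cluster')
    #   except JSONResponseError as e:
    #       print(e.status, e.reason)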

# ---- boto/ec2/snapshot.py ----

from boto.ec2.ec2object import TaggedEC2Object
from boto.ec2.zone import Zone
class Snapshot(TaggedEC2Object):
"""
Represents an EBS snapshot.
:ivar id: The unique ID of the snapshot.
:ivar volume_id: The ID of the volume this snapshot was created
from.
:ivar status: The status of the snapshot.
:ivar progress: The percent complete of the snapshot.
:ivar start_time: The timestamp of when the snapshot was created.
:ivar owner_id: The id of the account that owns the snapshot.
:ivar owner_alias: The alias of the account that owns the snapshot.
:ivar volume_size: The size (in GB) of the volume the snapshot was created from.
:ivar description: The description of the snapshot.
:ivar encrypted: True if this snapshot is encrypted
"""
AttrName = 'createVolumePermission'
def __init__(self, connection=None):
super(Snapshot, self).__init__(connection)
self.id = None
self.volume_id = None
self.status = None
self.progress = None
self.start_time = None
self.owner_id = None
self.owner_alias = None
self.volume_size = None
self.description = None
self.encrypted = None
def __repr__(self):
return 'Snapshot:%s' % self.id
def endElement(self, name, value, connection):
if name == 'snapshotId':
self.id = value
elif name == 'volumeId':
self.volume_id = value
elif name == 'status':
self.status = value
elif name == 'startTime':
self.start_time = value
elif name == 'ownerId':
self.owner_id = value
elif name == 'ownerAlias':
self.owner_alias = value
elif name == 'volumeSize':
try:
self.volume_size = int(value)
            except (ValueError, TypeError):
                # Fall back to the raw string if the size isn't numeric.
                self.volume_size = value
elif name == 'description':
self.description = value
elif name == 'encrypted':
self.encrypted = (value.lower() == 'true')
else:
setattr(self, name, value)
def _update(self, updated):
self.progress = updated.progress
self.status = updated.status
def update(self, validate=False, dry_run=False):
"""
Update the data associated with this snapshot by querying EC2.
:type validate: bool
:param validate: By default, if EC2 returns no data about the
snapshot the update method returns quietly. If
the validate param is True, however, it will
raise a ValueError exception if no data is
returned from EC2.
"""
rs = self.connection.get_all_snapshots([self.id], dry_run=dry_run)
if len(rs) > 0:
self._update(rs[0])
elif validate:
raise ValueError('%s is not a valid Snapshot ID' % self.id)
return self.progress
def delete(self, dry_run=False):
return self.connection.delete_snapshot(self.id, dry_run=dry_run)
def get_permissions(self, dry_run=False):
attrs = self.connection.get_snapshot_attribute(
self.id,
self.AttrName,
dry_run=dry_run
)
return attrs.attrs
def share(self, user_ids=None, groups=None, dry_run=False):
return self.connection.modify_snapshot_attribute(self.id,
self.AttrName,
'add',
user_ids,
groups,
dry_run=dry_run)
def unshare(self, user_ids=None, groups=None, dry_run=False):
return self.connection.modify_snapshot_attribute(self.id,
self.AttrName,
'remove',
user_ids,
groups,
dry_run=dry_run)
def reset_permissions(self, dry_run=False):
return self.connection.reset_snapshot_attribute(
self.id,
self.AttrName,
dry_run=dry_run
)
def create_volume(self, zone, size=None, volume_type=None, iops=None,
dry_run=False):
"""
Create a new EBS Volume from this Snapshot
:type zone: string or :class:`boto.ec2.zone.Zone`
:param zone: The availability zone in which the Volume will be created.
:type size: int
        :param size: The size of the new volume, in GiB (optional). Defaults
            to the size of the snapshot.
        :type volume_type: string
        :param volume_type: The type of the volume (optional). Valid
            values are: standard | io1 | gp2.
        :type iops: int
        :param iops: The provisioned IOPS you want to associate with
            this volume (optional).
"""
if isinstance(zone, Zone):
zone = zone.name
return self.connection.create_volume(
size,
zone,
self.id,
volume_type,
iops,
self.encrypted,
dry_run=dry_run
)
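# Usage sketch (assumes an EC2 connection `conn`, e.g. created with
# boto.ec2.connect_to_region; the snapshot ID, account ID, and zone are
# illustrative):
#
#   snap = conn.get_all_snapshots(['snap-12345678'])[0]
#   snap.update()                      # refresh progress/status from EC2
#   snap.share(user_ids=['111122223333'])
#   vol = snap.create_volume('us-east-1a', volume_type='gp2')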
class SnapshotAttribute(object):
def __init__(self, parent=None):
self.snapshot_id = None
self.attrs = {}
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'createVolumePermission':
self.name = 'create_volume_permission'
elif name == 'group':
if 'groups' in self.attrs:
self.attrs['groups'].append(value)
else:
self.attrs['groups'] = [value]
elif name == 'userId':
if 'user_ids' in self.attrs:
self.attrs['user_ids'].append(value)
else:
self.attrs['user_ids'] = [value]
elif name == 'snapshotId':
self.snapshot_id = value
else:
setattr(self, name, value)

# ---- boto/ec2/address.py ----

from boto.ec2.ec2object import EC2Object
class Address(EC2Object):
"""
Represents an EC2 Elastic IP Address
:ivar public_ip: The Elastic IP address.
:ivar instance_id: The instance the address is associated with (if any).
    :ivar domain: Indicates whether the address is an EC2 address or a VPC address (standard|vpc).
:ivar allocation_id: The allocation ID for the address (VPC addresses only).
:ivar association_id: The association ID for the address (VPC addresses only).
:ivar network_interface_id: The network interface (if any) that the address is associated with (VPC addresses only).
    :ivar network_interface_owner_id: The owner ID (VPC addresses only).
:ivar private_ip_address: The private IP address associated with the Elastic IP address (VPC addresses only).
"""
def __init__(self, connection=None, public_ip=None, instance_id=None):
super(Address, self).__init__(connection)
self.connection = connection
self.public_ip = public_ip
self.instance_id = instance_id
self.domain = None
self.allocation_id = None
self.association_id = None
self.network_interface_id = None
self.network_interface_owner_id = None
self.private_ip_address = None
def __repr__(self):
return 'Address:%s' % self.public_ip
def endElement(self, name, value, connection):
if name == 'publicIp':
self.public_ip = value
elif name == 'instanceId':
self.instance_id = value
elif name == 'domain':
self.domain = value
elif name == 'allocationId':
self.allocation_id = value
elif name == 'associationId':
self.association_id = value
elif name == 'networkInterfaceId':
self.network_interface_id = value
elif name == 'networkInterfaceOwnerId':
self.network_interface_owner_id = value
elif name == 'privateIpAddress':
self.private_ip_address = value
else:
setattr(self, name, value)
def release(self, dry_run=False):
"""
Free up this Elastic IP address.
:see: :meth:`boto.ec2.connection.EC2Connection.release_address`
"""
if self.allocation_id:
return self.connection.release_address(
allocation_id=self.allocation_id,
dry_run=dry_run)
else:
return self.connection.release_address(
public_ip=self.public_ip,
dry_run=dry_run
)
delete = release
def associate(self, instance_id=None, network_interface_id=None, private_ip_address=None, allow_reassociation=False, dry_run=False):
"""
Associate this Elastic IP address with a currently running instance.
:see: :meth:`boto.ec2.connection.EC2Connection.associate_address`
"""
if self.allocation_id:
return self.connection.associate_address(
instance_id=instance_id,
public_ip=self.public_ip,
allocation_id=self.allocation_id,
network_interface_id=network_interface_id,
private_ip_address=private_ip_address,
allow_reassociation=allow_reassociation,
dry_run=dry_run
)
return self.connection.associate_address(
instance_id=instance_id,
public_ip=self.public_ip,
network_interface_id=network_interface_id,
private_ip_address=private_ip_address,
allow_reassociation=allow_reassociation,
dry_run=dry_run
)
def disassociate(self, dry_run=False):
"""
Disassociate this Elastic IP address from a currently running instance.
:see: :meth:`boto.ec2.connection.EC2Connection.disassociate_address`
"""
if self.association_id:
return self.connection.disassociate_address(
association_id=self.association_id,
dry_run=dry_run
)
else:
return self.connection.disassociate_address(
public_ip=self.public_ip,
dry_run=dry_run
)
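    # Usage sketch (assumes an EC2 connection `conn`; the instance ID is
    # illustrative). For VPC addresses the allocation/association IDs are
    # used automatically when present:
    #
    #   addr = conn.allocate_address(domain='vpc')
    #   addr.associate(instance_id='i-12345678')
    #   addr.disassociate()
    #   addr.release()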

# ---- boto/ec2/spotinstancerequest.py ----

from boto.ec2.ec2object import TaggedEC2Object
from boto.ec2.launchspecification import LaunchSpecification
class SpotInstanceStateFault(object):
"""
The fault codes for the Spot Instance request, if any.
:ivar code: The reason code for the Spot Instance state change.
:ivar message: The message for the Spot Instance state change.
"""
def __init__(self, code=None, message=None):
self.code = code
self.message = message
def __repr__(self):
return '(%s, %s)' % (self.code, self.message)
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'code':
self.code = value
elif name == 'message':
self.message = value
        else:
            setattr(self, name, value)
class SpotInstanceStatus(object):
"""
Contains the status of a Spot Instance Request.
:ivar code: Status code of the request.
:ivar message: The description for the status code for the Spot request.
:ivar update_time: Time the status was stated.
"""
def __init__(self, code=None, update_time=None, message=None):
self.code = code
self.update_time = update_time
self.message = message
def __repr__(self):
return '<Status: %s>' % self.code
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'code':
self.code = value
elif name == 'message':
self.message = value
elif name == 'updateTime':
self.update_time = value
class SpotInstanceRequest(TaggedEC2Object):
"""
:ivar id: The ID of the Spot Instance Request.
:ivar price: The maximum hourly price for any Spot Instance launched to
fulfill the request.
:ivar type: The Spot Instance request type.
:ivar state: The state of the Spot Instance request.
:ivar fault: The fault codes for the Spot Instance request, if any.
:ivar valid_from: The start date of the request. If this is a one-time
request, the request becomes active at this date and time and remains
active until all instances launch, the request expires, or the request is
canceled. If the request is persistent, the request becomes active at this
date and time and remains active until it expires or is canceled.
:ivar valid_until: The end date of the request. If this is a one-time
request, the request remains active until all instances launch, the request
is canceled, or this date is reached. If the request is persistent, it
remains active until it is canceled or this date is reached.
:ivar launch_group: The instance launch group. Launch groups are Spot
Instances that launch together and terminate together.
    :ivar launched_availability_zone: The Availability Zone in which the
        request was launched.
    :ivar product_description: The product description associated with
        the Spot Instance (e.g. Linux/UNIX).
:ivar availability_zone_group: The Availability Zone group. If you specify
the same Availability Zone group for all Spot Instance requests, all Spot
Instances are launched in the same Availability Zone.
:ivar create_time: The time stamp when the Spot Instance request was
created.
:ivar launch_specification: Additional information for launching instances.
:ivar instance_id: The instance ID, if an instance has been launched to
fulfill the Spot Instance request.
:ivar status: The status code and status message describing the Spot
Instance request.
"""
def __init__(self, connection=None):
super(SpotInstanceRequest, self).__init__(connection)
self.id = None
self.price = None
self.type = None
self.state = None
self.fault = None
self.valid_from = None
self.valid_until = None
self.launch_group = None
self.launched_availability_zone = None
self.product_description = None
self.availability_zone_group = None
self.create_time = None
self.launch_specification = None
self.instance_id = None
self.status = None
def __repr__(self):
return 'SpotInstanceRequest:%s' % self.id
def startElement(self, name, attrs, connection):
retval = super(SpotInstanceRequest, self).startElement(name, attrs,
connection)
if retval is not None:
return retval
if name == 'launchSpecification':
self.launch_specification = LaunchSpecification(connection)
return self.launch_specification
elif name == 'fault':
self.fault = SpotInstanceStateFault()
return self.fault
elif name == 'status':
self.status = SpotInstanceStatus()
return self.status
else:
return None
def endElement(self, name, value, connection):
if name == 'spotInstanceRequestId':
self.id = value
elif name == 'spotPrice':
self.price = float(value)
elif name == 'type':
self.type = value
elif name == 'state':
self.state = value
elif name == 'validFrom':
self.valid_from = value
elif name == 'validUntil':
self.valid_until = value
elif name == 'launchGroup':
self.launch_group = value
elif name == 'availabilityZoneGroup':
self.availability_zone_group = value
elif name == 'launchedAvailabilityZone':
self.launched_availability_zone = value
elif name == 'instanceId':
self.instance_id = value
elif name == 'createTime':
self.create_time = value
elif name == 'productDescription':
self.product_description = value
else:
setattr(self, name, value)
def cancel(self, dry_run=False):
self.connection.cancel_spot_instance_requests(
[self.id],
dry_run=dry_run
)
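# --- Usage sketch (not part of boto) ----------------------------------
# Requesting a Spot Instance and inspecting the request objects defined
# above; the region, AMI ID, and bid price are placeholder assumptions.
import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')      # placeholder region
reqs = conn.request_spot_instances(price='0.05',    # max hourly bid
                                   image_id='ami-12345678',
                                   instance_type='m1.small')
for req in conn.get_all_spot_instance_requests([r.id for r in reqs]):
    print(req.id, req.state, req.status)            # SpotInstanceStatus
    if req.state == 'open':
        req.cancel()                                # withdraw the bid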
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/ec2/spotinstancerequest.py
| 0.682468 | 0.295249 |
spotinstancerequest.py
|
|
import os
from boto.ec2.ec2object import EC2Object
from boto.exception import BotoClientError
class KeyPair(EC2Object):
def __init__(self, connection=None):
super(KeyPair, self).__init__(connection)
self.name = None
self.fingerprint = None
self.material = None
def __repr__(self):
return 'KeyPair:%s' % self.name
def endElement(self, name, value, connection):
if name == 'keyName':
self.name = value
elif name == 'keyFingerprint':
self.fingerprint = value
elif name == 'keyMaterial':
self.material = value
else:
setattr(self, name, value)
def delete(self, dry_run=False):
"""
Delete the KeyPair.
:rtype: bool
:return: True if successful, otherwise False.
"""
return self.connection.delete_key_pair(self.name, dry_run=dry_run)
def save(self, directory_path):
"""
Save the material (the unencrypted PEM encoded RSA private key)
of a newly created KeyPair to a local file.
:type directory_path: string
:param directory_path: The fully qualified path to the directory
in which the keypair will be saved. The
keypair file will be named using the name
of the keypair as the base name and .pem
for the file extension. If a file of that
name already exists in the directory, an
exception will be raised and the old file
will not be overwritten.
:rtype: bool
:return: True if successful.
"""
if self.material:
directory_path = os.path.expanduser(directory_path)
file_path = os.path.join(directory_path, '%s.pem' % self.name)
if os.path.exists(file_path):
raise BotoClientError('%s already exists, it will not be overwritten' % file_path)
fp = open(file_path, 'wb')
fp.write(self.material)
fp.close()
os.chmod(file_path, 0o600)
return True
else:
raise BotoClientError('KeyPair contains no material')
def copy_to_region(self, region, dry_run=False):
"""
        Create a new key pair of the same name in another region.
        Note that the new key pair will use a different ssh
        cert than this key pair. After doing the copy,
you will need to save the material associated with the
new key pair (use the save method) to a local file.
:type region: :class:`boto.ec2.regioninfo.RegionInfo`
        :param region: The region to which this key pair will be copied.
:rtype: :class:`boto.ec2.keypair.KeyPair`
:return: The new key pair
"""
        if region.name == self.region.name:
raise BotoClientError('Unable to copy to the same Region')
conn_params = self.connection.get_params()
rconn = region.connect(**conn_params)
kp = rconn.create_key_pair(self.name, dry_run=dry_run)
return kp
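# --- Usage sketch (not part of boto) ----------------------------------
# Creating a key pair and persisting its private key with KeyPair.save();
# the region and directory below are placeholders. save() refuses to
# overwrite an existing .pem file and chmods the new file to 0600.
import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')   # placeholder region
kp = conn.create_key_pair('example-key')         # material is only
kp.save('~/.ssh')                                # available at creation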
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/ec2/keypair.py
| 0.600305 | 0.170232 |
keypair.py
|
|
from boto.resultset import ResultSet
from boto.ec2.ec2object import EC2Object
from boto.utils import parse_ts
class ReservedInstancesOffering(EC2Object):
def __init__(self, connection=None, id=None, instance_type=None,
availability_zone=None, duration=None, fixed_price=None,
usage_price=None, description=None, instance_tenancy=None,
currency_code=None, offering_type=None,
recurring_charges=None, pricing_details=None):
super(ReservedInstancesOffering, self).__init__(connection)
self.id = id
self.instance_type = instance_type
self.availability_zone = availability_zone
self.duration = duration
self.fixed_price = fixed_price
self.usage_price = usage_price
self.description = description
self.instance_tenancy = instance_tenancy
self.currency_code = currency_code
self.offering_type = offering_type
self.recurring_charges = recurring_charges
self.pricing_details = pricing_details
def __repr__(self):
return 'ReservedInstanceOffering:%s' % self.id
def startElement(self, name, attrs, connection):
if name == 'recurringCharges':
self.recurring_charges = ResultSet([('item', RecurringCharge)])
return self.recurring_charges
elif name == 'pricingDetailsSet':
self.pricing_details = ResultSet([('item', PricingDetail)])
return self.pricing_details
return None
def endElement(self, name, value, connection):
if name == 'reservedInstancesOfferingId':
self.id = value
elif name == 'instanceType':
self.instance_type = value
elif name == 'availabilityZone':
self.availability_zone = value
elif name == 'duration':
self.duration = int(value)
elif name == 'fixedPrice':
self.fixed_price = value
elif name == 'usagePrice':
self.usage_price = value
elif name == 'productDescription':
self.description = value
elif name == 'instanceTenancy':
self.instance_tenancy = value
elif name == 'currencyCode':
self.currency_code = value
elif name == 'offeringType':
self.offering_type = value
elif name == 'marketplace':
self.marketplace = True if value == 'true' else False
def describe(self):
print('ID=%s' % self.id)
print('\tInstance Type=%s' % self.instance_type)
print('\tZone=%s' % self.availability_zone)
print('\tDuration=%s' % self.duration)
print('\tFixed Price=%s' % self.fixed_price)
print('\tUsage Price=%s' % self.usage_price)
print('\tDescription=%s' % self.description)
def purchase(self, instance_count=1, dry_run=False):
return self.connection.purchase_reserved_instance_offering(
self.id,
instance_count,
dry_run=dry_run
)
class RecurringCharge(object):
def __init__(self, connection=None, frequency=None, amount=None):
self.frequency = frequency
self.amount = amount
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
class PricingDetail(object):
def __init__(self, connection=None, price=None, count=None):
self.price = price
self.count = count
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
class ReservedInstance(ReservedInstancesOffering):
def __init__(self, connection=None, id=None, instance_type=None,
availability_zone=None, duration=None, fixed_price=None,
usage_price=None, description=None,
instance_count=None, state=None):
super(ReservedInstance, self).__init__(connection, id, instance_type,
availability_zone, duration,
fixed_price, usage_price,
description)
self.instance_count = instance_count
self.state = state
self.start = None
self.end = None
def __repr__(self):
return 'ReservedInstance:%s' % self.id
def endElement(self, name, value, connection):
if name == 'reservedInstancesId':
self.id = value
if name == 'instanceCount':
self.instance_count = int(value)
elif name == 'state':
self.state = value
elif name == 'start':
self.start = value
elif name == 'end':
self.end = value
else:
super(ReservedInstance, self).endElement(name, value, connection)
class ReservedInstanceListing(EC2Object):
def __init__(self, connection=None, listing_id=None, id=None,
create_date=None, update_date=None,
status=None, status_message=None, client_token=None):
self.connection = connection
self.listing_id = listing_id
self.id = id
self.create_date = create_date
self.update_date = update_date
self.status = status
self.status_message = status_message
self.client_token = client_token
def startElement(self, name, attrs, connection):
if name == 'instanceCounts':
self.instance_counts = ResultSet([('item', InstanceCount)])
return self.instance_counts
elif name == 'priceSchedules':
self.price_schedules = ResultSet([('item', PriceSchedule)])
return self.price_schedules
return None
def endElement(self, name, value, connection):
if name == 'reservedInstancesListingId':
self.listing_id = value
elif name == 'reservedInstancesId':
self.id = value
elif name == 'createDate':
self.create_date = value
elif name == 'updateDate':
self.update_date = value
elif name == 'status':
self.status = value
elif name == 'statusMessage':
self.status_message = value
else:
setattr(self, name, value)
class InstanceCount(object):
def __init__(self, connection=None, state=None, instance_count=None):
self.state = state
self.instance_count = instance_count
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'state':
self.state = value
elif name == 'instanceCount':
self.instance_count = int(value)
else:
setattr(self, name, value)
class PriceSchedule(object):
def __init__(self, connection=None, term=None, price=None,
currency_code=None, active=None):
self.connection = connection
self.term = term
self.price = price
self.currency_code = currency_code
self.active = active
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'term':
self.term = int(value)
elif name == 'price':
self.price = value
elif name == 'currencyCode':
self.currency_code = value
elif name == 'active':
self.active = True if value == 'true' else False
else:
setattr(self, name, value)
class ReservedInstancesConfiguration(object):
def __init__(self, connection=None, availability_zone=None, platform=None,
instance_count=None, instance_type=None):
self.connection = connection
self.availability_zone = availability_zone
self.platform = platform
self.instance_count = instance_count
self.instance_type = instance_type
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'availabilityZone':
self.availability_zone = value
elif name == 'platform':
self.platform = value
elif name == 'instanceCount':
self.instance_count = int(value)
elif name == 'instanceType':
self.instance_type = value
else:
setattr(self, name, value)
class ModifyReservedInstancesResult(object):
def __init__(self, connection=None, modification_id=None):
self.connection = connection
self.modification_id = modification_id
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'reservedInstancesModificationId':
self.modification_id = value
else:
setattr(self, name, value)
class ModificationResult(object):
def __init__(self, connection=None, modification_id=None,
availability_zone=None, platform=None, instance_count=None,
instance_type=None):
self.connection = connection
self.modification_id = modification_id
self.availability_zone = availability_zone
self.platform = platform
self.instance_count = instance_count
self.instance_type = instance_type
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'reservedInstancesModificationId':
self.modification_id = value
elif name == 'availabilityZone':
self.availability_zone = value
elif name == 'platform':
self.platform = value
elif name == 'instanceCount':
self.instance_count = int(value)
elif name == 'instanceType':
self.instance_type = value
else:
setattr(self, name, value)
class ReservedInstancesModification(object):
def __init__(self, connection=None, modification_id=None,
reserved_instances=None, modification_results=None,
create_date=None, update_date=None, effective_date=None,
status=None, status_message=None, client_token=None):
self.connection = connection
self.modification_id = modification_id
self.reserved_instances = reserved_instances
self.modification_results = modification_results
self.create_date = create_date
self.update_date = update_date
self.effective_date = effective_date
self.status = status
self.status_message = status_message
self.client_token = client_token
def startElement(self, name, attrs, connection):
if name == 'reservedInstancesSet':
self.reserved_instances = ResultSet([
('item', ReservedInstance)
])
return self.reserved_instances
elif name == 'modificationResultSet':
self.modification_results = ResultSet([
('item', ModificationResult)
])
return self.modification_results
return None
def endElement(self, name, value, connection):
if name == 'reservedInstancesModificationId':
self.modification_id = value
elif name == 'createDate':
self.create_date = parse_ts(value)
elif name == 'updateDate':
self.update_date = parse_ts(value)
elif name == 'effectiveDate':
self.effective_date = parse_ts(value)
elif name == 'status':
self.status = value
elif name == 'statusMessage':
self.status_message = value
elif name == 'clientToken':
self.client_token = value
else:
setattr(self, name, value)
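# --- Usage sketch (not part of boto) ----------------------------------
# Listing Reserved Instance offerings and purchasing one via the objects
# above; the region and filter values are placeholder assumptions.
import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')   # placeholder region
offerings = conn.get_all_reserved_instances_offerings(
    instance_type='m1.small', availability_zone='us-east-1a')
if offerings:
    offerings[0].describe()                      # print the terms
    # offerings[0].purchase(instance_count=1)    # uncomment to buy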
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/ec2/reservedinstance.py
| 0.597256 | 0.163145 |
reservedinstance.py
|
|
from boto.ec2.instancestatus import Status, Details
class Event(object):
"""
A status event for an instance.
:ivar type: The type of the event.
:ivar id: The ID of the event.
:ivar description: A string describing the reason for the event.
:ivar not_before: A datestring describing the earliest time for
the event.
:ivar not_after: A datestring describing the latest time for
the event.
"""
def __init__(self, type=None, id=None, description=None,
not_before=None, not_after=None):
self.type = type
self.id = id
self.description = description
self.not_before = not_before
self.not_after = not_after
def __repr__(self):
return 'Event:%s' % self.type
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'eventType':
self.type = value
elif name == 'eventId':
self.id = value
elif name == 'description':
self.description = value
elif name == 'notBefore':
self.not_before = value
elif name == 'notAfter':
self.not_after = value
else:
setattr(self, name, value)
class EventSet(list):
def startElement(self, name, attrs, connection):
if name == 'item':
event = Event()
self.append(event)
return event
else:
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
class Action(object):
"""
An action for an instance.
:ivar code: The code for the type of the action.
:ivar id: The ID of the event.
:ivar type: The type of the event.
:ivar description: A description of the action.
"""
def __init__(self, code=None, id=None, description=None, type=None):
self.code = code
self.id = id
self.type = type
self.description = description
def __repr__(self):
return 'Action:%s' % self.code
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'eventType':
self.type = value
elif name == 'eventId':
self.id = value
elif name == 'description':
self.description = value
elif name == 'code':
self.code = value
else:
setattr(self, name, value)
class ActionSet(list):
def startElement(self, name, attrs, connection):
if name == 'item':
action = Action()
self.append(action)
return action
else:
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
class VolumeStatus(object):
"""
    Represents an EC2 Volume status as reported by a
    DescribeVolumeStatus request.
    :ivar id: The volume identifier.
    :ivar zone: The availability zone of the volume.
    :ivar volume_status: A Status object that reports impaired
        functionality that arises from problems internal to the volume.
    :ivar events: A list of events relevant to the volume.
    :ivar actions: A list of actions relevant to the volume.
"""
def __init__(self, id=None, zone=None):
self.id = id
self.zone = zone
self.volume_status = Status()
self.events = None
self.actions = None
def __repr__(self):
return 'VolumeStatus:%s' % self.id
def startElement(self, name, attrs, connection):
if name == 'eventsSet':
self.events = EventSet()
return self.events
elif name == 'actionsSet':
self.actions = ActionSet()
return self.actions
elif name == 'volumeStatus':
return self.volume_status
else:
return None
def endElement(self, name, value, connection):
if name == 'volumeId':
self.id = value
elif name == 'availabilityZone':
self.zone = value
else:
setattr(self, name, value)
class VolumeStatusSet(list):
"""
    A list object that contains the results of a call to
    the DescribeVolumeStatus request. Each element of the
    list will be a VolumeStatus object.
:ivar next_token: If the response was truncated by
the EC2 service, the next_token attribute of the
object will contain the string that needs to be
passed in to the next request to retrieve the next
set of results.
"""
def __init__(self, connection=None):
list.__init__(self)
self.connection = connection
self.next_token = None
def startElement(self, name, attrs, connection):
if name == 'item':
status = VolumeStatus()
self.append(status)
return status
else:
return None
def endElement(self, name, value, connection):
if name == 'NextToken':
self.next_token = value
setattr(self, name, value)
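# --- Usage sketch (not part of boto) ----------------------------------
# Paging through volume status results with the next_token handling that
# VolumeStatusSet implements above; the region is a placeholder.
import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')   # placeholder region
next_token = None
while True:
    statuses = conn.get_all_volume_status(max_results=100,
                                          next_token=next_token)
    for st in statuses:
        print(st.id, st.zone, st.volume_status.status)
    next_token = statuses.next_token             # None when exhausted
    if not next_token:
        break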
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/ec2/volumestatus.py
| 0.68742 | 0.257473 |
volumestatus.py
|
|
import boto
from boto.ec2.ec2object import EC2Object, TaggedEC2Object
from boto.resultset import ResultSet
from boto.ec2.address import Address
from boto.ec2.blockdevicemapping import BlockDeviceMapping
from boto.ec2.image import ProductCodes
from boto.ec2.networkinterface import NetworkInterface
from boto.ec2.group import Group
import base64
class InstanceState(object):
"""
The state of the instance.
:ivar code: The low byte represents the state. The high byte is an
opaque internal value and should be ignored. Valid values:
* 0 (pending)
* 16 (running)
* 32 (shutting-down)
* 48 (terminated)
* 64 (stopping)
* 80 (stopped)
:ivar name: The name of the state of the instance. Valid values:
* "pending"
* "running"
* "shutting-down"
* "terminated"
* "stopping"
* "stopped"
"""
def __init__(self, code=0, name=None):
self.code = code
self.name = name
def __repr__(self):
return '%s(%d)' % (self.name, self.code)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'code':
self.code = int(value)
elif name == 'name':
self.name = value
else:
setattr(self, name, value)
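# --- Illustration (not part of boto) -----------------------------------
# The docstring above says only the low byte of the state code is
# meaningful; a small pure-Python sketch of masking it off:
_STATE_NAMES = {0: 'pending', 16: 'running', 32: 'shutting-down',
                48: 'terminated', 64: 'stopping', 80: 'stopped'}

def decode_state_code(raw_code):
    """Drop the opaque high byte and map the low byte to a state name."""
    return _STATE_NAMES.get(raw_code & 0xFF, 'unknown')

assert decode_state_code(0x110) == 'running'     # high byte is ignored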
class InstancePlacement(object):
"""
The location where the instance launched.
:ivar zone: The Availability Zone of the instance.
:ivar group_name: The name of the placement group the instance is
in (for cluster compute instances).
:ivar tenancy: The tenancy of the instance (if the instance is
running within a VPC). An instance with a tenancy of dedicated
runs on single-tenant hardware.
"""
def __init__(self, zone=None, group_name=None, tenancy=None):
self.zone = zone
self.group_name = group_name
self.tenancy = tenancy
def __repr__(self):
return self.zone
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'availabilityZone':
self.zone = value
elif name == 'groupName':
self.group_name = value
elif name == 'tenancy':
self.tenancy = value
else:
setattr(self, name, value)
class Reservation(EC2Object):
"""
Represents a Reservation response object.
:ivar id: The unique ID of the Reservation.
:ivar owner_id: The unique ID of the owner of the Reservation.
:ivar groups: A list of Group objects representing the security
groups associated with launched instances.
:ivar instances: A list of Instance objects launched in this
Reservation.
"""
def __init__(self, connection=None):
super(Reservation, self).__init__(connection)
self.id = None
self.owner_id = None
self.groups = []
self.instances = []
def __repr__(self):
return 'Reservation:%s' % self.id
def startElement(self, name, attrs, connection):
if name == 'instancesSet':
self.instances = ResultSet([('item', Instance)])
return self.instances
elif name == 'groupSet':
self.groups = ResultSet([('item', Group)])
return self.groups
else:
return None
def endElement(self, name, value, connection):
if name == 'reservationId':
self.id = value
elif name == 'ownerId':
self.owner_id = value
else:
setattr(self, name, value)
def stop_all(self, dry_run=False):
for instance in self.instances:
instance.stop(dry_run=dry_run)
class Instance(TaggedEC2Object):
"""
Represents an instance.
:ivar id: The unique ID of the Instance.
:ivar groups: A list of Group objects representing the security
groups associated with the instance.
:ivar public_dns_name: The public dns name of the instance.
:ivar private_dns_name: The private dns name of the instance.
:ivar state: The string representation of the instance's current state.
:ivar state_code: An integer representation of the instance's
current state.
:ivar previous_state: The string representation of the instance's
previous state.
:ivar previous_state_code: An integer representation of the
    instance's previous state.
:ivar key_name: The name of the SSH key associated with the instance.
:ivar instance_type: The type of instance (e.g. m1.small).
:ivar launch_time: The time the instance was launched.
:ivar image_id: The ID of the AMI used to launch this instance.
:ivar placement: The availability zone in which the instance is running.
:ivar placement_group: The name of the placement group the instance
is in (for cluster compute instances).
:ivar placement_tenancy: The tenancy of the instance, if the instance
is running within a VPC. An instance with a tenancy of dedicated
    runs on single-tenant hardware.
:ivar kernel: The kernel associated with the instance.
:ivar ramdisk: The ramdisk associated with the instance.
:ivar architecture: The architecture of the image (i386|x86_64).
:ivar hypervisor: The hypervisor used.
:ivar virtualization_type: The type of virtualization used.
:ivar product_codes: A list of product codes associated with this instance.
    :ivar ami_launch_index: This instance's position within its launch group.
:ivar monitored: A boolean indicating whether monitoring is enabled or not.
:ivar monitoring_state: A string value that contains the actual value
of the monitoring element returned by EC2.
:ivar spot_instance_request_id: The ID of the spot instance request
if this is a spot instance.
:ivar subnet_id: The VPC Subnet ID, if running in VPC.
:ivar vpc_id: The VPC ID, if running in VPC.
:ivar private_ip_address: The private IP address of the instance.
:ivar ip_address: The public IP address of the instance.
:ivar platform: Platform of the instance (e.g. Windows)
:ivar root_device_name: The name of the root device.
:ivar root_device_type: The root device type (ebs|instance-store).
:ivar block_device_mapping: The Block Device Mapping for the instance.
:ivar state_reason: The reason for the most recent state transition.
:ivar interfaces: List of Elastic Network Interfaces associated with
this instance.
:ivar ebs_optimized: Whether instance is using optimized EBS volumes
or not.
:ivar instance_profile: A Python dict containing the instance
profile id and arn associated with this instance.
"""
def __init__(self, connection=None):
super(Instance, self).__init__(connection)
self.id = None
self.dns_name = None
self.public_dns_name = None
self.private_dns_name = None
self.key_name = None
self.instance_type = None
self.launch_time = None
self.image_id = None
self.kernel = None
self.ramdisk = None
self.product_codes = ProductCodes()
self.ami_launch_index = None
self.monitored = False
self.monitoring_state = None
self.spot_instance_request_id = None
self.subnet_id = None
self.vpc_id = None
self.private_ip_address = None
self.ip_address = None
self.requester_id = None
self._in_monitoring_element = False
self.persistent = False
self.root_device_name = None
self.root_device_type = None
self.block_device_mapping = None
self.state_reason = None
self.group_name = None
self.client_token = None
self.eventsSet = None
self.groups = []
self.platform = None
self.interfaces = []
self.hypervisor = None
self.virtualization_type = None
self.architecture = None
self.instance_profile = None
self._previous_state = None
self._state = InstanceState()
self._placement = InstancePlacement()
def __repr__(self):
return 'Instance:%s' % self.id
@property
def state(self):
return self._state.name
@property
def state_code(self):
return self._state.code
@property
def previous_state(self):
if self._previous_state:
return self._previous_state.name
return None
@property
def previous_state_code(self):
if self._previous_state:
return self._previous_state.code
return 0
@property
def placement(self):
return self._placement.zone
@property
def placement_group(self):
return self._placement.group_name
@property
def placement_tenancy(self):
return self._placement.tenancy
def startElement(self, name, attrs, connection):
retval = super(Instance, self).startElement(name, attrs, connection)
if retval is not None:
return retval
if name == 'monitoring':
self._in_monitoring_element = True
elif name == 'blockDeviceMapping':
self.block_device_mapping = BlockDeviceMapping()
return self.block_device_mapping
elif name == 'productCodes':
return self.product_codes
elif name == 'stateReason':
self.state_reason = SubParse('stateReason')
return self.state_reason
elif name == 'groupSet':
self.groups = ResultSet([('item', Group)])
return self.groups
elif name == "eventsSet":
self.eventsSet = SubParse('eventsSet')
return self.eventsSet
elif name == 'networkInterfaceSet':
self.interfaces = ResultSet([('item', NetworkInterface)])
return self.interfaces
elif name == 'iamInstanceProfile':
self.instance_profile = SubParse('iamInstanceProfile')
return self.instance_profile
elif name == 'currentState':
return self._state
elif name == 'previousState':
self._previous_state = InstanceState()
return self._previous_state
elif name == 'instanceState':
return self._state
elif name == 'placement':
return self._placement
return None
def endElement(self, name, value, connection):
if name == 'instanceId':
self.id = value
elif name == 'imageId':
self.image_id = value
elif name == 'dnsName' or name == 'publicDnsName':
self.dns_name = value # backwards compatibility
self.public_dns_name = value
elif name == 'privateDnsName':
self.private_dns_name = value
elif name == 'keyName':
self.key_name = value
elif name == 'amiLaunchIndex':
self.ami_launch_index = value
        elif name == 'previousState':
            # Parsed via the _previous_state sub-object returned from
            # startElement; assigning here would hit the read-only property.
            pass
elif name == 'instanceType':
self.instance_type = value
elif name == 'rootDeviceName':
self.root_device_name = value
elif name == 'rootDeviceType':
self.root_device_type = value
elif name == 'launchTime':
self.launch_time = value
elif name == 'platform':
self.platform = value
elif name == 'kernelId':
self.kernel = value
elif name == 'ramdiskId':
self.ramdisk = value
elif name == 'state':
if self._in_monitoring_element:
self.monitoring_state = value
if value == 'enabled':
self.monitored = True
self._in_monitoring_element = False
elif name == 'spotInstanceRequestId':
self.spot_instance_request_id = value
elif name == 'subnetId':
self.subnet_id = value
elif name == 'vpcId':
self.vpc_id = value
elif name == 'privateIpAddress':
self.private_ip_address = value
elif name == 'ipAddress':
self.ip_address = value
elif name == 'requesterId':
self.requester_id = value
elif name == 'persistent':
if value == 'true':
self.persistent = True
else:
self.persistent = False
elif name == 'groupName':
if self._in_monitoring_element:
self.group_name = value
elif name == 'clientToken':
self.client_token = value
elif name == "eventsSet":
self.events = value
elif name == 'hypervisor':
self.hypervisor = value
elif name == 'virtualizationType':
self.virtualization_type = value
elif name == 'architecture':
self.architecture = value
elif name == 'ebsOptimized':
self.ebs_optimized = (value == 'true')
else:
setattr(self, name, value)
def _update(self, updated):
self.__dict__.update(updated.__dict__)
def update(self, validate=False, dry_run=False):
"""
Update the instance's state information by making a call to fetch
the current instance attributes from the service.
:type validate: bool
:param validate: By default, if EC2 returns no data about the
instance the update method returns quietly. If
the validate param is True, however, it will
raise a ValueError exception if no data is
returned from EC2.
"""
rs = self.connection.get_all_reservations([self.id], dry_run=dry_run)
if len(rs) > 0:
r = rs[0]
for i in r.instances:
if i.id == self.id:
self._update(i)
elif validate:
raise ValueError('%s is not a valid Instance ID' % self.id)
return self.state
def terminate(self, dry_run=False):
"""
Terminate the instance
"""
rs = self.connection.terminate_instances([self.id], dry_run=dry_run)
if len(rs) > 0:
self._update(rs[0])
def stop(self, force=False, dry_run=False):
"""
Stop the instance
:type force: bool
:param force: Forces the instance to stop
:rtype: list
:return: A list of the instances stopped
"""
rs = self.connection.stop_instances([self.id], force, dry_run=dry_run)
if len(rs) > 0:
self._update(rs[0])
def start(self, dry_run=False):
"""
Start the instance.
"""
rs = self.connection.start_instances([self.id], dry_run=dry_run)
if len(rs) > 0:
self._update(rs[0])
def reboot(self, dry_run=False):
return self.connection.reboot_instances([self.id], dry_run=dry_run)
def get_console_output(self, dry_run=False):
"""
Retrieves the console output for the instance.
:rtype: :class:`boto.ec2.instance.ConsoleOutput`
:return: The console output as a ConsoleOutput object
"""
return self.connection.get_console_output(self.id, dry_run=dry_run)
def confirm_product(self, product_code, dry_run=False):
return self.connection.confirm_product_instance(
self.id,
product_code,
dry_run=dry_run
)
def use_ip(self, ip_address, dry_run=False):
"""
Associates an Elastic IP to the instance.
:type ip_address: Either an instance of
:class:`boto.ec2.address.Address` or a string.
:param ip_address: The IP address to associate
with the instance.
:rtype: bool
:return: True if successful
"""
if isinstance(ip_address, Address):
ip_address = ip_address.public_ip
return self.connection.associate_address(
self.id,
ip_address,
dry_run=dry_run
)
def monitor(self, dry_run=False):
return self.connection.monitor_instance(self.id, dry_run=dry_run)
def unmonitor(self, dry_run=False):
return self.connection.unmonitor_instance(self.id, dry_run=dry_run)
def get_attribute(self, attribute, dry_run=False):
"""
Gets an attribute from this instance.
:type attribute: string
:param attribute: The attribute you need information about
Valid choices are:
* instanceType
* kernel
* ramdisk
* userData
* disableApiTermination
* instanceInitiatedShutdownBehavior
* rootDeviceName
* blockDeviceMapping
* productCodes
* sourceDestCheck
* groupSet
* ebsOptimized
:rtype: :class:`boto.ec2.image.InstanceAttribute`
:return: An InstanceAttribute object representing the value of the
attribute requested
"""
return self.connection.get_instance_attribute(
self.id,
attribute,
dry_run=dry_run
)
def modify_attribute(self, attribute, value, dry_run=False):
"""
Changes an attribute of this instance
:type attribute: string
:param attribute: The attribute you wish to change.
* instanceType - A valid instance type (m1.small)
* kernel - Kernel ID (None)
* ramdisk - Ramdisk ID (None)
* userData - Base64 encoded String (None)
* disableApiTermination - Boolean (true)
* instanceInitiatedShutdownBehavior - stop|terminate
* sourceDestCheck - Boolean (true)
* groupSet - Set of Security Groups or IDs
* ebsOptimized - Boolean (false)
:type value: string
:param value: The new value for the attribute
:rtype: bool
:return: Whether the operation succeeded or not
"""
return self.connection.modify_instance_attribute(
self.id,
attribute,
value,
dry_run=dry_run
)
def reset_attribute(self, attribute, dry_run=False):
"""
Resets an attribute of this instance to its default value.
:type attribute: string
:param attribute: The attribute to reset. Valid values are:
kernel|ramdisk
:rtype: bool
:return: Whether the operation succeeded or not
"""
return self.connection.reset_instance_attribute(
self.id,
attribute,
dry_run=dry_run
)
def create_image(self, name, description=None, no_reboot=False,
dry_run=False):
"""
Will create an AMI from the instance in the running or stopped
state.
:type name: string
:param name: The name of the new image
:type description: string
:param description: An optional human-readable string describing
the contents and purpose of the AMI.
:type no_reboot: bool
:param no_reboot: An optional flag indicating that the bundling process
should not attempt to shutdown the instance before
bundling. If this flag is True, the responsibility
of maintaining file system integrity is left to the
owner of the instance.
:rtype: string
:return: The new image id
"""
return self.connection.create_image(
self.id,
name,
description,
no_reboot,
dry_run=dry_run
)
class ConsoleOutput(object):
def __init__(self, parent=None):
self.parent = parent
self.instance_id = None
self.timestamp = None
self.output = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'instanceId':
self.instance_id = value
elif name == 'timestamp':
self.timestamp = value
elif name == 'output':
self.output = base64.b64decode(value)
else:
setattr(self, name, value)
class InstanceAttribute(dict):
ValidValues = ['instanceType', 'kernel', 'ramdisk', 'userData',
'disableApiTermination',
'instanceInitiatedShutdownBehavior',
'rootDeviceName', 'blockDeviceMapping', 'sourceDestCheck',
'groupSet']
def __init__(self, parent=None):
dict.__init__(self)
self.instance_id = None
self.request_id = None
self._current_value = None
def startElement(self, name, attrs, connection):
if name == 'blockDeviceMapping':
self[name] = BlockDeviceMapping()
return self[name]
elif name == 'groupSet':
self[name] = ResultSet([('item', Group)])
return self[name]
else:
return None
def endElement(self, name, value, connection):
if name == 'instanceId':
self.instance_id = value
elif name == 'requestId':
self.request_id = value
elif name == 'value':
if value == 'true':
value = True
elif value == 'false':
value = False
self._current_value = value
elif name in self.ValidValues:
self[name] = self._current_value
class SubParse(dict):
def __init__(self, section, parent=None):
dict.__init__(self)
self.section = section
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name != self.section:
self[name] = value
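# --- Usage sketch (not part of boto) ----------------------------------
# Launching an instance and driving the Instance lifecycle methods above;
# the region, AMI ID, and key name are placeholder assumptions.
import time
import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')   # placeholder region
reservation = conn.run_instances('ami-12345678', # placeholder AMI
                                 key_name='example-key',
                                 instance_type='m1.small')
inst = reservation.instances[0]
while inst.update() != 'running':                # poll via update()
    time.sleep(5)
inst.add_tag('Name', 'example')                  # round-trip to EC2
inst.terminate()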
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/ec2/instance.py
| 0.682256 | 0.303922 |
instance.py
|
|
from boto.resultset import ResultSet
from boto.ec2.tag import Tag
from boto.ec2.ec2object import TaggedEC2Object
class Volume(TaggedEC2Object):
"""
Represents an EBS volume.
:ivar id: The unique ID of the volume.
:ivar create_time: The timestamp of when the volume was created.
:ivar status: The status of the volume.
:ivar size: The size (in GB) of the volume.
:ivar snapshot_id: The ID of the snapshot this volume was created
from, if applicable.
:ivar attach_data: An AttachmentSet object.
:ivar zone: The availability zone this volume is in.
    :ivar type: The type of volume (e.g. standard or io1).
    :ivar iops: If this volume is a provisioned-IOPS (io1) volume, this
        is the number of IOPS provisioned.
:ivar encrypted: True if this volume is encrypted.
"""
def __init__(self, connection=None):
super(Volume, self).__init__(connection)
self.id = None
self.create_time = None
self.status = None
self.size = None
self.snapshot_id = None
self.attach_data = None
self.zone = None
self.type = None
self.iops = None
self.encrypted = None
def __repr__(self):
return 'Volume:%s' % self.id
def startElement(self, name, attrs, connection):
retval = super(Volume, self).startElement(name, attrs, connection)
if retval is not None:
return retval
if name == 'attachmentSet':
self.attach_data = AttachmentSet()
return self.attach_data
elif name == 'tagSet':
self.tags = ResultSet([('item', Tag)])
return self.tags
else:
return None
def endElement(self, name, value, connection):
if name == 'volumeId':
self.id = value
elif name == 'createTime':
self.create_time = value
elif name == 'status':
if value != '':
self.status = value
elif name == 'size':
self.size = int(value)
elif name == 'snapshotId':
self.snapshot_id = value
elif name == 'availabilityZone':
self.zone = value
elif name == 'volumeType':
self.type = value
elif name == 'iops':
self.iops = int(value)
elif name == 'encrypted':
self.encrypted = (value.lower() == 'true')
else:
setattr(self, name, value)
def _update(self, updated):
self.__dict__.update(updated.__dict__)
def update(self, validate=False, dry_run=False):
"""
Update the data associated with this volume by querying EC2.
:type validate: bool
:param validate: By default, if EC2 returns no data about the
volume the update method returns quietly. If
the validate param is True, however, it will
raise a ValueError exception if no data is
returned from EC2.
"""
# Check the resultset since Eucalyptus ignores the volumeId param
unfiltered_rs = self.connection.get_all_volumes(
[self.id],
dry_run=dry_run
)
rs = [x for x in unfiltered_rs if x.id == self.id]
if len(rs) > 0:
self._update(rs[0])
elif validate:
raise ValueError('%s is not a valid Volume ID' % self.id)
return self.status
def delete(self, dry_run=False):
"""
Delete this EBS volume.
:rtype: bool
:return: True if successful
"""
return self.connection.delete_volume(self.id, dry_run=dry_run)
def attach(self, instance_id, device, dry_run=False):
"""
Attach this EBS volume to an EC2 instance.
:type instance_id: str
:param instance_id: The ID of the EC2 instance to which it will
be attached.
:type device: str
:param device: The device on the instance through which the
volume will be exposed (e.g. /dev/sdh)
:rtype: bool
:return: True if successful
"""
return self.connection.attach_volume(
self.id,
instance_id,
device,
dry_run=dry_run
)
def detach(self, force=False, dry_run=False):
"""
Detach this EBS volume from an EC2 instance.
:type force: bool
:param force: Forces detachment if the previous detachment
attempt did not occur cleanly. This option can lead to
data loss or a corrupted file system. Use this option only
as a last resort to detach a volume from a failed
instance. The instance will not have an opportunity to
flush file system caches nor file system meta data. If you
use this option, you must perform file system check and
repair procedures.
:rtype: bool
:return: True if successful
"""
instance_id = None
if self.attach_data:
instance_id = self.attach_data.instance_id
device = None
if self.attach_data:
device = self.attach_data.device
return self.connection.detach_volume(
self.id,
instance_id,
device,
force,
dry_run=dry_run
)
def create_snapshot(self, description=None, dry_run=False):
"""
Create a snapshot of this EBS Volume.
:type description: str
:param description: A description of the snapshot.
Limited to 256 characters.
:rtype: :class:`boto.ec2.snapshot.Snapshot`
:return: The created Snapshot object
"""
return self.connection.create_snapshot(
self.id,
description,
dry_run=dry_run
)
def volume_state(self):
"""
Returns the state of the volume. Same value as the status attribute.
"""
return self.status
def attachment_state(self):
"""
Get the attachment state.
"""
state = None
if self.attach_data:
state = self.attach_data.status
return state
def snapshots(self, owner=None, restorable_by=None, dry_run=False):
"""
Get all snapshots related to this volume. Note that this requires
that all available snapshots for the account be retrieved from EC2
first and then the list is filtered client-side to contain only
those for this volume.
:type owner: str
:param owner: If present, only the snapshots owned by the
specified user will be returned. Valid values are:
* self
* amazon
* AWS Account ID
:type restorable_by: str
:param restorable_by: If present, only the snapshots that
are restorable by the specified account id will be returned.
:rtype: list of L{boto.ec2.snapshot.Snapshot}
:return: The requested Snapshot objects
"""
rs = self.connection.get_all_snapshots(
owner=owner,
restorable_by=restorable_by,
dry_run=dry_run
)
mine = []
for snap in rs:
if snap.volume_id == self.id:
mine.append(snap)
return mine
class AttachmentSet(object):
"""
Represents an EBS attachmentset.
:ivar id: The unique ID of the volume.
:ivar instance_id: The unique ID of the attached instance
:ivar status: The status of the attachment
:ivar attach_time: Attached since
:ivar device: The device the instance has mapped
"""
def __init__(self):
self.id = None
self.instance_id = None
self.status = None
self.attach_time = None
self.device = None
def __repr__(self):
return 'AttachmentSet:%s' % self.id
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'volumeId':
self.id = value
elif name == 'instanceId':
self.instance_id = value
elif name == 'status':
self.status = value
elif name == 'attachTime':
self.attach_time = value
elif name == 'device':
self.device = value
else:
setattr(self, name, value)
class VolumeAttribute(object):
def __init__(self, parent=None):
self.id = None
self._key_name = None
self.attrs = {}
def startElement(self, name, attrs, connection):
if name == 'autoEnableIO':
self._key_name = name
return None
def endElement(self, name, value, connection):
if name == 'value':
if value.lower() == 'true':
self.attrs[self._key_name] = True
else:
self.attrs[self._key_name] = False
elif name == 'volumeId':
self.id = value
else:
setattr(self, name, value)
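# --- Usage sketch (not part of boto) ----------------------------------
# Creating, attaching, and snapshotting a Volume with the methods above;
# the region, zone, instance ID, and device are placeholder assumptions.
import time
import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')   # placeholder region
vol = conn.create_volume(10, 'us-east-1a')       # 10 GB, placeholder zone
while vol.update() != 'available':               # poll until usable
    time.sleep(5)
vol.attach('i-12345678', '/dev/sdh')             # placeholder instance
snap = vol.create_snapshot('example backup')     # returns a Snapshot
print(snap.id, vol.attachment_state())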
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/ec2/volume.py
| 0.767603 | 0.339362 |
volume.py
|
|
class Details(dict):
"""
A dict object that contains name/value pairs which provide
more detailed information about the status of the system
or the instance.
"""
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'name':
self._name = value
elif name == 'status':
self[self._name] = value
else:
setattr(self, name, value)
class Event(object):
"""
A status event for an instance.
:ivar code: A string indicating the event type.
:ivar description: A string describing the reason for the event.
:ivar not_before: A datestring describing the earliest time for
the event.
:ivar not_after: A datestring describing the latest time for
the event.
"""
def __init__(self, code=None, description=None,
not_before=None, not_after=None):
self.code = code
self.description = description
self.not_before = not_before
self.not_after = not_after
def __repr__(self):
return 'Event:%s' % self.code
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'code':
self.code = value
elif name == 'description':
self.description = value
elif name == 'notBefore':
self.not_before = value
elif name == 'notAfter':
self.not_after = value
else:
setattr(self, name, value)
class Status(object):
"""
A generic Status object used for system status and instance status.
:ivar status: A string indicating overall status.
:ivar details: A dict containing name-value pairs which provide
more details about the current status.
"""
def __init__(self, status=None, details=None):
self.status = status
if not details:
details = Details()
self.details = details
def __repr__(self):
return 'Status:%s' % self.status
def startElement(self, name, attrs, connection):
if name == 'details':
return self.details
return None
def endElement(self, name, value, connection):
if name == 'status':
self.status = value
else:
setattr(self, name, value)
class EventSet(list):
def startElement(self, name, attrs, connection):
if name == 'item':
event = Event()
self.append(event)
return event
else:
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
class InstanceStatus(object):
"""
    Represents an EC2 Instance status as reported by a
    DescribeInstanceStatus request.
:ivar id: The instance identifier.
:ivar zone: The availability zone of the instance.
:ivar events: A list of events relevant to the instance.
:ivar state_code: An integer representing the current state
of the instance.
:ivar state_name: A string describing the current state
of the instance.
:ivar system_status: A Status object that reports impaired
functionality that stems from issues related to the systems
    that support an instance, such as hardware failures
and network connectivity problems.
:ivar instance_status: A Status object that reports impaired
functionality that arises from problems internal to the instance.
"""
def __init__(self, id=None, zone=None, events=None,
state_code=None, state_name=None):
self.id = id
self.zone = zone
self.events = events
self.state_code = state_code
self.state_name = state_name
self.system_status = Status()
self.instance_status = Status()
def __repr__(self):
return 'InstanceStatus:%s' % self.id
def startElement(self, name, attrs, connection):
if name == 'eventsSet':
self.events = EventSet()
return self.events
elif name == 'systemStatus':
return self.system_status
elif name == 'instanceStatus':
return self.instance_status
else:
return None
def endElement(self, name, value, connection):
if name == 'instanceId':
self.id = value
elif name == 'availabilityZone':
self.zone = value
elif name == 'code':
self.state_code = int(value)
elif name == 'name':
self.state_name = value
else:
setattr(self, name, value)
class InstanceStatusSet(list):
"""
A list object that contains the results of a call to
    the DescribeInstanceStatus request. Each element of the
list will be an InstanceStatus object.
:ivar next_token: If the response was truncated by
the EC2 service, the next_token attribute of the
object will contain the string that needs to be
passed in to the next request to retrieve the next
set of results.
"""
def __init__(self, connection=None):
list.__init__(self)
self.connection = connection
self.next_token = None
def startElement(self, name, attrs, connection):
if name == 'item':
status = InstanceStatus()
self.append(status)
return status
else:
return None
def endElement(self, name, value, connection):
if name == 'nextToken':
self.next_token = value
setattr(self, name, value)
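# --- Usage sketch (not part of boto) ----------------------------------
# Paging through instance status results exactly as InstanceStatusSet's
# next_token handling above anticipates; the region is a placeholder.
import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')   # placeholder region
next_token = None
while True:
    statuses = conn.get_all_instance_status(next_token=next_token)
    for st in statuses:
        print(st.id, st.state_name, st.system_status, st.instance_status)
    next_token = statuses.next_token             # None when exhausted
    if not next_token:
        break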
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/ec2/instancestatus.py
| 0.845161 | 0.367611 |
instancestatus.py
|
|
from boto.ec2.tag import TagSet
class EC2Object(object):
def __init__(self, connection=None):
self.connection = connection
if self.connection and hasattr(self.connection, 'region'):
self.region = connection.region
else:
self.region = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
class TaggedEC2Object(EC2Object):
"""
Any EC2 resource that can be tagged should be represented
by a Python object that subclasses this class. This class
has the mechanism in place to handle the tagSet element in
the Describe* responses. If tags are found, it will create
a TagSet object and allow it to parse and collect the tags
into a dict that is stored in the "tags" attribute of the
object.
"""
def __init__(self, connection=None):
super(TaggedEC2Object, self).__init__(connection)
self.tags = TagSet()
def startElement(self, name, attrs, connection):
if name == 'tagSet':
return self.tags
else:
return None
def add_tag(self, key, value='', dry_run=False):
"""
Add a tag to this object. Tags are stored by AWS and can be used
to organize and filter resources. Adding a tag involves a round-trip
to the EC2 service.
:type key: str
:param key: The key or name of the tag being stored.
:type value: str
:param value: An optional value that can be stored with the tag.
If you want only the tag name and no value, the
value should be the empty string.
"""
self.add_tags({key: value}, dry_run)
def add_tags(self, tags, dry_run=False):
"""
Add tags to this object. Tags are stored by AWS and can be used
to organize and filter resources. Adding tags involves a round-trip
to the EC2 service.
:type tags: dict
:param tags: A dictionary of key-value pairs for the tags being stored.
If for some tags you want only the name and no value, the
corresponding value for that tag name should be an empty
string.
"""
status = self.connection.create_tags(
[self.id],
tags,
dry_run=dry_run
)
if self.tags is None:
self.tags = TagSet()
self.tags.update(tags)
def remove_tag(self, key, value=None, dry_run=False):
"""
Remove a tag from this object. Removing a tag involves a round-trip
to the EC2 service.
:type key: str
:param key: The key or name of the tag being stored.
:type value: str
:param value: An optional value that can be stored with the tag.
If a value is provided, it must match the value currently
stored in EC2. If not, the tag will not be removed. If
a value of None is provided, the tag will be
unconditionally deleted.
NOTE: There is an important distinction between a value
of '' and a value of None.
"""
self.remove_tags({key: value}, dry_run)
def remove_tags(self, tags, dry_run=False):
"""
Removes tags from this object. Removing tags involves a round-trip
to the EC2 service.
:type tags: dict
:param tags: A dictionary of key-value pairs for the tags being removed.
For each key, the provided value must match the value
currently stored in EC2. If not, that particular tag will
not be removed. However, if a value of None is provided,
the tag will be unconditionally deleted.
NOTE: There is an important distinction between a value of
'' and a value of None.
"""
status = self.connection.delete_tags(
[self.id],
tags,
dry_run=dry_run
)
for key, value in tags.items():
if key in self.tags:
if value is None or value == self.tags[key]:
del self.tags[key]
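# --- Usage sketch (not part of boto) ----------------------------------
# Tagging any TaggedEC2Object subclass (here an Instance) with the
# add_tags/remove_tag helpers above; the region and tag filter values
# are placeholder assumptions.
import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')   # placeholder region
reservations = conn.get_all_reservations(filters={'tag:Name': 'example'})
for inst in [i for r in reservations for i in r.instances]:
    inst.add_tags({'Owner': 'team-a', 'Env': 'dev'})
    inst.remove_tag('Env', value=None)           # unconditional delete
    print(inst.id, dict(inst.tags))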
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/ec2/ec2object.py
| 0.761982 | 0.375191 |
ec2object.py
|
|
from boto.ec2.elb.listelement import ListElement
from boto.resultset import ResultSet
from boto.ec2.autoscale.launchconfig import LaunchConfiguration
from boto.ec2.autoscale.request import Request
from boto.ec2.autoscale.instance import Instance
from boto.ec2.autoscale.tag import Tag
class ProcessType(object):
def __init__(self, connection=None):
self.connection = connection
self.process_name = None
def __repr__(self):
return 'ProcessType(%s)' % self.process_name
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'ProcessName':
self.process_name = value
class SuspendedProcess(object):
def __init__(self, connection=None):
self.connection = connection
self.process_name = None
self.reason = None
def __repr__(self):
return 'SuspendedProcess(%s, %s)' % (self.process_name, self.reason)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'ProcessName':
self.process_name = value
elif name == 'SuspensionReason':
self.reason = value
class EnabledMetric(object):
def __init__(self, connection=None, metric=None, granularity=None):
self.connection = connection
self.metric = metric
self.granularity = granularity
def __repr__(self):
return 'EnabledMetric(%s, %s)' % (self.metric, self.granularity)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Granularity':
self.granularity = value
elif name == 'Metric':
self.metric = value
class TerminationPolicies(list):
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'member':
self.append(value)
class AutoScalingGroup(object):
def __init__(self, connection=None, name=None,
launch_config=None, availability_zones=None,
load_balancers=None, default_cooldown=None,
health_check_type=None, health_check_period=None,
placement_group=None, vpc_zone_identifier=None,
desired_capacity=None, min_size=None, max_size=None,
tags=None, termination_policies=None, instance_id=None,
**kwargs):
"""
Creates a new AutoScalingGroup with the specified name.
You must not have already used up your entire quota of
AutoScalingGroups in order for this call to be successful. Once the
creation request is completed, the AutoScalingGroup is ready to be
used in other calls.
:type name: str
:param name: Name of autoscaling group (required).
:type availability_zones: list
:param availability_zones: List of availability zones (required).
:type default_cooldown: int
:param default_cooldown: Number of seconds after a Scaling Activity
completes before any further scaling activities can start.
:type desired_capacity: int
:param desired_capacity: The desired capacity for the group.
:type health_check_period: str
:param health_check_period: Length of time in seconds after a new
EC2 instance comes into service that Auto Scaling starts
checking its health.
:type health_check_type: str
:param health_check_type: The service you want the health status from,
Amazon EC2 or Elastic Load Balancer.
:type launch_config: str or LaunchConfiguration
:param launch_config: Name of launch configuration (required).
:type load_balancers: list
:param load_balancers: List of load balancers.
:type max_size: int
:param max_size: Maximum size of group (required).
:type min_size: int
:param min_size: Minimum size of group (required).
:type placement_group: str
:param placement_group: Physical location of your cluster placement
group created in Amazon EC2.
:type vpc_zone_identifier: str or list
:param vpc_zone_identifier: A comma-separated string or python list of
the subnet identifiers of the Virtual Private Cloud.
:type tags: list
:param tags: List of :class:`boto.ec2.autoscale.tag.Tag`s
:type termination_policies: list
:param termination_policies: A list of termination policies. Valid values
are: "OldestInstance", "NewestInstance", "OldestLaunchConfiguration",
"ClosestToNextInstanceHour", "Default". If no value is specified,
the "Default" value is used.
:type instance_id: str
:param instance_id: The ID of the Amazon EC2 instance you want to use
to create the Auto Scaling group.
:rtype: :class:`boto.ec2.autoscale.group.AutoScalingGroup`
:return: An autoscale group.
"""
self.name = name or kwargs.get('group_name') # backwards compat
self.connection = connection
self.min_size = int(min_size) if min_size is not None else None
self.max_size = int(max_size) if max_size is not None else None
self.created_time = None
# backwards compatibility
default_cooldown = default_cooldown or kwargs.get('cooldown')
if default_cooldown is not None:
default_cooldown = int(default_cooldown)
self.default_cooldown = default_cooldown
self.launch_config_name = launch_config
if launch_config and isinstance(launch_config, LaunchConfiguration):
self.launch_config_name = launch_config.name
self.desired_capacity = desired_capacity
lbs = load_balancers or []
self.load_balancers = ListElement(lbs)
zones = availability_zones or []
self.availability_zones = ListElement(zones)
self.health_check_period = health_check_period
self.health_check_type = health_check_type
self.placement_group = placement_group
self.autoscaling_group_arn = None
        if isinstance(vpc_zone_identifier, list):
vpc_zone_identifier = ','.join(vpc_zone_identifier)
self.vpc_zone_identifier = vpc_zone_identifier
self.instances = None
self.tags = tags or None
termination_policies = termination_policies or []
self.termination_policies = ListElement(termination_policies)
self.instance_id = instance_id
# backwards compatible access to 'cooldown' param
def _get_cooldown(self):
return self.default_cooldown
def _set_cooldown(self, val):
self.default_cooldown = val
cooldown = property(_get_cooldown, _set_cooldown)
def __repr__(self):
return 'AutoScaleGroup<%s>' % self.name
def startElement(self, name, attrs, connection):
if name == 'Instances':
self.instances = ResultSet([('member', Instance)])
return self.instances
elif name == 'LoadBalancerNames':
return self.load_balancers
elif name == 'AvailabilityZones':
return self.availability_zones
elif name == 'EnabledMetrics':
self.enabled_metrics = ResultSet([('member', EnabledMetric)])
return self.enabled_metrics
elif name == 'SuspendedProcesses':
self.suspended_processes = ResultSet([('member', SuspendedProcess)])
return self.suspended_processes
elif name == 'Tags':
self.tags = ResultSet([('member', Tag)])
return self.tags
elif name == 'TerminationPolicies':
return self.termination_policies
else:
return
def endElement(self, name, value, connection):
if name == 'MinSize':
self.min_size = int(value)
elif name == 'AutoScalingGroupARN':
self.autoscaling_group_arn = value
elif name == 'CreatedTime':
self.created_time = value
elif name == 'DefaultCooldown':
self.default_cooldown = int(value)
elif name == 'LaunchConfigurationName':
self.launch_config_name = value
elif name == 'DesiredCapacity':
self.desired_capacity = int(value)
elif name == 'MaxSize':
self.max_size = int(value)
elif name == 'AutoScalingGroupName':
self.name = value
elif name == 'PlacementGroup':
self.placement_group = value
elif name == 'HealthCheckGracePeriod':
try:
self.health_check_period = int(value)
except ValueError:
self.health_check_period = None
elif name == 'HealthCheckType':
self.health_check_type = value
elif name == 'VPCZoneIdentifier':
self.vpc_zone_identifier = value
elif name == 'InstanceId':
self.instance_id = value
else:
setattr(self, name, value)
def set_capacity(self, capacity):
"""
Set the desired capacity for the group.
"""
params = {'AutoScalingGroupName': self.name,
'DesiredCapacity': capacity}
req = self.connection.get_object('SetDesiredCapacity', params,
Request)
self.connection.last_request = req
return req
def update(self):
"""
Sync local changes with AutoScaling group.
"""
return self.connection._update_group('UpdateAutoScalingGroup', self)
def shutdown_instances(self):
"""
Convenience method which shuts down all instances associated with
this group.
"""
self.min_size = 0
self.max_size = 0
self.desired_capacity = 0
self.update()
def delete(self, force_delete=False):
"""
Delete this auto-scaling group if no instances attached or no
scaling activities in progress.
"""
return self.connection.delete_auto_scaling_group(self.name,
force_delete)
def get_activities(self, activity_ids=None, max_records=50):
"""
        Get all activities for this group.
"""
return self.connection.get_all_activities(self, activity_ids,
max_records)
def put_notification_configuration(self, topic, notification_types):
"""
Configures an Auto Scaling group to send notifications when
specified events take place. Valid notification types are:
'autoscaling:EC2_INSTANCE_LAUNCH',
'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
'autoscaling:EC2_INSTANCE_TERMINATE',
'autoscaling:EC2_INSTANCE_TERMINATE_ERROR',
'autoscaling:TEST_NOTIFICATION'
"""
return self.connection.put_notification_configuration(self,
topic,
notification_types)
def delete_notification_configuration(self, topic):
"""
Deletes notifications created by put_notification_configuration.
"""
return self.connection.delete_notification_configuration(self, topic)
def suspend_processes(self, scaling_processes=None):
"""
Suspends Auto Scaling processes for an Auto Scaling group.
"""
return self.connection.suspend_processes(self.name, scaling_processes)
def resume_processes(self, scaling_processes=None):
"""
Resumes Auto Scaling processes for an Auto Scaling group.
"""
return self.connection.resume_processes(self.name, scaling_processes)
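# Usage sketch (illustrative, not part of the vendored source; kept as
# comments so the module still imports cleanly). The group name, zones and
# launch configuration name below are hypothetical placeholders.
#
#     import boto.ec2.autoscale
#     from boto.ec2.autoscale import AutoScalingGroup
#
#     conn = boto.ec2.autoscale.connect_to_region('us-east-1')
#     group = AutoScalingGroup(name='web-asg',
#                              launch_config='web-lc',
#                              availability_zones=['us-east-1a', 'us-east-1b'],
#                              min_size=1, max_size=4, desired_capacity=2,
#                              connection=conn)
#     conn.create_auto_scaling_group(group)
#     group.set_capacity(3)  # issues SetDesiredCapacity for this group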
class AutoScalingGroupMetric(object):
def __init__(self, connection=None):
self.connection = connection
self.metric = None
self.granularity = None
def __repr__(self):
return 'AutoScalingGroupMetric:%s' % self.metric
def startElement(self, name, attrs, connection):
return
def endElement(self, name, value, connection):
if name == 'Metric':
self.metric = value
elif name == 'Granularity':
self.granularity = value
else:
setattr(self, name, value)
# ==== end of source file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/ec2/autoscale/group.py ====
class Tag(object):
"""
A name/value tag on an AutoScalingGroup resource.
:ivar key: The key of the tag.
:ivar value: The value of the tag.
:ivar propagate_at_launch: Boolean value which specifies whether the
new tag will be applied to instances launched after the tag is created.
:ivar resource_id: The name of the autoscaling group.
:ivar resource_type: The only supported resource type at this time
is "auto-scaling-group".
"""
def __init__(self, connection=None, key=None, value=None,
propagate_at_launch=False, resource_id=None,
resource_type='auto-scaling-group'):
self.connection = connection
self.key = key
self.value = value
self.propagate_at_launch = propagate_at_launch
self.resource_id = resource_id
self.resource_type = resource_type
def __repr__(self):
return 'Tag(%s=%s)' % (self.key, self.value)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Key':
self.key = value
elif name == 'Value':
self.value = value
elif name == 'PropagateAtLaunch':
if value.lower() == 'true':
self.propagate_at_launch = True
else:
self.propagate_at_launch = False
elif name == 'ResourceId':
self.resource_id = value
elif name == 'ResourceType':
self.resource_type = value
def build_params(self, params, i):
"""
Populates a dictionary with the name/value pairs necessary
to identify this Tag in a request.
"""
prefix = 'Tags.member.%d.' % i
params[prefix + 'ResourceId'] = self.resource_id
params[prefix + 'ResourceType'] = self.resource_type
params[prefix + 'Key'] = self.key
params[prefix + 'Value'] = self.value
if self.propagate_at_launch:
params[prefix + 'PropagateAtLaunch'] = 'true'
else:
params[prefix + 'PropagateAtLaunch'] = 'false'
def delete(self):
return self.connection.delete_tags([self])
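# Usage sketch (illustrative, not part of the vendored source): how
# build_params flattens a Tag into the wire-format dictionary; 'web-asg'
# is a hypothetical group name.
#
#     from boto.ec2.autoscale.tag import Tag
#
#     tag = Tag(key='env', value='prod', propagate_at_launch=True,
#               resource_id='web-asg')
#     params = {}
#     tag.build_params(params, 1)
#     # params == {'Tags.member.1.ResourceId': 'web-asg',
#     #            'Tags.member.1.ResourceType': 'auto-scaling-group',
#     #            'Tags.member.1.Key': 'env',
#     #            'Tags.member.1.Value': 'prod',
#     #            'Tags.member.1.PropagateAtLaunch': 'true'}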
# ==== end of source file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/ec2/autoscale/tag.py ====
from boto.resultset import ResultSet
from boto.ec2.elb.listelement import ListElement
class Alarm(object):
def __init__(self, connection=None):
self.connection = connection
self.name = None
self.alarm_arn = None
def __repr__(self):
return 'Alarm:%s' % self.name
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'AlarmName':
self.name = value
elif name == 'AlarmARN':
self.alarm_arn = value
else:
setattr(self, name, value)
class AdjustmentType(object):
def __init__(self, connection=None):
self.connection = connection
self.adjustment_type = None
def __repr__(self):
return 'AdjustmentType:%s' % self.adjustment_type
def startElement(self, name, attrs, connection):
return
def endElement(self, name, value, connection):
if name == 'AdjustmentType':
self.adjustment_type = value
return
class MetricCollectionTypes(object):
class BaseType(object):
arg = ''
def __init__(self, connection):
self.connection = connection
self.val = None
def __repr__(self):
return '%s:%s' % (self.arg, self.val)
def startElement(self, name, attrs, connection):
return
def endElement(self, name, value, connection):
if name == self.arg:
self.val = value
class Metric(BaseType):
arg = 'Metric'
class Granularity(BaseType):
arg = 'Granularity'
def __init__(self, connection=None):
self.connection = connection
self.metrics = []
self.granularities = []
def __repr__(self):
return 'MetricCollectionTypes:<%s, %s>' % (self.metrics, self.granularities)
def startElement(self, name, attrs, connection):
if name == 'Granularities':
self.granularities = ResultSet([('member', self.Granularity)])
return self.granularities
elif name == 'Metrics':
self.metrics = ResultSet([('member', self.Metric)])
return self.metrics
def endElement(self, name, value, connection):
return
class ScalingPolicy(object):
def __init__(self, connection=None, **kwargs):
"""
Scaling Policy
:type name: str
:param name: Name of scaling policy.
:type adjustment_type: str
:param adjustment_type: Specifies the type of adjustment. Valid values are `ChangeInCapacity`, `ExactCapacity` and `PercentChangeInCapacity`.
:type as_name: str or int
:param as_name: Name or ARN of the Auto Scaling Group.
:type scaling_adjustment: int
:param scaling_adjustment: Value of adjustment (type specified in `adjustment_type`).
        :type min_adjustment_step: int
        :param min_adjustment_step: Minimum adjustment step required to
            apply the scaling policy (only meaningful when
            `PercentChangeInCapacity` is used as the adjustment_type).
:type cooldown: int
:param cooldown: Time (in seconds) before Alarm related Scaling Activities can start after the previous Scaling Activity ends.
"""
self.name = kwargs.get('name', None)
self.adjustment_type = kwargs.get('adjustment_type', None)
self.as_name = kwargs.get('as_name', None)
self.scaling_adjustment = kwargs.get('scaling_adjustment', None)
self.cooldown = kwargs.get('cooldown', None)
self.connection = connection
self.min_adjustment_step = kwargs.get('min_adjustment_step', None)
def __repr__(self):
return 'ScalingPolicy(%s group:%s adjustment:%s)' % (self.name,
self.as_name,
self.adjustment_type)
def startElement(self, name, attrs, connection):
if name == 'Alarms':
self.alarms = ResultSet([('member', Alarm)])
return self.alarms
def endElement(self, name, value, connection):
if name == 'PolicyName':
self.name = value
elif name == 'AutoScalingGroupName':
self.as_name = value
elif name == 'PolicyARN':
self.policy_arn = value
elif name == 'ScalingAdjustment':
self.scaling_adjustment = int(value)
elif name == 'Cooldown':
self.cooldown = int(value)
elif name == 'AdjustmentType':
self.adjustment_type = value
elif name == 'MinAdjustmentStep':
self.min_adjustment_step = int(value)
def delete(self):
return self.connection.delete_policy(self.name, self.as_name)
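# Usage sketch (illustrative, not part of the vendored source): a simple
# scale-up policy for a hypothetical group named 'web-asg'; conn is assumed
# to be an AutoScaleConnection.
#
#     from boto.ec2.autoscale import ScalingPolicy
#
#     scale_up = ScalingPolicy(name='scale-up',
#                              adjustment_type='ChangeInCapacity',
#                              as_name='web-asg',
#                              scaling_adjustment=1,
#                              cooldown=180)
#     conn.create_scaling_policy(scale_up)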
class TerminationPolicies(list):
def __init__(self, connection=None, **kwargs):
pass
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'member':
self.append(value)
# ==== end of source file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/ec2/autoscale/policy.py ====
import base64
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo, get_regions, load_regions
from boto.regioninfo import connect
from boto.ec2.autoscale.request import Request
from boto.ec2.autoscale.launchconfig import LaunchConfiguration
from boto.ec2.autoscale.group import AutoScalingGroup
from boto.ec2.autoscale.group import ProcessType
from boto.ec2.autoscale.activity import Activity
from boto.ec2.autoscale.policy import AdjustmentType
from boto.ec2.autoscale.policy import MetricCollectionTypes
from boto.ec2.autoscale.policy import ScalingPolicy
from boto.ec2.autoscale.policy import TerminationPolicies
from boto.ec2.autoscale.instance import Instance
from boto.ec2.autoscale.scheduled import ScheduledUpdateGroupAction
from boto.ec2.autoscale.tag import Tag
from boto.ec2.autoscale.limits import AccountLimits
from boto.compat import six
RegionData = load_regions().get('autoscaling', {})
def regions():
"""
Get all available regions for the Auto Scaling service.
:rtype: list
:return: A list of :class:`boto.RegionInfo` instances
"""
return get_regions('autoscaling', connection_cls=AutoScaleConnection)
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.ec2.autoscale.AutoScaleConnection`.
:param str region_name: The name of the region to connect to.
:rtype: :class:`boto.ec2.AutoScaleConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
return connect('autoscaling', region_name,
connection_cls=AutoScaleConnection, **kw_params)
class AutoScaleConnection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'autoscale_version', '2011-01-01')
DefaultRegionEndpoint = boto.config.get('Boto', 'autoscale_endpoint',
'autoscaling.us-east-1.amazonaws.com')
DefaultRegionName = boto.config.get('Boto', 'autoscale_region_name',
'us-east-1')
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
security_token=None, validate_certs=True, profile_name=None,
use_block_device_types=False):
"""
Init method to create a new connection to the AutoScaling service.
B{Note:} The host argument is overridden by the host specified in the
boto configuration file.
"""
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint,
AutoScaleConnection)
self.region = region
self.use_block_device_types = use_block_device_types
super(AutoScaleConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path=path,
security_token=security_token,
validate_certs=validate_certs,
profile_name=profile_name)
def _required_auth_capability(self):
return ['hmac-v4']
def build_list_params(self, params, items, label):
"""
Items is a list of dictionaries or strings::
[
{
'Protocol' : 'HTTP',
'LoadBalancerPort' : '80',
'InstancePort' : '80'
},
..
] etc.
or::
['us-east-1b',...]
"""
# different from EC2 list params
for i in range(1, len(items) + 1):
if isinstance(items[i - 1], dict):
for k, v in six.iteritems(items[i - 1]):
if isinstance(v, dict):
for kk, vv in six.iteritems(v):
params['%s.member.%d.%s.%s' % (label, i, k, kk)] = vv
else:
params['%s.member.%d.%s' % (label, i, k)] = v
elif isinstance(items[i - 1], six.string_types):
params['%s.member.%d' % (label, i)] = items[i - 1]
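# Usage sketch (illustrative, not part of the vendored source): what
# build_list_params produces for the two supported item shapes.
#
#     params = {}
#     conn.build_list_params(params, ['us-east-1a', 'us-east-1b'],
#                            'AvailabilityZones')
#     # params == {'AvailabilityZones.member.1': 'us-east-1a',
#     #            'AvailabilityZones.member.2': 'us-east-1b'}
#
#     params = {}
#     conn.build_list_params(params, [{'Protocol': 'HTTP'}], 'Listeners')
#     # params == {'Listeners.member.1.Protocol': 'HTTP'}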
def _update_group(self, op, as_group):
params = {'AutoScalingGroupName': as_group.name,
'LaunchConfigurationName': as_group.launch_config_name,
'MinSize': as_group.min_size,
'MaxSize': as_group.max_size}
# get availability zone information (required param)
zones = as_group.availability_zones
self.build_list_params(params, zones, 'AvailabilityZones')
if as_group.desired_capacity is not None:
params['DesiredCapacity'] = as_group.desired_capacity
if as_group.vpc_zone_identifier:
params['VPCZoneIdentifier'] = as_group.vpc_zone_identifier
if as_group.health_check_period:
params['HealthCheckGracePeriod'] = as_group.health_check_period
if as_group.health_check_type:
params['HealthCheckType'] = as_group.health_check_type
if as_group.default_cooldown:
params['DefaultCooldown'] = as_group.default_cooldown
if as_group.placement_group:
params['PlacementGroup'] = as_group.placement_group
if as_group.instance_id:
params['InstanceId'] = as_group.instance_id
if as_group.termination_policies:
self.build_list_params(params, as_group.termination_policies,
'TerminationPolicies')
if op.startswith('Create'):
# you can only associate load balancers with an autoscale
# group at creation time
if as_group.load_balancers:
self.build_list_params(params, as_group.load_balancers,
'LoadBalancerNames')
if as_group.tags:
for i, tag in enumerate(as_group.tags):
tag.build_params(params, i + 1)
return self.get_object(op, params, Request)
def attach_instances(self, name, instance_ids):
"""
Attach instances to an autoscaling group.
"""
params = {
'AutoScalingGroupName': name,
}
self.build_list_params(params, instance_ids, 'InstanceIds')
return self.get_status('AttachInstances', params)
def detach_instances(self, name, instance_ids, decrement_capacity=True):
"""
Detach instances from an Auto Scaling group.
:type name: str
:param name: The name of the Auto Scaling group from which to detach instances.
:type instance_ids: list
:param instance_ids: Instance ids to be detached from the Auto Scaling group.
:type decrement_capacity: bool
:param decrement_capacity: Whether to decrement the size of the
Auto Scaling group or not.
"""
params = {'AutoScalingGroupName': name}
params['ShouldDecrementDesiredCapacity'] = 'true' if decrement_capacity else 'false'
self.build_list_params(params, instance_ids, 'InstanceIds')
return self.get_status('DetachInstances', params)
def create_auto_scaling_group(self, as_group):
"""
Create auto scaling group.
"""
return self._update_group('CreateAutoScalingGroup', as_group)
def delete_auto_scaling_group(self, name, force_delete=False):
"""
Deletes the specified auto scaling group if the group has no instances
and no scaling activities in progress.
"""
        params = {'AutoScalingGroupName': name}
        if force_delete:
            params['ForceDelete'] = 'true'
return self.get_object('DeleteAutoScalingGroup', params, Request)
def create_launch_configuration(self, launch_config):
"""
Creates a new Launch Configuration.
:type launch_config: :class:`boto.ec2.autoscale.launchconfig.LaunchConfiguration`
:param launch_config: LaunchConfiguration object.
"""
params = {'ImageId': launch_config.image_id,
'LaunchConfigurationName': launch_config.name,
'InstanceType': launch_config.instance_type}
if launch_config.key_name:
params['KeyName'] = launch_config.key_name
if launch_config.user_data:
user_data = launch_config.user_data
if isinstance(user_data, six.text_type):
user_data = user_data.encode('utf-8')
params['UserData'] = base64.b64encode(user_data).decode('utf-8')
if launch_config.kernel_id:
params['KernelId'] = launch_config.kernel_id
if launch_config.ramdisk_id:
params['RamdiskId'] = launch_config.ramdisk_id
if launch_config.block_device_mappings:
            for bdm in launch_config.block_device_mappings:
                bdm.autoscale_build_list_params(params)
if launch_config.security_groups:
self.build_list_params(params, launch_config.security_groups,
'SecurityGroups')
if launch_config.instance_monitoring:
params['InstanceMonitoring.Enabled'] = 'true'
else:
params['InstanceMonitoring.Enabled'] = 'false'
if launch_config.spot_price is not None:
params['SpotPrice'] = str(launch_config.spot_price)
if launch_config.instance_profile_name is not None:
params['IamInstanceProfile'] = launch_config.instance_profile_name
if launch_config.ebs_optimized:
params['EbsOptimized'] = 'true'
else:
params['EbsOptimized'] = 'false'
if launch_config.associate_public_ip_address is True:
params['AssociatePublicIpAddress'] = 'true'
elif launch_config.associate_public_ip_address is False:
params['AssociatePublicIpAddress'] = 'false'
if launch_config.volume_type:
params['VolumeType'] = launch_config.volume_type
if launch_config.delete_on_termination:
params['DeleteOnTermination'] = 'true'
else:
params['DeleteOnTermination'] = 'false'
if launch_config.iops:
params['Iops'] = launch_config.iops
if launch_config.classic_link_vpc_id:
params['ClassicLinkVPCId'] = launch_config.classic_link_vpc_id
if launch_config.classic_link_vpc_security_groups:
self.build_list_params(
params,
launch_config.classic_link_vpc_security_groups,
'ClassicLinkVPCSecurityGroups'
)
return self.get_object('CreateLaunchConfiguration', params,
Request, verb='POST')
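# Usage sketch (illustrative, not part of the vendored source): a minimal
# launch configuration. The AMI id, key pair and security group names are
# hypothetical placeholders.
#
#     from boto.ec2.autoscale import LaunchConfiguration
#
#     lc = LaunchConfiguration(name='web-lc',
#                              image_id='ami-12345678',
#                              key_name='my-keypair',
#                              instance_type='m1.small',
#                              security_groups=['web-sg'])
#     conn.create_launch_configuration(lc)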
def get_account_limits(self):
"""
Returns the limits for the Auto Scaling resources currently granted for
your AWS account.
"""
params = {}
return self.get_object('DescribeAccountLimits', params, AccountLimits)
def create_scaling_policy(self, scaling_policy):
"""
Creates a new Scaling Policy.
:type scaling_policy: :class:`boto.ec2.autoscale.policy.ScalingPolicy`
:param scaling_policy: ScalingPolicy object.
"""
params = {'AdjustmentType': scaling_policy.adjustment_type,
'AutoScalingGroupName': scaling_policy.as_name,
'PolicyName': scaling_policy.name,
'ScalingAdjustment': scaling_policy.scaling_adjustment}
if scaling_policy.adjustment_type == "PercentChangeInCapacity" and \
scaling_policy.min_adjustment_step is not None:
params['MinAdjustmentStep'] = scaling_policy.min_adjustment_step
if scaling_policy.cooldown is not None:
params['Cooldown'] = scaling_policy.cooldown
return self.get_object('PutScalingPolicy', params, Request)
def delete_launch_configuration(self, launch_config_name):
"""
Deletes the specified LaunchConfiguration.
The specified launch configuration must not be attached to an Auto
Scaling group. Once this call completes, the launch configuration is no
longer available for use.
"""
params = {'LaunchConfigurationName': launch_config_name}
return self.get_object('DeleteLaunchConfiguration', params, Request)
def get_all_groups(self, names=None, max_records=None, next_token=None):
"""
Returns a full description of each Auto Scaling group in the given
list. This includes all Amazon EC2 instances that are members of the
group. If a list of names is not provided, the service returns the full
details of all Auto Scaling groups.
This action supports pagination by returning a token if there are more
pages to retrieve. To get the next page, call this action again with
the returned token as the NextToken parameter.
:type names: list
:param names: List of group names which should be searched for.
:type max_records: int
:param max_records: Maximum amount of groups to return.
:rtype: list
:returns: List of :class:`boto.ec2.autoscale.group.AutoScalingGroup`
instances.
"""
params = {}
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
if names:
self.build_list_params(params, names, 'AutoScalingGroupNames')
return self.get_list('DescribeAutoScalingGroups', params,
[('member', AutoScalingGroup)])
def get_all_launch_configurations(self, **kwargs):
"""
Returns a full description of the launch configurations given the
specified names.
If no names are specified, then the full details of all launch
configurations are returned.
:type names: list
:param names: List of configuration names which should be searched for.
:type max_records: int
:param max_records: Maximum amount of configurations to return.
:type next_token: str
:param next_token: If you have more results than can be returned
at once, pass in this parameter to page through all results.
:rtype: list
:returns: List of
:class:`boto.ec2.autoscale.launchconfig.LaunchConfiguration`
instances.
"""
params = {}
max_records = kwargs.get('max_records', None)
names = kwargs.get('names', None)
if max_records is not None:
params['MaxRecords'] = max_records
if names:
self.build_list_params(params, names, 'LaunchConfigurationNames')
next_token = kwargs.get('next_token')
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribeLaunchConfigurations', params,
[('member', LaunchConfiguration)])
def get_all_activities(self, autoscale_group, activity_ids=None,
max_records=None, next_token=None):
"""
Get all activities for the given autoscaling group.
This action supports pagination by returning a token if there are more
pages to retrieve. To get the next page, call this action again with
the returned token as the NextToken parameter
:type autoscale_group: str or
:class:`boto.ec2.autoscale.group.AutoScalingGroup` object
:param autoscale_group: The auto scaling group to get activities on.
:type max_records: int
:param max_records: Maximum amount of activities to return.
:rtype: list
:returns: List of
:class:`boto.ec2.autoscale.activity.Activity` instances.
"""
name = autoscale_group
if isinstance(autoscale_group, AutoScalingGroup):
name = autoscale_group.name
params = {'AutoScalingGroupName': name}
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
if activity_ids:
self.build_list_params(params, activity_ids, 'ActivityIds')
return self.get_list('DescribeScalingActivities',
params, [('member', Activity)])
def get_termination_policies(self):
"""Gets all valid termination policies.
These values can then be used as the termination_policies arg
when creating and updating autoscale groups.
"""
return self.get_object('DescribeTerminationPolicyTypes',
{}, TerminationPolicies)
def delete_scheduled_action(self, scheduled_action_name,
autoscale_group=None):
"""
Deletes a previously scheduled action.
:type scheduled_action_name: str
:param scheduled_action_name: The name of the action you want
to delete.
:type autoscale_group: str
:param autoscale_group: The name of the autoscale group.
"""
params = {'ScheduledActionName': scheduled_action_name}
if autoscale_group:
params['AutoScalingGroupName'] = autoscale_group
return self.get_status('DeleteScheduledAction', params)
def terminate_instance(self, instance_id, decrement_capacity=True):
"""
Terminates the specified instance. The desired group size can
also be adjusted, if desired.
:type instance_id: str
:param instance_id: The ID of the instance to be terminated.
        :type decrement_capacity: bool
:param decrement_capacity: Whether to decrement the size of the
autoscaling group or not.
"""
params = {'InstanceId': instance_id}
if decrement_capacity:
params['ShouldDecrementDesiredCapacity'] = 'true'
else:
params['ShouldDecrementDesiredCapacity'] = 'false'
return self.get_object('TerminateInstanceInAutoScalingGroup', params,
Activity)
def delete_policy(self, policy_name, autoscale_group=None):
"""
Delete a policy.
:type policy_name: str
:param policy_name: The name or ARN of the policy to delete.
:type autoscale_group: str
:param autoscale_group: The name of the autoscale group.
"""
params = {'PolicyName': policy_name}
if autoscale_group:
params['AutoScalingGroupName'] = autoscale_group
return self.get_status('DeletePolicy', params)
def get_all_adjustment_types(self):
return self.get_list('DescribeAdjustmentTypes', {},
[('member', AdjustmentType)])
def get_all_autoscaling_instances(self, instance_ids=None,
max_records=None, next_token=None):
"""
Returns a description of each Auto Scaling instance in the instance_ids
list. If a list is not provided, the service returns the full details
of all instances up to a maximum of fifty.
This action supports pagination by returning a token if there are more
pages to retrieve. To get the next page, call this action again with
the returned token as the NextToken parameter.
:type instance_ids: list
:param instance_ids: List of Autoscaling Instance IDs which should be
searched for.
:type max_records: int
:param max_records: Maximum number of results to return.
:rtype: list
:returns: List of
:class:`boto.ec2.autoscale.instance.Instance` objects.
"""
params = {}
if instance_ids:
self.build_list_params(params, instance_ids, 'InstanceIds')
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribeAutoScalingInstances',
params, [('member', Instance)])
def get_all_metric_collection_types(self):
"""
Returns a list of metrics and a corresponding list of granularities
for each metric.
"""
return self.get_object('DescribeMetricCollectionTypes',
{}, MetricCollectionTypes)
def get_all_policies(self, as_group=None, policy_names=None,
max_records=None, next_token=None):
"""
Returns descriptions of what each policy does. This action supports
pagination. If the response includes a token, there are more records
available. To get the additional records, repeat the request with the
response token as the NextToken parameter.
If no group name or list of policy names are provided, all
available policies are returned.
:type as_group: str
:param as_group: The name of the
:class:`boto.ec2.autoscale.group.AutoScalingGroup` to filter for.
:type policy_names: list
:param policy_names: List of policy names which should be searched for.
:type max_records: int
:param max_records: Maximum amount of groups to return.
:type next_token: str
:param next_token: If you have more results than can be returned
at once, pass in this parameter to page through all results.
"""
params = {}
if as_group:
params['AutoScalingGroupName'] = as_group
if policy_names:
self.build_list_params(params, policy_names, 'PolicyNames')
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribePolicies', params,
[('member', ScalingPolicy)])
def get_all_scaling_process_types(self):
"""
Returns scaling process types for use in the ResumeProcesses and
SuspendProcesses actions.
"""
return self.get_list('DescribeScalingProcessTypes', {},
[('member', ProcessType)])
def suspend_processes(self, as_group, scaling_processes=None):
"""
Suspends Auto Scaling processes for an Auto Scaling group.
:type as_group: string
:param as_group: The auto scaling group to suspend processes on.
:type scaling_processes: list
:param scaling_processes: Processes you want to suspend. If omitted,
all processes will be suspended.
"""
params = {'AutoScalingGroupName': as_group}
if scaling_processes:
self.build_list_params(params, scaling_processes,
'ScalingProcesses')
return self.get_status('SuspendProcesses', params)
def resume_processes(self, as_group, scaling_processes=None):
"""
Resumes Auto Scaling processes for an Auto Scaling group.
:type as_group: string
:param as_group: The auto scaling group to resume processes on.
:type scaling_processes: list
:param scaling_processes: Processes you want to resume. If omitted, all
processes will be resumed.
"""
params = {'AutoScalingGroupName': as_group}
if scaling_processes:
self.build_list_params(params, scaling_processes,
'ScalingProcesses')
return self.get_status('ResumeProcesses', params)
def create_scheduled_group_action(self, as_group, name, time=None,
desired_capacity=None,
min_size=None, max_size=None,
start_time=None, end_time=None,
recurrence=None):
"""
Creates a scheduled scaling action for a Auto Scaling group. If you
leave a parameter unspecified, the corresponding value remains
unchanged in the affected Auto Scaling group.
:type as_group: string
:param as_group: The auto scaling group to get activities on.
:type name: string
:param name: Scheduled action name.
:type time: datetime.datetime
        :param time: The time for this action to start. (Deprecated)
:type desired_capacity: int
:param desired_capacity: The number of EC2 instances that should
be running in this group.
:type min_size: int
:param min_size: The minimum size for the new auto scaling group.
:type max_size: int
        :param max_size: The maximum size for the new auto scaling group.
:type start_time: datetime.datetime
:param start_time: The time for this action to start. When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action will start and stop.
:type end_time: datetime.datetime
:param end_time: The time for this action to end. When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action will start and stop.
:type recurrence: string
:param recurrence: The time when recurring future actions will start. Start time is specified by the user following the Unix cron syntax format. EXAMPLE: '0 10 * * *'
"""
params = {'AutoScalingGroupName': as_group,
'ScheduledActionName': name}
if start_time is not None:
params['StartTime'] = start_time.isoformat()
if end_time is not None:
params['EndTime'] = end_time.isoformat()
if recurrence is not None:
params['Recurrence'] = recurrence
if time:
params['Time'] = time.isoformat()
if desired_capacity is not None:
params['DesiredCapacity'] = desired_capacity
if min_size is not None:
params['MinSize'] = min_size
if max_size is not None:
params['MaxSize'] = max_size
return self.get_status('PutScheduledUpdateGroupAction', params)
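# Usage sketch (illustrative, not part of the vendored source): a recurring
# scheduled action using the Unix cron syntax described above; the group and
# action names are hypothetical.
#
#     conn.create_scheduled_group_action('web-asg', 'scale-up-mornings',
#                                        desired_capacity=4,
#                                        min_size=2, max_size=6,
#                                        recurrence='0 10 * * *')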
def get_all_scheduled_actions(self, as_group=None, start_time=None,
end_time=None, scheduled_actions=None,
max_records=None, next_token=None):
params = {}
if as_group:
params['AutoScalingGroupName'] = as_group
if scheduled_actions:
self.build_list_params(params, scheduled_actions,
'ScheduledActionNames')
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribeScheduledActions', params,
[('member', ScheduledUpdateGroupAction)])
def disable_metrics_collection(self, as_group, metrics=None):
"""
Disables monitoring of group metrics for the Auto Scaling group
specified in AutoScalingGroupName. You can specify the list of affected
metrics with the Metrics parameter.
"""
params = {'AutoScalingGroupName': as_group}
if metrics:
self.build_list_params(params, metrics, 'Metrics')
return self.get_status('DisableMetricsCollection', params)
def enable_metrics_collection(self, as_group, granularity, metrics=None):
"""
Enables monitoring of group metrics for the Auto Scaling group
specified in AutoScalingGroupName. You can specify the list of enabled
metrics with the Metrics parameter.
Auto scaling metrics collection can be turned on only if the
InstanceMonitoring.Enabled flag, in the Auto Scaling group's launch
configuration, is set to true.
:type autoscale_group: string
:param autoscale_group: The auto scaling group to get activities on.
:type granularity: string
:param granularity: The granularity to associate with the metrics to
collect. Currently, the only legal granularity is "1Minute".
:type metrics: string list
:param metrics: The list of metrics to collect. If no metrics are
specified, all metrics are enabled.
"""
params = {'AutoScalingGroupName': as_group,
'Granularity': granularity}
if metrics:
self.build_list_params(params, metrics, 'Metrics')
return self.get_status('EnableMetricsCollection', params)
def execute_policy(self, policy_name, as_group=None, honor_cooldown=None):
params = {'PolicyName': policy_name}
if as_group:
params['AutoScalingGroupName'] = as_group
if honor_cooldown:
params['HonorCooldown'] = honor_cooldown
return self.get_status('ExecutePolicy', params)
def put_notification_configuration(self, autoscale_group, topic, notification_types):
"""
Configures an Auto Scaling group to send notifications when
specified events take place.
:type autoscale_group: str or
:class:`boto.ec2.autoscale.group.AutoScalingGroup` object
:param autoscale_group: The Auto Scaling group to put notification
configuration on.
:type topic: str
:param topic: The Amazon Resource Name (ARN) of the Amazon Simple
Notification Service (SNS) topic.
:type notification_types: list
:param notification_types: The type of events that will trigger
the notification. Valid types are:
'autoscaling:EC2_INSTANCE_LAUNCH',
'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
'autoscaling:EC2_INSTANCE_TERMINATE',
'autoscaling:EC2_INSTANCE_TERMINATE_ERROR',
'autoscaling:TEST_NOTIFICATION'
"""
name = autoscale_group
if isinstance(autoscale_group, AutoScalingGroup):
name = autoscale_group.name
params = {'AutoScalingGroupName': name,
'TopicARN': topic}
self.build_list_params(params, notification_types, 'NotificationTypes')
return self.get_status('PutNotificationConfiguration', params)
def delete_notification_configuration(self, autoscale_group, topic):
"""
Deletes notifications created by put_notification_configuration.
:type autoscale_group: str or
:class:`boto.ec2.autoscale.group.AutoScalingGroup` object
:param autoscale_group: The Auto Scaling group to put notification
configuration on.
:type topic: str
:param topic: The Amazon Resource Name (ARN) of the Amazon Simple
Notification Service (SNS) topic.
"""
name = autoscale_group
if isinstance(autoscale_group, AutoScalingGroup):
name = autoscale_group.name
params = {'AutoScalingGroupName': name,
'TopicARN': topic}
return self.get_status('DeleteNotificationConfiguration', params)
def set_instance_health(self, instance_id, health_status,
should_respect_grace_period=True):
"""
Explicitly set the health status of an instance.
:type instance_id: str
:param instance_id: The identifier of the EC2 instance.
:type health_status: str
:param health_status: The health status of the instance.
"Healthy" means that the instance is healthy and should remain
in service. "Unhealthy" means that the instance is unhealthy.
Auto Scaling should terminate and replace it.
:type should_respect_grace_period: bool
:param should_respect_grace_period: If True, this call should
respect the grace period associated with the group.
"""
params = {'InstanceId': instance_id,
'HealthStatus': health_status}
if should_respect_grace_period:
params['ShouldRespectGracePeriod'] = 'true'
else:
params['ShouldRespectGracePeriod'] = 'false'
return self.get_status('SetInstanceHealth', params)
def set_desired_capacity(self, group_name, desired_capacity, honor_cooldown=False):
"""
Adjusts the desired size of the AutoScalingGroup by initiating scaling
activities. When reducing the size of the group, it is not possible to define
which Amazon EC2 instances will be terminated. This applies to any Auto Scaling
decisions that might result in terminating instances.
:type group_name: string
:param group_name: name of the auto scaling group
:type desired_capacity: integer
:param desired_capacity: new capacity setting for auto scaling group
:type honor_cooldown: boolean
        :param honor_cooldown: If False (the default), the call overrides
            any cooldown period associated with the Auto Scaling group;
            if True, the cooldown period is honored.
"""
params = {'AutoScalingGroupName': group_name,
'DesiredCapacity': desired_capacity}
if honor_cooldown:
params['HonorCooldown'] = 'true'
return self.get_status('SetDesiredCapacity', params)
# Tag methods
def get_all_tags(self, filters=None, max_records=None, next_token=None):
"""
Lists the Auto Scaling group tags.
This action supports pagination by returning a token if there
are more pages to retrieve. To get the next page, call this
action again with the returned token as the NextToken
parameter.
:type filters: dict
:param filters: The value of the filter type used to identify
the tags to be returned. NOT IMPLEMENTED YET.
:type max_records: int
:param max_records: Maximum number of tags to return.
:rtype: list
:returns: List of :class:`boto.ec2.autoscale.tag.Tag`
instances.
"""
params = {}
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribeTags', params,
[('member', Tag)])
def create_or_update_tags(self, tags):
"""
Creates new tags or updates existing tags for an Auto Scaling group.
:type tags: List of :class:`boto.ec2.autoscale.tag.Tag`
:param tags: The new or updated tags.
"""
params = {}
for i, tag in enumerate(tags):
tag.build_params(params, i + 1)
return self.get_status('CreateOrUpdateTags', params, verb='POST')
def delete_tags(self, tags):
"""
Deletes existing tags for an Auto Scaling group.
:type tags: List of :class:`boto.ec2.autoscale.tag.Tag`
        :param tags: The tags to delete.
"""
params = {}
for i, tag in enumerate(tags):
tag.build_params(params, i + 1)
return self.get_status('DeleteTags', params, verb='POST')
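# Usage sketch (illustrative, not part of the vendored source): creating and
# deleting a tag through the connection-level helpers; 'web-asg' is a
# hypothetical group name and conn an AutoScaleConnection.
#
#     from boto.ec2.autoscale import Tag
#
#     tag = Tag(connection=conn, key='env', value='staging',
#               propagate_at_launch=True, resource_id='web-asg')
#     conn.create_or_update_tags([tag])
#     conn.delete_tags([tag])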
# ==== end of source file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/ec2/autoscale/__init__.py ====
from datetime import datetime
from boto.ec2.cloudwatch.listelement import ListElement
from boto.ec2.cloudwatch.dimension import Dimension
from boto.compat import json
from boto.compat import six
class MetricAlarms(list):
def __init__(self, connection=None):
"""
Parses a list of MetricAlarms.
"""
list.__init__(self)
self.connection = connection
def startElement(self, name, attrs, connection):
if name == 'member':
metric_alarm = MetricAlarm(connection)
self.append(metric_alarm)
return metric_alarm
def endElement(self, name, value, connection):
pass
class MetricAlarm(object):
OK = 'OK'
ALARM = 'ALARM'
INSUFFICIENT_DATA = 'INSUFFICIENT_DATA'
_cmp_map = {
'>=': 'GreaterThanOrEqualToThreshold',
'>': 'GreaterThanThreshold',
'<': 'LessThanThreshold',
'<=': 'LessThanOrEqualToThreshold',
}
_rev_cmp_map = dict((v, k) for (k, v) in six.iteritems(_cmp_map))
def __init__(self, connection=None, name=None, metric=None,
namespace=None, statistic=None, comparison=None,
threshold=None, period=None, evaluation_periods=None,
unit=None, description='', dimensions=None,
alarm_actions=None, insufficient_data_actions=None,
ok_actions=None):
"""
Creates a new Alarm.
:type name: str
:param name: Name of alarm.
:type metric: str
:param metric: Name of alarm's associated metric.
:type namespace: str
:param namespace: The namespace for the alarm's metric.
:type statistic: str
:param statistic: The statistic to apply to the alarm's associated
metric.
Valid values: SampleCount|Average|Sum|Minimum|Maximum
:type comparison: str
:param comparison: Comparison used to compare statistic with threshold.
Valid values: >= | > | < | <=
:type threshold: float
:param threshold: The value against which the specified statistic
is compared.
:type period: int
:param period: The period in seconds over which the specified
statistic is applied.
:type evaluation_periods: int
:param evaluation_periods: The number of periods over which data is
compared to the specified threshold.
:type unit: str
:param unit: Allowed Values are:
Seconds|Microseconds|Milliseconds,
Bytes|Kilobytes|Megabytes|Gigabytes|Terabytes,
Bits|Kilobits|Megabits|Gigabits|Terabits,
Percent|Count|
Bytes/Second|Kilobytes/Second|Megabytes/Second|
Gigabytes/Second|Terabytes/Second,
Bits/Second|Kilobits/Second|Megabits/Second,
Gigabits/Second|Terabits/Second|Count/Second|None
:type description: str
:param description: Description of MetricAlarm
:type dimensions: dict
:param dimensions: A dictionary of dimension key/values where
the key is the dimension name and the value
is either a scalar value or an iterator
of values to be associated with that
dimension.
Example: {
'InstanceId': ['i-0123456', 'i-0123457'],
'LoadBalancerName': 'test-lb'
}
:type alarm_actions: list of strs
:param alarm_actions: A list of the ARNs of the actions to take in
ALARM state
:type insufficient_data_actions: list of strs
:param insufficient_data_actions: A list of the ARNs of the actions to
take in INSUFFICIENT_DATA state
:type ok_actions: list of strs
:param ok_actions: A list of the ARNs of the actions to take in OK state
"""
self.name = name
self.connection = connection
self.metric = metric
self.namespace = namespace
self.statistic = statistic
if threshold is not None:
self.threshold = float(threshold)
else:
self.threshold = None
self.comparison = self._cmp_map.get(comparison)
if period is not None:
self.period = int(period)
else:
self.period = None
if evaluation_periods is not None:
self.evaluation_periods = int(evaluation_periods)
else:
self.evaluation_periods = None
self.actions_enabled = None
self.alarm_arn = None
self.last_updated = None
self.description = description
self.dimensions = dimensions
self.state_reason = None
self.state_value = None
self.unit = unit
self.alarm_actions = alarm_actions
self.insufficient_data_actions = insufficient_data_actions
self.ok_actions = ok_actions
def __repr__(self):
return 'MetricAlarm:%s[%s(%s) %s %s]' % (self.name, self.metric,
self.statistic,
self.comparison,
self.threshold)
def startElement(self, name, attrs, connection):
if name == 'AlarmActions':
self.alarm_actions = ListElement()
return self.alarm_actions
elif name == 'InsufficientDataActions':
self.insufficient_data_actions = ListElement()
return self.insufficient_data_actions
elif name == 'OKActions':
self.ok_actions = ListElement()
return self.ok_actions
elif name == 'Dimensions':
self.dimensions = Dimension()
return self.dimensions
else:
pass
def endElement(self, name, value, connection):
if name == 'ActionsEnabled':
self.actions_enabled = value
elif name == 'AlarmArn':
self.alarm_arn = value
elif name == 'AlarmConfigurationUpdatedTimestamp':
self.last_updated = value
elif name == 'AlarmDescription':
self.description = value
elif name == 'AlarmName':
self.name = value
elif name == 'ComparisonOperator':
setattr(self, 'comparison', self._rev_cmp_map[value])
elif name == 'EvaluationPeriods':
self.evaluation_periods = int(value)
elif name == 'MetricName':
self.metric = value
elif name == 'Namespace':
self.namespace = value
elif name == 'Period':
self.period = int(value)
elif name == 'StateReason':
self.state_reason = value
elif name == 'StateValue':
self.state_value = value
elif name == 'Statistic':
self.statistic = value
elif name == 'Threshold':
self.threshold = float(value)
elif name == 'Unit':
self.unit = value
else:
setattr(self, name, value)
def set_state(self, value, reason, data=None):
""" Temporarily sets the state of an alarm.
:type value: str
:param value: OK | ALARM | INSUFFICIENT_DATA
:type reason: str
:param reason: Reason alarm set (human readable).
:type data: str
:param data: Reason data (will be jsonified).
"""
return self.connection.set_alarm_state(self.name, reason, value, data)
def update(self):
return self.connection.update_alarm(self)
def enable_actions(self):
return self.connection.enable_alarm_actions([self.name])
def disable_actions(self):
return self.connection.disable_alarm_actions([self.name])
def describe_history(self, start_date=None, end_date=None, max_records=None,
history_item_type=None, next_token=None):
return self.connection.describe_alarm_history(self.name, start_date,
end_date, max_records,
history_item_type,
next_token)
def add_alarm_action(self, action_arn=None):
"""
Adds an alarm action, represented as an SNS topic, to this alarm.
        What to do when the alarm is triggered.
:type action_arn: str
:param action_arn: SNS topics to which notification should be
sent if the alarm goes to state ALARM.
"""
if not action_arn:
return # Raise exception instead?
self.actions_enabled = 'true'
self.alarm_actions.append(action_arn)
def add_insufficient_data_action(self, action_arn=None):
"""
Adds an insufficient_data action, represented as an SNS topic, to
this alarm. What to do when the insufficient_data state is reached.
:type action_arn: str
:param action_arn: SNS topics to which notification should be
sent if the alarm goes to state INSUFFICIENT_DATA.
"""
if not action_arn:
return
self.actions_enabled = 'true'
self.insufficient_data_actions.append(action_arn)
def add_ok_action(self, action_arn=None):
"""
Adds an ok action, represented as an SNS topic, to this alarm. What
to do when the ok state is reached.
:type action_arn: str
:param action_arn: SNS topics to which notification should be
            sent if the alarm goes to state OK.
"""
if not action_arn:
return
self.actions_enabled = 'true'
self.ok_actions.append(action_arn)
def delete(self):
self.connection.delete_alarms([self.name])
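# Usage sketch (illustrative, not part of the vendored source): an alarm that
# fires when average CPU stays at or above 70% for two consecutive
# five-minute periods. The policy ARN and group name are hypothetical, and
# cw_conn is assumed to be a CloudWatchConnection.
#
#     from boto.ec2.cloudwatch import MetricAlarm
#
#     alarm = MetricAlarm(name='web-asg-high-cpu',
#                         namespace='AWS/EC2', metric='CPUUtilization',
#                         statistic='Average', comparison='>=',
#                         threshold=70.0, period=300, evaluation_periods=2,
#                         alarm_actions=['arn:aws:autoscaling:us-east-1:123456789012:scalingPolicy:example'],
#                         dimensions={'AutoScalingGroupName': 'web-asg'})
#     cw_conn.create_alarm(alarm)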
class AlarmHistoryItem(object):
def __init__(self, connection=None):
self.connection = connection
def __repr__(self):
return 'AlarmHistory:%s[%s at %s]' % (self.name, self.summary, self.timestamp)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'AlarmName':
self.name = value
elif name == 'HistoryData':
self.data = json.loads(value)
elif name == 'HistoryItemType':
            self.item_type = value
elif name == 'HistorySummary':
self.summary = value
elif name == 'Timestamp':
try:
self.timestamp = datetime.strptime(value,
'%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
# ==== end of source file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/ec2/cloudwatch/alarm.py ====
from boto.compat import json, map, six, zip
from boto.connection import AWSQueryConnection
from boto.ec2.cloudwatch.metric import Metric
from boto.ec2.cloudwatch.alarm import MetricAlarm, MetricAlarms, AlarmHistoryItem
from boto.ec2.cloudwatch.datapoint import Datapoint
from boto.regioninfo import RegionInfo, get_regions, load_regions
from boto.regioninfo import connect
import boto
RegionData = load_regions().get('cloudwatch', {})
def regions():
"""
Get all available regions for the CloudWatch service.
:rtype: list
:return: A list of :class:`boto.RegionInfo` instances
"""
return get_regions('cloudwatch', connection_cls=CloudWatchConnection)
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.ec2.cloudwatch.CloudWatchConnection`.
:param str region_name: The name of the region to connect to.
:rtype: :class:`boto.ec2.CloudWatchConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
return connect('cloudwatch', region_name,
connection_cls=CloudWatchConnection, **kw_params)
class CloudWatchConnection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'cloudwatch_version', '2010-08-01')
DefaultRegionName = boto.config.get('Boto', 'cloudwatch_region_name',
'us-east-1')
DefaultRegionEndpoint = boto.config.get('Boto',
'cloudwatch_region_endpoint',
'monitoring.us-east-1.amazonaws.com')
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
security_token=None, validate_certs=True, profile_name=None):
"""
Init method to create a new connection to EC2 Monitoring Service.
B{Note:} The host argument is overridden by the host specified in the
boto configuration file.
"""
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
# Ugly hack to get around both a bug in Python and a
# misconfigured SSL cert for the eu-west-1 endpoint
if self.region.name == 'eu-west-1':
validate_certs = False
super(CloudWatchConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
security_token,
validate_certs=validate_certs,
profile_name=profile_name)
def _required_auth_capability(self):
return ['hmac-v4']
def build_dimension_param(self, dimension, params):
prefix = 'Dimensions.member'
i = 0
for dim_name in dimension:
dim_value = dimension[dim_name]
if dim_value:
if isinstance(dim_value, six.string_types):
dim_value = [dim_value]
for value in dim_value:
params['%s.%d.Name' % (prefix, i + 1)] = dim_name
params['%s.%d.Value' % (prefix, i + 1)] = value
i += 1
else:
params['%s.%d.Name' % (prefix, i + 1)] = dim_name
i += 1
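# Usage sketch (illustrative, not part of the vendored source): the wire
# format produced by build_dimension_param for a list-valued dimension.
#
#     params = {}
#     cw.build_dimension_param({'InstanceId': ['i-0123456', 'i-0123457']},
#                              params)
#     # params == {'Dimensions.member.1.Name': 'InstanceId',
#     #            'Dimensions.member.1.Value': 'i-0123456',
#     #            'Dimensions.member.2.Name': 'InstanceId',
#     #            'Dimensions.member.2.Value': 'i-0123457'}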
def build_list_params(self, params, items, label):
if isinstance(items, six.string_types):
items = [items]
for index, item in enumerate(items):
i = index + 1
if isinstance(item, dict):
for k, v in six.iteritems(item):
params[label % (i, 'Name')] = k
if v is not None:
params[label % (i, 'Value')] = v
else:
params[label % i] = item
def build_put_params(self, params, name, value=None, timestamp=None,
unit=None, dimensions=None, statistics=None):
args = (name, value, unit, dimensions, statistics, timestamp)
length = max(map(lambda a: len(a) if isinstance(a, list) else 1, args))
def aslist(a):
if isinstance(a, list):
if len(a) != length:
raise Exception('Must specify equal number of elements; expected %d.' % length)
return a
return [a] * length
for index, (n, v, u, d, s, t) in enumerate(zip(*map(aslist, args))):
metric_data = {'MetricName': n}
if timestamp:
metric_data['Timestamp'] = t.isoformat()
if unit:
metric_data['Unit'] = u
if dimensions:
self.build_dimension_param(d, metric_data)
if statistics:
metric_data['StatisticValues.Maximum'] = s['maximum']
metric_data['StatisticValues.Minimum'] = s['minimum']
metric_data['StatisticValues.SampleCount'] = s['samplecount']
metric_data['StatisticValues.Sum'] = s['sum']
if value is not None:
                    msg = 'You supplied a value and statistics for a ' + \
                          'metric. Posting statistics and not value.'
boto.log.warn(msg)
elif value is not None:
metric_data['Value'] = v
else:
raise Exception('Must specify a value or statistics to put.')
for key, val in six.iteritems(metric_data):
params['MetricData.member.%d.%s' % (index + 1, key)] = val
def get_metric_statistics(self, period, start_time, end_time, metric_name,
namespace, statistics, dimensions=None,
unit=None):
"""
Get time-series data for one or more statistics of a given metric.
:type period: integer
:param period: The granularity, in seconds, of the returned datapoints.
Period must be at least 60 seconds and must be a multiple
of 60. The default value is 60.
:type start_time: datetime
:param start_time: The time stamp to use for determining the
first datapoint to return. The value specified is
inclusive; results include datapoints with the time stamp
specified.
:type end_time: datetime
:param end_time: The time stamp to use for determining the
last datapoint to return. The value specified is
exclusive; results will include datapoints up to the time
stamp specified.
:type metric_name: string
:param metric_name: The metric name.
:type namespace: string
:param namespace: The metric's namespace.
:type statistics: list
:param statistics: A list of statistics names Valid values:
Average | Sum | SampleCount | Maximum | Minimum
:type dimensions: dict
:param dimensions: A dictionary of dimension key/values where
the key is the dimension name and the value
is either a scalar value or an iterator
of values to be associated with that
dimension.
:type unit: string
        :param unit: The unit for the metric. Valid values are:
Seconds | Microseconds | Milliseconds | Bytes | Kilobytes |
Megabytes | Gigabytes | Terabytes | Bits | Kilobits |
Megabits | Gigabits | Terabits | Percent | Count |
Bytes/Second | Kilobytes/Second | Megabytes/Second |
Gigabytes/Second | Terabytes/Second | Bits/Second |
Kilobits/Second | Megabits/Second | Gigabits/Second |
Terabits/Second | Count/Second | None
:rtype: list
"""
params = {'Period': period,
'MetricName': metric_name,
'Namespace': namespace,
'StartTime': start_time.isoformat(),
'EndTime': end_time.isoformat()}
self.build_list_params(params, statistics, 'Statistics.member.%d')
if dimensions:
self.build_dimension_param(dimensions, params)
if unit:
params['Unit'] = unit
return self.get_list('GetMetricStatistics', params,
[('member', Datapoint)])
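# Usage sketch (illustrative, not part of the vendored source): fetching
# five-minute average CPU for a hypothetical instance over the last hour.
#
#     import datetime
#     import boto.ec2.cloudwatch
#
#     cw = boto.ec2.cloudwatch.connect_to_region('us-east-1')
#     end = datetime.datetime.utcnow()
#     start = end - datetime.timedelta(hours=1)
#     datapoints = cw.get_metric_statistics(
#         300, start, end, 'CPUUtilization', 'AWS/EC2', ['Average'],
#         dimensions={'InstanceId': 'i-0123456'})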
def list_metrics(self, next_token=None, dimensions=None,
metric_name=None, namespace=None):
"""
Returns a list of the valid metrics for which there is recorded
data available.
:type next_token: str
:param next_token: A maximum of 500 metrics will be returned
at one time. If more results are available, the ResultSet
returned will contain a non-Null next_token attribute.
Passing that token as a parameter to list_metrics will
retrieve the next page of metrics.
:type dimensions: dict
:param dimensions: A dictionary containing name/value
pairs that will be used to filter the results. The key in
the dictionary is the name of a Dimension. The value in
the dictionary is either a scalar value of that Dimension
name that you want to filter on or None if you want all
metrics with that Dimension name. To be included in the
result a metric must contain all specified dimensions,
although the metric may contain additional dimensions beyond
            the requested ones. The Dimension names and values must
be strings between 1 and 250 characters long. A maximum of
10 dimensions are allowed.
:type metric_name: str
:param metric_name: The name of the Metric to filter against. If None,
all Metric names will be returned.
:type namespace: str
:param namespace: A Metric namespace to filter against (e.g. AWS/EC2).
If None, Metrics from all namespaces will be returned.
"""
params = {}
if next_token:
params['NextToken'] = next_token
if dimensions:
self.build_dimension_param(dimensions, params)
if metric_name:
params['MetricName'] = metric_name
if namespace:
params['Namespace'] = namespace
return self.get_list('ListMetrics', params, [('member', Metric)])
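    # A hedged pagination sketch for list_metrics (assumes `cw` is a connected
    # CloudWatchConnection; at most 500 metrics are returned per call):
    #
    #     metrics, token = [], None
    #     while True:
    #         page = cw.list_metrics(next_token=token, namespace='AWS/EC2')
    #         metrics.extend(page)
    #         token = getattr(page, 'next_token', None)
    #         if not token:
    #             break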
def put_metric_data(self, namespace, name, value=None, timestamp=None,
unit=None, dimensions=None, statistics=None):
"""
Publishes metric data points to Amazon CloudWatch. Amazon Cloudwatch
associates the data points with the specified metric. If the specified
metric does not exist, Amazon CloudWatch creates the metric. If a list
is specified for some, but not all, of the arguments, the remaining
arguments are repeated a corresponding number of times.
:type namespace: str
:param namespace: The namespace of the metric.
:type name: str or list
:param name: The name of the metric.
:type value: float or list
:param value: The value for the metric.
:type timestamp: datetime or list
:param timestamp: The time stamp used for the metric. If not specified,
the default value is set to the time the metric data was received.
:type unit: string or list
:param unit: The unit of the metric. Valid Values: Seconds |
Microseconds | Milliseconds | Bytes | Kilobytes |
Megabytes | Gigabytes | Terabytes | Bits | Kilobits |
Megabits | Gigabits | Terabits | Percent | Count |
Bytes/Second | Kilobytes/Second | Megabytes/Second |
Gigabytes/Second | Terabytes/Second | Bits/Second |
Kilobits/Second | Megabits/Second | Gigabits/Second |
Terabits/Second | Count/Second | None
:type dimensions: dict
:param dimensions: Add extra name value pairs to associate
with the metric, i.e.:
{'name1': value1, 'name2': (value2, value3)}
:type statistics: dict or list
:param statistics: Use a statistic set instead of a value, for example::
{'maximum': 30, 'minimum': 1, 'samplecount': 100, 'sum': 10000}
"""
params = {'Namespace': namespace}
self.build_put_params(params, name, value=value, timestamp=timestamp,
unit=unit, dimensions=dimensions, statistics=statistics)
return self.get_status('PutMetricData', params, verb="POST")
def describe_alarms(self, action_prefix=None, alarm_name_prefix=None,
alarm_names=None, max_records=None, state_value=None,
next_token=None):
"""
Retrieves alarms with the specified names. If no name is specified, all
alarms for the user are returned. Alarms can be retrieved by using only
a prefix for the alarm name, the alarm state, or a prefix for any
action.
:type action_prefix: string
:param action_prefix: The action name prefix.
:type alarm_name_prefix: string
:param alarm_name_prefix: The alarm name prefix. AlarmNames cannot
be specified if this parameter is specified.
:type alarm_names: list
:param alarm_names: A list of alarm names to retrieve information for.
:type max_records: int
:param max_records: The maximum number of alarm descriptions
to retrieve.
:type state_value: string
:param state_value: The state value to be used in matching alarms.
:type next_token: string
:param next_token: The token returned by a previous call to
indicate that there is more data.
        :rtype: list
"""
params = {}
if action_prefix:
params['ActionPrefix'] = action_prefix
if alarm_name_prefix:
params['AlarmNamePrefix'] = alarm_name_prefix
elif alarm_names:
self.build_list_params(params, alarm_names, 'AlarmNames.member.%s')
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
if state_value:
params['StateValue'] = state_value
result = self.get_list('DescribeAlarms', params,
[('MetricAlarms', MetricAlarms)])
ret = result[0]
ret.next_token = result.next_token
return ret
def describe_alarm_history(self, alarm_name=None,
start_date=None, end_date=None,
max_records=None, history_item_type=None,
next_token=None):
"""
Retrieves history for the specified alarm. Filter alarms by date range
or item type. If an alarm name is not specified, Amazon CloudWatch
returns histories for all of the owner's alarms.
Amazon CloudWatch retains the history of deleted alarms for a period of
six weeks. If an alarm has been deleted, its history can still be
queried.
:type alarm_name: string
:param alarm_name: The name of the alarm.
:type start_date: datetime
:param start_date: The starting date to retrieve alarm history.
:type end_date: datetime
        :param end_date: The ending date to retrieve alarm history.
        :type history_item_type: string
        :param history_item_type: The type of alarm histories to retrieve
(ConfigurationUpdate | StateUpdate | Action)
:type max_records: int
:param max_records: The maximum number of alarm descriptions
to retrieve.
:type next_token: string
:param next_token: The token returned by a previous call to indicate
that there is more data.
        :rtype: list
"""
params = {}
if alarm_name:
params['AlarmName'] = alarm_name
if start_date:
params['StartDate'] = start_date.isoformat()
if end_date:
params['EndDate'] = end_date.isoformat()
if history_item_type:
params['HistoryItemType'] = history_item_type
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribeAlarmHistory', params,
[('member', AlarmHistoryItem)])
def describe_alarms_for_metric(self, metric_name, namespace, period=None,
statistic=None, dimensions=None, unit=None):
"""
Retrieves all alarms for a single metric. Specify a statistic, period,
or unit to filter the set of alarms further.
:type metric_name: string
:param metric_name: The name of the metric.
:type namespace: string
:param namespace: The namespace of the metric.
:type period: int
:param period: The period in seconds over which the statistic
is applied.
:type statistic: string
:param statistic: The statistic for the metric.
:type dimensions: dict
:param dimensions: A dictionary containing name/value
pairs that will be used to filter the results. The key in
the dictionary is the name of a Dimension. The value in
the dictionary is either a scalar value of that Dimension
name that you want to filter on, a list of values to
filter on or None if you want all metrics with that
Dimension name.
        :type unit: string
        :param unit: The unit for the metric.
        :rtype: list
"""
params = {'MetricName': metric_name,
'Namespace': namespace}
if period:
params['Period'] = period
if statistic:
params['Statistic'] = statistic
if dimensions:
self.build_dimension_param(dimensions, params)
if unit:
params['Unit'] = unit
return self.get_list('DescribeAlarmsForMetric', params,
[('member', MetricAlarm)])
def put_metric_alarm(self, alarm):
"""
Creates or updates an alarm and associates it with the specified Amazon
CloudWatch metric. Optionally, this operation can associate one or more
Amazon Simple Notification Service resources with the alarm.
When this operation creates an alarm, the alarm state is immediately
set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is
        set appropriately. Any actions associated with the StateValue are then
        executed.
When updating an existing alarm, its StateValue is left unchanged.
:type alarm: boto.ec2.cloudwatch.alarm.MetricAlarm
:param alarm: MetricAlarm object.
"""
params = {
'AlarmName': alarm.name,
'MetricName': alarm.metric,
'Namespace': alarm.namespace,
'Statistic': alarm.statistic,
'ComparisonOperator': alarm.comparison,
'Threshold': alarm.threshold,
'EvaluationPeriods': alarm.evaluation_periods,
'Period': alarm.period,
}
if alarm.actions_enabled is not None:
params['ActionsEnabled'] = alarm.actions_enabled
if alarm.alarm_actions:
self.build_list_params(params, alarm.alarm_actions,
'AlarmActions.member.%s')
if alarm.description:
params['AlarmDescription'] = alarm.description
if alarm.dimensions:
self.build_dimension_param(alarm.dimensions, params)
if alarm.insufficient_data_actions:
self.build_list_params(params, alarm.insufficient_data_actions,
'InsufficientDataActions.member.%s')
if alarm.ok_actions:
self.build_list_params(params, alarm.ok_actions,
'OKActions.member.%s')
if alarm.unit:
params['Unit'] = alarm.unit
alarm.connection = self
return self.get_status('PutMetricAlarm', params)
create_alarm = put_metric_alarm
update_alarm = put_metric_alarm
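    # A hedged sketch of building and publishing an alarm (the SNS topic ARN
    # and the thresholds are placeholders, not prescribed values):
    #
    #     from boto.ec2.cloudwatch import MetricAlarm
    #     alarm = MetricAlarm(name='cpu-high', metric='CPUUtilization',
    #                         namespace='AWS/EC2', statistic='Average',
    #                         comparison='>', threshold=90.0, period=300,
    #                         evaluation_periods=2,
    #                         alarm_actions=['arn:aws:sns:us-east-1:123456789012:ops'])
    #     cw.put_metric_alarm(alarm)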
def delete_alarms(self, alarms):
"""
Deletes all specified alarms. In the event of an error, no
alarms are deleted.
:type alarms: list
:param alarms: List of alarm names.
"""
params = {}
self.build_list_params(params, alarms, 'AlarmNames.member.%s')
return self.get_status('DeleteAlarms', params)
def set_alarm_state(self, alarm_name, state_reason, state_value,
state_reason_data=None):
"""
Temporarily sets the state of an alarm. When the updated StateValue
differs from the previous value, the action configured for the
appropriate state is invoked. This is not a permanent change. The next
periodic alarm check (in about a minute) will set the alarm to its
actual state.
:type alarm_name: string
:param alarm_name: Descriptive name for alarm.
:type state_reason: string
:param state_reason: Human readable reason.
:type state_value: string
:param state_value: OK | ALARM | INSUFFICIENT_DATA
:type state_reason_data: string
:param state_reason_data: Reason string (will be jsonified).
"""
params = {'AlarmName': alarm_name,
'StateReason': state_reason,
'StateValue': state_value}
if state_reason_data:
params['StateReasonData'] = json.dumps(state_reason_data)
return self.get_status('SetAlarmState', params)
def enable_alarm_actions(self, alarm_names):
"""
Enables actions for the specified alarms.
        :type alarm_names: list
        :param alarm_names: List of alarm names.
"""
params = {}
self.build_list_params(params, alarm_names, 'AlarmNames.member.%s')
return self.get_status('EnableAlarmActions', params)
def disable_alarm_actions(self, alarm_names):
"""
Disables actions for the specified alarms.
        :type alarm_names: list
        :param alarm_names: List of alarm names.
"""
params = {}
self.build_list_params(params, alarm_names, 'AlarmNames.member.%s')
return self.get_status('DisableAlarmActions', params)
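# A minimal usage sketch for the connection class above, assuming valid AWS
# credentials are available to boto; the namespace and metric name are
# illustrative, not canonical.
if __name__ == '__main__':
    import datetime
    import boto.ec2.cloudwatch

    cw = boto.ec2.cloudwatch.connect_to_region('us-east-1')
    # Publish one datapoint, then read hourly sums back for the last day.
    cw.put_metric_data('Example/App', 'RequestCount', value=1, unit='Count')
    end = datetime.datetime.utcnow()
    start = end - datetime.timedelta(days=1)
    for point in cw.get_metric_statistics(3600, start, end, 'RequestCount',
                                          'Example/App', ['Sum'], unit='Count'):
        print(point)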
# --- end of vendored file: boto/ec2/cloudwatch/__init__.py ---
from boto.ec2.cloudwatch.alarm import MetricAlarm
from boto.ec2.cloudwatch.dimension import Dimension
class Metric(object):
Statistics = ['Minimum', 'Maximum', 'Sum', 'Average', 'SampleCount']
Units = ['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes',
'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits',
'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count',
'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second',
'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second',
'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second',
'Terabits/Second', 'Count/Second', None]
def __init__(self, connection=None):
self.connection = connection
self.name = None
self.namespace = None
self.dimensions = None
def __repr__(self):
return 'Metric:%s' % self.name
def startElement(self, name, attrs, connection):
if name == 'Dimensions':
self.dimensions = Dimension()
return self.dimensions
def endElement(self, name, value, connection):
if name == 'MetricName':
self.name = value
elif name == 'Namespace':
self.namespace = value
else:
setattr(self, name, value)
def query(self, start_time, end_time, statistics, unit=None, period=60):
"""
:type start_time: datetime
:param start_time: The time stamp to use for determining the
first datapoint to return. The value specified is
inclusive; results include datapoints with the time stamp
specified.
:type end_time: datetime
:param end_time: The time stamp to use for determining the
last datapoint to return. The value specified is
exclusive; results will include datapoints up to the time
stamp specified.
:type statistics: list
        :param statistics: A list of statistic names. Valid values:
Average | Sum | SampleCount | Maximum | Minimum
:type unit: string
        :param unit: The unit for the metric. Valid values are:
Seconds | Microseconds | Milliseconds | Bytes | Kilobytes |
Megabytes | Gigabytes | Terabytes | Bits | Kilobits |
Megabits | Gigabits | Terabits | Percent | Count |
Bytes/Second | Kilobytes/Second | Megabytes/Second |
Gigabytes/Second | Terabytes/Second | Bits/Second |
Kilobits/Second | Megabits/Second | Gigabits/Second |
Terabits/Second | Count/Second | None
:type period: integer
:param period: The granularity, in seconds, of the returned datapoints.
Period must be at least 60 seconds and must be a multiple
of 60. The default value is 60.
"""
if not isinstance(statistics, list):
statistics = [statistics]
return self.connection.get_metric_statistics(period,
start_time,
end_time,
self.name,
self.namespace,
statistics,
self.dimensions,
unit)
def create_alarm(self, name, comparison, threshold,
period, evaluation_periods,
statistic, enabled=True, description=None,
dimensions=None, alarm_actions=None, ok_actions=None,
insufficient_data_actions=None, unit=None):
"""
Creates or updates an alarm and associates it with this metric.
Optionally, this operation can associate one or more
Amazon Simple Notification Service resources with the alarm.
When this operation creates an alarm, the alarm state is immediately
set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is
        set appropriately. Any actions associated with the StateValue are then
        executed.
When updating an existing alarm, its StateValue is left unchanged.
        :rtype: boto.ec2.cloudwatch.alarm.MetricAlarm
        :return: The newly created or updated alarm, or None if the request
            failed.
"""
if not dimensions:
dimensions = self.dimensions
alarm = MetricAlarm(self.connection, name, self.name,
self.namespace, statistic, comparison,
threshold, period, evaluation_periods,
unit, description, dimensions,
alarm_actions, insufficient_data_actions,
ok_actions)
if self.connection.put_metric_alarm(alarm):
return alarm
def describe_alarms(self, period=None, statistic=None,
dimensions=None, unit=None):
"""
Retrieves all alarms for this metric. Specify a statistic, period,
or unit to filter the set of alarms further.
:type period: int
:param period: The period in seconds over which the statistic
is applied.
:type statistic: string
:param statistic: The statistic for the metric.
:type dimensions: dict
        :param dimensions: A dictionary containing name/value
pairs that will be used to filter the results. The key in
the dictionary is the name of a Dimension. The value in
the dictionary is either a scalar value of that Dimension
name that you want to filter on, a list of values to
filter on or None if you want all metrics with that
Dimension name.
        :type unit: string
        :param unit: The unit for the metric.
        :rtype: list
"""
return self.connection.describe_alarms_for_metric(self.name,
self.namespace,
period,
statistic,
dimensions,
unit)
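# A hedged end-to-end sketch: Metric objects normally come back from
# list_metrics rather than being constructed by hand; the region, namespace
# and metric name below are illustrative.
if __name__ == '__main__':
    import datetime
    import boto.ec2.cloudwatch

    cw = boto.ec2.cloudwatch.connect_to_region('us-east-1')
    found = cw.list_metrics(namespace='AWS/EC2', metric_name='CPUUtilization')
    if found:
        end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(hours=1)
        # query() accepts a single statistic name or a list of them.
        print(found[0].query(start, end, 'Average', unit='Percent', period=300))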
# --- end of vendored file: boto/ec2/cloudwatch/metric.py ---
class HealthCheck(object):
"""
Represents an EC2 Access Point Health Check. See
:ref:`elb-configuring-a-health-check` for a walkthrough on configuring
load balancer health checks.
"""
def __init__(self, access_point=None, interval=30, target=None,
healthy_threshold=3, timeout=5, unhealthy_threshold=5):
"""
:ivar str access_point: The name of the load balancer this
health check is associated with.
:ivar int interval: Specifies how many seconds there are between
health checks.
:ivar str target: Determines what to check on an instance. See the
Amazon HealthCheck_ documentation for possible Target values.
.. _HealthCheck: http://docs.amazonwebservices.com/ElasticLoadBalancing/latest/APIReference/API_HealthCheck.html
"""
self.access_point = access_point
self.interval = interval
self.target = target
self.healthy_threshold = healthy_threshold
self.timeout = timeout
self.unhealthy_threshold = unhealthy_threshold
def __repr__(self):
return 'HealthCheck:%s' % self.target
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Interval':
self.interval = int(value)
elif name == 'Target':
self.target = value
elif name == 'HealthyThreshold':
self.healthy_threshold = int(value)
elif name == 'Timeout':
self.timeout = int(value)
elif name == 'UnhealthyThreshold':
self.unhealthy_threshold = int(value)
else:
setattr(self, name, value)
def update(self):
"""
In the case where you have accessed an existing health check on a
load balancer, this method applies this instance's health check
values to the load balancer it is attached to.
.. note:: This method will not do anything if the :py:attr:`access_point`
attribute isn't set, as is the case with a newly instantiated
HealthCheck instance.
"""
if not self.access_point:
return
new_hc = self.connection.configure_health_check(self.access_point,
self)
self.interval = new_hc.interval
self.target = new_hc.target
self.healthy_threshold = new_hc.healthy_threshold
self.unhealthy_threshold = new_hc.unhealthy_threshold
self.timeout = new_hc.timeout
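# A hedged sketch of defining a health check and applying it through an
# ELBConnection; the balancer name and target are illustrative.
if __name__ == '__main__':
    import boto.ec2.elb

    hc = HealthCheck(interval=20, target='HTTP:8080/health',
                     healthy_threshold=3, timeout=5, unhealthy_threshold=5)
    conn = boto.ec2.elb.connect_to_region('us-east-1')
    conn.configure_health_check('example-lb', hc)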
# --- end of vendored file: boto/ec2/elb/healthcheck.py ---
from boto.ec2.elb.listelement import ListElement
class Listener(object):
"""
Represents an EC2 Load Balancer Listener tuple
"""
def __init__(self, load_balancer=None, load_balancer_port=0,
instance_port=0, protocol='', ssl_certificate_id=None, instance_protocol=None):
self.load_balancer = load_balancer
self.load_balancer_port = load_balancer_port
self.instance_port = instance_port
self.protocol = protocol
self.instance_protocol = instance_protocol
self.ssl_certificate_id = ssl_certificate_id
self.policy_names = ListElement()
def __repr__(self):
r = "(%d, %d, '%s'" % (self.load_balancer_port, self.instance_port, self.protocol)
if self.instance_protocol:
r += ", '%s'" % self.instance_protocol
if self.ssl_certificate_id:
r += ', %s' % (self.ssl_certificate_id)
r += ')'
return r
def startElement(self, name, attrs, connection):
if name == 'PolicyNames':
return self.policy_names
return None
def endElement(self, name, value, connection):
if name == 'LoadBalancerPort':
self.load_balancer_port = int(value)
elif name == 'InstancePort':
self.instance_port = int(value)
elif name == 'InstanceProtocol':
self.instance_protocol = value
elif name == 'Protocol':
self.protocol = value
elif name == 'SSLCertificateId':
self.ssl_certificate_id = value
else:
setattr(self, name, value)
def get_tuple(self):
return self.load_balancer_port, self.instance_port, self.protocol
def get_complex_tuple(self):
return self.load_balancer_port, self.instance_port, self.protocol, self.instance_protocol
def __getitem__(self, key):
if key == 0:
return self.load_balancer_port
if key == 1:
return self.instance_port
if key == 2:
return self.protocol
if key == 3:
return self.instance_protocol
if key == 4:
return self.ssl_certificate_id
raise KeyError
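# A hedged sketch: listeners are usually expressed as plain tuples when a
# balancer is created; this class chiefly parses the API's XML responses.
if __name__ == '__main__':
    listener = Listener(load_balancer_port=80, instance_port=8080,
                        protocol='HTTP')
    print(listener.get_tuple())    # -> (80, 8080, 'HTTP')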
# --- end of vendored file: boto/ec2/elb/listener.py ---
from boto.ec2.elb.healthcheck import HealthCheck
from boto.ec2.elb.listener import Listener
from boto.ec2.elb.listelement import ListElement
from boto.ec2.elb.policies import Policies, OtherPolicy
from boto.ec2.elb.securitygroup import SecurityGroup
from boto.ec2.instanceinfo import InstanceInfo
from boto.resultset import ResultSet
from boto.compat import six
class Backend(object):
"""Backend server description"""
def __init__(self, connection=None):
self.connection = connection
self.instance_port = None
self.policies = None
def __repr__(self):
return 'Backend(%r:%r)' % (self.instance_port, self.policies)
def startElement(self, name, attrs, connection):
if name == 'PolicyNames':
self.policies = ResultSet([('member', OtherPolicy)])
return self.policies
def endElement(self, name, value, connection):
if name == 'InstancePort':
self.instance_port = int(value)
return
class LoadBalancerZones(object):
"""
Used to collect the zones for a Load Balancer when enable_zones
or disable_zones are called.
"""
def __init__(self, connection=None):
self.connection = connection
self.zones = ListElement()
def startElement(self, name, attrs, connection):
if name == 'AvailabilityZones':
return self.zones
def endElement(self, name, value, connection):
pass
class LoadBalancer(object):
"""
Represents an EC2 Load Balancer.
"""
def __init__(self, connection=None, name=None, endpoints=None):
"""
:ivar boto.ec2.elb.ELBConnection connection: The connection this load
            balancer instance was instantiated from.
:ivar list listeners: A list of tuples in the form of
``(<Inbound port>, <Outbound port>, <Protocol>)``
:ivar boto.ec2.elb.healthcheck.HealthCheck health_check: The health
check policy for this load balancer.
:ivar boto.ec2.elb.policies.Policies policies: Cookie stickiness and
other policies.
:ivar str name: The name of the Load Balancer.
:ivar str dns_name: The external DNS name for the balancer.
:ivar str created_time: A date+time string showing when the
load balancer was created.
:ivar list instances: A list of :py:class:`boto.ec2.instanceinfo.InstanceInfo`
instances, representing the EC2 instances this load balancer is
distributing requests to.
:ivar list availability_zones: The availability zones this balancer
covers.
:ivar str canonical_hosted_zone_name: Current CNAME for the balancer.
:ivar str canonical_hosted_zone_name_id: The Route 53 hosted zone
ID of this balancer. Needed when creating an Alias record in a
Route 53 hosted zone.
:ivar boto.ec2.elb.securitygroup.SecurityGroup source_security_group:
The security group that you can use as part of your inbound rules
for your load balancer back-end instances to disallow traffic
from sources other than your load balancer.
:ivar list subnets: A list of subnets this balancer is on.
:ivar list security_groups: A list of additional security groups that
have been applied.
:ivar str vpc_id: The ID of the VPC that this ELB resides within.
        :ivar list backends: A list of
            :py:class:`boto.ec2.elb.loadbalancer.Backend` back-end server
            descriptions.
"""
self.connection = connection
self.name = name
self.listeners = None
self.health_check = None
self.policies = None
self.dns_name = None
self.created_time = None
self.instances = None
self.availability_zones = ListElement()
self.canonical_hosted_zone_name = None
self.canonical_hosted_zone_name_id = None
self.source_security_group = None
self.subnets = ListElement()
self.security_groups = ListElement()
self.vpc_id = None
self.scheme = None
self.backends = None
self._attributes = None
def __repr__(self):
return 'LoadBalancer:%s' % self.name
def startElement(self, name, attrs, connection):
if name == 'HealthCheck':
self.health_check = HealthCheck(self)
return self.health_check
elif name == 'ListenerDescriptions':
self.listeners = ResultSet([('member', Listener)])
return self.listeners
elif name == 'AvailabilityZones':
return self.availability_zones
elif name == 'Instances':
self.instances = ResultSet([('member', InstanceInfo)])
return self.instances
elif name == 'Policies':
self.policies = Policies(self)
return self.policies
elif name == 'SourceSecurityGroup':
self.source_security_group = SecurityGroup()
return self.source_security_group
elif name == 'Subnets':
return self.subnets
elif name == 'SecurityGroups':
return self.security_groups
elif name == 'VPCId':
pass
elif name == "BackendServerDescriptions":
self.backends = ResultSet([('member', Backend)])
return self.backends
else:
return None
def endElement(self, name, value, connection):
if name == 'LoadBalancerName':
self.name = value
elif name == 'DNSName':
self.dns_name = value
elif name == 'CreatedTime':
self.created_time = value
elif name == 'InstanceId':
self.instances.append(value)
elif name == 'CanonicalHostedZoneName':
self.canonical_hosted_zone_name = value
elif name == 'CanonicalHostedZoneNameID':
self.canonical_hosted_zone_name_id = value
elif name == 'VPCId':
self.vpc_id = value
elif name == 'Scheme':
self.scheme = value
else:
setattr(self, name, value)
def enable_zones(self, zones):
"""
Enable availability zones to this Access Point.
All zones must be in the same region as the Access Point.
:type zones: string or List of strings
:param zones: The name of the zone(s) to add.
"""
if isinstance(zones, six.string_types):
zones = [zones]
new_zones = self.connection.enable_availability_zones(self.name, zones)
self.availability_zones = new_zones
def disable_zones(self, zones):
"""
Disable availability zones from this Access Point.
:type zones: string or List of strings
        :param zones: The name of the zone(s) to remove.
"""
if isinstance(zones, six.string_types):
zones = [zones]
new_zones = self.connection.disable_availability_zones(
self.name, zones)
self.availability_zones = new_zones
def get_attributes(self, force=False):
"""
Gets the LbAttributes. The Attributes will be cached.
:type force: bool
:param force: Ignore cache value and reload.
:rtype: boto.ec2.elb.attributes.LbAttributes
:return: The LbAttribues object
"""
if not self._attributes or force:
self._attributes = self.connection.get_all_lb_attributes(self.name)
return self._attributes
def is_cross_zone_load_balancing(self, force=False):
"""
        Identifies whether the ELB is currently configured to do CrossZone
        Balancing.
:type force: bool
:param force: Ignore cache value and reload.
:rtype: bool
:return: True if balancing is enabled, False if not.
"""
return self.get_attributes(force).cross_zone_load_balancing.enabled
def enable_cross_zone_load_balancing(self):
"""
Turns on CrossZone Load Balancing for this ELB.
:rtype: bool
:return: True if successful, False if not.
"""
success = self.connection.modify_lb_attribute(
self.name, 'crossZoneLoadBalancing', True)
if success and self._attributes:
self._attributes.cross_zone_load_balancing.enabled = True
return success
def disable_cross_zone_load_balancing(self):
"""
Turns off CrossZone Load Balancing for this ELB.
:rtype: bool
:return: True if successful, False if not.
"""
success = self.connection.modify_lb_attribute(
self.name, 'crossZoneLoadBalancing', False)
if success and self._attributes:
self._attributes.cross_zone_load_balancing.enabled = False
return success
def register_instances(self, instances):
"""
Adds instances to this load balancer. All instances must be in the same
region as the load balancer. Adding endpoints that are already
registered with the load balancer has no effect.
:param list instances: List of instance IDs (strings) that you'd like
to add to this load balancer.
"""
if isinstance(instances, six.string_types):
instances = [instances]
new_instances = self.connection.register_instances(self.name,
instances)
self.instances = new_instances
def deregister_instances(self, instances):
"""
Remove instances from this load balancer. Removing instances that are
not registered with the load balancer has no effect.
:param list instances: List of instance IDs (strings) that you'd like
to remove from this load balancer.
"""
if isinstance(instances, six.string_types):
instances = [instances]
new_instances = self.connection.deregister_instances(self.name,
instances)
self.instances = new_instances
def delete(self):
"""
Delete this load balancer.
"""
return self.connection.delete_load_balancer(self.name)
def configure_health_check(self, health_check):
"""
Configures the health check behavior for the instances behind this
load balancer. See :ref:`elb-configuring-a-health-check` for a
walkthrough.
:param boto.ec2.elb.healthcheck.HealthCheck health_check: A
HealthCheck instance that tells the load balancer how to check
its instances for health.
"""
return self.connection.configure_health_check(self.name, health_check)
def get_instance_health(self, instances=None):
"""
Returns a list of :py:class:`boto.ec2.elb.instancestate.InstanceState`
objects, which show the health of the instances attached to this
load balancer.
:rtype: list
:returns: A list of
:py:class:`InstanceState <boto.ec2.elb.instancestate.InstanceState>`
instances, representing the instances
attached to this load balancer.
"""
return self.connection.describe_instance_health(self.name, instances)
def create_listeners(self, listeners):
return self.connection.create_load_balancer_listeners(self.name,
listeners)
def create_listener(self, inPort, outPort=None, proto="tcp"):
if outPort is None:
outPort = inPort
return self.create_listeners([(inPort, outPort, proto)])
def delete_listeners(self, listeners):
return self.connection.delete_load_balancer_listeners(self.name,
listeners)
def delete_listener(self, inPort):
return self.delete_listeners([inPort])
def delete_policy(self, policy_name):
"""
Deletes a policy from the LoadBalancer. The specified policy must not
be enabled for any listeners.
"""
return self.connection.delete_lb_policy(self.name, policy_name)
def set_policies_of_listener(self, lb_port, policies):
return self.connection.set_lb_policies_of_listener(self.name,
lb_port,
policies)
def set_policies_of_backend_server(self, instance_port, policies):
return self.connection.set_lb_policies_of_backend_server(
self.name, instance_port, policies)
def create_cookie_stickiness_policy(self, cookie_expiration_period,
policy_name):
return self.connection.create_lb_cookie_stickiness_policy(
cookie_expiration_period, self.name, policy_name)
def create_app_cookie_stickiness_policy(self, name, policy_name):
return self.connection.create_app_cookie_stickiness_policy(name,
self.name,
policy_name)
def set_listener_SSL_certificate(self, lb_port, ssl_certificate_id):
return self.connection.set_lb_listener_SSL_certificate(
self.name, lb_port, ssl_certificate_id)
def create_lb_policy(self, policy_name, policy_type, policy_attribute):
return self.connection.create_lb_policy(
self.name, policy_name, policy_type, policy_attribute)
def attach_subnets(self, subnets):
"""
Attaches load balancer to one or more subnets.
Attaching subnets that are already registered with the
Load Balancer has no effect.
:type subnets: string or List of strings
:param subnets: The name of the subnet(s) to add.
"""
if isinstance(subnets, six.string_types):
subnets = [subnets]
new_subnets = self.connection.attach_lb_to_subnets(self.name, subnets)
self.subnets = new_subnets
def detach_subnets(self, subnets):
"""
Detaches load balancer from one or more subnets.
:type subnets: string or List of strings
:param subnets: The name of the subnet(s) to detach.
"""
if isinstance(subnets, six.string_types):
subnets = [subnets]
new_subnets = self.connection.detach_lb_from_subnets(
self.name, subnets)
self.subnets = new_subnets
def apply_security_groups(self, security_groups):
"""
Associates one or more security groups with the load balancer.
The provided security groups will override any currently applied
security groups.
:type security_groups: string or List of strings
:param security_groups: The name of the security group(s) to add.
"""
if isinstance(security_groups, six.string_types):
security_groups = [security_groups]
new_sgs = self.connection.apply_security_groups_to_lb(
self.name, security_groups)
self.security_groups = new_sgs
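# A hedged sketch: LoadBalancer objects are normally obtained from an
# ELBConnection rather than constructed directly; the region is illustrative.
if __name__ == '__main__':
    import boto.ec2.elb

    conn = boto.ec2.elb.connect_to_region('us-east-1')
    for lb in conn.get_all_load_balancers():
        print(lb.name, lb.dns_name)
        for state in lb.get_instance_health():
            print('  ', state.instance_id, state.state)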
# --- end of vendored file: boto/ec2/elb/loadbalancer.py ---
from boto.connection import AWSQueryConnection
from boto.ec2.instanceinfo import InstanceInfo
from boto.ec2.elb.loadbalancer import LoadBalancer, LoadBalancerZones
from boto.ec2.elb.instancestate import InstanceState
from boto.ec2.elb.healthcheck import HealthCheck
from boto.regioninfo import RegionInfo, get_regions, load_regions
from boto.regioninfo import connect
import boto
from boto.compat import six
RegionData = load_regions().get('elasticloadbalancing', {})
def regions():
"""
Get all available regions for the ELB service.
:rtype: list
:return: A list of :class:`boto.RegionInfo` instances
"""
return get_regions('elasticloadbalancing', connection_cls=ELBConnection)
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.ec2.elb.ELBConnection`.
:param str region_name: The name of the region to connect to.
:rtype: :class:`boto.ec2.ELBConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
return connect('elasticloadbalancing', region_name,
connection_cls=ELBConnection, **kw_params)
class ELBConnection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'elb_version', '2012-06-01')
DefaultRegionName = boto.config.get('Boto', 'elb_region_name', 'us-east-1')
DefaultRegionEndpoint = boto.config.get(
'Boto', 'elb_region_endpoint',
'elasticloadbalancing.us-east-1.amazonaws.com')
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
security_token=None, validate_certs=True, profile_name=None):
"""
Init method to create a new connection to EC2 Load Balancing Service.
.. note:: The region argument is overridden by the region specified in
the boto configuration file.
"""
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
super(ELBConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
security_token,
validate_certs=validate_certs,
profile_name=profile_name)
def _required_auth_capability(self):
return ['hmac-v4']
def build_list_params(self, params, items, label):
if isinstance(items, six.string_types):
items = [items]
for index, item in enumerate(items):
params[label % (index + 1)] = item
def get_all_load_balancers(self, load_balancer_names=None, marker=None):
"""
Retrieve all load balancers associated with your account.
:type load_balancer_names: list
:keyword load_balancer_names: An optional list of load balancer names.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:rtype: :py:class:`boto.resultset.ResultSet`
:return: A ResultSet containing instances of
:class:`boto.ec2.elb.loadbalancer.LoadBalancer`
"""
params = {}
if load_balancer_names:
self.build_list_params(params, load_balancer_names,
'LoadBalancerNames.member.%d')
if marker:
params['Marker'] = marker
return self.get_list('DescribeLoadBalancers', params,
[('member', LoadBalancer)])
def create_load_balancer(self, name, zones, listeners=None, subnets=None,
security_groups=None, scheme='internet-facing',
complex_listeners=None):
"""
Create a new load balancer for your account. By default the load
balancer will be created in EC2. To create a load balancer inside a
VPC, parameter zones must be set to None and subnets must not be None.
The load balancer will be automatically created under the VPC that
contains the subnet(s) specified.
:type name: string
:param name: The mnemonic name associated with the new load balancer
:type zones: List of strings
:param zones: The names of the availability zone(s) to add.
:type listeners: List of tuples
:param listeners: Each tuple contains three or four values,
(LoadBalancerPortNumber, InstancePortNumber, Protocol,
[SSLCertificateId]) where LoadBalancerPortNumber and
InstancePortNumber are integer values between 1 and 65535,
            Protocol is a string containing either 'TCP', 'SSL', 'HTTP', or
            'HTTPS'; SSLCertificateId is the ARN of an AWS IAM
            certificate, and must be specified when doing HTTPS.
:type subnets: list of strings
:param subnets: A list of subnet IDs in your VPC to attach to
your LoadBalancer.
:type security_groups: list of strings
:param security_groups: The security groups assigned to your
LoadBalancer within your VPC.
:type scheme: string
:param scheme: The type of a LoadBalancer. By default, Elastic
Load Balancing creates an internet-facing LoadBalancer with
a publicly resolvable DNS name, which resolves to public IP
addresses.
Specify the value internal for this option to create an
internal LoadBalancer with a DNS name that resolves to
private IP addresses.
This option is only available for LoadBalancers attached
to an Amazon VPC.
:type complex_listeners: List of tuples
:param complex_listeners: Each tuple contains four or five values,
(LoadBalancerPortNumber, InstancePortNumber, Protocol,
InstanceProtocol, SSLCertificateId).
Where:
- LoadBalancerPortNumber and InstancePortNumber are integer
values between 1 and 65535
- Protocol and InstanceProtocol is a string containing
either 'TCP',
'SSL', 'HTTP', or 'HTTPS'
- SSLCertificateId is the ARN of an SSL certificate loaded into
AWS IAM
:rtype: :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
:return: The newly created
:class:`boto.ec2.elb.loadbalancer.LoadBalancer`
"""
if not listeners and not complex_listeners:
# Must specify one of the two options
return None
params = {'LoadBalancerName': name,
'Scheme': scheme}
# Handle legacy listeners
if listeners:
for index, listener in enumerate(listeners):
i = index + 1
protocol = listener[2].upper()
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
if protocol == 'HTTPS' or protocol == 'SSL':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[3]
# Handle the full listeners
if complex_listeners:
for index, listener in enumerate(complex_listeners):
i = index + 1
protocol = listener[2].upper()
InstanceProtocol = listener[3].upper()
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
params['Listeners.member.%d.InstanceProtocol' % i] = listener[3]
if protocol == 'HTTPS' or protocol == 'SSL':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[4]
if zones:
self.build_list_params(params, zones, 'AvailabilityZones.member.%d')
if subnets:
self.build_list_params(params, subnets, 'Subnets.member.%d')
if security_groups:
self.build_list_params(params, security_groups,
'SecurityGroups.member.%d')
load_balancer = self.get_object('CreateLoadBalancer',
params, LoadBalancer)
load_balancer.name = name
load_balancer.listeners = listeners
load_balancer.availability_zones = zones
load_balancer.subnets = subnets
load_balancer.security_groups = security_groups
return load_balancer
def create_load_balancer_listeners(self, name, listeners=None,
complex_listeners=None):
"""
Creates a Listener (or group of listeners) for an existing
Load Balancer
:type name: string
:param name: The name of the load balancer to create the listeners for
:type listeners: List of tuples
:param listeners: Each tuple contains three or four values,
(LoadBalancerPortNumber, InstancePortNumber, Protocol,
[SSLCertificateId]) where LoadBalancerPortNumber and
InstancePortNumber are integer values between 1 and 65535,
            Protocol is a string containing either 'TCP', 'SSL', 'HTTP', or
            'HTTPS'; SSLCertificateId is the ARN of an AWS IAM
            certificate, and must be specified when doing HTTPS.
:type complex_listeners: List of tuples
:param complex_listeners: Each tuple contains four or five values,
(LoadBalancerPortNumber, InstancePortNumber, Protocol,
InstanceProtocol, SSLCertificateId).
Where:
- LoadBalancerPortNumber and InstancePortNumber are integer
values between 1 and 65535
- Protocol and InstanceProtocol is a string containing
either 'TCP',
'SSL', 'HTTP', or 'HTTPS'
- SSLCertificateId is the ARN of an SSL certificate loaded into
AWS IAM
:return: The status of the request
"""
if not listeners and not complex_listeners:
# Must specify one of the two options
return None
params = {'LoadBalancerName': name}
# Handle the simple listeners
if listeners:
for index, listener in enumerate(listeners):
i = index + 1
protocol = listener[2].upper()
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
if protocol == 'HTTPS' or protocol == 'SSL':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[3]
# Handle the full listeners
if complex_listeners:
for index, listener in enumerate(complex_listeners):
i = index + 1
protocol = listener[2].upper()
InstanceProtocol = listener[3].upper()
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
params['Listeners.member.%d.InstanceProtocol' % i] = listener[3]
if protocol == 'HTTPS' or protocol == 'SSL':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[4]
return self.get_status('CreateLoadBalancerListeners', params)
def delete_load_balancer(self, name):
"""
Delete a Load Balancer from your account.
:type name: string
:param name: The name of the Load Balancer to delete
"""
params = {'LoadBalancerName': name}
return self.get_status('DeleteLoadBalancer', params)
def delete_load_balancer_listeners(self, name, ports):
"""
Deletes a load balancer listener (or group of listeners)
:type name: string
:param name: The name of the load balancer to create the listeners for
:type ports: List int
:param ports: Each int represents the port on the ELB to be removed
:return: The status of the request
"""
params = {'LoadBalancerName': name}
for index, port in enumerate(ports):
params['LoadBalancerPorts.member.%d' % (index + 1)] = port
return self.get_status('DeleteLoadBalancerListeners', params)
def enable_availability_zones(self, load_balancer_name, zones_to_add):
"""
Add availability zones to an existing Load Balancer
All zones must be in the same region as the Load Balancer
Adding zones that are already registered with the Load Balancer
has no effect.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type zones: List of strings
:param zones: The name of the zone(s) to add.
:rtype: List of strings
:return: An updated list of zones for this Load Balancer.
"""
params = {'LoadBalancerName': load_balancer_name}
self.build_list_params(params, zones_to_add,
'AvailabilityZones.member.%d')
obj = self.get_object('EnableAvailabilityZonesForLoadBalancer',
params, LoadBalancerZones)
return obj.zones
def disable_availability_zones(self, load_balancer_name, zones_to_remove):
"""
Remove availability zones from an existing Load Balancer.
All zones must be in the same region as the Load Balancer.
Removing zones that are not registered with the Load Balancer
has no effect.
You cannot remove all zones from an Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type zones: List of strings
:param zones: The name of the zone(s) to remove.
:rtype: List of strings
:return: An updated list of zones for this Load Balancer.
"""
params = {'LoadBalancerName': load_balancer_name}
self.build_list_params(params, zones_to_remove,
'AvailabilityZones.member.%d')
obj = self.get_object('DisableAvailabilityZonesForLoadBalancer',
params, LoadBalancerZones)
return obj.zones
def modify_lb_attribute(self, load_balancer_name, attribute, value):
"""Changes an attribute of a Load Balancer
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type attribute: string
:param attribute: The attribute you wish to change.
* crossZoneLoadBalancing - Boolean (true)
* connectingSettings - :py:class:`ConnectionSettingAttribute` instance
* accessLog - :py:class:`AccessLogAttribute` instance
* connectionDraining - :py:class:`ConnectionDrainingAttribute` instance
:type value: string
:param value: The new value for the attribute
:rtype: bool
:return: Whether the operation succeeded or not
"""
bool_reqs = ('crosszoneloadbalancing',)
if attribute.lower() in bool_reqs:
if isinstance(value, bool):
if value:
value = 'true'
else:
value = 'false'
params = {'LoadBalancerName': load_balancer_name}
if attribute.lower() == 'crosszoneloadbalancing':
            params['LoadBalancerAttributes.CrossZoneLoadBalancing.Enabled'] = \
                value
elif attribute.lower() == 'accesslog':
params['LoadBalancerAttributes.AccessLog.Enabled'] = \
value.enabled and 'true' or 'false'
params['LoadBalancerAttributes.AccessLog.S3BucketName'] = \
value.s3_bucket_name
params['LoadBalancerAttributes.AccessLog.S3BucketPrefix'] = \
value.s3_bucket_prefix
params['LoadBalancerAttributes.AccessLog.EmitInterval'] = \
value.emit_interval
elif attribute.lower() == 'connectiondraining':
params['LoadBalancerAttributes.ConnectionDraining.Enabled'] = \
value.enabled and 'true' or 'false'
params['LoadBalancerAttributes.ConnectionDraining.Timeout'] = \
value.timeout
elif attribute.lower() == 'connectingsettings':
params['LoadBalancerAttributes.ConnectionSettings.IdleTimeout'] = \
value.idle_timeout
else:
raise ValueError('InvalidAttribute', attribute)
return self.get_status('ModifyLoadBalancerAttributes', params,
verb='GET')
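    # A hedged sketch of an attribute update (assumes `conn` is an
    # ELBConnection; the balancer name and timeout are illustrative):
    #
    #     from boto.ec2.elb.attributes import ConnectionDrainingAttribute
    #     draining = ConnectionDrainingAttribute()
    #     draining.enabled = True
    #     draining.timeout = 300
    #     conn.modify_lb_attribute('example-lb', 'connectionDraining', draining)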
def get_all_lb_attributes(self, load_balancer_name):
"""Gets all Attributes of a Load Balancer
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:rtype: boto.ec2.elb.attribute.LbAttributes
:return: The attribute object of the ELB.
"""
from boto.ec2.elb.attributes import LbAttributes
params = {'LoadBalancerName': load_balancer_name}
return self.get_object('DescribeLoadBalancerAttributes',
params, LbAttributes)
def get_lb_attribute(self, load_balancer_name, attribute):
"""Gets an attribute of a Load Balancer
This will make an EC2 call for each method call.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type attribute: string
:param attribute: The attribute you wish to see.
* accessLog - :py:class:`AccessLogAttribute` instance
* crossZoneLoadBalancing - Boolean
* connectingSettings - :py:class:`ConnectionSettingAttribute` instance
* connectionDraining - :py:class:`ConnectionDrainingAttribute`
instance
:rtype: Attribute dependent
:return: The new value for the attribute
"""
attributes = self.get_all_lb_attributes(load_balancer_name)
if attribute.lower() == 'accesslog':
return attributes.access_log
if attribute.lower() == 'crosszoneloadbalancing':
return attributes.cross_zone_load_balancing.enabled
if attribute.lower() == 'connectiondraining':
return attributes.connection_draining
if attribute.lower() == 'connectingsettings':
return attributes.connecting_settings
return None
def register_instances(self, load_balancer_name, instances):
"""
Add new Instances to an existing Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type instances: List of strings
:param instances: The instance ID's of the EC2 instances to add.
:rtype: List of strings
:return: An updated list of instances for this Load Balancer.
"""
params = {'LoadBalancerName': load_balancer_name}
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
return self.get_list('RegisterInstancesWithLoadBalancer',
params, [('member', InstanceInfo)])
def deregister_instances(self, load_balancer_name, instances):
"""
Remove Instances from an existing Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type instances: List of strings
:param instances: The instance ID's of the EC2 instances to remove.
:rtype: List of strings
:return: An updated list of instances for this Load Balancer.
"""
params = {'LoadBalancerName': load_balancer_name}
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
return self.get_list('DeregisterInstancesFromLoadBalancer',
params, [('member', InstanceInfo)])
def describe_instance_health(self, load_balancer_name, instances=None):
"""
Get current state of all Instances registered to an Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type instances: List of strings
:param instances: The instance ID's of the EC2 instances
to return status for. If not provided,
the state of all instances will be returned.
:rtype: List of :class:`boto.ec2.elb.instancestate.InstanceState`
:return: list of state info for instances in this Load Balancer.
"""
params = {'LoadBalancerName': load_balancer_name}
if instances:
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
return self.get_list('DescribeInstanceHealth', params,
[('member', InstanceState)])
def configure_health_check(self, name, health_check):
"""
Define a health check for the EndPoints.
:type name: string
:param name: The mnemonic name associated with the load balancer
:type health_check: :class:`boto.ec2.elb.healthcheck.HealthCheck`
:param health_check: A HealthCheck object populated with the desired
values.
:rtype: :class:`boto.ec2.elb.healthcheck.HealthCheck`
:return: The updated :class:`boto.ec2.elb.healthcheck.HealthCheck`
"""
params = {'LoadBalancerName': name,
'HealthCheck.Timeout': health_check.timeout,
'HealthCheck.Target': health_check.target,
'HealthCheck.Interval': health_check.interval,
'HealthCheck.UnhealthyThreshold': health_check.unhealthy_threshold,
'HealthCheck.HealthyThreshold': health_check.healthy_threshold}
return self.get_object('ConfigureHealthCheck', params, HealthCheck)
def set_lb_listener_SSL_certificate(self, lb_name, lb_port,
ssl_certificate_id):
"""
Sets the certificate that terminates the specified listener's SSL
connections. The specified certificate replaces any prior certificate
that was used on the same LoadBalancer and port.
"""
params = {'LoadBalancerName': lb_name,
'LoadBalancerPort': lb_port,
'SSLCertificateId': ssl_certificate_id}
return self.get_status('SetLoadBalancerListenerSSLCertificate', params)
def create_app_cookie_stickiness_policy(self, name, lb_name, policy_name):
"""
Generates a stickiness policy with sticky session lifetimes that follow
that of an application-generated cookie. This policy can only be
associated with HTTP listeners.
This policy is similar to the policy created by
CreateLBCookieStickinessPolicy, except that the lifetime of the special
Elastic Load Balancing cookie follows the lifetime of the
application-generated cookie specified in the policy configuration. The
load balancer only inserts a new stickiness cookie when the application
response includes a new application cookie.
If the application cookie is explicitly removed or expires, the session
stops being sticky until a new application cookie is issued.
"""
params = {'CookieName': name,
'LoadBalancerName': lb_name,
'PolicyName': policy_name}
return self.get_status('CreateAppCookieStickinessPolicy', params)
def create_lb_cookie_stickiness_policy(self, cookie_expiration_period,
lb_name, policy_name):
"""
Generates a stickiness policy with sticky session lifetimes controlled
by the lifetime of the browser (user-agent) or a specified expiration
        period. This policy can be associated only with HTTP listeners.
When a load balancer implements this policy, the load balancer uses a
special cookie to track the backend server instance for each request.
When the load balancer receives a request, it first checks to see if
this cookie is present in the request. If so, the load balancer sends
the request to the application server specified in the cookie. If not,
the load balancer sends the request to a server that is chosen based on
the existing load balancing algorithm.
A cookie is inserted into the response for binding subsequent requests
from the same user to that server. The validity of the cookie is based
on the cookie expiration time, which is specified in the policy
configuration.
None may be passed for cookie_expiration_period.
"""
params = {'LoadBalancerName': lb_name,
'PolicyName': policy_name}
if cookie_expiration_period is not None:
params['CookieExpirationPeriod'] = cookie_expiration_period
return self.get_status('CreateLBCookieStickinessPolicy', params)
def create_lb_policy(self, lb_name, policy_name, policy_type,
policy_attributes):
"""
Creates a new policy that contains the necessary attributes
depending on the policy type. Policies are settings that are
saved for your load balancer and that can be applied to the
front-end listener, or the back-end application server.
"""
params = {'LoadBalancerName': lb_name,
'PolicyName': policy_name,
'PolicyTypeName': policy_type}
        if policy_attributes:
            for index, (name, value) in enumerate(
                    six.iteritems(policy_attributes), 1):
                params['PolicyAttributes.member.%d.AttributeName' % index] = name
                params['PolicyAttributes.member.%d.AttributeValue' % index] = value
        else:
            # The original `for ... else` always ran this clause (a loop's
            # `else` fires whenever the loop finishes without `break`), so an
            # empty PolicyAttributes entry was sent alongside the real ones;
            # only send it when no attributes were given.
            params['PolicyAttributes'] = ''
return self.get_status('CreateLoadBalancerPolicy', params)
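    # A hedged sketch: enabling the proxy protocol on back-end port 8080
    # (ProxyProtocolPolicyType is a standard ELB policy type; the balancer
    # and policy names are illustrative):
    #
    #     conn.create_lb_policy('example-lb', 'enable-proxy-protocol',
    #                           'ProxyProtocolPolicyType',
    #                           {'ProxyProtocol': True})
    #     conn.set_lb_policies_of_backend_server('example-lb', 8080,
    #                                            ['enable-proxy-protocol'])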
def delete_lb_policy(self, lb_name, policy_name):
"""
Deletes a policy from the LoadBalancer. The specified policy must not
be enabled for any listeners.
"""
params = {'LoadBalancerName': lb_name,
'PolicyName': policy_name}
return self.get_status('DeleteLoadBalancerPolicy', params)
def set_lb_policies_of_listener(self, lb_name, lb_port, policies):
"""
Associates, updates, or disables a policy with a listener on the load
balancer. Currently only zero (0) or one (1) policy can be associated
with a listener.
"""
params = {'LoadBalancerName': lb_name,
'LoadBalancerPort': lb_port}
if len(policies):
self.build_list_params(params, policies, 'PolicyNames.member.%d')
else:
params['PolicyNames'] = ''
return self.get_status('SetLoadBalancerPoliciesOfListener', params)
def set_lb_policies_of_backend_server(self, lb_name, instance_port,
policies):
"""
Replaces the current set of policies associated with a port on which
the back-end server is listening with a new set of policies.
"""
params = {'LoadBalancerName': lb_name,
'InstancePort': instance_port}
if policies:
self.build_list_params(params, policies, 'PolicyNames.member.%d')
else:
params['PolicyNames'] = ''
return self.get_status('SetLoadBalancerPoliciesForBackendServer',
params)
def apply_security_groups_to_lb(self, name, security_groups):
"""
Associates one or more security groups with the load balancer.
The provided security groups will override any currently applied
security groups.
:type name: string
:param name: The name of the Load Balancer
:type security_groups: List of strings
:param security_groups: The name of the security group(s) to add.
:rtype: List of strings
:return: An updated list of security groups for this Load Balancer.
"""
params = {'LoadBalancerName': name}
self.build_list_params(params, security_groups,
'SecurityGroups.member.%d')
return self.get_list('ApplySecurityGroupsToLoadBalancer',
params, None)
def attach_lb_to_subnets(self, name, subnets):
"""
Attaches load balancer to one or more subnets.
Attaching subnets that are already registered with the
Load Balancer has no effect.
:type name: string
:param name: The name of the Load Balancer
:type subnets: List of strings
:param subnets: The name of the subnet(s) to add.
:rtype: List of strings
:return: An updated list of subnets for this Load Balancer.
"""
params = {'LoadBalancerName': name}
self.build_list_params(params, subnets,
'Subnets.member.%d')
return self.get_list('AttachLoadBalancerToSubnets',
params, None)
def detach_lb_from_subnets(self, name, subnets):
"""
Detaches load balancer from one or more subnets.
:type name: string
:param name: The name of the Load Balancer
:type subnets: List of strings
:param subnets: The name of the subnet(s) to detach.
:rtype: List of strings
:return: An updated list of subnets for this Load Balancer.
"""
params = {'LoadBalancerName': name}
self.build_list_params(params, subnets,
'Subnets.member.%d')
return self.get_list('DetachLoadBalancerFromSubnets',
params, None)
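# A hedged end-to-end sketch: create a balancer, attach a health check, and
# register an instance (the zone, ports and instance ID are illustrative).
if __name__ == '__main__':
    conn = connect_to_region('us-east-1')
    lb = conn.create_load_balancer('example-lb', ['us-east-1a'],
                                   listeners=[(80, 8080, 'HTTP')])
    hc = HealthCheck(target='HTTP:8080/health', interval=20)
    conn.configure_health_check(lb.name, hc)
    conn.register_instances(lb.name, ['i-0123456789abcdef0'])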
# --- end of vendored file: boto/ec2/elb/__init__.py ---
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
class ElastiCacheConnection(AWSQueryConnection):
"""
Amazon ElastiCache
Amazon ElastiCache is a web service that makes it easier to set
up, operate, and scale a distributed cache in the cloud.
With ElastiCache, customers gain all of the benefits of a high-
performance, in-memory cache with far less of the administrative
burden of launching and managing a distributed cache. The service
makes set-up, scaling, and cluster failure handling much simpler
than in a self-managed cache deployment.
In addition, through integration with Amazon CloudWatch, customers
get enhanced visibility into the key performance statistics
associated with their cache and can receive alarms if a part of
their cache runs hot.
"""
APIVersion = "2013-06-15"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "elasticache.us-east-1.amazonaws.com"
def __init__(self, **kwargs):
region = kwargs.get('region')
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
else:
del kwargs['region']
kwargs['host'] = region.endpoint
super(ElastiCacheConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def authorize_cache_security_group_ingress(self,
cache_security_group_name,
ec2_security_group_name,
ec2_security_group_owner_id):
"""
The AuthorizeCacheSecurityGroupIngress operation allows
network ingress to a cache security group. Applications using
ElastiCache must be running on Amazon EC2, and Amazon EC2
security groups are used as the authorization mechanism.
You cannot authorize ingress from an Amazon EC2 security group
in one Region to an ElastiCache cluster in another Region.
:type cache_security_group_name: string
:param cache_security_group_name: The cache security group which will
allow network ingress.
:type ec2_security_group_name: string
:param ec2_security_group_name: The Amazon EC2 security group to be
authorized for ingress to the cache security group.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: The AWS account number of the
Amazon EC2 security group owner. Note that this is not the same
thing as an AWS access key ID - you must provide a valid AWS
account number for this parameter.
"""
params = {
'CacheSecurityGroupName': cache_security_group_name,
'EC2SecurityGroupName': ec2_security_group_name,
'EC2SecurityGroupOwnerId': ec2_security_group_owner_id,
}
return self._make_request(
action='AuthorizeCacheSecurityGroupIngress',
verb='POST',
path='/', params=params)
def create_cache_cluster(self, cache_cluster_id, num_cache_nodes=None,
cache_node_type=None, engine=None,
replication_group_id=None, engine_version=None,
cache_parameter_group_name=None,
cache_subnet_group_name=None,
cache_security_group_names=None,
security_group_ids=None, snapshot_arns=None,
preferred_availability_zone=None,
preferred_maintenance_window=None, port=None,
notification_topic_arn=None,
auto_minor_version_upgrade=None):
"""
The CreateCacheCluster operation creates a new cache cluster.
All nodes in the cache cluster run the same protocol-compliant
cache engine software - either Memcached or Redis.
:type cache_cluster_id: string
:param cache_cluster_id:
The cache cluster identifier. This parameter is stored as a lowercase
string.
Constraints:
+ Must contain from 1 to 20 alphanumeric characters or hyphens.
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
:type replication_group_id: string
:param replication_group_id: The replication group to which this cache
cluster should belong. If this parameter is specified, the cache
cluster will be added to the specified replication group as a read
replica; otherwise, the cache cluster will be a standalone primary
that is not part of any replication group.
:type num_cache_nodes: integer
:param num_cache_nodes: The initial number of cache nodes that the
cache cluster will have.
For a Memcached cluster, valid values are between 1 and 20. If you need
to exceed this limit, please fill out the ElastiCache Limit
            Increase Request form.
For Redis, only single-node cache clusters are supported at this time,
so the value for this parameter must be 1.
:type cache_node_type: string
:param cache_node_type: The compute and memory capacity of the nodes in
the cache cluster.
Valid values for Memcached:
`cache.t1.micro` | `cache.m1.small` | `cache.m1.medium` |
`cache.m1.large` | `cache.m1.xlarge` | `cache.m3.xlarge` |
`cache.m3.2xlarge` | `cache.m2.xlarge` | `cache.m2.2xlarge` |
`cache.m2.4xlarge` | `cache.c1.xlarge`
Valid values for Redis:
`cache.t1.micro` | `cache.m1.small` | `cache.m1.medium` |
`cache.m1.large` | `cache.m1.xlarge` | `cache.m2.xlarge` |
`cache.m2.2xlarge` | `cache.m2.4xlarge` | `cache.c1.xlarge`
        For a complete listing of cache node types and specifications, see
            the Amazon ElastiCache documentation.
:type engine: string
:param engine: The name of the cache engine to be used for this cache
cluster.
Valid values for this parameter are:
`memcached` | `redis`
:type engine_version: string
:param engine_version: The version number of the cache engine to be
used for this cluster. To view the supported cache engine versions,
use the DescribeCacheEngineVersions operation.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: The name of the cache parameter
group to associate with this cache cluster. If this argument is
omitted, the default cache parameter group for the specified engine
will be used.
:type cache_subnet_group_name: string
:param cache_subnet_group_name: The name of the cache subnet group to
be used for the cache cluster.
Use this parameter only when you are creating a cluster in an Amazon
Virtual Private Cloud (VPC).
:type cache_security_group_names: list
:param cache_security_group_names: A list of cache security group names
to associate with this cache cluster.
Use this parameter only when you are creating a cluster outside of an
Amazon Virtual Private Cloud (VPC).
:type security_group_ids: list
:param security_group_ids: One or more VPC security groups associated
with the cache cluster.
Use this parameter only when you are creating a cluster in an Amazon
Virtual Private Cloud (VPC).
:type snapshot_arns: list
:param snapshot_arns: A single-element string list containing an Amazon
Resource Name (ARN) that uniquely identifies a Redis RDB snapshot
file stored in Amazon S3. The snapshot file will be used to
populate the Redis cache in the new cache cluster. The Amazon S3
object name in the ARN cannot contain any commas.
Here is an example of an Amazon S3 ARN:
`arn:aws:s3:::my_bucket/snapshot1.rdb`
**Note:** This parameter is only valid if the `Engine` parameter is
`redis`.
:type preferred_availability_zone: string
:param preferred_availability_zone: The EC2 Availability Zone in which
the cache cluster will be created.
All cache nodes belonging to a cache cluster are placed in the
preferred availability zone.
Default: System chosen availability zone.
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which system maintenance can occur.
Example: `sun:05:00-sun:09:00`
:type port: integer
:param port: The port number on which each of the cache nodes will
accept connections.
:type notification_topic_arn: string
:param notification_topic_arn:
The Amazon Resource Name (ARN) of the Amazon Simple Notification
Service (SNS) topic to which notifications will be sent.
The Amazon SNS topic owner must be the same as the cache cluster owner.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Determines whether minor engine
upgrades will be applied automatically to the cache cluster during
the maintenance window. A value of `True` allows these upgrades to
occur; `False` disables automatic upgrades.
Default: `True`
"""
params = {
'CacheClusterId': cache_cluster_id,
}
if num_cache_nodes is not None:
params['NumCacheNodes'] = num_cache_nodes
if cache_node_type is not None:
params['CacheNodeType'] = cache_node_type
if engine is not None:
params['Engine'] = engine
if replication_group_id is not None:
params['ReplicationGroupId'] = replication_group_id
if engine_version is not None:
params['EngineVersion'] = engine_version
if cache_parameter_group_name is not None:
params['CacheParameterGroupName'] = cache_parameter_group_name
if cache_subnet_group_name is not None:
params['CacheSubnetGroupName'] = cache_subnet_group_name
if cache_security_group_names is not None:
self.build_list_params(params,
cache_security_group_names,
'CacheSecurityGroupNames.member')
if security_group_ids is not None:
self.build_list_params(params,
security_group_ids,
'SecurityGroupIds.member')
if snapshot_arns is not None:
self.build_list_params(params,
snapshot_arns,
'SnapshotArns.member')
if preferred_availability_zone is not None:
params['PreferredAvailabilityZone'] = preferred_availability_zone
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if port is not None:
params['Port'] = port
if notification_topic_arn is not None:
params['NotificationTopicArn'] = notification_topic_arn
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
return self._make_request(
action='CreateCacheCluster',
verb='POST',
path='/', params=params)
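    # A minimal creation sketch (hypothetical values; assumes AWS credentials
    # are available through the usual boto configuration):
    #
    #     conn = ElastiCacheConnection()
    #     conn.create_cache_cluster('demo-cluster',
    #                               num_cache_nodes=1,
    #                               cache_node_type='cache.m1.small',
    #                               engine='memcached',
    #                               cache_security_group_names=['default'])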
def create_cache_parameter_group(self, cache_parameter_group_name,
cache_parameter_group_family,
description):
"""
The CreateCacheParameterGroup operation creates a new cache
parameter group. A cache parameter group is a collection of
parameters that you apply to all of the nodes in a cache
cluster.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: A user-specified name for the cache
parameter group.
:type cache_parameter_group_family: string
:param cache_parameter_group_family: The name of the cache parameter
group family the cache parameter group can be used with.
Valid values are: `memcached1.4` | `redis2.6`
:type description: string
:param description: A user-specified description for the cache
parameter group.
"""
params = {
'CacheParameterGroupName': cache_parameter_group_name,
'CacheParameterGroupFamily': cache_parameter_group_family,
'Description': description,
}
return self._make_request(
action='CreateCacheParameterGroup',
verb='POST',
path='/', params=params)
def create_cache_security_group(self, cache_security_group_name,
description):
"""
The CreateCacheSecurityGroup operation creates a new cache
security group. Use a cache security group to control access
to one or more cache clusters.
Cache security groups are only used when you are creating a
cluster outside of an Amazon Virtual Private Cloud (VPC). If
you are creating a cluster inside of a VPC, use a cache subnet
group instead. For more information, see
CreateCacheSubnetGroup .
:type cache_security_group_name: string
:param cache_security_group_name: A name for the cache security group.
This value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters.
Must not be the word "Default".
Example: `mysecuritygroup`
:type description: string
:param description: A description for the cache security group.
"""
params = {
'CacheSecurityGroupName': cache_security_group_name,
'Description': description,
}
return self._make_request(
action='CreateCacheSecurityGroup',
verb='POST',
path='/', params=params)
def create_cache_subnet_group(self, cache_subnet_group_name,
cache_subnet_group_description, subnet_ids):
"""
The CreateCacheSubnetGroup operation creates a new cache
subnet group.
Use this parameter only when you are creating a cluster in an
Amazon Virtual Private Cloud (VPC).
:type cache_subnet_group_name: string
:param cache_subnet_group_name: A name for the cache subnet group. This
value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters or
hyphens.
Example: `mysubnetgroup`
:type cache_subnet_group_description: string
:param cache_subnet_group_description: A description for the cache
subnet group.
:type subnet_ids: list
:param subnet_ids: A list of VPC subnet IDs for the cache subnet group.
"""
params = {
'CacheSubnetGroupName': cache_subnet_group_name,
'CacheSubnetGroupDescription': cache_subnet_group_description,
}
self.build_list_params(params,
subnet_ids,
'SubnetIds.member')
return self._make_request(
action='CreateCacheSubnetGroup',
verb='POST',
path='/', params=params)
def create_replication_group(self, replication_group_id,
primary_cluster_id,
replication_group_description):
"""
The CreateReplicationGroup operation creates a replication
group. A replication group is a collection of cache clusters,
where one of the clusters is a read/write primary and the
other clusters are read-only replicas. Writes to the primary
are automatically propagated to the replicas.
When you create a replication group, you must specify an
existing cache cluster that is in the primary role. When the
replication group has been successfully created, you can add
        one or more read replicas to it, up to a total of five
read replicas.
:type replication_group_id: string
:param replication_group_id:
The replication group identifier. This parameter is stored as a
lowercase string.
Constraints:
+ Must contain from 1 to 20 alphanumeric characters or hyphens.
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
:type primary_cluster_id: string
:param primary_cluster_id: The identifier of the cache cluster that
will serve as the primary for this replication group. This cache
cluster must already exist and have a status of available .
:type replication_group_description: string
:param replication_group_description: A user-specified description for
the replication group.
"""
params = {
'ReplicationGroupId': replication_group_id,
'PrimaryClusterId': primary_cluster_id,
'ReplicationGroupDescription': replication_group_description,
}
return self._make_request(
action='CreateReplicationGroup',
verb='POST',
path='/', params=params)
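    # Sketch (hypothetical identifiers): the primary cache cluster must
    # already exist and be 'available' before the group can be created:
    #
    #     conn.create_cache_cluster('redis-primary', num_cache_nodes=1,
    #                               cache_node_type='cache.m1.small',
    #                               engine='redis')
    #     # ... wait for 'redis-primary' to become available, then:
    #     conn.create_replication_group('demo-rg', 'redis-primary',
    #                                   'example replication group')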
def delete_cache_cluster(self, cache_cluster_id):
"""
The DeleteCacheCluster operation deletes a previously
provisioned cache cluster. DeleteCacheCluster deletes all
associated cache nodes, node endpoints and the cache cluster
itself. When you receive a successful response from this
operation, Amazon ElastiCache immediately begins deleting the
cache cluster; you cannot cancel or revert this operation.
:type cache_cluster_id: string
:param cache_cluster_id: The cache cluster identifier for the cluster
to be deleted. This parameter is not case sensitive.
"""
params = {'CacheClusterId': cache_cluster_id, }
return self._make_request(
action='DeleteCacheCluster',
verb='POST',
path='/', params=params)
def delete_cache_parameter_group(self, cache_parameter_group_name):
"""
The DeleteCacheParameterGroup operation deletes the specified
cache parameter group. You cannot delete a cache parameter
group if it is associated with any cache clusters.
:type cache_parameter_group_name: string
:param cache_parameter_group_name:
The name of the cache parameter group to delete.
The specified cache security group must not be associated with any
cache clusters.
"""
params = {
'CacheParameterGroupName': cache_parameter_group_name,
}
return self._make_request(
action='DeleteCacheParameterGroup',
verb='POST',
path='/', params=params)
def delete_cache_security_group(self, cache_security_group_name):
"""
The DeleteCacheSecurityGroup operation deletes a cache
security group.
You cannot delete a cache security group if it is associated
with any cache clusters.
:type cache_security_group_name: string
:param cache_security_group_name:
The name of the cache security group to delete.
You cannot delete the default security group.
"""
params = {
'CacheSecurityGroupName': cache_security_group_name,
}
return self._make_request(
action='DeleteCacheSecurityGroup',
verb='POST',
path='/', params=params)
def delete_cache_subnet_group(self, cache_subnet_group_name):
"""
The DeleteCacheSubnetGroup operation deletes a cache subnet
group.
You cannot delete a cache subnet group if it is associated
with any cache clusters.
:type cache_subnet_group_name: string
:param cache_subnet_group_name: The name of the cache subnet group to
delete.
Constraints: Must contain no more than 255 alphanumeric characters or
hyphens.
"""
params = {'CacheSubnetGroupName': cache_subnet_group_name, }
return self._make_request(
action='DeleteCacheSubnetGroup',
verb='POST',
path='/', params=params)
def delete_replication_group(self, replication_group_id):
"""
The DeleteReplicationGroup operation deletes an existing
replication group. DeleteReplicationGroup deletes the primary
cache cluster and all of the read replicas in the replication
group. When you receive a successful response from this
operation, Amazon ElastiCache immediately begins deleting the
entire replication group; you cannot cancel or revert this
operation.
:type replication_group_id: string
:param replication_group_id: The identifier for the replication group
to be deleted. This parameter is not case sensitive.
"""
params = {'ReplicationGroupId': replication_group_id, }
return self._make_request(
action='DeleteReplicationGroup',
verb='POST',
path='/', params=params)
def describe_cache_clusters(self, cache_cluster_id=None,
max_records=None, marker=None,
show_cache_node_info=None):
"""
The DescribeCacheClusters operation returns information about
all provisioned cache clusters if no cache cluster identifier
is specified, or about a specific cache cluster if a cache
cluster identifier is supplied.
By default, abbreviated information about the cache
        cluster(s) will be returned. You can use the optional
        ShowCacheNodeInfo flag to retrieve detailed information about the
cache nodes associated with the cache clusters. These details
include the DNS address and port for the cache node endpoint.
If the cluster is in the CREATING state, only cluster level
information will be displayed until all of the nodes are
successfully provisioned.
If the cluster is in the DELETING state, only cluster level
information will be displayed.
If cache nodes are currently being added to the cache cluster,
node endpoint information and creation time for the additional
nodes will not be displayed until they are completely
provisioned. When the cache cluster state is available , the
cluster is ready for use.
If cache nodes are currently being removed from the cache
cluster, no endpoint information for the removed nodes is
displayed.
:type cache_cluster_id: string
:param cache_cluster_id: The user-supplied cluster identifier. If this
parameter is specified, only information about that specific cache
cluster is returned. This parameter isn't case sensitive.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
the marker, up to the value specified by MaxRecords .
:type show_cache_node_info: boolean
:param show_cache_node_info: An optional flag that can be included in
the DescribeCacheCluster request to retrieve information about the
individual cache nodes.
"""
params = {}
if cache_cluster_id is not None:
params['CacheClusterId'] = cache_cluster_id
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
if show_cache_node_info is not None:
params['ShowCacheNodeInfo'] = str(
show_cache_node_info).lower()
return self._make_request(
action='DescribeCacheClusters',
verb='POST',
path='/', params=params)
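    # Pagination sketch using Marker/MaxRecords (the nested key layout below
    # reflects the JSON wire format returned via _make_request and is an
    # assumption about the response shape):
    #
    #     marker = None
    #     while True:
    #         result = conn.describe_cache_clusters(max_records=20,
    #                                               marker=marker)
    #         data = (result['DescribeCacheClustersResponse']
    #                       ['DescribeCacheClustersResult'])
    #         for cluster in data['CacheClusters']:
    #             print(cluster['CacheClusterId'])
    #         marker = data.get('Marker')
    #         if not marker:
    #             break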
def describe_cache_engine_versions(self, engine=None,
engine_version=None,
cache_parameter_group_family=None,
max_records=None, marker=None,
default_only=None):
"""
The DescribeCacheEngineVersions operation returns a list of
the available cache engines and their versions.
:type engine: string
:param engine: The cache engine to return. Valid values: `memcached` |
`redis`
:type engine_version: string
:param engine_version: The cache engine version to return.
Example: `1.4.14`
:type cache_parameter_group_family: string
:param cache_parameter_group_family:
The name of a specific cache parameter group family to return details
for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
the marker, up to the value specified by MaxRecords .
:type default_only: boolean
:param default_only: If true , specifies that only the default version
of the specified engine or engine and major version combination is
to be returned.
"""
params = {}
if engine is not None:
params['Engine'] = engine
if engine_version is not None:
params['EngineVersion'] = engine_version
if cache_parameter_group_family is not None:
params['CacheParameterGroupFamily'] = cache_parameter_group_family
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
if default_only is not None:
params['DefaultOnly'] = str(
default_only).lower()
return self._make_request(
action='DescribeCacheEngineVersions',
verb='POST',
path='/', params=params)
def describe_cache_parameter_groups(self,
cache_parameter_group_name=None,
max_records=None, marker=None):
"""
The DescribeCacheParameterGroups operation returns a list of
cache parameter group descriptions. If a cache parameter group
name is specified, the list will contain only the descriptions
for that group.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: The name of a specific cache
parameter group to return details for.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
the marker, up to the value specified by MaxRecords .
"""
params = {}
if cache_parameter_group_name is not None:
params['CacheParameterGroupName'] = cache_parameter_group_name
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeCacheParameterGroups',
verb='POST',
path='/', params=params)
def describe_cache_parameters(self, cache_parameter_group_name,
source=None, max_records=None, marker=None):
"""
The DescribeCacheParameters operation returns the detailed
parameter list for a particular cache parameter group.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: The name of a specific cache
parameter group to return details for.
:type source: string
:param source: The parameter types to return.
Valid values: `user` | `system` | `engine-default`
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
the marker, up to the value specified by MaxRecords .
"""
params = {
'CacheParameterGroupName': cache_parameter_group_name,
}
if source is not None:
params['Source'] = source
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeCacheParameters',
verb='POST',
path='/', params=params)
def describe_cache_security_groups(self, cache_security_group_name=None,
max_records=None, marker=None):
"""
The DescribeCacheSecurityGroups operation returns a list of
cache security group descriptions. If a cache security group
name is specified, the list will contain only the description
of that group.
:type cache_security_group_name: string
:param cache_security_group_name: The name of the cache security group
to return details for.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
the marker, up to the value specified by MaxRecords .
"""
params = {}
if cache_security_group_name is not None:
params['CacheSecurityGroupName'] = cache_security_group_name
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeCacheSecurityGroups',
verb='POST',
path='/', params=params)
def describe_cache_subnet_groups(self, cache_subnet_group_name=None,
max_records=None, marker=None):
"""
The DescribeCacheSubnetGroups operation returns a list of
cache subnet group descriptions. If a subnet group name is
specified, the list will contain only the description of that
group.
:type cache_subnet_group_name: string
:param cache_subnet_group_name: The name of the cache subnet group to
return details for.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
the marker, up to the value specified by MaxRecords .
"""
params = {}
if cache_subnet_group_name is not None:
params['CacheSubnetGroupName'] = cache_subnet_group_name
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeCacheSubnetGroups',
verb='POST',
path='/', params=params)
def describe_engine_default_parameters(self,
cache_parameter_group_family,
max_records=None, marker=None):
"""
The DescribeEngineDefaultParameters operation returns the
default engine and system parameter information for the
specified cache engine.
:type cache_parameter_group_family: string
:param cache_parameter_group_family: The name of the cache parameter
group family. Valid values are: `memcached1.4` | `redis2.6`
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
the marker, up to the value specified by MaxRecords .
"""
params = {
'CacheParameterGroupFamily': cache_parameter_group_family,
}
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEngineDefaultParameters',
verb='POST',
path='/', params=params)
def describe_events(self, source_identifier=None, source_type=None,
start_time=None, end_time=None, duration=None,
max_records=None, marker=None):
"""
The DescribeEvents operation returns events related to cache
clusters, cache security groups, and cache parameter groups.
You can obtain events specific to a particular cache cluster,
cache security group, or cache parameter group by providing
the name as a parameter.
By default, only the events occurring within the last hour are
returned; however, you can retrieve up to 14 days' worth of
events if necessary.
:type source_identifier: string
:param source_identifier: The identifier of the event source for which
events will be returned. If not specified, then all sources are
included in the response.
:type source_type: string
:param source_type: The event source to retrieve events for. If no
value is specified, all events are returned.
Valid values are: `cache-cluster` | `cache-parameter-group` | `cache-
security-group` | `cache-subnet-group`
:type start_time: timestamp
:param start_time: The beginning of the time interval to retrieve
events for, specified in ISO 8601 format.
:type end_time: timestamp
:param end_time: The end of the time interval for which to retrieve
events, specified in ISO 8601 format.
:type duration: integer
:param duration: The number of minutes' worth of events to retrieve.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
the marker, up to the value specified by MaxRecords .
"""
params = {}
if source_identifier is not None:
params['SourceIdentifier'] = source_identifier
if source_type is not None:
params['SourceType'] = source_type
if start_time is not None:
params['StartTime'] = start_time
if end_time is not None:
params['EndTime'] = end_time
if duration is not None:
params['Duration'] = duration
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEvents',
verb='POST',
path='/', params=params)
def describe_replication_groups(self, replication_group_id=None,
max_records=None, marker=None):
"""
The DescribeReplicationGroups operation returns information
about a particular replication group. If no identifier is
specified, DescribeReplicationGroups returns information about
all replication groups.
:type replication_group_id: string
:param replication_group_id: The identifier for the replication group
to be described. This parameter is not case sensitive.
If you do not specify this parameter, information about all replication
groups is returned.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
the marker, up to the value specified by MaxRecords .
"""
params = {}
if replication_group_id is not None:
params['ReplicationGroupId'] = replication_group_id
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReplicationGroups',
verb='POST',
path='/', params=params)
def describe_reserved_cache_nodes(self, reserved_cache_node_id=None,
reserved_cache_nodes_offering_id=None,
cache_node_type=None, duration=None,
product_description=None,
offering_type=None, max_records=None,
marker=None):
"""
The DescribeReservedCacheNodes operation returns information
about reserved cache nodes for this account, or about a
specified reserved cache node.
:type reserved_cache_node_id: string
:param reserved_cache_node_id: The reserved cache node identifier
filter value. Use this parameter to show only the reservation that
matches the specified reservation ID.
:type reserved_cache_nodes_offering_id: string
:param reserved_cache_nodes_offering_id: The offering identifier filter
value. Use this parameter to show only purchased reservations
matching the specified offering identifier.
:type cache_node_type: string
:param cache_node_type: The cache node type filter value. Use this
parameter to show only those reservations matching the specified
cache node type.
:type duration: string
:param duration: The duration filter value, specified in years or
seconds. Use this parameter to show only reservations for this
duration.
Valid Values: `1 | 3 | 31536000 | 94608000`
:type product_description: string
:param product_description: The product description filter value. Use
this parameter to show only those reservations matching the
specified product description.
:type offering_type: string
:param offering_type: The offering type filter value. Use this
parameter to show only the available offerings matching the
specified offering type.
Valid values: `"Light Utilization" | "Medium Utilization" | "Heavy
Utilization" `
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
the marker, up to the value specified by MaxRecords .
"""
params = {}
if reserved_cache_node_id is not None:
params['ReservedCacheNodeId'] = reserved_cache_node_id
if reserved_cache_nodes_offering_id is not None:
params['ReservedCacheNodesOfferingId'] = reserved_cache_nodes_offering_id
if cache_node_type is not None:
params['CacheNodeType'] = cache_node_type
if duration is not None:
params['Duration'] = duration
if product_description is not None:
params['ProductDescription'] = product_description
if offering_type is not None:
params['OfferingType'] = offering_type
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedCacheNodes',
verb='POST',
path='/', params=params)
def describe_reserved_cache_nodes_offerings(self,
reserved_cache_nodes_offering_id=None,
cache_node_type=None,
duration=None,
product_description=None,
offering_type=None,
max_records=None,
marker=None):
"""
The DescribeReservedCacheNodesOfferings operation lists
available reserved cache node offerings.
:type reserved_cache_nodes_offering_id: string
:param reserved_cache_nodes_offering_id: The offering identifier filter
value. Use this parameter to show only the available offering that
matches the specified reservation identifier.
Example: `438012d3-4052-4cc7-b2e3-8d3372e0e706`
:type cache_node_type: string
:param cache_node_type: The cache node type filter value. Use this
parameter to show only the available offerings matching the
specified cache node type.
:type duration: string
:param duration: Duration filter value, specified in years or seconds.
Use this parameter to show only reservations for a given duration.
Valid Values: `1 | 3 | 31536000 | 94608000`
:type product_description: string
:param product_description: The product description filter value. Use
this parameter to show only the available offerings matching the
specified product description.
:type offering_type: string
:param offering_type: The offering type filter value. Use this
parameter to show only the available offerings matching the
specified offering type.
Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy
Utilization" `
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
the marker, up to the value specified by MaxRecords .
"""
params = {}
if reserved_cache_nodes_offering_id is not None:
params['ReservedCacheNodesOfferingId'] = reserved_cache_nodes_offering_id
if cache_node_type is not None:
params['CacheNodeType'] = cache_node_type
if duration is not None:
params['Duration'] = duration
if product_description is not None:
params['ProductDescription'] = product_description
if offering_type is not None:
params['OfferingType'] = offering_type
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedCacheNodesOfferings',
verb='POST',
path='/', params=params)
def modify_cache_cluster(self, cache_cluster_id, num_cache_nodes=None,
cache_node_ids_to_remove=None,
cache_security_group_names=None,
security_group_ids=None,
preferred_maintenance_window=None,
notification_topic_arn=None,
cache_parameter_group_name=None,
notification_topic_status=None,
apply_immediately=None, engine_version=None,
auto_minor_version_upgrade=None):
"""
The ModifyCacheCluster operation modifies the settings for a
cache cluster. You can use this operation to change one or
more cluster configuration parameters by specifying the
parameters and the new values.
:type cache_cluster_id: string
:param cache_cluster_id: The cache cluster identifier. This value is
stored as a lowercase string.
:type num_cache_nodes: integer
:param num_cache_nodes: The number of cache nodes that the cache
cluster should have. If the value for NumCacheNodes is greater than
the existing number of cache nodes, then more nodes will be added.
If the value is less than the existing number of cache nodes, then
cache nodes will be removed.
If you are removing cache nodes, you must use the CacheNodeIdsToRemove
parameter to provide the IDs of the specific cache nodes to be
removed.
:type cache_node_ids_to_remove: list
:param cache_node_ids_to_remove: A list of cache node IDs to be
removed. A node ID is a numeric identifier (0001, 0002, etc.). This
parameter is only valid when NumCacheNodes is less than the
existing number of cache nodes. The number of cache node IDs
supplied in this parameter must match the difference between the
existing number of cache nodes in the cluster and the value of
NumCacheNodes in the request.
:type cache_security_group_names: list
:param cache_security_group_names: A list of cache security group names
to authorize on this cache cluster. This change is asynchronously
applied as soon as possible.
This parameter can be used only with clusters that are created outside
of an Amazon Virtual Private Cloud (VPC).
Constraints: Must contain no more than 255 alphanumeric characters.
Must not be "Default".
:type security_group_ids: list
:param security_group_ids: Specifies the VPC Security Groups associated
with the cache cluster.
This parameter can be used only with clusters that are created in an
Amazon Virtual Private Cloud (VPC).
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which system maintenance can occur. Note that system
maintenance may result in an outage. This change is made
immediately. If you are moving this window to the current time,
there must be at least 120 minutes between the current time and end
of the window to ensure that pending changes are applied.
:type notification_topic_arn: string
:param notification_topic_arn:
The Amazon Resource Name (ARN) of the SNS topic to which notifications
will be sent.
The SNS topic owner must be same as the cache cluster owner.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: The name of the cache parameter
group to apply to this cache cluster. This change is asynchronously
applied as soon as possible for parameters when the
ApplyImmediately parameter is specified as true for this request.
:type notification_topic_status: string
:param notification_topic_status: The status of the Amazon SNS
notification topic. Notifications are sent only if the status is
active .
Valid values: `active` | `inactive`
:type apply_immediately: boolean
:param apply_immediately: If `True`, this parameter causes the
modifications in this request and any pending modifications to be
applied, asynchronously and as soon as possible, regardless of the
PreferredMaintenanceWindow setting for the cache cluster.
If `False`, then changes to the cache cluster are applied on the next
maintenance reboot, or the next failure reboot, whichever occurs
first.
Valid values: `True` | `False`
Default: `False`
:type engine_version: string
:param engine_version: The upgraded version of the cache engine to be
run on the cache cluster nodes.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: If `True`, then minor engine
upgrades will be applied automatically to the cache cluster during
the maintenance window.
Valid values: `True` | `False`
Default: `True`
"""
params = {'CacheClusterId': cache_cluster_id, }
if num_cache_nodes is not None:
params['NumCacheNodes'] = num_cache_nodes
if cache_node_ids_to_remove is not None:
self.build_list_params(params,
cache_node_ids_to_remove,
'CacheNodeIdsToRemove.member')
if cache_security_group_names is not None:
self.build_list_params(params,
cache_security_group_names,
'CacheSecurityGroupNames.member')
if security_group_ids is not None:
self.build_list_params(params,
security_group_ids,
'SecurityGroupIds.member')
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if notification_topic_arn is not None:
params['NotificationTopicArn'] = notification_topic_arn
if cache_parameter_group_name is not None:
params['CacheParameterGroupName'] = cache_parameter_group_name
if notification_topic_status is not None:
params['NotificationTopicStatus'] = notification_topic_status
if apply_immediately is not None:
params['ApplyImmediately'] = str(
apply_immediately).lower()
if engine_version is not None:
params['EngineVersion'] = engine_version
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
return self._make_request(
action='ModifyCacheCluster',
verb='POST',
path='/', params=params)
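    # Node-removal sketch (hypothetical cluster): shrinking a three-node
    # cluster to two requires naming exactly one node ID, matching the
    # difference described in the docstring above:
    #
    #     conn.modify_cache_cluster('demo-cluster',
    #                               num_cache_nodes=2,
    #                               cache_node_ids_to_remove=['0003'],
    #                               apply_immediately=True)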
def modify_cache_parameter_group(self, cache_parameter_group_name,
parameter_name_values):
"""
The ModifyCacheParameterGroup operation modifies the
parameters of a cache parameter group. You can modify up to 20
parameters in a single request by submitting a list parameter
name and value pairs.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: The name of the cache parameter
group to modify.
:type parameter_name_values: list
:param parameter_name_values: An array of parameter names and values
for the parameter update. You must supply at least one parameter
name and value; subsequent arguments are optional. A maximum of 20
parameters may be modified per request.
"""
params = {
'CacheParameterGroupName': cache_parameter_group_name,
}
self.build_complex_list_params(
params, parameter_name_values,
'ParameterNameValues.member',
('ParameterName', 'ParameterValue'))
return self._make_request(
action='ModifyCacheParameterGroup',
verb='POST',
path='/', params=params)
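    # Usage sketch: parameter_name_values is a list of (name, value) pairs,
    # matching the ('ParameterName', 'ParameterValue') tuple layout handed to
    # build_complex_list_params above (parameter names are illustrative):
    #
    #     conn.modify_cache_parameter_group(
    #         'my-memcached-params',
    #         [('max_item_size', '10485760'), ('chunk_size', '48')])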
def modify_cache_subnet_group(self, cache_subnet_group_name,
cache_subnet_group_description=None,
subnet_ids=None):
"""
The ModifyCacheSubnetGroup operation modifies an existing
cache subnet group.
:type cache_subnet_group_name: string
:param cache_subnet_group_name: The name for the cache subnet group.
This value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters or
hyphens.
Example: `mysubnetgroup`
:type cache_subnet_group_description: string
:param cache_subnet_group_description: A description for the cache
subnet group.
:type subnet_ids: list
:param subnet_ids: The EC2 subnet IDs for the cache subnet group.
"""
params = {'CacheSubnetGroupName': cache_subnet_group_name, }
if cache_subnet_group_description is not None:
params['CacheSubnetGroupDescription'] = cache_subnet_group_description
if subnet_ids is not None:
self.build_list_params(params,
subnet_ids,
'SubnetIds.member')
return self._make_request(
action='ModifyCacheSubnetGroup',
verb='POST',
path='/', params=params)
def modify_replication_group(self, replication_group_id,
replication_group_description=None,
cache_security_group_names=None,
security_group_ids=None,
preferred_maintenance_window=None,
notification_topic_arn=None,
cache_parameter_group_name=None,
notification_topic_status=None,
apply_immediately=None, engine_version=None,
auto_minor_version_upgrade=None,
primary_cluster_id=None):
"""
The ModifyReplicationGroup operation modifies the settings for
a replication group.
:type replication_group_id: string
:param replication_group_id: The identifier of the replication group to
modify.
:type replication_group_description: string
:param replication_group_description: A description for the replication
group. Maximum length is 255 characters.
:type cache_security_group_names: list
:param cache_security_group_names: A list of cache security group names
to authorize for the clusters in this replication group. This
change is asynchronously applied as soon as possible.
This parameter can be used only with replication groups containing
cache clusters running outside of an Amazon Virtual Private Cloud
(VPC).
Constraints: Must contain no more than 255 alphanumeric characters.
Must not be "Default".
:type security_group_ids: list
:param security_group_ids: Specifies the VPC Security Groups associated
with the cache clusters in the replication group.
This parameter can be used only with replication groups containing
cache clusters running in an Amazon Virtual Private Cloud (VPC).
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which replication group system maintenance can occur. Note
that system maintenance may result in an outage. This change is
made immediately. If you are moving this window to the current
time, there must be at least 120 minutes between the current time
and end of the window to ensure that pending changes are applied.
:type notification_topic_arn: string
:param notification_topic_arn:
The Amazon Resource Name (ARN) of the SNS topic to which notifications
will be sent.
The SNS topic owner must be same as the replication group owner.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: The name of the cache parameter
group to apply to all of the cache nodes in this replication group.
This change is asynchronously applied as soon as possible for
parameters when the ApplyImmediately parameter is specified as true
for this request.
:type notification_topic_status: string
:param notification_topic_status: The status of the Amazon SNS
notification topic for the replication group. Notifications are
sent only if the status is active .
Valid values: `active` | `inactive`
:type apply_immediately: boolean
:param apply_immediately: If `True`, this parameter causes the
modifications in this request and any pending modifications to be
applied, asynchronously and as soon as possible, regardless of the
PreferredMaintenanceWindow setting for the replication group.
If `False`, then changes to the nodes in the replication group are
applied on the next maintenance reboot, or the next failure reboot,
whichever occurs first.
Valid values: `True` | `False`
Default: `False`
:type engine_version: string
:param engine_version: The upgraded version of the cache engine to be
            run on the nodes in the replication group.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Determines whether minor engine
upgrades will be applied automatically to all of the cache nodes in
the replication group during the maintenance window. A value of
`True` allows these upgrades to occur; `False` disables automatic
upgrades.
:type primary_cluster_id: string
:param primary_cluster_id: If this parameter is specified, ElastiCache
will promote each of the nodes in the specified cache cluster to
the primary role. The nodes of all other clusters in the
replication group will be read replicas.
"""
params = {'ReplicationGroupId': replication_group_id, }
if replication_group_description is not None:
params['ReplicationGroupDescription'] = replication_group_description
if cache_security_group_names is not None:
self.build_list_params(params,
cache_security_group_names,
'CacheSecurityGroupNames.member')
if security_group_ids is not None:
self.build_list_params(params,
security_group_ids,
'SecurityGroupIds.member')
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if notification_topic_arn is not None:
params['NotificationTopicArn'] = notification_topic_arn
if cache_parameter_group_name is not None:
params['CacheParameterGroupName'] = cache_parameter_group_name
if notification_topic_status is not None:
params['NotificationTopicStatus'] = notification_topic_status
if apply_immediately is not None:
params['ApplyImmediately'] = str(
apply_immediately).lower()
if engine_version is not None:
params['EngineVersion'] = engine_version
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if primary_cluster_id is not None:
params['PrimaryClusterId'] = primary_cluster_id
return self._make_request(
action='ModifyReplicationGroup',
verb='POST',
path='/', params=params)
def purchase_reserved_cache_nodes_offering(self,
reserved_cache_nodes_offering_id,
reserved_cache_node_id=None,
cache_node_count=None):
"""
The PurchaseReservedCacheNodesOffering operation allows you to
purchase a reserved cache node offering.
:type reserved_cache_nodes_offering_id: string
:param reserved_cache_nodes_offering_id: The ID of the reserved cache
node offering to purchase.
Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
:type reserved_cache_node_id: string
:param reserved_cache_node_id: A customer-specified identifier to track
this reservation.
Example: myreservationID
:type cache_node_count: integer
:param cache_node_count: The number of cache node instances to reserve.
Default: `1`
"""
params = {
'ReservedCacheNodesOfferingId': reserved_cache_nodes_offering_id,
}
if reserved_cache_node_id is not None:
params['ReservedCacheNodeId'] = reserved_cache_node_id
if cache_node_count is not None:
params['CacheNodeCount'] = cache_node_count
return self._make_request(
action='PurchaseReservedCacheNodesOffering',
verb='POST',
path='/', params=params)
def reboot_cache_cluster(self, cache_cluster_id,
cache_node_ids_to_reboot):
"""
The RebootCacheCluster operation reboots some, or all, of the
cache cluster nodes within a provisioned cache cluster. This
API will apply any modified cache parameter groups to the
cache cluster. The reboot action takes place as soon as
possible, and results in a momentary outage to the cache
cluster. During the reboot, the cache cluster status is set to
REBOOTING.
The reboot causes the contents of the cache (for each cache
cluster node being rebooted) to be lost.
When the reboot is complete, a cache cluster event is created.
:type cache_cluster_id: string
:param cache_cluster_id: The cache cluster identifier. This parameter
is stored as a lowercase string.
:type cache_node_ids_to_reboot: list
:param cache_node_ids_to_reboot: A list of cache cluster node IDs to
reboot. A node ID is a numeric identifier (0001, 0002, etc.). To
reboot an entire cache cluster, specify all of the cache cluster
node IDs.
"""
params = {'CacheClusterId': cache_cluster_id, }
self.build_list_params(params,
cache_node_ids_to_reboot,
'CacheNodeIdsToReboot.member')
return self._make_request(
action='RebootCacheCluster',
verb='POST',
path='/', params=params)
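    # Sketch (hypothetical cluster; note that the reboot flushes each
    # rebooted node's cache contents):
    #
    #     conn.reboot_cache_cluster('demo-cluster', ['0001', '0002'])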
def reset_cache_parameter_group(self, cache_parameter_group_name,
parameter_name_values,
reset_all_parameters=None):
"""
The ResetCacheParameterGroup operation modifies the parameters
of a cache parameter group to the engine or system default
value. You can reset specific parameters by submitting a list
of parameter names. To reset the entire cache parameter group,
specify the ResetAllParameters and CacheParameterGroupName
parameters.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: The name of the cache parameter
group to reset.
:type reset_all_parameters: boolean
:param reset_all_parameters: If true , all parameters in the cache
parameter group will be reset to default values. If false , no such
action occurs.
Valid values: `True` | `False`
:type parameter_name_values: list
:param parameter_name_values: An array of parameter names to be reset.
If you are not resetting the entire cache parameter group, you must
specify at least one parameter name.
"""
params = {
'CacheParameterGroupName': cache_parameter_group_name,
}
self.build_complex_list_params(
params, parameter_name_values,
'ParameterNameValues.member',
('ParameterName', 'ParameterValue'))
if reset_all_parameters is not None:
params['ResetAllParameters'] = str(
reset_all_parameters).lower()
return self._make_request(
action='ResetCacheParameterGroup',
verb='POST',
path='/', params=params)
def revoke_cache_security_group_ingress(self, cache_security_group_name,
ec2_security_group_name,
ec2_security_group_owner_id):
"""
The RevokeCacheSecurityGroupIngress operation revokes ingress
from a cache security group. Use this operation to disallow
access from an Amazon EC2 security group that had been
previously authorized.
:type cache_security_group_name: string
:param cache_security_group_name: The name of the cache security group
to revoke ingress from.
:type ec2_security_group_name: string
:param ec2_security_group_name: The name of the Amazon EC2 security
group to revoke access from.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: The AWS account number of the
Amazon EC2 security group owner. Note that this is not the same
thing as an AWS access key ID - you must provide a valid AWS
account number for this parameter.
"""
params = {
'CacheSecurityGroupName': cache_security_group_name,
'EC2SecurityGroupName': ec2_security_group_name,
'EC2SecurityGroupOwnerId': ec2_security_group_owner_id,
}
return self._make_request(
action='RevokeCacheSecurityGroupIngress',
verb='POST',
path='/', params=params)
    def _make_request(self, action, verb, path, params):
        # Every ElastiCache action is POSTed to '/' with a JSON response
        # requested via the ContentType parameter; the verb/path arguments
        # are accepted for signature compatibility but not varied.
        params['ContentType'] = 'JSON'
        response = self.make_request(action=action, verb='POST',
                                     path='/', params=params)
        body = response.read().decode('utf-8')
        boto.log.debug(body)
        if response.status == 200:
            return json.loads(body)
        else:
            raise self.ResponseError(response.status, response.reason, body)
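    # Error-handling sketch: non-200 responses surface as self.ResponseError
    # (boto's standard server-error exception); identifiers are hypothetical:
    #
    #     try:
    #         conn.delete_cache_cluster('no-such-cluster')
    #     except ElastiCacheConnection.ResponseError as e:
    #         print(e.status, e.reason)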
# (end of boto/elasticache/layer1.py)
class Qualifications(object):
def __init__(self, requirements=None):
if requirements is None:
requirements = []
self.requirements = requirements
def add(self, req):
self.requirements.append(req)
def get_as_params(self):
params = {}
        # The MTurk API accepts at most ten qualification requirements per HIT.
        assert len(self.requirements) <= 10
        for n, req in enumerate(self.requirements):
            reqparams = req.get_as_params()
            for rp in reqparams:
                params['QualificationRequirement.%s.%s' % (n + 1, rp)] = reqparams[rp]
return params
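# Illustrative sketch of the flattened parameter layout get_as_params
# produces, using one of the requirement classes defined below:
#
#     quals = Qualifications()
#     quals.add(PercentAssignmentsApprovedRequirement('GreaterThan', 95))
#     quals.get_as_params()
#     # => {'QualificationRequirement.1.QualificationTypeId': '000000000000000000L0',
#     #     'QualificationRequirement.1.Comparator': 'GreaterThan',
#     #     'QualificationRequirement.1.IntegerValue': 95}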
class Requirement(object):
"""
Representation of a single requirement
"""
def __init__(self, qualification_type_id, comparator, integer_value=None, required_to_preview=False):
self.qualification_type_id = qualification_type_id
self.comparator = comparator
self.integer_value = integer_value
self.required_to_preview = required_to_preview
def get_as_params(self):
params = {
"QualificationTypeId": self.qualification_type_id,
"Comparator": self.comparator,
}
if self.comparator in ('In', 'NotIn'):
for i, integer_value in enumerate(self.integer_value, 1):
params['IntegerValue.%d' % i] = integer_value
elif self.comparator not in ('Exists', 'DoesNotExist') and self.integer_value is not None:
params['IntegerValue'] = self.integer_value
if self.required_to_preview:
params['RequiredToPreview'] = "true"
return params
class PercentAssignmentsSubmittedRequirement(Requirement):
"""
The percentage of assignments the Worker has submitted, over all assignments the Worker has accepted. The value is an integer between 0 and 100.
"""
def __init__(self, comparator, integer_value, required_to_preview=False):
super(PercentAssignmentsSubmittedRequirement, self).__init__(qualification_type_id="00000000000000000000", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
class PercentAssignmentsAbandonedRequirement(Requirement):
"""
The percentage of assignments the Worker has abandoned (allowed the deadline to elapse), over all assignments the Worker has accepted. The value is an integer between 0 and 100.
"""
def __init__(self, comparator, integer_value, required_to_preview=False):
super(PercentAssignmentsAbandonedRequirement, self).__init__(qualification_type_id="00000000000000000070", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
class PercentAssignmentsReturnedRequirement(Requirement):
"""
The percentage of assignments the Worker has returned, over all assignments the Worker has accepted. The value is an integer between 0 and 100.
"""
def __init__(self, comparator, integer_value, required_to_preview=False):
super(PercentAssignmentsReturnedRequirement, self).__init__(qualification_type_id="000000000000000000E0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
class PercentAssignmentsApprovedRequirement(Requirement):
"""
The percentage of assignments the Worker has submitted that were subsequently approved by the Requester, over all assignments the Worker has submitted. The value is an integer between 0 and 100.
"""
def __init__(self, comparator, integer_value, required_to_preview=False):
super(PercentAssignmentsApprovedRequirement, self).__init__(qualification_type_id="000000000000000000L0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
class PercentAssignmentsRejectedRequirement(Requirement):
"""
The percentage of assignments the Worker has submitted that were subsequently rejected by the Requester, over all assignments the Worker has submitted. The value is an integer between 0 and 100.
"""
def __init__(self, comparator, integer_value, required_to_preview=False):
super(PercentAssignmentsRejectedRequirement, self).__init__(qualification_type_id="000000000000000000S0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
class NumberHitsApprovedRequirement(Requirement):
"""
    Specifies the total number of HITs submitted by a Worker that have been approved. The value is an integer greater than or equal to 0.
    """
def __init__(self, comparator, integer_value, required_to_preview=False):
super(NumberHitsApprovedRequirement, self).__init__(qualification_type_id="00000000000000000040", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
class LocaleRequirement(Requirement):
"""
    A Qualification requirement based on the Worker's location. The Worker's location is specified by the Worker to Mechanical Turk when the Worker creates his account.
    If specifying a Country and Subdivision, use a tuple of valid ISO 3166 country code and ISO 3166-2 subdivision code, e.g. ('US', 'CA') for the US State of California.
    When using the 'In' and 'NotIn' comparators, locale should be a list of Countries and/or (Country, Subdivision) tuples.
    """
def __init__(self, comparator, locale, required_to_preview=False):
super(LocaleRequirement, self).__init__(qualification_type_id="00000000000000000071", comparator=comparator, integer_value=None, required_to_preview=required_to_preview)
self.locale = locale
def get_as_params(self):
params = {
"QualificationTypeId": self.qualification_type_id,
"Comparator": self.comparator,
}
if self.comparator in ('In', 'NotIn'):
for i, locale in enumerate(self.locale, 1):
if isinstance(locale, tuple):
params['LocaleValue.%d.Country' % i] = locale[0]
params['LocaleValue.%d.Subdivision' % i] = locale[1]
else:
params['LocaleValue.%d.Country' % i] = locale
else:
if isinstance(self.locale, tuple):
params['LocaleValue.Country'] = self.locale[0]
params['LocaleValue.Subdivision'] = self.locale[1]
else:
params['LocaleValue.Country'] = self.locale
if self.required_to_preview:
params['RequiredToPreview'] = "true"
return params
class AdultRequirement(Requirement):
"""
Requires workers to acknowledge that they are over 18 and that they agree to work on potentially offensive content. The value type is boolean, 1 (required), 0 (not required, the default).
"""
def __init__(self, comparator, integer_value, required_to_preview=False):
super(AdultRequirement, self).__init__(qualification_type_id="00000000000000000060", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
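

# Hedged usage sketch (not part of the original module): building a
# Qualifications set and inspecting the flattened request parameters.
# The threshold value and locales are hypothetical.
def _example_qualifications():
    quals = Qualifications()
    quals.add(PercentAssignmentsApprovedRequirement(
        comparator='GreaterThanOrEqualTo', integer_value=95))
    quals.add(LocaleRequirement(
        comparator='In', locale=['GB', ('US', 'CA')]))
    # Yields keys such as
    # 'QualificationRequirement.1.QualificationTypeId' and
    # 'QualificationRequirement.2.LocaleValue.2.Subdivision'.
    return quals.get_as_params()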
# ---- Source: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/mturk/qualification.py ----
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.ec2containerservice import exceptions
class EC2ContainerServiceConnection(AWSQueryConnection):
"""
Amazon EC2 Container Service (Amazon ECS) is a highly scalable,
fast, container management service that makes it easy to run,
stop, and manage Docker containers on a cluster of Amazon EC2
instances. Amazon ECS lets you launch and stop container-enabled
applications with simple API calls, allows you to get the state of
your cluster from a centralized service, and gives you access to
many familiar Amazon EC2 features like security groups, Amazon EBS
volumes, and IAM roles.
You can use Amazon ECS to schedule the placement of containers
across your cluster based on your resource needs, isolation
policies, and availability requirements. Amazon EC2 Container
Service eliminates the need for you to operate your own cluster
management and configuration management systems or worry about
scaling your management infrastructure.
"""
APIVersion = "2014-11-13"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "ecs.us-east-1.amazonaws.com"
ResponseError = JSONResponseError
_faults = {
"ServerException": exceptions.ServerException,
"ClientException": exceptions.ClientException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(EC2ContainerServiceConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def create_cluster(self, cluster_name=None):
"""
Creates a new Amazon ECS cluster. By default, your account
will receive a `default` cluster when you launch your first
container instance. However, you can create your own cluster
with a unique name with the `CreateCluster` action.
During the preview, each account is limited to two clusters.
:type cluster_name: string
:param cluster_name: The name of your cluster. If you do not specify a
name for your cluster, you will create a cluster named `default`.
"""
params = {}
if cluster_name is not None:
params['clusterName'] = cluster_name
return self._make_request(
action='CreateCluster',
verb='POST',
path='/', params=params)
def delete_cluster(self, cluster):
"""
Deletes the specified cluster. You must deregister all
container instances from this cluster before you may delete
it. You can list the container instances in a cluster with
ListContainerInstances and deregister them with
DeregisterContainerInstance.
:type cluster: string
:param cluster: The cluster you want to delete.
"""
params = {'cluster': cluster, }
return self._make_request(
action='DeleteCluster',
verb='POST',
path='/', params=params)
def deregister_container_instance(self, container_instance, cluster=None,
force=None):
"""
Deregisters an Amazon ECS container instance from the
specified cluster. This instance will no longer be available
to run tasks.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the container instance you want to
deregister. If you do not specify a cluster, the default cluster is
assumed.
:type container_instance: string
:param container_instance: The container instance UUID or full Amazon
Resource Name (ARN) of the container instance you want to
deregister. The ARN contains the `arn:aws:ecs` namespace, followed
by the region of the container instance, the AWS account ID of the
container instance owner, the `container-instance` namespace, and
            then the container instance UUID. For example,
            arn:aws:ecs:region:aws_account_id:container-instance/container_instance_UUID.
:type force: boolean
:param force: Force the deregistration of the container instance. You
can use the `force` parameter if you have several tasks running on
a container instance and you don't want to run `StopTask` for each
task before deregistering the container instance.
"""
params = {'containerInstance': container_instance, }
if cluster is not None:
params['cluster'] = cluster
if force is not None:
params['force'] = str(
force).lower()
return self._make_request(
action='DeregisterContainerInstance',
verb='POST',
path='/', params=params)
def deregister_task_definition(self, task_definition):
"""
Deregisters the specified task definition. You will no longer
be able to run tasks from this definition after
deregistration.
:type task_definition: string
:param task_definition: The `family` and `revision` (
`family:revision`) or full Amazon Resource Name (ARN) of the task
definition that you want to deregister.
"""
params = {'taskDefinition': task_definition, }
return self._make_request(
action='DeregisterTaskDefinition',
verb='POST',
path='/', params=params)
def describe_clusters(self, clusters=None):
"""
Describes one or more of your clusters.
:type clusters: list
:param clusters: A space-separated list of cluster names or full
cluster Amazon Resource Name (ARN) entries. If you do not specify a
cluster, the default cluster is assumed.
"""
params = {}
if clusters is not None:
self.build_list_params(params,
clusters,
'clusters.member')
return self._make_request(
action='DescribeClusters',
verb='POST',
path='/', params=params)
def describe_container_instances(self, container_instances, cluster=None):
"""
Describes Amazon EC2 Container Service container instances.
Returns metadata about registered and remaining resources on
each container instance requested.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the container instances you want to
describe. If you do not specify a cluster, the default cluster is
assumed.
:type container_instances: list
:param container_instances: A space-separated list of container
instance UUIDs or full Amazon Resource Name (ARN) entries.
"""
params = {}
self.build_list_params(params,
container_instances,
'containerInstances.member')
if cluster is not None:
params['cluster'] = cluster
return self._make_request(
action='DescribeContainerInstances',
verb='POST',
path='/', params=params)
def describe_task_definition(self, task_definition):
"""
Describes a task definition.
:type task_definition: string
:param task_definition: The `family` and `revision` (
`family:revision`) or full Amazon Resource Name (ARN) of the task
definition that you want to describe.
"""
params = {'taskDefinition': task_definition, }
return self._make_request(
action='DescribeTaskDefinition',
verb='POST',
path='/', params=params)
def describe_tasks(self, tasks, cluster=None):
"""
Describes a specified task or tasks.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the task you want to describe. If you do not
specify a cluster, the default cluster is assumed.
:type tasks: list
:param tasks: A space-separated list of task UUIDs or full Amazon
Resource Name (ARN) entries.
"""
params = {}
self.build_list_params(params,
tasks,
'tasks.member')
if cluster is not None:
params['cluster'] = cluster
return self._make_request(
action='DescribeTasks',
verb='POST',
path='/', params=params)
def discover_poll_endpoint(self, container_instance=None):
"""
This action is only used by the Amazon EC2 Container Service
agent, and it is not intended for use outside of the agent.
Returns an endpoint for the Amazon EC2 Container Service agent
to poll for updates.
:type container_instance: string
:param container_instance: The container instance UUID or full Amazon
Resource Name (ARN) of the container instance. The ARN contains the
`arn:aws:ecs` namespace, followed by the region of the container
instance, the AWS account ID of the container instance owner, the
`container-instance` namespace, and then the container instance
            UUID. For example,
            arn:aws:ecs:region:aws_account_id:container-instance/container_instance_UUID.
"""
params = {}
if container_instance is not None:
params['containerInstance'] = container_instance
return self._make_request(
action='DiscoverPollEndpoint',
verb='POST',
path='/', params=params)
def list_clusters(self, next_token=None, max_results=None):
"""
Returns a list of existing clusters.
:type next_token: string
:param next_token: The `nextToken` value returned from a previous
paginated `ListClusters` request where `maxResults` was used and
the results exceeded the value of that parameter. Pagination
continues from the end of the previous results that returned the
`nextToken` value. This value is `null` when there are no more
results to return.
:type max_results: integer
:param max_results: The maximum number of cluster results returned by
`ListClusters` in paginated output. When this parameter is used,
`ListClusters` only returns `maxResults` results in a single page
along with a `nextToken` response element. The remaining results of
the initial request can be seen by sending another `ListClusters`
request with the returned `nextToken` value. This value can be
between 1 and 100. If this parameter is not used, then
`ListClusters` returns up to 100 results and a `nextToken` value if
applicable.
"""
params = {}
if next_token is not None:
params['nextToken'] = next_token
if max_results is not None:
params['maxResults'] = max_results
return self._make_request(
action='ListClusters',
verb='POST',
path='/', params=params)
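    # Illustrative pagination sketch (an assumption, not part of the
    # original API surface): callers typically loop until no nextToken
    # is returned, e.g.
    #
    #     token = None
    #     while True:
    #         page = conn.list_clusters(next_token=token, max_results=100)
    #         ...  # consume the page payload
    #         token = page.get('nextToken')
    #         if not token:
    #             break
    #
    # The exact response envelope keys depend on the service JSON output.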
def list_container_instances(self, cluster=None, next_token=None,
max_results=None):
"""
Returns a list of container instances in a specified cluster.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the container instances you want to list. If
            you do not specify a cluster, the default cluster is assumed.
:type next_token: string
:param next_token: The `nextToken` value returned from a previous
paginated `ListContainerInstances` request where `maxResults` was
used and the results exceeded the value of that parameter.
Pagination continues from the end of the previous results that
returned the `nextToken` value. This value is `null` when there are
no more results to return.
:type max_results: integer
:param max_results: The maximum number of container instance results
returned by `ListContainerInstances` in paginated output. When this
parameter is used, `ListContainerInstances` only returns
`maxResults` results in a single page along with a `nextToken`
response element. The remaining results of the initial request can
be seen by sending another `ListContainerInstances` request with
the returned `nextToken` value. This value can be between 1 and
100. If this parameter is not used, then `ListContainerInstances`
returns up to 100 results and a `nextToken` value if applicable.
"""
params = {}
if cluster is not None:
params['cluster'] = cluster
if next_token is not None:
params['nextToken'] = next_token
if max_results is not None:
params['maxResults'] = max_results
return self._make_request(
action='ListContainerInstances',
verb='POST',
path='/', params=params)
def list_task_definitions(self, family_prefix=None, next_token=None,
max_results=None):
"""
Returns a list of task definitions that are registered to your
account. You can filter the results by family name with the
`familyPrefix` parameter.
:type family_prefix: string
:param family_prefix: The name of the family that you want to filter
the `ListTaskDefinitions` results with. Specifying a `familyPrefix`
will limit the listed task definitions to definitions that belong
to that family.
:type next_token: string
:param next_token: The `nextToken` value returned from a previous
paginated `ListTaskDefinitions` request where `maxResults` was used
and the results exceeded the value of that parameter. Pagination
continues from the end of the previous results that returned the
`nextToken` value. This value is `null` when there are no more
results to return.
:type max_results: integer
:param max_results: The maximum number of task definition results
returned by `ListTaskDefinitions` in paginated output. When this
parameter is used, `ListTaskDefinitions` only returns `maxResults`
results in a single page along with a `nextToken` response element.
The remaining results of the initial request can be seen by sending
another `ListTaskDefinitions` request with the returned `nextToken`
value. This value can be between 1 and 100. If this parameter is
not used, then `ListTaskDefinitions` returns up to 100 results and
a `nextToken` value if applicable.
"""
params = {}
if family_prefix is not None:
params['familyPrefix'] = family_prefix
if next_token is not None:
params['nextToken'] = next_token
if max_results is not None:
params['maxResults'] = max_results
return self._make_request(
action='ListTaskDefinitions',
verb='POST',
path='/', params=params)
def list_tasks(self, cluster=None, container_instance=None, family=None,
next_token=None, max_results=None):
"""
Returns a list of tasks for a specified cluster. You can
filter the results by family name or by a particular container
instance with the `family` and `containerInstance` parameters.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the tasks you want to list. If you do not
            specify a cluster, the default cluster is assumed.
:type container_instance: string
:param container_instance: The container instance UUID or full Amazon
Resource Name (ARN) of the container instance that you want to
filter the `ListTasks` results with. Specifying a
`containerInstance` will limit the results to tasks that belong to
that container instance.
:type family: string
:param family: The name of the family that you want to filter the
`ListTasks` results with. Specifying a `family` will limit the
results to tasks that belong to that family.
:type next_token: string
:param next_token: The `nextToken` value returned from a previous
paginated `ListTasks` request where `maxResults` was used and the
results exceeded the value of that parameter. Pagination continues
from the end of the previous results that returned the `nextToken`
value. This value is `null` when there are no more results to
return.
:type max_results: integer
:param max_results: The maximum number of task results returned by
`ListTasks` in paginated output. When this parameter is used,
`ListTasks` only returns `maxResults` results in a single page
along with a `nextToken` response element. The remaining results of
the initial request can be seen by sending another `ListTasks`
request with the returned `nextToken` value. This value can be
between 1 and 100. If this parameter is not used, then `ListTasks`
returns up to 100 results and a `nextToken` value if applicable.
"""
params = {}
if cluster is not None:
params['cluster'] = cluster
if container_instance is not None:
params['containerInstance'] = container_instance
if family is not None:
params['family'] = family
if next_token is not None:
params['nextToken'] = next_token
if max_results is not None:
params['maxResults'] = max_results
return self._make_request(
action='ListTasks',
verb='POST',
path='/', params=params)
def register_container_instance(self, cluster=None,
instance_identity_document=None,
instance_identity_document_signature=None,
total_resources=None):
"""
This action is only used by the Amazon EC2 Container Service
agent, and it is not intended for use outside of the agent.
Registers an Amazon EC2 instance into the specified cluster.
This instance will become available to place containers on.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that you want to register your container instance with.
            If you do not specify a cluster, the default cluster is assumed.
:type instance_identity_document: string
:param instance_identity_document:
:type instance_identity_document_signature: string
:param instance_identity_document_signature:
:type total_resources: list
:param total_resources:
"""
params = {}
if cluster is not None:
params['cluster'] = cluster
if instance_identity_document is not None:
params['instanceIdentityDocument'] = instance_identity_document
if instance_identity_document_signature is not None:
params['instanceIdentityDocumentSignature'] = instance_identity_document_signature
if total_resources is not None:
self.build_complex_list_params(
params, total_resources,
'totalResources.member',
('name', 'type', 'doubleValue', 'longValue', 'integerValue', 'stringSetValue'))
return self._make_request(
action='RegisterContainerInstance',
verb='POST',
path='/', params=params)
def register_task_definition(self, family, container_definitions):
"""
Registers a new task definition from the supplied `family` and
`containerDefinitions`.
:type family: string
:param family: You can specify a `family` for a task definition, which
allows you to track multiple versions of the same task definition.
You can think of the `family` as a name for your task definition.
:type container_definitions: list
:param container_definitions: A list of container definitions in JSON
format that describe the different containers that make up your
task.
"""
params = {'family': family, }
self.build_complex_list_params(
params, container_definitions,
'containerDefinitions.member',
('name', 'image', 'cpu', 'memory', 'links', 'portMappings', 'essential', 'entryPoint', 'command', 'environment'))
return self._make_request(
action='RegisterTaskDefinition',
verb='POST',
path='/', params=params)
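    # Illustrative shape (hypothetical values): build_complex_list_params
    # zips each list item against the field names above, so a container
    # definition is an ordered tuple rather than a dict, e.g.
    #
    #     [('web', 'nginx', 256, 512, None, None, True, None, None, None)]
    #
    # flattening to containerDefinitions.member.1.name=web, and so on.
    # Nested structures such as portMappings may need the raw API shape.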
def run_task(self, task_definition, cluster=None, overrides=None,
count=None):
"""
Start a task using random placement and the default Amazon ECS
scheduler. If you want to use your own scheduler or place a
task on a specific container instance, use `StartTask`
instead.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that you want to run your task on. If you do not
            specify a cluster, the default cluster is assumed.
:type task_definition: string
:param task_definition: The `family` and `revision` (
`family:revision`) or full Amazon Resource Name (ARN) of the task
definition that you want to run.
:type overrides: dict
:param overrides:
:type count: integer
:param count: The number of instances of the specified task that you
would like to place on your cluster.
"""
params = {'taskDefinition': task_definition, }
if cluster is not None:
params['cluster'] = cluster
if overrides is not None:
params['overrides'] = overrides
if count is not None:
params['count'] = count
return self._make_request(
action='RunTask',
verb='POST',
path='/', params=params)
def start_task(self, task_definition, container_instances, cluster=None,
overrides=None):
"""
Starts a new task from the specified task definition on the
specified container instance or instances. If you want to use
the default Amazon ECS scheduler to place your task, use
`RunTask` instead.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that you want to start your task on. If you do not
            specify a cluster, the default cluster is assumed.
:type task_definition: string
:param task_definition: The `family` and `revision` (
`family:revision`) or full Amazon Resource Name (ARN) of the task
definition that you want to start.
:type overrides: dict
:param overrides:
:type container_instances: list
:param container_instances: The container instance UUIDs or full Amazon
Resource Name (ARN) entries for the container instances on which
you would like to place your task.
"""
params = {'taskDefinition': task_definition, }
self.build_list_params(params,
container_instances,
'containerInstances.member')
if cluster is not None:
params['cluster'] = cluster
if overrides is not None:
params['overrides'] = overrides
return self._make_request(
action='StartTask',
verb='POST',
path='/', params=params)
def stop_task(self, task, cluster=None):
"""
Stops a running task.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the task you want to stop. If you do not
            specify a cluster, the default cluster is assumed.
:type task: string
        :param task: The task UUID or full Amazon Resource Name (ARN) entry
            of the task you would like to stop.
"""
params = {'task': task, }
if cluster is not None:
params['cluster'] = cluster
return self._make_request(
action='StopTask',
verb='POST',
path='/', params=params)
def submit_container_state_change(self, cluster=None, task=None,
container_name=None, status=None,
exit_code=None, reason=None,
network_bindings=None):
"""
This action is only used by the Amazon EC2 Container Service
agent, and it is not intended for use outside of the agent.
Sent to acknowledge that a container changed states.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the container.
:type task: string
:param task: The task UUID or full Amazon Resource Name (ARN) of the
task that hosts the container.
:type container_name: string
:param container_name: The name of the container.
:type status: string
:param status: The status of the state change request.
:type exit_code: integer
:param exit_code: The exit code returned for the state change request.
:type reason: string
:param reason: The reason for the state change request.
:type network_bindings: list
:param network_bindings: The network bindings of the container.
"""
params = {}
if cluster is not None:
params['cluster'] = cluster
if task is not None:
params['task'] = task
if container_name is not None:
params['containerName'] = container_name
if status is not None:
params['status'] = status
if exit_code is not None:
params['exitCode'] = exit_code
if reason is not None:
params['reason'] = reason
if network_bindings is not None:
self.build_complex_list_params(
params, network_bindings,
'networkBindings.member',
('bindIP', 'containerPort', 'hostPort'))
return self._make_request(
action='SubmitContainerStateChange',
verb='POST',
path='/', params=params)
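    # Illustrative network binding list (agent-side; values hypothetical),
    # matching the field names passed to build_complex_list_params above:
    #
    #     [('0.0.0.0', 80, 32768)]   # (bindIP, containerPort, hostPort)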
def submit_task_state_change(self, cluster=None, task=None, status=None,
reason=None):
"""
This action is only used by the Amazon EC2 Container Service
agent, and it is not intended for use outside of the agent.
Sent to acknowledge that a task changed states.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the task.
:type task: string
:param task: The task UUID or full Amazon Resource Name (ARN) of the
task in the state change request.
:type status: string
:param status: The status of the state change request.
:type reason: string
:param reason: The reason for the state change request.
"""
params = {}
if cluster is not None:
params['cluster'] = cluster
if task is not None:
params['task'] = task
if status is not None:
params['status'] = status
if reason is not None:
params['reason'] = reason
return self._make_request(
action='SubmitTaskStateChange',
verb='POST',
path='/', params=params)
def _make_request(self, action, verb, path, params):
params['ContentType'] = 'JSON'
response = self.make_request(action=action, verb='POST',
path='/', params=params)
body = response.read().decode('utf-8')
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
json_body = json.loads(body)
fault_name = json_body.get('Error', {}).get('Code', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
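

# Hedged usage sketch (not part of the original module): create the default
# cluster and read it back. Assumes standard boto credential configuration;
# the cluster name 'default' follows the create_cluster docstring above.
def _example_default_cluster():
    conn = EC2ContainerServiceConnection()
    conn.create_cluster()  # no name supplied: the cluster is named 'default'
    return conn.describe_clusters(clusters=['default'])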
# ---- Source: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/ec2containerservice/layer1.py ----
import logging
import re
from boto.vendored.regions.exceptions import NoRegionError
LOG = logging.getLogger(__name__)
DEFAULT_URI_TEMPLATE = '{service}.{region}.{dnsSuffix}'
DEFAULT_SERVICE_DATA = {'endpoints': {}}
class BaseEndpointResolver(object):
"""Resolves regions and endpoints. Must be subclassed."""
def construct_endpoint(self, service_name, region_name=None):
"""Resolves an endpoint for a service and region combination.
:type service_name: string
:param service_name: Name of the service to resolve an endpoint for
(e.g., s3)
:type region_name: string
        :param region_name: Region/endpoint name to resolve (e.g., us-east-1).
            If no region is provided, the first found partition-wide endpoint
            will be used if available.
:rtype: dict
:return: Returns a dict containing the following keys:
- partition: (string, required) Resolved partition name
- endpointName: (string, required) Resolved endpoint name
- hostname: (string, required) Hostname to use for this endpoint
- sslCommonName: (string) sslCommonName to use for this endpoint.
- credentialScope: (dict) Signature version 4 credential scope
- region: (string) region name override when signing.
- service: (string) service name override when signing.
- signatureVersions: (list<string>) A list of possible signature
versions, including s3, v4, v2, and s3v4
- protocols: (list<string>) A list of supported protocols
(e.g., http, https)
- ...: Other keys may be included as well based on the metadata
"""
raise NotImplementedError
def get_available_partitions(self):
"""Lists the partitions available to the endpoint resolver.
:return: Returns a list of partition names (e.g., ["aws", "aws-cn"]).
"""
raise NotImplementedError
def get_available_endpoints(self, service_name, partition_name='aws',
allow_non_regional=False):
"""Lists the endpoint names of a particular partition.
:type service_name: string
:param service_name: Name of a service to list endpoint for (e.g., s3)
:type partition_name: string
        :param partition_name: Name of the partition to limit endpoints to
            (e.g., aws for the public AWS endpoints, aws-cn for AWS China
            endpoints, aws-us-gov for AWS GovCloud (US) endpoints, etc.).
:type allow_non_regional: bool
:param allow_non_regional: Set to True to include endpoints that are
not regional endpoints (e.g., s3-external-1,
fips-us-gov-west-1, etc).
:return: Returns a list of endpoint names (e.g., ["us-east-1"]).
"""
raise NotImplementedError
class EndpointResolver(BaseEndpointResolver):
"""Resolves endpoints based on partition endpoint metadata"""
def __init__(self, endpoint_data):
"""
:param endpoint_data: A dict of partition data.
"""
if 'partitions' not in endpoint_data:
raise ValueError('Missing "partitions" in endpoint data')
self._endpoint_data = endpoint_data
def get_available_partitions(self):
result = []
for partition in self._endpoint_data['partitions']:
result.append(partition['partition'])
return result
def get_available_endpoints(self, service_name, partition_name='aws',
allow_non_regional=False):
result = []
for partition in self._endpoint_data['partitions']:
if partition['partition'] != partition_name:
continue
services = partition['services']
if service_name not in services:
continue
for endpoint_name in services[service_name]['endpoints']:
if allow_non_regional or endpoint_name in partition['regions']:
result.append(endpoint_name)
return result
def construct_endpoint(self, service_name, region_name=None):
# Iterate over each partition until a match is found.
for partition in self._endpoint_data['partitions']:
result = self._endpoint_for_partition(
partition, service_name, region_name)
if result:
return result
def _endpoint_for_partition(self, partition, service_name, region_name):
# Get the service from the partition, or an empty template.
service_data = partition['services'].get(
service_name, DEFAULT_SERVICE_DATA)
# Use the partition endpoint if no region is supplied.
if region_name is None:
if 'partitionEndpoint' in service_data:
region_name = service_data['partitionEndpoint']
else:
raise NoRegionError()
# Attempt to resolve the exact region for this partition.
if region_name in service_data['endpoints']:
return self._resolve(
partition, service_name, service_data, region_name)
# Check to see if the endpoint provided is valid for the partition.
if self._region_match(partition, region_name):
# Use the partition endpoint if set and not regionalized.
partition_endpoint = service_data.get('partitionEndpoint')
is_regionalized = service_data.get('isRegionalized', True)
if partition_endpoint and not is_regionalized:
LOG.debug('Using partition endpoint for %s, %s: %s',
service_name, region_name, partition_endpoint)
return self._resolve(
partition, service_name, service_data, partition_endpoint)
LOG.debug('Creating a regex based endpoint for %s, %s',
service_name, region_name)
return self._resolve(
partition, service_name, service_data, region_name)
def _region_match(self, partition, region_name):
if region_name in partition['regions']:
return True
if 'regionRegex' in partition:
return re.compile(partition['regionRegex']).match(region_name)
return False
def _resolve(self, partition, service_name, service_data, endpoint_name):
result = service_data['endpoints'].get(endpoint_name, {})
result['partition'] = partition['partition']
result['endpointName'] = endpoint_name
# Merge in the service defaults then the partition defaults.
self._merge_keys(service_data.get('defaults', {}), result)
self._merge_keys(partition.get('defaults', {}), result)
        # Expand the hostname template, falling back to the default URI
        # template when the endpoint metadata does not supply one.
        hostname = result.get('hostname', DEFAULT_URI_TEMPLATE)
        result['hostname'] = self._expand_template(
            partition, hostname, service_name, endpoint_name)
if 'sslCommonName' in result:
result['sslCommonName'] = self._expand_template(
partition, result['sslCommonName'], service_name,
endpoint_name)
result['dnsSuffix'] = partition['dnsSuffix']
return result
def _merge_keys(self, from_data, result):
for key in from_data:
if key not in result:
result[key] = from_data[key]
def _expand_template(self, partition, template, service_name,
endpoint_name):
return template.format(
service=service_name, region=endpoint_name,
dnsSuffix=partition['dnsSuffix'])
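

# Hedged usage sketch (not part of the original module): resolving an
# endpoint from a minimal, hand-written partition table. The data below is
# illustrative only, not the real endpoints.json payload.
_EXAMPLE_ENDPOINT_DATA = {
    'partitions': [{
        'partition': 'aws',
        'dnsSuffix': 'amazonaws.com',
        'regionRegex': r'^(us|eu)-\w+-\d+$',
        'regions': {'us-east-1': {}},
        'defaults': {'hostname': DEFAULT_URI_TEMPLATE,
                     'protocols': ['https']},
        'services': {'s3': {'endpoints': {'us-east-1': {}}}},
    }],
}


def _example_construct_endpoint():
    resolver = EndpointResolver(_EXAMPLE_ENDPOINT_DATA)
    # Resolves the hostname to 's3.us-east-1.amazonaws.com' via the default
    # '{service}.{region}.{dnsSuffix}' template.
    return resolver.construct_endpoint('s3', 'us-east-1')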
# ---- Source: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/vendored/regions/regions.py ----
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.codedeploy import exceptions
class CodeDeployConnection(AWSQueryConnection):
"""
AWS CodeDeploy **Overview**
This is the AWS CodeDeploy API Reference. This guide provides
descriptions of the AWS CodeDeploy APIs. For additional
information, see the `AWS CodeDeploy User Guide`_.
**Using the APIs**
You can use the AWS CodeDeploy APIs to work with the following
items:
+ Applications , which are unique identifiers that AWS CodeDeploy
uses to ensure that the correct combinations of revisions,
deployment configurations, and deployment groups are being
referenced during deployments. You can work with applications by
calling CreateApplication, DeleteApplication, GetApplication,
ListApplications, BatchGetApplications, and UpdateApplication to
create, delete, and get information about applications, and to
change information about an application, respectively.
+ Deployment configurations , which are sets of deployment rules
and deployment success and failure conditions that AWS CodeDeploy
uses during deployments. You can work with deployment
configurations by calling CreateDeploymentConfig,
DeleteDeploymentConfig, GetDeploymentConfig, and
ListDeploymentConfigs to create, delete, and get information about
deployment configurations, respectively.
+ Deployment groups , which represent groups of Amazon EC2
instances to which application revisions can be deployed. You can
work with deployment groups by calling CreateDeploymentGroup,
DeleteDeploymentGroup, GetDeploymentGroup, ListDeploymentGroups,
and UpdateDeploymentGroup to create, delete, and get information
about single and multiple deployment groups, and to change
information about a deployment group, respectively.
+ Deployment instances (also known simply as instances ), which
represent Amazon EC2 instances to which application revisions are
deployed. Deployment instances are identified by their Amazon EC2
tags or Auto Scaling group names. Deployment instances belong to
deployment groups. You can work with deployment instances by
calling GetDeploymentInstance and ListDeploymentInstances to get
information about single and multiple deployment instances,
respectively.
+ Deployments , which represent the process of deploying revisions
to deployment groups. You can work with deployments by calling
CreateDeployment, GetDeployment, ListDeployments,
BatchGetDeployments, and StopDeployment to create and get
information about deployments, and to stop a deployment,
respectively.
+ Application revisions (also known simply as revisions ), which
are archive files that are stored in Amazon S3 buckets or GitHub
repositories. These revisions contain source content (such as
source code, web pages, executable files, any deployment scripts,
and similar) along with an Application Specification file (AppSpec
file). (The AppSpec file is unique to AWS CodeDeploy; it defines a
series of deployment actions that you want AWS CodeDeploy to
execute.) An application revision is uniquely identified by its
Amazon S3 object key and its ETag, version, or both. Application
revisions are deployed to deployment groups. You can work with
application revisions by calling GetApplicationRevision,
ListApplicationRevisions, and RegisterApplicationRevision to get
information about application revisions and to inform AWS
CodeDeploy about an application revision, respectively.
"""
APIVersion = "2014-10-06"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "codedeploy.us-east-1.amazonaws.com"
ServiceName = "codedeploy"
TargetPrefix = "CodeDeploy_20141006"
ResponseError = JSONResponseError
_faults = {
"InvalidDeploymentIdException": exceptions.InvalidDeploymentIdException,
"InvalidDeploymentGroupNameException": exceptions.InvalidDeploymentGroupNameException,
"DeploymentConfigAlreadyExistsException": exceptions.DeploymentConfigAlreadyExistsException,
"InvalidRoleException": exceptions.InvalidRoleException,
"RoleRequiredException": exceptions.RoleRequiredException,
"DeploymentGroupAlreadyExistsException": exceptions.DeploymentGroupAlreadyExistsException,
"DeploymentConfigLimitExceededException": exceptions.DeploymentConfigLimitExceededException,
"InvalidNextTokenException": exceptions.InvalidNextTokenException,
"InvalidDeploymentConfigNameException": exceptions.InvalidDeploymentConfigNameException,
"InvalidSortByException": exceptions.InvalidSortByException,
"InstanceDoesNotExistException": exceptions.InstanceDoesNotExistException,
"InvalidMinimumHealthyHostValueException": exceptions.InvalidMinimumHealthyHostValueException,
"ApplicationLimitExceededException": exceptions.ApplicationLimitExceededException,
"ApplicationNameRequiredException": exceptions.ApplicationNameRequiredException,
"InvalidEC2TagException": exceptions.InvalidEC2TagException,
"DeploymentDoesNotExistException": exceptions.DeploymentDoesNotExistException,
"DeploymentLimitExceededException": exceptions.DeploymentLimitExceededException,
"InvalidInstanceStatusException": exceptions.InvalidInstanceStatusException,
"RevisionRequiredException": exceptions.RevisionRequiredException,
"InvalidBucketNameFilterException": exceptions.InvalidBucketNameFilterException,
"DeploymentGroupLimitExceededException": exceptions.DeploymentGroupLimitExceededException,
"DeploymentGroupDoesNotExistException": exceptions.DeploymentGroupDoesNotExistException,
"DeploymentConfigNameRequiredException": exceptions.DeploymentConfigNameRequiredException,
"DeploymentAlreadyCompletedException": exceptions.DeploymentAlreadyCompletedException,
"RevisionDoesNotExistException": exceptions.RevisionDoesNotExistException,
"DeploymentGroupNameRequiredException": exceptions.DeploymentGroupNameRequiredException,
"DeploymentIdRequiredException": exceptions.DeploymentIdRequiredException,
"DeploymentConfigDoesNotExistException": exceptions.DeploymentConfigDoesNotExistException,
"BucketNameFilterRequiredException": exceptions.BucketNameFilterRequiredException,
"InvalidTimeRangeException": exceptions.InvalidTimeRangeException,
"ApplicationDoesNotExistException": exceptions.ApplicationDoesNotExistException,
"InvalidRevisionException": exceptions.InvalidRevisionException,
"InvalidSortOrderException": exceptions.InvalidSortOrderException,
"InvalidOperationException": exceptions.InvalidOperationException,
"InvalidAutoScalingGroupException": exceptions.InvalidAutoScalingGroupException,
"InvalidApplicationNameException": exceptions.InvalidApplicationNameException,
"DescriptionTooLongException": exceptions.DescriptionTooLongException,
"ApplicationAlreadyExistsException": exceptions.ApplicationAlreadyExistsException,
"InvalidDeployedStateFilterException": exceptions.InvalidDeployedStateFilterException,
"DeploymentNotStartedException": exceptions.DeploymentNotStartedException,
"DeploymentConfigInUseException": exceptions.DeploymentConfigInUseException,
"InstanceIdRequiredException": exceptions.InstanceIdRequiredException,
"InvalidKeyPrefixFilterException": exceptions.InvalidKeyPrefixFilterException,
"InvalidDeploymentStatusException": exceptions.InvalidDeploymentStatusException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(CodeDeployConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def batch_get_applications(self, application_names=None):
"""
Gets information about one or more applications.
:type application_names: list
:param application_names: A list of application names, with multiple
application names separated by spaces.
"""
params = {}
if application_names is not None:
params['applicationNames'] = application_names
return self.make_request(action='BatchGetApplications',
body=json.dumps(params))
def batch_get_deployments(self, deployment_ids=None):
"""
Gets information about one or more deployments.
:type deployment_ids: list
:param deployment_ids: A list of deployment IDs, with multiple
deployment IDs separated by spaces.
"""
params = {}
if deployment_ids is not None:
params['deploymentIds'] = deployment_ids
return self.make_request(action='BatchGetDeployments',
body=json.dumps(params))
def create_application(self, application_name):
"""
Creates a new application.
:type application_name: string
:param application_name: The name of the application. This name must be
unique within the AWS user account.
"""
params = {'applicationName': application_name, }
return self.make_request(action='CreateApplication',
body=json.dumps(params))
def create_deployment(self, application_name, deployment_group_name=None,
revision=None, deployment_config_name=None,
description=None,
ignore_application_stop_failures=None):
"""
Deploys an application revision to the specified deployment
group.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type deployment_group_name: string
:param deployment_group_name: The deployment group's name.
:type revision: dict
:param revision: The type of revision to deploy, along with information
about the revision's location.
:type deployment_config_name: string
:param deployment_config_name: The name of an existing deployment
configuration within the AWS user account.
If not specified, the value configured in the deployment group will be
used as the default. If the deployment group does not have a
deployment configuration associated with it, then
CodeDeployDefault.OneAtATime will be used by default.
:type description: string
:param description: A comment about the deployment.
:type ignore_application_stop_failures: boolean
        :param ignore_application_stop_failures: If set to true, then if the
            deployment causes the ApplicationStop deployment lifecycle event
            to fail on a specific instance, the deployment will not be
            considered to have failed on that instance at that point and will
            continue on to the BeforeInstall deployment lifecycle event.
        If set to false or not specified, then if the deployment causes the
            ApplicationStop deployment lifecycle event to fail on a specific
            instance, the deployment to that instance will stop, and the
            deployment to that instance will be considered to have failed.
"""
params = {'applicationName': application_name, }
if deployment_group_name is not None:
params['deploymentGroupName'] = deployment_group_name
if revision is not None:
params['revision'] = revision
if deployment_config_name is not None:
params['deploymentConfigName'] = deployment_config_name
if description is not None:
params['description'] = description
if ignore_application_stop_failures is not None:
params['ignoreApplicationStopFailures'] = ignore_application_stop_failures
return self.make_request(action='CreateDeployment',
body=json.dumps(params))
def create_deployment_config(self, deployment_config_name,
minimum_healthy_hosts=None):
"""
Creates a new deployment configuration.
:type deployment_config_name: string
:param deployment_config_name: The name of the deployment configuration
to create.
:type minimum_healthy_hosts: dict
:param minimum_healthy_hosts: The minimum number of healthy instances
that should be available at any time during the deployment. There
are two parameters expected in the input: type and value.
The type parameter takes either of the following values:
+ HOST_COUNT: The value parameter represents the minimum number of
healthy instances, as an absolute value.
+ FLEET_PERCENT: The value parameter represents the minimum number of
healthy instances, as a percentage of the total number of instances
in the deployment. If you specify FLEET_PERCENT, then at the start
of the deployment AWS CodeDeploy converts the percentage to the
equivalent number of instances and rounds fractional instances up.
The value parameter takes an integer.
For example, to set a minimum of 95% healthy instances, specify a type
of FLEET_PERCENT and a value of 95.
"""
params = {'deploymentConfigName': deployment_config_name, }
if minimum_healthy_hosts is not None:
params['minimumHealthyHosts'] = minimum_healthy_hosts
return self.make_request(action='CreateDeploymentConfig',
body=json.dumps(params))
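    # Illustrative shape (hypothetical values): to require at least 95% of
    # the fleet healthy during a deployment, pass
    #
    #     {'type': 'FLEET_PERCENT', 'value': 95}
    #
    # The key names follow the CodeDeploy JSON API as described in the
    # docstring above; treat them as an assumption and verify against the
    # service documentation.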
def create_deployment_group(self, application_name,
deployment_group_name,
deployment_config_name=None,
ec_2_tag_filters=None,
auto_scaling_groups=None,
service_role_arn=None):
"""
Creates a new deployment group for application revisions to be
deployed to.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type deployment_group_name: string
:param deployment_group_name: The name of an existing deployment group
for the specified application.
:type deployment_config_name: string
:param deployment_config_name: If specified, the deployment
configuration name must be one of the predefined values, or it can
be a custom deployment configuration:
+ CodeDeployDefault.AllAtOnce deploys an application revision to up to
all of the Amazon EC2 instances at once. The overall deployment
succeeds if the application revision deploys to at least one of the
instances. The overall deployment fails after the application
revision fails to deploy to all of the instances. For example, for
9 instances, deploy to up to all 9 instances at once. The overall
deployment succeeds if any of the 9 instances is successfully
deployed to, and it fails if all 9 instances fail to be deployed
to.
+ CodeDeployDefault.HalfAtATime deploys to up to half of the instances
at a time (with fractions rounded down). The overall deployment
succeeds if the application revision deploys to at least half of
the instances (with fractions rounded up); otherwise, the
deployment fails. For example, for 9 instances, deploy to up to 4
instances at a time. The overall deployment succeeds if 5 or more
instances are successfully deployed to; otherwise, the deployment
fails. Note that the deployment may successfully deploy to some
instances, even if the overall deployment fails.
+ CodeDeployDefault.OneAtATime deploys the application revision to only
one of the instances at a time. The overall deployment succeeds if
the application revision deploys to all of the instances. The
overall deployment fails after the application revision first fails
to deploy to any one instance. For example, for 9 instances, deploy
to one instance at a time. The overall deployment succeeds if all 9
instances are successfully deployed to, and it fails if any of one
of the 9 instances fail to be deployed to. Note that the deployment
may successfully deploy to some instances, even if the overall
deployment fails. This is the default deployment configuration if a
configuration isn't specified for either the deployment or the
deployment group.
To create a custom deployment configuration, call the create deployment
configuration operation.
:type ec_2_tag_filters: list
:param ec_2_tag_filters: The Amazon EC2 tags to filter on.
:type auto_scaling_groups: list
:param auto_scaling_groups: A list of associated Auto Scaling groups.
:type service_role_arn: string
:param service_role_arn: A service role ARN that allows AWS CodeDeploy
to act on the user's behalf when interacting with AWS services.
"""
params = {
'applicationName': application_name,
'deploymentGroupName': deployment_group_name,
}
if deployment_config_name is not None:
params['deploymentConfigName'] = deployment_config_name
if ec_2_tag_filters is not None:
params['ec2TagFilters'] = ec_2_tag_filters
if auto_scaling_groups is not None:
params['autoScalingGroups'] = auto_scaling_groups
if service_role_arn is not None:
params['serviceRoleArn'] = service_role_arn
return self.make_request(action='CreateDeploymentGroup',
body=json.dumps(params))
def delete_application(self, application_name):
"""
Deletes an application.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
"""
params = {'applicationName': application_name, }
return self.make_request(action='DeleteApplication',
body=json.dumps(params))
def delete_deployment_config(self, deployment_config_name):
"""
Deletes a deployment configuration.
A deployment configuration cannot be deleted if it is
currently in use. Also, predefined configurations cannot be
deleted.
:type deployment_config_name: string
:param deployment_config_name: The name of an existing deployment
configuration within the AWS user account.
"""
params = {'deploymentConfigName': deployment_config_name, }
return self.make_request(action='DeleteDeploymentConfig',
body=json.dumps(params))
def delete_deployment_group(self, application_name,
deployment_group_name):
"""
Deletes a deployment group.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type deployment_group_name: string
:param deployment_group_name: The name of an existing deployment group
for the specified application.
"""
params = {
'applicationName': application_name,
'deploymentGroupName': deployment_group_name,
}
return self.make_request(action='DeleteDeploymentGroup',
body=json.dumps(params))
def get_application(self, application_name):
"""
Gets information about an application.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
"""
params = {'applicationName': application_name, }
return self.make_request(action='GetApplication',
body=json.dumps(params))
def get_application_revision(self, application_name, revision):
"""
Gets information about an application revision.
:type application_name: string
:param application_name: The name of the application that corresponds
to the revision.
:type revision: dict
:param revision: Information about the application revision to get,
including the revision's type and its location.
"""
params = {
'applicationName': application_name,
'revision': revision,
}
return self.make_request(action='GetApplicationRevision',
body=json.dumps(params))
def get_deployment(self, deployment_id):
"""
Gets information about a deployment.
:type deployment_id: string
:param deployment_id: An existing deployment ID within the AWS user
account.
"""
params = {'deploymentId': deployment_id, }
return self.make_request(action='GetDeployment',
body=json.dumps(params))
def get_deployment_config(self, deployment_config_name):
"""
Gets information about a deployment configuration.
:type deployment_config_name: string
:param deployment_config_name: The name of an existing deployment
configuration within the AWS user account.
"""
params = {'deploymentConfigName': deployment_config_name, }
return self.make_request(action='GetDeploymentConfig',
body=json.dumps(params))
def get_deployment_group(self, application_name, deployment_group_name):
"""
Gets information about a deployment group.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type deployment_group_name: string
:param deployment_group_name: The name of an existing deployment group
for the specified application.
"""
params = {
'applicationName': application_name,
'deploymentGroupName': deployment_group_name,
}
return self.make_request(action='GetDeploymentGroup',
body=json.dumps(params))
def get_deployment_instance(self, deployment_id, instance_id):
"""
Gets information about an Amazon EC2 instance as part of a
deployment.
:type deployment_id: string
:param deployment_id: The unique ID of a deployment.
:type instance_id: string
:param instance_id: The unique ID of an Amazon EC2 instance in the
deployment's deployment group.
"""
params = {
'deploymentId': deployment_id,
'instanceId': instance_id,
}
return self.make_request(action='GetDeploymentInstance',
body=json.dumps(params))
def list_application_revisions(self, application_name, sort_by=None,
sort_order=None, s_3_bucket=None,
s_3_key_prefix=None, deployed=None,
next_token=None):
"""
Lists information about revisions for an application.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type sort_by: string
:param sort_by: The column name to sort the list results by:
+ registerTime: Sort the list results by when the revisions were
registered with AWS CodeDeploy.
        + firstUsedTime: Sort the list results by when the revisions were
          first used in a deployment.
+ lastUsedTime: Sort the list results by when the revisions were last
used in a deployment.
If not specified or set to null, the results will be returned in an
arbitrary order.
:type sort_order: string
:param sort_order: The order to sort the list results by:
+ ascending: Sort the list results in ascending order.
+ descending: Sort the list results in descending order.
If not specified, the results will be sorted in ascending order.
If set to null, the results will be sorted in an arbitrary order.
:type s_3_bucket: string
:param s_3_bucket: A specific Amazon S3 bucket name to limit the search
for revisions.
If set to null, then all of the user's buckets will be searched.
:type s_3_key_prefix: string
:param s_3_key_prefix: A specific key prefix for the set of Amazon S3
objects to limit the search for revisions.
:type deployed: string
:param deployed:
Whether to list revisions based on whether the revision is the target
            revision of a deployment group:
+ include: List revisions that are target revisions of a deployment
group.
+ exclude: Do not list revisions that are target revisions of a
deployment group.
+ ignore: List all revisions, regardless of whether they are target
revisions of a deployment group.
:type next_token: string
:param next_token: An identifier that was returned from the previous
list application revisions call, which can be used to return the
            next set of revisions in the list.
"""
params = {'applicationName': application_name, }
if sort_by is not None:
params['sortBy'] = sort_by
if sort_order is not None:
params['sortOrder'] = sort_order
if s_3_bucket is not None:
params['s3Bucket'] = s_3_bucket
if s_3_key_prefix is not None:
params['s3KeyPrefix'] = s_3_key_prefix
if deployed is not None:
params['deployed'] = deployed
if next_token is not None:
params['nextToken'] = next_token
return self.make_request(action='ListApplicationRevisions',
body=json.dumps(params))
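    # Illustrative call (`conn` is a hypothetical connection object): list
    # the revisions most recently used in deployments, newest first,
    # limited to one bucket. Parameter values come from the docstring above.
    #
    #     revs = conn.list_application_revisions(
    #         'MyApp', sort_by='lastUsedTime', sort_order='descending',
    #         s_3_bucket='my-bucket', deployed='include')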
def list_applications(self, next_token=None):
"""
Lists the applications registered within the AWS user account.
:type next_token: string
:param next_token: An identifier that was returned from the previous
list applications call, which can be used to return the next set of
applications in the list.
"""
params = {}
if next_token is not None:
params['nextToken'] = next_token
return self.make_request(action='ListApplications',
body=json.dumps(params))
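    # Usage sketch (illustrative only; `conn` is a hypothetical connection
    # object). All of the list_* operations paginate the same way: thread
    # the returned 'nextToken' back into the next call until it is absent.
    #
    #     apps, token = [], None
    #     while True:
    #         page = conn.list_applications(next_token=token)
    #         apps.extend(page.get('applications', []))
    #         token = page.get('nextToken')
    #         if not token:
    #             break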
def list_deployment_configs(self, next_token=None):
"""
Lists the deployment configurations within the AWS user
account.
:type next_token: string
:param next_token: An identifier that was returned from the previous
list deployment configurations call, which can be used to return
the next set of deployment configurations in the list.
"""
params = {}
if next_token is not None:
params['nextToken'] = next_token
return self.make_request(action='ListDeploymentConfigs',
body=json.dumps(params))
def list_deployment_groups(self, application_name, next_token=None):
"""
Lists the deployment groups for an application registered
within the AWS user account.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type next_token: string
:param next_token: An identifier that was returned from the previous
list deployment groups call, which can be used to return the next
set of deployment groups in the list.
"""
params = {'applicationName': application_name, }
if next_token is not None:
params['nextToken'] = next_token
return self.make_request(action='ListDeploymentGroups',
body=json.dumps(params))
def list_deployment_instances(self, deployment_id, next_token=None,
instance_status_filter=None):
"""
Lists the Amazon EC2 instances for a deployment within the AWS
user account.
:type deployment_id: string
:param deployment_id: The unique ID of a deployment.
:type next_token: string
:param next_token: An identifier that was returned from the previous
list deployment instances call, which can be used to return the
next set of deployment instances in the list.
:type instance_status_filter: list
:param instance_status_filter:
A subset of instances to list, by status:
+ Pending: Include in the resulting list those instances with pending
deployments.
+ InProgress: Include in the resulting list those instances with in-
progress deployments.
+ Succeeded: Include in the resulting list those instances with
succeeded deployments.
+ Failed: Include in the resulting list those instances with failed
deployments.
+ Skipped: Include in the resulting list those instances with skipped
deployments.
+ Unknown: Include in the resulting list those instances with
deployments in an unknown state.
"""
params = {'deploymentId': deployment_id, }
if next_token is not None:
params['nextToken'] = next_token
if instance_status_filter is not None:
params['instanceStatusFilter'] = instance_status_filter
return self.make_request(action='ListDeploymentInstances',
body=json.dumps(params))
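    # Illustrative call (hypothetical `conn` and deployment ID): the
    # status filter is a plain list of the state names documented above.
    #
    #     result = conn.list_deployment_instances(
    #         'd-EXAMPLE111', instance_status_filter=['Failed', 'Skipped'])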
def list_deployments(self, application_name=None,
deployment_group_name=None,
include_only_statuses=None, create_time_range=None,
next_token=None):
"""
Lists the deployments under a deployment group for an
application registered within the AWS user account.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type deployment_group_name: string
:param deployment_group_name: The name of an existing deployment group
for the specified application.
:type include_only_statuses: list
:param include_only_statuses: A subset of deployments to list, by
status:
+ Created: Include in the resulting list created deployments.
+ Queued: Include in the resulting list queued deployments.
+ In Progress: Include in the resulting list in-progress deployments.
+ Succeeded: Include in the resulting list succeeded deployments.
+ Failed: Include in the resulting list failed deployments.
+ Aborted: Include in the resulting list aborted deployments.
:type create_time_range: dict
:param create_time_range: A deployment creation start- and end-time
range for returning a subset of the list of deployments.
:type next_token: string
:param next_token: An identifier that was returned from the previous
list deployments call, which can be used to return the next set of
deployments in the list.
"""
params = {}
if application_name is not None:
params['applicationName'] = application_name
if deployment_group_name is not None:
params['deploymentGroupName'] = deployment_group_name
if include_only_statuses is not None:
params['includeOnlyStatuses'] = include_only_statuses
if create_time_range is not None:
params['createTimeRange'] = create_time_range
if next_token is not None:
params['nextToken'] = next_token
return self.make_request(action='ListDeployments',
body=json.dumps(params))
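    # Sketch of the create_time_range argument. Assumption: the service
    # expects a dict with 'start' and 'end' keys holding timestamps,
    # mirroring the wire-level time-range structure; verify against the
    # CodeDeploy API reference before relying on it.
    #
    #     conn.list_deployments(
    #         application_name='MyApp', deployment_group_name='Prod',
    #         create_time_range={'start': 1419200000, 'end': 1419286400})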
def register_application_revision(self, application_name, revision,
description=None):
"""
Registers with AWS CodeDeploy a revision for the specified
application.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type description: string
:param description: A comment about the revision.
:type revision: dict
:param revision: Information about the application revision to
register, including the revision's type and its location.
"""
params = {
'applicationName': application_name,
'revision': revision,
}
if description is not None:
params['description'] = description
return self.make_request(action='RegisterApplicationRevision',
body=json.dumps(params))
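    # Sketch of a `revision` dict for an S3-hosted bundle. The key names
    # follow CodeDeploy's revision-location structure; all values are
    # placeholders.
    #
    #     revision = {
    #         'revisionType': 'S3',
    #         's3Location': {
    #             'bucket': 'my-bucket',
    #             'key': 'bundle.zip',
    #             'bundleType': 'zip',
    #         },
    #     }
    #     conn.register_application_revision('MyApp', revision,
    #                                        description='nightly build')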
def stop_deployment(self, deployment_id):
"""
Attempts to stop an ongoing deployment.
:type deployment_id: string
:param deployment_id: The unique ID of a deployment.
"""
params = {'deploymentId': deployment_id, }
return self.make_request(action='StopDeployment',
body=json.dumps(params))
def update_application(self, application_name=None,
new_application_name=None):
"""
Changes an existing application's name.
:type application_name: string
:param application_name: The current name of the application that you
want to change.
:type new_application_name: string
:param new_application_name: The new name that you want to change the
application to.
"""
params = {}
if application_name is not None:
params['applicationName'] = application_name
if new_application_name is not None:
params['newApplicationName'] = new_application_name
return self.make_request(action='UpdateApplication',
body=json.dumps(params))
def update_deployment_group(self, application_name,
current_deployment_group_name,
new_deployment_group_name=None,
deployment_config_name=None,
ec_2_tag_filters=None,
auto_scaling_groups=None,
service_role_arn=None):
"""
Changes information about an existing deployment group.
:type application_name: string
:param application_name: The application name corresponding to the
deployment group to update.
:type current_deployment_group_name: string
:param current_deployment_group_name: The current name of the existing
deployment group.
:type new_deployment_group_name: string
:param new_deployment_group_name: The new name of the deployment group,
if you want to change it.
:type deployment_config_name: string
:param deployment_config_name: The replacement deployment configuration
name to use, if you want to change it.
:type ec_2_tag_filters: list
:param ec_2_tag_filters: The replacement set of Amazon EC2 tags to
filter on, if you want to change them.
:type auto_scaling_groups: list
:param auto_scaling_groups: The replacement list of Auto Scaling groups
to be included in the deployment group, if you want to change them.
:type service_role_arn: string
:param service_role_arn: A replacement service role's ARN, if you want
to change it.
"""
params = {
'applicationName': application_name,
'currentDeploymentGroupName': current_deployment_group_name,
}
if new_deployment_group_name is not None:
params['newDeploymentGroupName'] = new_deployment_group_name
if deployment_config_name is not None:
params['deploymentConfigName'] = deployment_config_name
if ec_2_tag_filters is not None:
params['ec2TagFilters'] = ec_2_tag_filters
if auto_scaling_groups is not None:
params['autoScalingGroups'] = auto_scaling_groups
if service_role_arn is not None:
params['serviceRoleArn'] = service_role_arn
return self.make_request(action='UpdateDeploymentGroup',
body=json.dumps(params))
def make_request(self, action, body):
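        # Shared JSON transport for every operation in this class: the
        # operation name rides in the X-Amz-Target header and the
        # parameters travel as a JSON document (application/x-amz-json-1.1)
        # in the POST body. Error responses carry a '__type' field that
        # selects the exception class raised below.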
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
# --- end of boto/codedeploy/layer1.py; boto/directconnect/layer1.py follows ---
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.directconnect import exceptions
from boto.compat import json
class DirectConnectConnection(AWSQueryConnection):
"""
AWS Direct Connect makes it easy to establish a dedicated network
connection from your premises to Amazon Web Services (AWS). Using
AWS Direct Connect, you can establish private connectivity between
AWS and your data center, office, or colocation environment, which
in many cases can reduce your network costs, increase bandwidth
throughput, and provide a more consistent network experience than
Internet-based connections.
The AWS Direct Connect API Reference provides descriptions,
syntax, and usage examples for each of the actions and data types
for AWS Direct Connect. Use the following links to get started
    using the AWS Direct Connect API Reference:
+ `Actions`_: An alphabetical list of all AWS Direct Connect
actions.
+ `Data Types`_: An alphabetical list of all AWS Direct Connect
data types.
+ `Common Query Parameters`_: Parameters that all Query actions
can use.
+ `Common Errors`_: Client and server errors that all actions can
return.
"""
APIVersion = "2012-10-25"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "directconnect.us-east-1.amazonaws.com"
ServiceName = "DirectConnect"
TargetPrefix = "OvertureService"
ResponseError = JSONResponseError
_faults = {
"DirectConnectClientException": exceptions.DirectConnectClientException,
"DirectConnectServerException": exceptions.DirectConnectServerException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(DirectConnectConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def allocate_connection_on_interconnect(self, bandwidth, connection_name,
owner_account, interconnect_id,
vlan):
"""
Creates a hosted connection on an interconnect.
Allocates a VLAN number and a specified amount of bandwidth
for use by a hosted connection on the given interconnect.
:type bandwidth: string
:param bandwidth: Bandwidth of the connection.
Example: " 500Mbps "
Default: None
:type connection_name: string
:param connection_name: Name of the provisioned connection.
Example: " 500M Connection to AWS "
Default: None
:type owner_account: string
:param owner_account: Numeric account Id of the customer for whom the
connection will be provisioned.
Example: 123443215678
Default: None
:type interconnect_id: string
:param interconnect_id: ID of the interconnect on which the connection
will be provisioned.
Example: dxcon-456abc78
Default: None
:type vlan: integer
:param vlan: The dedicated VLAN provisioned to the connection.
Example: 101
Default: None
"""
params = {
'bandwidth': bandwidth,
'connectionName': connection_name,
'ownerAccount': owner_account,
'interconnectId': interconnect_id,
'vlan': vlan,
}
return self.make_request(action='AllocateConnectionOnInterconnect',
body=json.dumps(params))
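    # Illustrative partner-side call (hypothetical `conn`); the values
    # mirror the examples given in the docstring above.
    #
    #     conn.allocate_connection_on_interconnect(
    #         bandwidth='500Mbps', connection_name='Customer A',
    #         owner_account='123443215678',
    #         interconnect_id='dxcon-456abc78', vlan=101)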
def allocate_private_virtual_interface(self, connection_id,
owner_account,
new_private_virtual_interface_allocation):
"""
Provisions a private virtual interface to be owned by a
different customer.
The owner of a connection calls this function to provision a
private virtual interface which will be owned by another AWS
customer.
Virtual interfaces created using this function must be
confirmed by the virtual interface owner by calling
ConfirmPrivateVirtualInterface. Until this step has been
completed, the virtual interface will be in 'Confirming'
state, and will not be available for handling traffic.
:type connection_id: string
:param connection_id: The connection ID on which the private virtual
interface is provisioned.
Default: None
:type owner_account: string
:param owner_account: The AWS account that will own the new private
virtual interface.
Default: None
:type new_private_virtual_interface_allocation: dict
:param new_private_virtual_interface_allocation: Detailed information
for the private virtual interface to be provisioned.
Default: None
"""
params = {
'connectionId': connection_id,
'ownerAccount': owner_account,
'newPrivateVirtualInterfaceAllocation': new_private_virtual_interface_allocation,
}
return self.make_request(action='AllocatePrivateVirtualInterface',
body=json.dumps(params))
def allocate_public_virtual_interface(self, connection_id, owner_account,
new_public_virtual_interface_allocation):
"""
Provisions a public virtual interface to be owned by a
different customer.
The owner of a connection calls this function to provision a
public virtual interface which will be owned by another AWS
customer.
Virtual interfaces created using this function must be
confirmed by the virtual interface owner by calling
ConfirmPublicVirtualInterface. Until this step has been
completed, the virtual interface will be in 'Confirming'
state, and will not be available for handling traffic.
:type connection_id: string
:param connection_id: The connection ID on which the public virtual
interface is provisioned.
Default: None
:type owner_account: string
:param owner_account: The AWS account that will own the new public
virtual interface.
Default: None
:type new_public_virtual_interface_allocation: dict
:param new_public_virtual_interface_allocation: Detailed information
for the public virtual interface to be provisioned.
Default: None
"""
params = {
'connectionId': connection_id,
'ownerAccount': owner_account,
'newPublicVirtualInterfaceAllocation': new_public_virtual_interface_allocation,
}
return self.make_request(action='AllocatePublicVirtualInterface',
body=json.dumps(params))
def confirm_connection(self, connection_id):
"""
Confirm the creation of a hosted connection on an
interconnect.
Upon creation, the hosted connection is initially in the
'Ordering' state, and will remain in this state until the
owner calls ConfirmConnection to confirm creation of the
hosted connection.
:type connection_id: string
:param connection_id: ID of the connection.
Example: dxcon-fg5678gh
Default: None
"""
params = {'connectionId': connection_id, }
return self.make_request(action='ConfirmConnection',
body=json.dumps(params))
def confirm_private_virtual_interface(self, virtual_interface_id,
virtual_gateway_id):
"""
Accept ownership of a private virtual interface created by
another customer.
After the virtual interface owner calls this function, the
virtual interface will be created and attached to the given
virtual private gateway, and will be available for handling
traffic.
:type virtual_interface_id: string
:param virtual_interface_id: ID of the virtual interface.
Example: dxvif-123dfg56
Default: None
:type virtual_gateway_id: string
:param virtual_gateway_id: ID of the virtual private gateway that will
be attached to the virtual interface.
A virtual private gateway can be managed via the Amazon Virtual Private
Cloud (VPC) console or the `EC2 CreateVpnGateway`_ action.
Default: None
"""
params = {
'virtualInterfaceId': virtual_interface_id,
'virtualGatewayId': virtual_gateway_id,
}
return self.make_request(action='ConfirmPrivateVirtualInterface',
body=json.dumps(params))
def confirm_public_virtual_interface(self, virtual_interface_id):
"""
Accept ownership of a public virtual interface created by
another customer.
After the virtual interface owner calls this function, the
specified virtual interface will be created and made available
for handling traffic.
:type virtual_interface_id: string
:param virtual_interface_id: ID of the virtual interface.
Example: dxvif-123dfg56
Default: None
"""
params = {'virtualInterfaceId': virtual_interface_id, }
return self.make_request(action='ConfirmPublicVirtualInterface',
body=json.dumps(params))
def create_connection(self, location, bandwidth, connection_name):
"""
Creates a new connection between the customer network and a
specific AWS Direct Connect location.
A connection links your internal network to an AWS Direct
Connect location over a standard 1 gigabit or 10 gigabit
Ethernet fiber-optic cable. One end of the cable is connected
to your router, the other to an AWS Direct Connect router. An
AWS Direct Connect location provides access to Amazon Web
Services in the region it is associated with. You can
establish connections with AWS Direct Connect locations in
multiple regions, but a connection in one region does not
provide connectivity to other regions.
:type location: string
:param location: Where the connection is located.
Example: EqSV5
Default: None
:type bandwidth: string
:param bandwidth: Bandwidth of the connection.
Example: 1Gbps
Default: None
:type connection_name: string
:param connection_name: The name of the connection.
Example: " My Connection to AWS "
Default: None
"""
params = {
'location': location,
'bandwidth': bandwidth,
'connectionName': connection_name,
}
return self.make_request(action='CreateConnection',
body=json.dumps(params))
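    # Illustrative call (hypothetical `conn`); values mirror the docstring
    # examples above.
    #
    #     conn.create_connection(location='EqSV5', bandwidth='1Gbps',
    #                            connection_name='My Connection to AWS')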
def create_interconnect(self, interconnect_name, bandwidth, location):
"""
Creates a new interconnect between a AWS Direct Connect
partner's network and a specific AWS Direct Connect location.
An interconnect is a connection which is capable of hosting
other connections. The AWS Direct Connect partner can use an
interconnect to provide sub-1Gbps AWS Direct Connect service
to tier 2 customers who do not have their own connections.
Like a standard connection, an interconnect links the AWS
Direct Connect partner's network to an AWS Direct Connect
location over a standard 1 Gbps or 10 Gbps Ethernet fiber-
optic cable. One end is connected to the partner's router, the
other to an AWS Direct Connect router.
For each end customer, the AWS Direct Connect partner
provisions a connection on their interconnect by calling
AllocateConnectionOnInterconnect. The end customer can then
connect to AWS resources by creating a virtual interface on
their connection, using the VLAN assigned to them by the AWS
Direct Connect partner.
:type interconnect_name: string
:param interconnect_name: The name of the interconnect.
Example: " 1G Interconnect to AWS "
Default: None
:type bandwidth: string
:param bandwidth: The port bandwidth
Example: 1Gbps
Default: None
        Available values: 1Gbps, 10Gbps
:type location: string
:param location: Where the interconnect is located
Example: EqSV5
Default: None
"""
params = {
'interconnectName': interconnect_name,
'bandwidth': bandwidth,
'location': location,
}
return self.make_request(action='CreateInterconnect',
body=json.dumps(params))
def create_private_virtual_interface(self, connection_id,
new_private_virtual_interface):
"""
Creates a new private virtual interface. A virtual interface
is the VLAN that transports AWS Direct Connect traffic. A
private virtual interface supports sending traffic to a single
virtual private cloud (VPC).
:type connection_id: string
:param connection_id: ID of the connection.
Example: dxcon-fg5678gh
Default: None
:type new_private_virtual_interface: dict
:param new_private_virtual_interface: Detailed information for the
private virtual interface to be created.
Default: None
"""
params = {
'connectionId': connection_id,
'newPrivateVirtualInterface': new_private_virtual_interface,
}
return self.make_request(action='CreatePrivateVirtualInterface',
body=json.dumps(params))
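    # Sketch of the new_private_virtual_interface dict. Assumption: the
    # key names follow the Direct Connect new-private-virtual-interface
    # wire structure; every value below is a placeholder.
    #
    #     vif = {
    #         'virtualInterfaceName': 'my-vif',
    #         'vlan': 101,
    #         'asn': 65000,
    #         'amazonAddress': '192.168.1.1/30',
    #         'customerAddress': '192.168.1.2/30',
    #         'virtualGatewayId': 'vgw-123abc45',
    #     }
    #     conn.create_private_virtual_interface('dxcon-fg5678gh', vif)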
def create_public_virtual_interface(self, connection_id,
new_public_virtual_interface):
"""
Creates a new public virtual interface. A virtual interface is
the VLAN that transports AWS Direct Connect traffic. A public
virtual interface supports sending traffic to public services
of AWS such as Amazon Simple Storage Service (Amazon S3).
:type connection_id: string
:param connection_id: ID of the connection.
Example: dxcon-fg5678gh
Default: None
:type new_public_virtual_interface: dict
:param new_public_virtual_interface: Detailed information for the
public virtual interface to be created.
Default: None
"""
params = {
'connectionId': connection_id,
'newPublicVirtualInterface': new_public_virtual_interface,
}
return self.make_request(action='CreatePublicVirtualInterface',
body=json.dumps(params))
def delete_connection(self, connection_id):
"""
Deletes the connection.
Deleting a connection only stops the AWS Direct Connect port
hour and data transfer charges. You need to cancel separately
with the providers any services or charges for cross-connects
or network circuits that connect you to the AWS Direct Connect
location.
:type connection_id: string
:param connection_id: ID of the connection.
Example: dxcon-fg5678gh
Default: None
"""
params = {'connectionId': connection_id, }
return self.make_request(action='DeleteConnection',
body=json.dumps(params))
def delete_interconnect(self, interconnect_id):
"""
Deletes the specified interconnect.
:type interconnect_id: string
:param interconnect_id: The ID of the interconnect.
Example: dxcon-abc123
"""
params = {'interconnectId': interconnect_id, }
return self.make_request(action='DeleteInterconnect',
body=json.dumps(params))
def delete_virtual_interface(self, virtual_interface_id):
"""
Deletes a virtual interface.
:type virtual_interface_id: string
:param virtual_interface_id: ID of the virtual interface.
Example: dxvif-123dfg56
Default: None
"""
params = {'virtualInterfaceId': virtual_interface_id, }
return self.make_request(action='DeleteVirtualInterface',
body=json.dumps(params))
def describe_connections(self, connection_id=None):
"""
Displays all connections in this region.
If a connection ID is provided, the call returns only that
particular connection.
:type connection_id: string
:param connection_id: ID of the connection.
Example: dxcon-fg5678gh
Default: None
"""
params = {}
if connection_id is not None:
params['connectionId'] = connection_id
return self.make_request(action='DescribeConnections',
body=json.dumps(params))
def describe_connections_on_interconnect(self, interconnect_id):
"""
Return a list of connections that have been provisioned on the
given interconnect.
:type interconnect_id: string
:param interconnect_id: ID of the interconnect on which a list of
connection is provisioned.
Example: dxcon-abc123
Default: None
"""
params = {'interconnectId': interconnect_id, }
return self.make_request(action='DescribeConnectionsOnInterconnect',
body=json.dumps(params))
def describe_interconnects(self, interconnect_id=None):
"""
Returns a list of interconnects owned by the AWS account.
If an interconnect ID is provided, it will only return this
particular interconnect.
:type interconnect_id: string
:param interconnect_id: The ID of the interconnect.
Example: dxcon-abc123
"""
params = {}
if interconnect_id is not None:
params['interconnectId'] = interconnect_id
return self.make_request(action='DescribeInterconnects',
body=json.dumps(params))
def describe_locations(self):
"""
Returns the list of AWS Direct Connect locations in the
current AWS region. These are the locations that may be
selected when calling CreateConnection or CreateInterconnect.
"""
params = {}
return self.make_request(action='DescribeLocations',
body=json.dumps(params))
def describe_virtual_gateways(self):
"""
Returns a list of virtual private gateways owned by the AWS
account.
You can create one or more AWS Direct Connect private virtual
interfaces linking to a virtual private gateway. A virtual
private gateway can be managed via Amazon Virtual Private
Cloud (VPC) console or the `EC2 CreateVpnGateway`_ action.
"""
params = {}
return self.make_request(action='DescribeVirtualGateways',
body=json.dumps(params))
def describe_virtual_interfaces(self, connection_id=None,
virtual_interface_id=None):
"""
Displays all virtual interfaces for an AWS account. Virtual
interfaces deleted fewer than 15 minutes before
DescribeVirtualInterfaces is called are also returned. If a
connection ID is included then only virtual interfaces
associated with this connection will be returned. If a virtual
interface ID is included then only a single virtual interface
will be returned.
A virtual interface (VLAN) transmits the traffic between the
AWS Direct Connect location and the customer.
If a connection ID is provided, only virtual interfaces
provisioned on the specified connection will be returned. If a
virtual interface ID is provided, only this particular virtual
interface will be returned.
:type connection_id: string
:param connection_id: ID of the connection.
Example: dxcon-fg5678gh
Default: None
:type virtual_interface_id: string
:param virtual_interface_id: ID of the virtual interface.
Example: dxvif-123dfg56
Default: None
"""
params = {}
if connection_id is not None:
params['connectionId'] = connection_id
if virtual_interface_id is not None:
params['virtualInterfaceId'] = virtual_interface_id
return self.make_request(action='DescribeVirtualInterfaces',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
# --- end of boto/directconnect/layer1.py; boto/machinelearning/layer1.py follows ---
import boto
from boto.compat import json, urlsplit
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.machinelearning import exceptions
class MachineLearningConnection(AWSQueryConnection):
"""
Definition of the public APIs exposed by Amazon Machine Learning
"""
APIVersion = "2014-12-12"
AuthServiceName = 'machinelearning'
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "machinelearning.us-east-1.amazonaws.com"
ServiceName = "MachineLearning"
TargetPrefix = "AmazonML_20141212"
ResponseError = JSONResponseError
_faults = {
"InternalServerException": exceptions.InternalServerException,
"LimitExceededException": exceptions.LimitExceededException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"IdempotentParameterMismatchException": exceptions.IdempotentParameterMismatchException,
"PredictorNotMountedException": exceptions.PredictorNotMountedException,
"InvalidInputException": exceptions.InvalidInputException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(MachineLearningConnection, self).__init__(**kwargs)
self.region = region
self.auth_region_name = self.region.name
def _required_auth_capability(self):
return ['hmac-v4']
def create_batch_prediction(self, batch_prediction_id, ml_model_id,
batch_prediction_data_source_id, output_uri,
batch_prediction_name=None):
"""
Generates predictions for a group of observations. The
observations to process exist in one or more data files
referenced by a `DataSource`. This operation creates a new
`BatchPrediction`, and uses an `MLModel` and the data files
referenced by the `DataSource` as information sources.
`CreateBatchPrediction` is an asynchronous operation. In
response to `CreateBatchPrediction`, Amazon Machine Learning
(Amazon ML) immediately returns and sets the `BatchPrediction`
status to `PENDING`. After the `BatchPrediction` completes,
Amazon ML sets the status to `COMPLETED`.
You can poll for status updates by using the
GetBatchPrediction operation and checking the `Status`
parameter of the result. After the `COMPLETED` status appears,
the results are available in the location specified by the
`OutputUri` parameter.
:type batch_prediction_id: string
:param batch_prediction_id: A user-supplied ID that uniquely identifies
the `BatchPrediction`.
:type batch_prediction_name: string
:param batch_prediction_name: A user-supplied name or description of
the `BatchPrediction`. `BatchPredictionName` can only use the UTF-8
character set.
:type ml_model_id: string
:param ml_model_id: The ID of the `MLModel` that will generate
predictions for the group of observations.
:type batch_prediction_data_source_id: string
:param batch_prediction_data_source_id: The ID of the `DataSource` that
points to the group of observations to predict.
:type output_uri: string
:param output_uri: The location of an Amazon Simple Storage Service
(Amazon S3) bucket or directory to store the batch prediction
results. The following substrings are not allowed in the s3 key
portion of the "outputURI" field: ':', '//', '/./', '/../'.
Amazon ML needs permissions to store and retrieve the logs on your
behalf. For information about how to set permissions, see the
`Amazon Machine Learning Developer Guide`_.
"""
params = {
'BatchPredictionId': batch_prediction_id,
'MLModelId': ml_model_id,
'BatchPredictionDataSourceId': batch_prediction_data_source_id,
'OutputUri': output_uri,
}
if batch_prediction_name is not None:
params['BatchPredictionName'] = batch_prediction_name
return self.make_request(action='CreateBatchPrediction',
body=json.dumps(params))
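    # Usage sketch (hypothetical `conn` and IDs; assumes the companion
    # get_batch_prediction operation that the docstring above refers to).
    # The call returns immediately with the job in PENDING state, so
    # callers typically poll until it reaches a terminal status:
    #
    #     import time
    #     conn.create_batch_prediction('bp-1', 'ml-1', 'ds-1',
    #                                  's3://my-bucket/output/')
    #     while conn.get_batch_prediction('bp-1')['Status'] not in (
    #             'COMPLETED', 'FAILED'):
    #         time.sleep(30)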
def create_data_source_from_rds(self, data_source_id, rds_data, role_arn,
data_source_name=None,
compute_statistics=None):
"""
Creates a `DataSource` object from an ` Amazon Relational
Database Service`_ (Amazon RDS). A `DataSource` references
data that can be used to perform CreateMLModel,
CreateEvaluation, or CreateBatchPrediction operations.
`CreateDataSourceFromRDS` is an asynchronous operation. In
response to `CreateDataSourceFromRDS`, Amazon Machine Learning
(Amazon ML) immediately returns and sets the `DataSource`
status to `PENDING`. After the `DataSource` is created and
ready for use, Amazon ML sets the `Status` parameter to
`COMPLETED`. `DataSource` in `COMPLETED` or `PENDING` status
can only be used to perform CreateMLModel, CreateEvaluation,
or CreateBatchPrediction operations.
If Amazon ML cannot accept the input source, it sets the
`Status` parameter to `FAILED` and includes an error message
in the `Message` attribute of the GetDataSource operation
response.
:type data_source_id: string
:param data_source_id: A user-supplied ID that uniquely identifies the
`DataSource`. Typically, an Amazon Resource Number (ARN) becomes
the ID for a `DataSource`.
:type data_source_name: string
:param data_source_name: A user-supplied name or description of the
`DataSource`.
:type rds_data: dict
:param rds_data:
The data specification of an Amazon RDS `DataSource`:
+ DatabaseInformation -
+ `DatabaseName ` - Name of the Amazon RDS database.
+ ` InstanceIdentifier ` - Unique identifier for the Amazon RDS
database instance.
+ DatabaseCredentials - AWS Identity and Access Management (IAM)
credentials that are used to connect to the Amazon RDS database.
+ ResourceRole - Role (DataPipelineDefaultResourceRole) assumed by an
Amazon Elastic Compute Cloud (EC2) instance to carry out the copy
task from Amazon RDS to Amazon S3. For more information, see `Role
templates`_ for data pipelines.
+ ServiceRole - Role (DataPipelineDefaultRole) assumed by the AWS Data
Pipeline service to monitor the progress of the copy task from
Amazon RDS to Amazon Simple Storage Service (S3). For more
information, see `Role templates`_ for data pipelines.
+ SecurityInfo - Security information to use to access an Amazon RDS
instance. You need to set up appropriate ingress rules for the
security entity IDs provided to allow access to the Amazon RDS
instance. Specify a [ `SubnetId`, `SecurityGroupIds`] pair for a
VPC-based Amazon RDS instance.
+ SelectSqlQuery - Query that is used to retrieve the observation data
for the `Datasource`.
+ S3StagingLocation - Amazon S3 location for staging RDS data. The data
retrieved from Amazon RDS using `SelectSqlQuery` is stored in this
location.
+ DataSchemaUri - Amazon S3 location of the `DataSchema`.
+ DataSchema - A JSON string representing the schema. This is not
required if `DataSchemaUri` is specified.
+ DataRearrangement - A JSON string representing the splitting
requirement of a `Datasource`. Sample - ` "{\"randomSeed\":\"some-
random-seed\",
\"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"`
:type role_arn: string
:param role_arn: The role that Amazon ML assumes on behalf of the user
to create and activate a data pipeline in the users account and
copy data (using the `SelectSqlQuery`) query from Amazon RDS to
Amazon S3.
:type compute_statistics: boolean
:param compute_statistics: The compute statistics for a `DataSource`.
The statistics are generated from the observation data referenced
by a `DataSource`. Amazon ML uses the statistics internally during
an `MLModel` training. This parameter must be set to `True` if the
``DataSource `` needs to be used for `MLModel` training.
"""
params = {
'DataSourceId': data_source_id,
'RDSData': rds_data,
'RoleARN': role_arn,
}
if data_source_name is not None:
params['DataSourceName'] = data_source_name
if compute_statistics is not None:
params['ComputeStatistics'] = compute_statistics
return self.make_request(action='CreateDataSourceFromRDS',
body=json.dumps(params))
def create_data_source_from_redshift(self, data_source_id, data_spec,
role_arn, data_source_name=None,
compute_statistics=None):
"""
Creates a `DataSource` from `Amazon Redshift`_. A `DataSource`
references data that can be used to perform either
CreateMLModel, CreateEvaluation or CreateBatchPrediction
operations.
`CreateDataSourceFromRedshift` is an asynchronous operation.
In response to `CreateDataSourceFromRedshift`, Amazon Machine
Learning (Amazon ML) immediately returns and sets the
`DataSource` status to `PENDING`. After the `DataSource` is
created and ready for use, Amazon ML sets the `Status`
parameter to `COMPLETED`. `DataSource` in `COMPLETED` or
`PENDING` status can only be used to perform CreateMLModel,
CreateEvaluation, or CreateBatchPrediction operations.
If Amazon ML cannot accept the input source, it sets the
`Status` parameter to `FAILED` and includes an error message
in the `Message` attribute of the GetDataSource operation
response.
The observations should exist in the database hosted on an
Amazon Redshift cluster and should be specified by a
`SelectSqlQuery`. Amazon ML executes ` Unload`_ command in
Amazon Redshift to transfer the result set of `SelectSqlQuery`
to `S3StagingLocation.`
After the `DataSource` is created, it's ready for use in
evaluations and batch predictions. If you plan to use the
`DataSource` to train an `MLModel`, the `DataSource` requires
another item -- a recipe. A recipe describes the observation
variables that participate in training an `MLModel`. A recipe
describes how each input variable will be used in training.
Will the variable be included or excluded from training? Will
the variable be manipulated, for example, combined with
another variable or split apart into word combinations? The
recipe provides answers to these questions. For more
information, see the Amazon Machine Learning Developer Guide.
:type data_source_id: string
:param data_source_id: A user-supplied ID that uniquely identifies the
`DataSource`.
:type data_source_name: string
:param data_source_name: A user-supplied name or description of the
`DataSource`.
:type data_spec: dict
:param data_spec:
The data specification of an Amazon Redshift `DataSource`:
+ DatabaseInformation -
+ `DatabaseName ` - Name of the Amazon Redshift database.
+ ` ClusterIdentifier ` - Unique ID for the Amazon Redshift cluster.
        + DatabaseCredentials - AWS Identity and Access Management (IAM)
credentials that are used to connect to the Amazon Redshift
database.
+ SelectSqlQuery - Query that is used to retrieve the observation data
for the `Datasource`.
+ S3StagingLocation - Amazon Simple Storage Service (Amazon S3)
location for staging Amazon Redshift data. The data retrieved from
            Amazon Redshift using
`SelectSqlQuery` is stored in this location.
+ DataSchemaUri - Amazon S3 location of the `DataSchema`.
+ DataSchema - A JSON string representing the schema. This is not
required if `DataSchemaUri` is specified.
+ DataRearrangement - A JSON string representing the splitting
requirement of a `Datasource`. Sample - ` "{\"randomSeed\":\"some-
random-seed\",
\"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"`
:type role_arn: string
:param role_arn: A fully specified role Amazon Resource Name (ARN).
Amazon ML assumes the role on behalf of the user to create the
following:
+ A security group to allow Amazon ML to execute the `SelectSqlQuery`
query on an Amazon Redshift cluster
+ An Amazon S3 bucket policy to grant Amazon ML read/write permissions
on the `S3StagingLocation`
:type compute_statistics: boolean
:param compute_statistics: The compute statistics for a `DataSource`.
The statistics are generated from the observation data referenced
by a `DataSource`. Amazon ML uses the statistics internally during
`MLModel` training. This parameter must be set to `True` if the
``DataSource `` needs to be used for `MLModel` training
"""
params = {
'DataSourceId': data_source_id,
'DataSpec': data_spec,
'RoleARN': role_arn,
}
if data_source_name is not None:
params['DataSourceName'] = data_source_name
if compute_statistics is not None:
params['ComputeStatistics'] = compute_statistics
return self.make_request(action='CreateDataSourceFromRedshift',
body=json.dumps(params))
def create_data_source_from_s3(self, data_source_id, data_spec,
data_source_name=None,
compute_statistics=None):
"""
Creates a `DataSource` object. A `DataSource` references data
that can be used to perform CreateMLModel, CreateEvaluation,
or CreateBatchPrediction operations.
`CreateDataSourceFromS3` is an asynchronous operation. In
response to `CreateDataSourceFromS3`, Amazon Machine Learning
(Amazon ML) immediately returns and sets the `DataSource`
status to `PENDING`. After the `DataSource` is created and
ready for use, Amazon ML sets the `Status` parameter to
`COMPLETED`. `DataSource` in `COMPLETED` or `PENDING` status
can only be used to perform CreateMLModel, CreateEvaluation or
CreateBatchPrediction operations.
If Amazon ML cannot accept the input source, it sets the
`Status` parameter to `FAILED` and includes an error message
in the `Message` attribute of the GetDataSource operation
response.
The observation data used in a `DataSource` should be ready to
use; that is, it should have a consistent structure, and
missing data values should be kept to a minimum. The
observation data must reside in one or more CSV files in an
Amazon Simple Storage Service (Amazon S3) bucket, along with a
schema that describes the data items by name and type. The
same schema must be used for all of the data files referenced
by the `DataSource`.
After the `DataSource` has been created, it's ready to use in
evaluations and batch predictions. If you plan to use the
`DataSource` to train an `MLModel`, the `DataSource` requires
another item: a recipe. A recipe describes the observation
variables that participate in training an `MLModel`. A recipe
describes how each input variable will be used in training.
Will the variable be included or excluded from training? Will
the variable be manipulated, for example, combined with
another variable, or split apart into word combinations? The
recipe provides answers to these questions. For more
information, see the `Amazon Machine Learning Developer
Guide`_.
:type data_source_id: string
:param data_source_id: A user-supplied identifier that uniquely
identifies the `DataSource`.
:type data_source_name: string
:param data_source_name: A user-supplied name or description of the
`DataSource`.
:type data_spec: dict
:param data_spec:
The data specification of a `DataSource`:
+ DataLocationS3 - Amazon Simple Storage Service (Amazon S3) location
of the observation data.
+ DataSchemaLocationS3 - Amazon S3 location of the `DataSchema`.
+ DataSchema - A JSON string representing the schema. This is not
            required if `DataSchemaLocationS3` is specified.
+ DataRearrangement - A JSON string representing the splitting
requirement of a `Datasource`. Sample - ` "{\"randomSeed\":\"some-
random-seed\",
\"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"`
:type compute_statistics: boolean
:param compute_statistics: The compute statistics for a `DataSource`.
The statistics are generated from the observation data referenced
by a `DataSource`. Amazon ML uses the statistics internally during
an `MLModel` training. This parameter must be set to `True` if the
``DataSource `` needs to be used for `MLModel` training
"""
params = {
'DataSourceId': data_source_id,
'DataSpec': data_spec,
}
if data_source_name is not None:
params['DataSourceName'] = data_source_name
if compute_statistics is not None:
params['ComputeStatistics'] = compute_statistics
return self.make_request(action='CreateDataSourceFromS3',
body=json.dumps(params))
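    # Sketch of a minimal S3 data_spec; the key names come from the
    # docstring's bullet list and the paths are placeholders.
    #
    #     data_spec = {
    #         'DataLocationS3': 's3://my-bucket/observations.csv',
    #         'DataSchemaLocationS3': 's3://my-bucket/observations.csv.schema',
    #     }
    #     conn.create_data_source_from_s3('ds-1', data_spec,
    #                                     compute_statistics=True)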
def create_evaluation(self, evaluation_id, ml_model_id,
evaluation_data_source_id, evaluation_name=None):
"""
Creates a new `Evaluation` of an `MLModel`. An `MLModel` is
        evaluated on a set of observations associated with a
`DataSource`. Like a `DataSource` for an `MLModel`, the
`DataSource` for an `Evaluation` contains values for the
Target Variable. The `Evaluation` compares the predicted
result for each observation to the actual outcome and provides
a summary so that you know how effective the `MLModel`
functions on the test data. Evaluation generates a relevant
performance metric such as BinaryAUC, RegressionRMSE or
MulticlassAvgFScore based on the corresponding `MLModelType`:
`BINARY`, `REGRESSION` or `MULTICLASS`.
`CreateEvaluation` is an asynchronous operation. In response
to `CreateEvaluation`, Amazon Machine Learning (Amazon ML)
immediately returns and sets the evaluation status to
`PENDING`. After the `Evaluation` is created and ready for
use, Amazon ML sets the status to `COMPLETED`.
You can use the GetEvaluation operation to check progress of
the evaluation during the creation operation.
:type evaluation_id: string
:param evaluation_id: A user-supplied ID that uniquely identifies the
`Evaluation`.
:type evaluation_name: string
:param evaluation_name: A user-supplied name or description of the
`Evaluation`.
:type ml_model_id: string
:param ml_model_id: The ID of the `MLModel` to evaluate.
The schema used in creating the `MLModel` must match the schema of the
`DataSource` used in the `Evaluation`.
:type evaluation_data_source_id: string
:param evaluation_data_source_id: The ID of the `DataSource` for the
evaluation. The schema of the `DataSource` must match the schema
used to create the `MLModel`.
"""
params = {
'EvaluationId': evaluation_id,
'MLModelId': ml_model_id,
'EvaluationDataSourceId': evaluation_data_source_id,
}
if evaluation_name is not None:
params['EvaluationName'] = evaluation_name
return self.make_request(action='CreateEvaluation',
body=json.dumps(params))
def create_ml_model(self, ml_model_id, ml_model_type,
training_data_source_id, ml_model_name=None,
parameters=None, recipe=None, recipe_uri=None):
"""
Creates a new `MLModel` using the data files and the recipe as
information sources.
An `MLModel` is nearly immutable. Users can only update the
`MLModelName` and the `ScoreThreshold` in an `MLModel` without
creating a new `MLModel`.
`CreateMLModel` is an asynchronous operation. In response to
`CreateMLModel`, Amazon Machine Learning (Amazon ML)
immediately returns and sets the `MLModel` status to
`PENDING`. After the `MLModel` is created and ready for use,
Amazon ML sets the status to `COMPLETED`.
You can use the GetMLModel operation to check progress of the
`MLModel` during the creation operation.
CreateMLModel requires a `DataSource` with computed
statistics, which can be created by setting
`ComputeStatistics` to `True` in CreateDataSourceFromRDS,
CreateDataSourceFromS3, or CreateDataSourceFromRedshift
operations.
:type ml_model_id: string
:param ml_model_id: A user-supplied ID that uniquely identifies the
`MLModel`.
:type ml_model_name: string
:param ml_model_name: A user-supplied name or description of the
`MLModel`.
:type ml_model_type: string
:param ml_model_type: The category of supervised learning that this
`MLModel` will address. Choose from the following types:
+ Choose `REGRESSION` if the `MLModel` will be used to predict a
numeric value.
+ Choose `BINARY` if the `MLModel` result has two possible values.
+ Choose `MULTICLASS` if the `MLModel` result has a limited number of
values.
For more information, see the `Amazon Machine Learning Developer
Guide`_.
:type parameters: map
:param parameters:
A list of the training parameters in the `MLModel`. The list is
implemented as a map of key/value pairs.
The following is the current set of training parameters:
+ `sgd.l1RegularizationAmount` - Coefficient regularization L1 norm. It
controls overfitting the data by penalizing large coefficients.
            This tends to drive coefficients to zero, resulting in a sparse
feature set. If you use this parameter, start by specifying a small
value such as 1.0E-08. The value is a double that ranges from 0 to
MAX_DOUBLE. The default is not to use L1 normalization. The
parameter cannot be used when `L2` is specified. Use this parameter
sparingly.
+ `sgd.l2RegularizationAmount` - Coefficient regularization L2 norm. It
controls overfitting the data by penalizing large coefficients.
This tends to drive coefficients to small, nonzero values. If you
use this parameter, start by specifying a small value such as
            1.0E-08. The value is a double that ranges from 0 to MAX_DOUBLE.
The default is not to use L2 normalization. This cannot be used
when `L1` is specified. Use this parameter sparingly.
+ `sgd.maxPasses` - Number of times that the training process traverses
the observations to build the `MLModel`. The value is an integer
that ranges from 1 to 10000. The default value is 10.
+ `sgd.maxMLModelSizeInBytes` - Maximum allowed size of the model.
Depending on the input data, the size of the model might affect its
performance. The value is an integer that ranges from 100000 to
2147483648. The default value is 33554432.
:type training_data_source_id: string
:param training_data_source_id: The `DataSource` that points to the
training data.
:type recipe: string
:param recipe: The data recipe for creating `MLModel`. You must specify
            either the recipe or its URI. If you don't specify a recipe or its
URI, Amazon ML creates a default.
:type recipe_uri: string
:param recipe_uri: The Amazon Simple Storage Service (Amazon S3)
location and file name that contains the `MLModel` recipe. You must
            specify either the recipe or its URI. If you don't specify a recipe
or its URI, Amazon ML creates a default.
"""
params = {
'MLModelId': ml_model_id,
'MLModelType': ml_model_type,
'TrainingDataSourceId': training_data_source_id,
}
if ml_model_name is not None:
params['MLModelName'] = ml_model_name
if parameters is not None:
params['Parameters'] = parameters
if recipe is not None:
params['Recipe'] = recipe
if recipe_uri is not None:
params['RecipeUri'] = recipe_uri
return self.make_request(action='CreateMLModel',
body=json.dumps(params))
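    # Illustrative training-parameter map built from the keys documented
    # above. The values are strings on the wire; these particular numbers
    # are placeholders, not tuning recommendations.
    #
    #     params = {
    #         'sgd.maxPasses': '30',
    #         'sgd.l2RegularizationAmount': '1.0E-08',
    #     }
    #     conn.create_ml_model('ml-1', 'BINARY', 'ds-1', parameters=params)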
def create_realtime_endpoint(self, ml_model_id):
"""
Creates a real-time endpoint for the `MLModel`. The endpoint
contains the URI of the `MLModel`; that is, the location to
send real-time prediction requests for the specified
`MLModel`.
:type ml_model_id: string
:param ml_model_id: The ID assigned to the `MLModel` during creation.
"""
params = {'MLModelId': ml_model_id, }
return self.make_request(action='CreateRealtimeEndpoint',
body=json.dumps(params))
def delete_batch_prediction(self, batch_prediction_id):
"""
Assigns the DELETED status to a `BatchPrediction`, rendering
it unusable.
After using the `DeleteBatchPrediction` operation, you can use
the GetBatchPrediction operation to verify that the status of
the `BatchPrediction` changed to DELETED.
The result of the `DeleteBatchPrediction` operation is
irreversible.
:type batch_prediction_id: string
:param batch_prediction_id: A user-supplied ID that uniquely identifies
the `BatchPrediction`.
"""
params = {'BatchPredictionId': batch_prediction_id, }
return self.make_request(action='DeleteBatchPrediction',
body=json.dumps(params))
def delete_data_source(self, data_source_id):
"""
Assigns the DELETED status to a `DataSource`, rendering it
unusable.
After using the `DeleteDataSource` operation, you can use the
GetDataSource operation to verify that the status of the
`DataSource` changed to DELETED.
The results of the `DeleteDataSource` operation are
irreversible.
:type data_source_id: string
:param data_source_id: A user-supplied ID that uniquely identifies the
`DataSource`.
"""
params = {'DataSourceId': data_source_id, }
return self.make_request(action='DeleteDataSource',
body=json.dumps(params))
def delete_evaluation(self, evaluation_id):
"""
Assigns the `DELETED` status to an `Evaluation`, rendering it
unusable.
After invoking the `DeleteEvaluation` operation, you can use
the GetEvaluation operation to verify that the status of the
`Evaluation` changed to `DELETED`.
The results of the `DeleteEvaluation` operation are
irreversible.
:type evaluation_id: string
:param evaluation_id: A user-supplied ID that uniquely identifies the
`Evaluation` to delete.
"""
params = {'EvaluationId': evaluation_id, }
return self.make_request(action='DeleteEvaluation',
body=json.dumps(params))
def delete_ml_model(self, ml_model_id):
"""
Assigns the DELETED status to an `MLModel`, rendering it
unusable.
After using the `DeleteMLModel` operation, you can use the
GetMLModel operation to verify that the status of the
`MLModel` changed to DELETED.
The result of the `DeleteMLModel` operation is irreversible.
:type ml_model_id: string
:param ml_model_id: A user-supplied ID that uniquely identifies the
`MLModel`.
"""
params = {'MLModelId': ml_model_id, }
return self.make_request(action='DeleteMLModel',
body=json.dumps(params))
def delete_realtime_endpoint(self, ml_model_id):
"""
Deletes a real time endpoint of an `MLModel`.
:type ml_model_id: string
:param ml_model_id: The ID assigned to the `MLModel` during creation.
"""
params = {'MLModelId': ml_model_id, }
return self.make_request(action='DeleteRealtimeEndpoint',
body=json.dumps(params))
def describe_batch_predictions(self, filter_variable=None, eq=None,
gt=None, lt=None, ge=None, le=None,
ne=None, prefix=None, sort_order=None,
next_token=None, limit=None):
"""
Returns a list of `BatchPrediction` operations that match the
search criteria in the request.
:type filter_variable: string
:param filter_variable:
Use one of the following variables to filter a list of
`BatchPrediction`:
+ `CreatedAt` - Sets the search criteria to the `BatchPrediction`
creation date.
+ `Status` - Sets the search criteria to the `BatchPrediction` status.
+ `Name` - Sets the search criteria to the contents of the
`BatchPrediction` ** ** `Name`.
+ `IAMUser` - Sets the search criteria to the user account that invoked
the `BatchPrediction` creation.
+ `MLModelId` - Sets the search criteria to the `MLModel` used in the
`BatchPrediction`.
+ `DataSourceId` - Sets the search criteria to the `DataSource` used in
the `BatchPrediction`.
+ `DataURI` - Sets the search criteria to the data file(s) used in the
            `BatchPrediction`. The URI can identify either a file or an Amazon
            Simple Storage Service (Amazon S3) bucket or directory.
:type eq: string
:param eq: The equal to operator. The `BatchPrediction` results will
have `FilterVariable` values that exactly match the value specified
with `EQ`.
:type gt: string
:param gt: The greater than operator. The `BatchPrediction` results
will have `FilterVariable` values that are greater than the value
specified with `GT`.
:type lt: string
:param lt: The less than operator. The `BatchPrediction` results will
have `FilterVariable` values that are less than the value specified
with `LT`.
:type ge: string
:param ge: The greater than or equal to operator. The `BatchPrediction`
results will have `FilterVariable` values that are greater than or
equal to the value specified with `GE`.
:type le: string
:param le: The less than or equal to operator. The `BatchPrediction`
results will have `FilterVariable` values that are less than or
equal to the value specified with `LE`.
:type ne: string
:param ne: The not equal to operator. The `BatchPrediction` results
will have `FilterVariable` values not equal to the value specified
with `NE`.
:type prefix: string
:param prefix:
A string that is found at the beginning of a variable, such as `Name`
or `Id`.
For example, a `Batch Prediction` operation could have the `Name`
`2014-09-09-HolidayGiftMailer`. To search for this
`BatchPrediction`, select `Name` for the `FilterVariable` and any
of the following strings for the `Prefix`:
+ 2014-09
+ 2014-09-09
+ 2014-09-09-Holiday
:type sort_order: string
:param sort_order: A two-value parameter that determines the sequence
            of the resulting list of `BatchPrediction`s.
+ `asc` - Arranges the list in ascending order (A-Z, 0-9).
+ `dsc` - Arranges the list in descending order (Z-A, 9-0).
Results are sorted by `FilterVariable`.
:type next_token: string
:param next_token: An ID of the page in the paginated results.
:type limit: integer
:param limit: The number of pages of information to include in the
result. The range of acceptable values is 1 through 100. The
default value is 100.
"""
params = {}
if filter_variable is not None:
params['FilterVariable'] = filter_variable
if eq is not None:
params['EQ'] = eq
if gt is not None:
params['GT'] = gt
if lt is not None:
params['LT'] = lt
if ge is not None:
params['GE'] = ge
if le is not None:
params['LE'] = le
if ne is not None:
params['NE'] = ne
if prefix is not None:
params['Prefix'] = prefix
if sort_order is not None:
params['SortOrder'] = sort_order
if next_token is not None:
params['NextToken'] = next_token
if limit is not None:
params['Limit'] = limit
return self.make_request(action='DescribeBatchPredictions',
body=json.dumps(params))
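    # A minimal usage sketch (hypothetical region and prefix, not part of
    # this module): list batch predictions whose Name starts with a given
    # prefix, in descending name order.
    #
    #   conn = boto.machinelearning.connect_to_region('us-east-1')
    #   page = conn.describe_batch_predictions(
    #       filter_variable='Name', prefix='2014-09-09',
    #       sort_order='dsc', limit=25)
    #   for bp in page.get('Results', []):
    #       print(bp['BatchPredictionId'], bp['Status'])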
def describe_data_sources(self, filter_variable=None, eq=None, gt=None,
lt=None, ge=None, le=None, ne=None,
prefix=None, sort_order=None, next_token=None,
limit=None):
"""
Returns a list of `DataSource` that match the search criteria
in the request.
:type filter_variable: string
:param filter_variable:
Use one of the following variables to filter a list of `DataSource`:
+ `CreatedAt` - Sets the search criteria to `DataSource` creation
dates.
+ `Status` - Sets the search criteria to `DataSource` statuses.
        + `Name` - Sets the search criteria to the contents of `DataSource`
          `Name`.
+ `DataUri` - Sets the search criteria to the URI of data files used to
create the `DataSource`. The URI can identify either a file or an
Amazon Simple Storage Service (Amazon S3) bucket or directory.
+ `IAMUser` - Sets the search criteria to the user account that invoked
the `DataSource` creation.
:type eq: string
:param eq: The equal to operator. The `DataSource` results will have
`FilterVariable` values that exactly match the value specified with
`EQ`.
:type gt: string
:param gt: The greater than operator. The `DataSource` results will
have `FilterVariable` values that are greater than the value
specified with `GT`.
:type lt: string
:param lt: The less than operator. The `DataSource` results will have
`FilterVariable` values that are less than the value specified with
`LT`.
:type ge: string
:param ge: The greater than or equal to operator. The `DataSource`
results will have `FilterVariable` values that are greater than or
equal to the value specified with `GE`.
:type le: string
:param le: The less than or equal to operator. The `DataSource` results
will have `FilterVariable` values that are less than or equal to
the value specified with `LE`.
:type ne: string
:param ne: The not equal to operator. The `DataSource` results will
have `FilterVariable` values not equal to the value specified with
`NE`.
:type prefix: string
:param prefix:
A string that is found at the beginning of a variable, such as `Name`
or `Id`.
For example, a `DataSource` could have the `Name`
`2014-09-09-HolidayGiftMailer`. To search for this `DataSource`,
select `Name` for the `FilterVariable` and any of the following
strings for the `Prefix`:
+ 2014-09
+ 2014-09-09
+ 2014-09-09-Holiday
:type sort_order: string
:param sort_order: A two-value parameter that determines the sequence
of the resulting list of `DataSource`.
+ `asc` - Arranges the list in ascending order (A-Z, 0-9).
+ `dsc` - Arranges the list in descending order (Z-A, 9-0).
Results are sorted by `FilterVariable`.
:type next_token: string
:param next_token: The ID of the page in the paginated results.
:type limit: integer
:param limit: The maximum number of `DataSource` to include in the
result.
"""
params = {}
if filter_variable is not None:
params['FilterVariable'] = filter_variable
if eq is not None:
params['EQ'] = eq
if gt is not None:
params['GT'] = gt
if lt is not None:
params['LT'] = lt
if ge is not None:
params['GE'] = ge
if le is not None:
params['LE'] = le
if ne is not None:
params['NE'] = ne
if prefix is not None:
params['Prefix'] = prefix
if sort_order is not None:
params['SortOrder'] = sort_order
if next_token is not None:
params['NextToken'] = next_token
if limit is not None:
params['Limit'] = limit
return self.make_request(action='DescribeDataSources',
body=json.dumps(params))
def describe_evaluations(self, filter_variable=None, eq=None, gt=None,
lt=None, ge=None, le=None, ne=None, prefix=None,
sort_order=None, next_token=None, limit=None):
"""
        Returns a list of `Evaluation` objects that match the search
        criteria in the request.
:type filter_variable: string
:param filter_variable:
        Use one of the following variables to filter a list of `Evaluation`
objects:
+ `CreatedAt` - Sets the search criteria to the `Evaluation` creation
date.
+ `Status` - Sets the search criteria to the `Evaluation` status.
        + `Name` - Sets the search criteria to the contents of `Evaluation`
          `Name`.
+ `IAMUser` - Sets the search criteria to the user account that invoked
an `Evaluation`.
+ `MLModelId` - Sets the search criteria to the `MLModel` that was
evaluated.
+ `DataSourceId` - Sets the search criteria to the `DataSource` used in
`Evaluation`.
+ `DataUri` - Sets the search criteria to the data file(s) used in
`Evaluation`. The URL can identify either a file or an Amazon
          Simple Storage Service (Amazon S3) bucket or directory.
:type eq: string
:param eq: The equal to operator. The `Evaluation` results will have
`FilterVariable` values that exactly match the value specified with
`EQ`.
:type gt: string
:param gt: The greater than operator. The `Evaluation` results will
have `FilterVariable` values that are greater than the value
specified with `GT`.
:type lt: string
:param lt: The less than operator. The `Evaluation` results will have
`FilterVariable` values that are less than the value specified with
`LT`.
:type ge: string
:param ge: The greater than or equal to operator. The `Evaluation`
results will have `FilterVariable` values that are greater than or
equal to the value specified with `GE`.
:type le: string
:param le: The less than or equal to operator. The `Evaluation` results
will have `FilterVariable` values that are less than or equal to
the value specified with `LE`.
:type ne: string
:param ne: The not equal to operator. The `Evaluation` results will
have `FilterVariable` values not equal to the value specified with
`NE`.
:type prefix: string
:param prefix:
A string that is found at the beginning of a variable, such as `Name`
or `Id`.
For example, an `Evaluation` could have the `Name`
`2014-09-09-HolidayGiftMailer`. To search for this `Evaluation`,
select `Name` for the `FilterVariable` and any of the following
strings for the `Prefix`:
+ 2014-09
+ 2014-09-09
+ 2014-09-09-Holiday
:type sort_order: string
:param sort_order: A two-value parameter that determines the sequence
of the resulting list of `Evaluation`.
+ `asc` - Arranges the list in ascending order (A-Z, 0-9).
+ `dsc` - Arranges the list in descending order (Z-A, 9-0).
Results are sorted by `FilterVariable`.
:type next_token: string
:param next_token: The ID of the page in the paginated results.
:type limit: integer
:param limit: The maximum number of `Evaluation` to include in the
result.
"""
params = {}
if filter_variable is not None:
params['FilterVariable'] = filter_variable
if eq is not None:
params['EQ'] = eq
if gt is not None:
params['GT'] = gt
if lt is not None:
params['LT'] = lt
if ge is not None:
params['GE'] = ge
if le is not None:
params['LE'] = le
if ne is not None:
params['NE'] = ne
if prefix is not None:
params['Prefix'] = prefix
if sort_order is not None:
params['SortOrder'] = sort_order
if next_token is not None:
params['NextToken'] = next_token
if limit is not None:
params['Limit'] = limit
return self.make_request(action='DescribeEvaluations',
body=json.dumps(params))
def describe_ml_models(self, filter_variable=None, eq=None, gt=None,
lt=None, ge=None, le=None, ne=None, prefix=None,
sort_order=None, next_token=None, limit=None):
"""
Returns a list of `MLModel` that match the search criteria in
the request.
:type filter_variable: string
:param filter_variable:
Use one of the following variables to filter a list of `MLModel`:
+ `CreatedAt` - Sets the search criteria to `MLModel` creation date.
+ `Status` - Sets the search criteria to `MLModel` status.
        + `Name` - Sets the search criteria to the contents of `MLModel`
          `Name`.
+ `IAMUser` - Sets the search criteria to the user account that invoked
the `MLModel` creation.
+ `TrainingDataSourceId` - Sets the search criteria to the `DataSource`
used to train one or more `MLModel`.
+ `RealtimeEndpointStatus` - Sets the search criteria to the `MLModel`
real-time endpoint status.
+ `MLModelType` - Sets the search criteria to `MLModel` type: binary,
regression, or multi-class.
+ `Algorithm` - Sets the search criteria to the algorithm that the
`MLModel` uses.
+ `TrainingDataURI` - Sets the search criteria to the data file(s) used
in training a `MLModel`. The URL can identify either a file or an
Amazon Simple Storage Service (Amazon S3) bucket or directory.
:type eq: string
:param eq: The equal to operator. The `MLModel` results will have
`FilterVariable` values that exactly match the value specified with
`EQ`.
:type gt: string
:param gt: The greater than operator. The `MLModel` results will have
`FilterVariable` values that are greater than the value specified
with `GT`.
:type lt: string
:param lt: The less than operator. The `MLModel` results will have
`FilterVariable` values that are less than the value specified with
`LT`.
:type ge: string
:param ge: The greater than or equal to operator. The `MLModel` results
will have `FilterVariable` values that are greater than or equal to
the value specified with `GE`.
:type le: string
:param le: The less than or equal to operator. The `MLModel` results
will have `FilterVariable` values that are less than or equal to
the value specified with `LE`.
:type ne: string
:param ne: The not equal to operator. The `MLModel` results will have
`FilterVariable` values not equal to the value specified with `NE`.
:type prefix: string
:param prefix:
A string that is found at the beginning of a variable, such as `Name`
or `Id`.
For example, an `MLModel` could have the `Name`
`2014-09-09-HolidayGiftMailer`. To search for this `MLModel`,
select `Name` for the `FilterVariable` and any of the following
strings for the `Prefix`:
+ 2014-09
+ 2014-09-09
+ 2014-09-09-Holiday
:type sort_order: string
:param sort_order: A two-value parameter that determines the sequence
of the resulting list of `MLModel`.
+ `asc` - Arranges the list in ascending order (A-Z, 0-9).
+ `dsc` - Arranges the list in descending order (Z-A, 9-0).
Results are sorted by `FilterVariable`.
:type next_token: string
:param next_token: The ID of the page in the paginated results.
:type limit: integer
:param limit: The number of pages of information to include in the
result. The range of acceptable values is 1 through 100. The
default value is 100.
"""
params = {}
if filter_variable is not None:
params['FilterVariable'] = filter_variable
if eq is not None:
params['EQ'] = eq
if gt is not None:
params['GT'] = gt
if lt is not None:
params['LT'] = lt
if ge is not None:
params['GE'] = ge
if le is not None:
params['LE'] = le
if ne is not None:
params['NE'] = ne
if prefix is not None:
params['Prefix'] = prefix
if sort_order is not None:
params['SortOrder'] = sort_order
if next_token is not None:
params['NextToken'] = next_token
if limit is not None:
params['Limit'] = limit
return self.make_request(action='DescribeMLModels',
body=json.dumps(params))
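    # A pagination sketch, assuming the documented response shape (a
    # 'Results' list plus an optional 'NextToken'):
    #
    #   token = None
    #   while True:
    #       kwargs = {'limit': 100}
    #       if token:
    #           kwargs['next_token'] = token
    #       page = conn.describe_ml_models(**kwargs)
    #       for model in page.get('Results', []):
    #           print(model['MLModelId'])
    #       token = page.get('NextToken')
    #       if not token:
    #           break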
def get_batch_prediction(self, batch_prediction_id):
"""
Returns a `BatchPrediction` that includes detailed metadata,
status, and data file information for a `Batch Prediction`
request.
:type batch_prediction_id: string
:param batch_prediction_id: An ID assigned to the `BatchPrediction` at
creation.
"""
params = {'BatchPredictionId': batch_prediction_id, }
return self.make_request(action='GetBatchPrediction',
body=json.dumps(params))
def get_data_source(self, data_source_id, verbose=None):
"""
Returns a `DataSource` that includes metadata and data file
information, as well as the current status of the
`DataSource`.
`GetDataSource` provides results in normal or verbose format.
The verbose format adds the schema description and the list of
files pointed to by the DataSource to the normal format.
:type data_source_id: string
:param data_source_id: The ID assigned to the `DataSource` at creation.
:type verbose: boolean
:param verbose: Specifies whether the `GetDataSource` operation should
return `DataSourceSchema`.
If true, `DataSourceSchema` is returned.
If false, `DataSourceSchema` is not returned.
"""
params = {'DataSourceId': data_source_id, }
if verbose is not None:
params['Verbose'] = verbose
return self.make_request(action='GetDataSource',
body=json.dumps(params))
def get_evaluation(self, evaluation_id):
"""
Returns an `Evaluation` that includes metadata as well as the
current status of the `Evaluation`.
:type evaluation_id: string
:param evaluation_id: The ID of the `Evaluation` to retrieve. The
evaluation of each `MLModel` is recorded and cataloged. The ID
provides the means to access the information.
"""
params = {'EvaluationId': evaluation_id, }
return self.make_request(action='GetEvaluation',
body=json.dumps(params))
def get_ml_model(self, ml_model_id, verbose=None):
"""
Returns an `MLModel` that includes detailed metadata, and data
source information as well as the current status of the
`MLModel`.
`GetMLModel` provides results in normal or verbose format.
:type ml_model_id: string
:param ml_model_id: The ID assigned to the `MLModel` at creation.
:type verbose: boolean
:param verbose: Specifies whether the `GetMLModel` operation should
return `Recipe`.
If true, `Recipe` is returned.
If false, `Recipe` is not returned.
"""
params = {'MLModelId': ml_model_id, }
if verbose is not None:
params['Verbose'] = verbose
return self.make_request(action='GetMLModel',
body=json.dumps(params))
def predict(self, ml_model_id, record, predict_endpoint):
"""
Generates a prediction for the observation using the specified
`MLModel`.
        Not all response parameters will be populated; the fields returned
        depend on the type of the requested model.
:type ml_model_id: string
:param ml_model_id: A unique identifier of the `MLModel`.
:type record: map
:param record: A map of variable name-value pairs that represent an
observation.
:type predict_endpoint: string
:param predict_endpoint: The endpoint to send the predict request to.
"""
predict_host = urlsplit(predict_endpoint).hostname
if predict_host is None:
predict_host = predict_endpoint
params = {
'MLModelId': ml_model_id,
'Record': record,
'PredictEndpoint': predict_host,
}
return self.make_request(action='Predict',
body=json.dumps(params),
host=predict_host)
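    # A hedged prediction sketch: the real-time endpoint is assumed to come
    # from the MLModel metadata (EndpointInfo.EndpointUrl) returned by
    # get_ml_model; the IDs and record fields are hypothetical.
    #
    #   model = conn.get_ml_model('ml-EXAMPLEID')
    #   endpoint = model['EndpointInfo']['EndpointUrl']
    #   result = conn.predict(ml_model_id='ml-EXAMPLEID',
    #                         record={'age': '32', 'plan': 'premium'},
    #                         predict_endpoint=endpoint)
    #   print(result['Prediction'])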
def update_batch_prediction(self, batch_prediction_id,
batch_prediction_name):
"""
Updates the `BatchPredictionName` of a `BatchPrediction`.
You can use the GetBatchPrediction operation to view the
contents of the updated data element.
:type batch_prediction_id: string
:param batch_prediction_id: The ID assigned to the `BatchPrediction`
during creation.
:type batch_prediction_name: string
:param batch_prediction_name: A new user-supplied name or description
of the `BatchPrediction`.
"""
params = {
'BatchPredictionId': batch_prediction_id,
'BatchPredictionName': batch_prediction_name,
}
return self.make_request(action='UpdateBatchPrediction',
body=json.dumps(params))
def update_data_source(self, data_source_id, data_source_name):
"""
Updates the `DataSourceName` of a `DataSource`.
You can use the GetDataSource operation to view the contents
of the updated data element.
:type data_source_id: string
:param data_source_id: The ID assigned to the `DataSource` during
creation.
:type data_source_name: string
:param data_source_name: A new user-supplied name or description of the
`DataSource` that will replace the current description.
"""
params = {
'DataSourceId': data_source_id,
'DataSourceName': data_source_name,
}
return self.make_request(action='UpdateDataSource',
body=json.dumps(params))
def update_evaluation(self, evaluation_id, evaluation_name):
"""
Updates the `EvaluationName` of an `Evaluation`.
You can use the GetEvaluation operation to view the contents
of the updated data element.
:type evaluation_id: string
:param evaluation_id: The ID assigned to the `Evaluation` during
creation.
:type evaluation_name: string
:param evaluation_name: A new user-supplied name or description of the
`Evaluation` that will replace the current content.
"""
params = {
'EvaluationId': evaluation_id,
'EvaluationName': evaluation_name,
}
return self.make_request(action='UpdateEvaluation',
body=json.dumps(params))
def update_ml_model(self, ml_model_id, ml_model_name=None,
score_threshold=None):
"""
Updates the `MLModelName` and the `ScoreThreshold` of an
`MLModel`.
You can use the GetMLModel operation to view the contents of
the updated data element.
:type ml_model_id: string
:param ml_model_id: The ID assigned to the `MLModel` during creation.
:type ml_model_name: string
:param ml_model_name: A user-supplied name or description of the
`MLModel`.
:type score_threshold: float
:param score_threshold: The `ScoreThreshold` used in binary
classification `MLModel` that marks the boundary between a positive
prediction and a negative prediction.
Output values greater than or equal to the `ScoreThreshold` receive a
positive result from the `MLModel`, such as `True`. Output values
less than the `ScoreThreshold` receive a negative response from the
`MLModel`, such as `False`.
"""
params = {'MLModelId': ml_model_id, }
if ml_model_name is not None:
params['MLModelName'] = ml_model_name
if score_threshold is not None:
params['ScoreThreshold'] = score_threshold
return self.make_request(action='UpdateMLModel',
body=json.dumps(params))
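    # A score-threshold sketch for a binary MLModel (hypothetical ID):
    # raising the threshold trades recall for precision.
    #
    #   conn.update_ml_model('ml-EXAMPLEID', score_threshold=0.75)
    #   # Scores >= 0.75 now map to the positive label; lower scores map
    #   # to the negative label.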
def make_request(self, action, body, host=None):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
        http_request_kwargs = {
            'method': 'POST', 'path': '/', 'auth_path': '/', 'params': {},
            'headers': headers, 'data': body,
        }
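        # The Predict action must go to the model's real-time endpoint
        # rather than the regional API endpoint, so callers may override
        # the request host.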
if host is not None:
headers['Host'] = host
http_request_kwargs['host'] = host
http_request = self.build_base_http_request(**http_request_kwargs)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
# ---- end of boto/machinelearning/layer1.py (vendored) ----
# ---- boto/cloudsearchdomain/layer1.py ----
from boto.compat import json
from boto.exception import JSONResponseError
from boto.connection import AWSAuthConnection
from boto.regioninfo import RegionInfo
from boto.cloudsearchdomain import exceptions
class CloudSearchDomainConnection(AWSAuthConnection):
"""
You use the AmazonCloudSearch2013 API to upload documents to a
search domain and search those documents.
The endpoints for submitting `UploadDocuments`, `Search`, and
`Suggest` requests are domain-specific. To get the endpoints for
your domain, use the Amazon CloudSearch configuration service
`DescribeDomains` action. The domain endpoints are also displayed
on the domain dashboard in the Amazon CloudSearch console. You
submit suggest requests to the search endpoint.
For more information, see the `Amazon CloudSearch Developer
Guide`_.
"""
APIVersion = "2013-01-01"
AuthServiceName = 'cloudsearch'
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "cloudsearch.us-east-1.amazonaws.com"
ResponseError = JSONResponseError
_faults = {
"SearchException": exceptions.SearchException,
"DocumentServiceException": exceptions.DocumentServiceException,
}
def __init__(self, **kwargs):
region = kwargs.get('region')
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
else:
del kwargs['region']
if kwargs.get('host', None) is None:
raise ValueError(
'The argument, host, must be provided when creating a '
'CloudSearchDomainConnection because its methods require the '
'specific domain\'s endpoint in order to successfully make '
'requests to that CloudSearch Domain.'
)
super(CloudSearchDomainConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def search(self, query, cursor=None, expr=None, facet=None,
filter_query=None, highlight=None, partial=None,
query_options=None, query_parser=None, ret=None, size=None,
sort=None, start=None):
"""
Retrieves a list of documents that match the specified search
criteria. How you specify the search criteria depends on which
query parser you use. Amazon CloudSearch supports four query
parsers:
+ `simple`: search all `text` and `text-array` fields for the
specified string. Search for phrases, individual terms, and
prefixes.
+ `structured`: search specific fields, construct compound
queries using Boolean operators, and use advanced features
such as term boosting and proximity searching.
+ `lucene`: specify search criteria using the Apache Lucene
query parser syntax.
+ `dismax`: specify search criteria using the simplified
subset of the Apache Lucene query parser syntax defined by the
DisMax query parser.
For more information, see `Searching Your Data`_ in the Amazon
CloudSearch Developer Guide .
The endpoint for submitting `Search` requests is domain-
specific. You submit search requests to a domain's search
endpoint. To get the search endpoint for your domain, use the
Amazon CloudSearch configuration service `DescribeDomains`
action. A domain's endpoints are also displayed on the domain
dashboard in the Amazon CloudSearch console.
:type cursor: string
:param cursor: Retrieves a cursor value you can use to page through
large result sets. Use the `size` parameter to control the number
of hits to include in each response. You can specify either the
`cursor` or `start` parameter in a request; they are mutually
exclusive. To get the first cursor, set the cursor value to
`initial`. In subsequent requests, specify the cursor value
returned in the hits section of the response.
For more information, see `Paginating Results`_ in the Amazon
CloudSearch Developer Guide .
:type expr: string
:param expr: Defines one or more numeric expressions that can be used
to sort results or specify search or filter criteria. You can also
specify expressions as return fields.
For more information about defining and using expressions, see
`Configuring Expressions`_ in the Amazon CloudSearch Developer
Guide .
:type facet: string
:param facet: Specifies one or more fields for which to get facet
information, and options that control how the facet information is
returned. Each specified field must be facet-enabled in the domain
configuration. The fields and options are specified in JSON using
the form `{"FIELD":{"OPTION":VALUE,"OPTION:"STRING"},"FIELD":{"OPTI
ON":VALUE,"OPTION":"STRING"}}`.
You can specify the following faceting options:
+ `buckets` specifies an array of the facet values or ranges to count.
Ranges are specified using the same syntax that you use to search
for a range of values. For more information, see ` Searching for a
Range of Values`_ in the Amazon CloudSearch Developer Guide .
Buckets are returned in the order they are specified in the
request. The `sort` and `size` options are not valid if you specify
`buckets`.
+ `size` specifies the maximum number of facets to include in the
results. By default, Amazon CloudSearch returns counts for the top
10. The `size` parameter is only valid when you specify the `sort`
option; it cannot be used in conjunction with `buckets`.
+ `sort` specifies how you want to sort the facets in the results:
`bucket` or `count`. Specify `bucket` to sort alphabetically or
numerically by facet value (in ascending order). Specify `count` to
sort by the facet counts computed for each facet value (in
descending order). To retrieve facet counts for particular values
or ranges of values, use the `buckets` option instead of `sort`.
If no facet options are specified, facet counts are computed for all
field values, the facets are sorted by facet count, and the top 10
facets are returned in the results.
For more information, see `Getting and Using Facet Information`_ in the
Amazon CloudSearch Developer Guide .
:type filter_query: string
:param filter_query: Specifies a structured query that filters the
results of a search without affecting how the results are scored
and sorted. You use `filterQuery` in conjunction with the `query`
parameter to filter the documents that match the constraints
specified in the `query` parameter. Specifying a filter controls
only which matching documents are included in the results, it has
no effect on how they are scored and sorted. The `filterQuery`
parameter supports the full structured query syntax.
For more information about using filters, see `Filtering Matching
Documents`_ in the Amazon CloudSearch Developer Guide .
:type highlight: string
:param highlight: Retrieves highlights for matches in the specified
`text` or `text-array` fields. Each specified field must be
highlight enabled in the domain configuration. The fields and
            options are specified in JSON using the form
            `{"FIELD":{"OPTION":VALUE,"OPTION":"STRING"},
            "FIELD":{"OPTION":VALUE,"OPTION":"STRING"}}`.
You can specify the following highlight options:
+ `format`: specifies the format of the data in the text field: `text`
or `html`. When data is returned as HTML, all non-alphanumeric
characters are encoded. The default is `html`.
+ `max_phrases`: specifies the maximum number of occurrences of the
search term(s) you want to highlight. By default, the first
occurrence is highlighted.
+ `pre_tag`: specifies the string to prepend to an occurrence of a
search term. The default for HTML highlights is `<em>`. The
default for text highlights is `*`.
+ `post_tag`: specifies the string to append to an occurrence of a
search term. The default for HTML highlights is `</em>`. The
default for text highlights is `*`.
If no highlight options are specified for a field, the returned field
text is treated as HTML and the first match is highlighted with
emphasis tags: `<em>search-term</em>`.
:type partial: boolean
:param partial: Enables partial results to be returned if one or more
index partitions are unavailable. When your search index is
partitioned across multiple search instances, by default Amazon
CloudSearch only returns results if every partition can be queried.
This means that the failure of a single search instance can result
in 5xx (internal server) errors. When you enable partial results,
Amazon CloudSearch returns whatever results are available and
includes the percentage of documents searched in the search results
(percent-searched). This enables you to more gracefully degrade
your users' search experience. For example, rather than displaying
no results, you could display the partial results and a message
indicating that the results might be incomplete due to a temporary
system outage.
:type query: string
:param query: Specifies the search criteria for the request. How you
specify the search criteria depends on the query parser used for
the request and the parser options specified in the `queryOptions`
parameter. By default, the `simple` query parser is used to process
requests. To use the `structured`, `lucene`, or `dismax` query
parser, you must also specify the `queryParser` parameter.
For more information about specifying search criteria, see `Searching
Your Data`_ in the Amazon CloudSearch Developer Guide .
:type query_options: string
:param query_options:
Configures options for the query parser specified in the `queryParser`
parameter.
The options you can configure vary according to which parser you use:
+ `defaultOperator`: The default operator used to combine individual
terms in the search string. For example: `defaultOperator: 'or'`.
For the `dismax` parser, you specify a percentage that represents
the percentage of terms in the search string (rounded down) that
must match, rather than a default operator. A value of `0%` is the
equivalent to OR, and a value of `100%` is equivalent to AND. The
percentage must be specified as a value in the range 0-100 followed
by the percent (%) symbol. For example, `defaultOperator: 50%`.
Valid values: `and`, `or`, a percentage in the range 0%-100% (
`dismax`). Default: `and` ( `simple`, `structured`, `lucene`) or
`100` ( `dismax`). Valid for: `simple`, `structured`, `lucene`, and
`dismax`.
+ `fields`: An array of the fields to search when no fields are
specified in a search. If no fields are specified in a search and
this option is not specified, all text and text-array fields are
searched. You can specify a weight for each field to control the
relative importance of each field when Amazon CloudSearch
calculates relevance scores. To specify a field weight, append a
caret ( `^`) symbol and the weight to the field name. For example,
to boost the importance of the `title` field over the `description`
field you could specify: `"fields":["title^5","description"]`.
Valid values: The name of any configured field and an optional
numeric value greater than zero. Default: All `text` and `text-
array` fields. Valid for: `simple`, `structured`, `lucene`, and
`dismax`.
+ `operators`: An array of the operators or special characters you want
to disable for the simple query parser. If you disable the `and`,
`or`, or `not` operators, the corresponding operators ( `+`, `|`,
`-`) have no special meaning and are dropped from the search
string. Similarly, disabling `prefix` disables the wildcard
operator ( `*`) and disabling `phrase` disables the ability to
search for phrases by enclosing phrases in double quotes. Disabling
precedence disables the ability to control order of precedence
using parentheses. Disabling `near` disables the ability to use the
~ operator to perform a sloppy phrase search. Disabling the `fuzzy`
operator disables the ability to use the ~ operator to perform a
fuzzy search. `escape` disables the ability to use a backslash (
`\`) to escape special characters within the search string.
Disabling whitespace is an advanced option that prevents the parser
from tokenizing on whitespace, which can be useful for Vietnamese.
(It prevents Vietnamese words from being split incorrectly.) For
example, you could disable all operators other than the phrase
operator to support just simple term and phrase queries:
`"operators":["and","not","or", "prefix"]`. Valid values: `and`,
`escape`, `fuzzy`, `near`, `not`, `or`, `phrase`, `precedence`,
`prefix`, `whitespace`. Default: All operators and special
characters are enabled. Valid for: `simple`.
+ `phraseFields`: An array of the `text` or `text-array` fields you
want to use for phrase searches. When the terms in the search
string appear in close proximity within a field, the field scores
higher. You can specify a weight for each field to boost that
score. The `phraseSlop` option controls how much the matches can
deviate from the search string and still be boosted. To specify a
field weight, append a caret ( `^`) symbol and the weight to the
field name. For example, to boost phrase matches in the `title`
field over the `abstract` field, you could specify:
`"phraseFields":["title^3", "plot"]` Valid values: The name of any
`text` or `text-array` field and an optional numeric value greater
than zero. Default: No fields. If you don't specify any fields with
`phraseFields`, proximity scoring is disabled even if `phraseSlop`
is specified. Valid for: `dismax`.
+ `phraseSlop`: An integer value that specifies how much matches can
deviate from the search phrase and still be boosted according to
the weights specified in the `phraseFields` option; for example,
`phraseSlop: 2`. You must also specify `phraseFields` to enable
proximity scoring. Valid values: positive integers. Default: 0.
Valid for: `dismax`.
+ `explicitPhraseSlop`: An integer value that specifies how much a
match can deviate from the search phrase when the phrase is
enclosed in double quotes in the search string. (Phrases that
exceed this proximity distance are not considered a match.) For
example, to specify a slop of three for dismax phrase queries, you
would specify `"explicitPhraseSlop":3`. Valid values: positive
integers. Default: 0. Valid for: `dismax`.
+ `tieBreaker`: When a term in the search string is found in a
document's field, a score is calculated for that field based on how
common the word is in that field compared to other documents. If
the term occurs in multiple fields within a document, by default
only the highest scoring field contributes to the document's
overall score. You can specify a `tieBreaker` value to enable the
matches in lower-scoring fields to contribute to the document's
score. That way, if two documents have the same max field score for
a particular term, the score for the document that has matches in
more fields will be higher. The formula for calculating the score
with a tieBreaker is `(max field score) + (tieBreaker) * (sum of
the scores for the rest of the matching fields)`. Set `tieBreaker`
to 0 to disregard all but the highest scoring field (pure max):
`"tieBreaker":0`. Set to 1 to sum the scores from all fields (pure
sum): `"tieBreaker":1`. Valid values: 0.0 to 1.0. Default: 0.0.
Valid for: `dismax`.
:type query_parser: string
:param query_parser:
Specifies which query parser to use to process the request. If
`queryParser` is not specified, Amazon CloudSearch uses the
`simple` query parser.
Amazon CloudSearch supports four query parsers:
+ `simple`: perform simple searches of `text` and `text-array` fields.
By default, the `simple` query parser searches all `text` and
`text-array` fields. You can specify which fields to search by with
the `queryOptions` parameter. If you prefix a search term with a
          plus sign (+), documents must contain the term to be considered a
match. (This is the default, unless you configure the default
operator with the `queryOptions` parameter.) You can use the `-`
(NOT), `|` (OR), and `*` (wildcard) operators to exclude particular
terms, find results that match any of the specified terms, or
search for a prefix. To search for a phrase rather than individual
terms, enclose the phrase in double quotes. For more information,
see `Searching for Text`_ in the Amazon CloudSearch Developer Guide
.
+ `structured`: perform advanced searches by combining multiple
expressions to define the search criteria. You can also search
within particular fields, search for values and ranges of values,
and use advanced options such as term boosting, `matchall`, and
`near`. For more information, see `Constructing Compound Queries`_
in the Amazon CloudSearch Developer Guide .
+ `lucene`: search using the Apache Lucene query parser syntax. For
more information, see `Apache Lucene Query Parser Syntax`_.
+ `dismax`: search using the simplified subset of the Apache Lucene
query parser syntax defined by the DisMax query parser. For more
information, see `DisMax Query Parser Syntax`_.
:type ret: string
:param ret: Specifies the field and expression values to include in
the response. Multiple fields or expressions are specified as a
comma-separated list. By default, a search response includes all
return enabled fields ( `_all_fields`). To return only the document
IDs for the matching documents, specify `_no_fields`. To retrieve
the relevance score calculated for each document, specify `_score`.
:type size: long
:param size: Specifies the maximum number of search hits to include in
the response.
:type sort: string
:param sort: Specifies the fields or custom expressions to use to sort
the search results. Multiple fields or expressions are specified as
a comma-separated list. You must specify the sort direction ( `asc`
or `desc`) for each field; for example, `year desc,title asc`. To
use a field to sort results, the field must be sort-enabled in the
domain configuration. Array type fields cannot be used for sorting.
If no `sort` parameter is specified, results are sorted by their
default relevance scores in descending order: `_score desc`. You
can also sort by document ID ( `_id asc`) and version ( `_version
desc`).
For more information, see `Sorting Results`_ in the Amazon CloudSearch
Developer Guide .
:type start: long
:param start: Specifies the offset of the first search hit you want to
return. Note that the result set is zero-based; the first result is
at index 0. You can specify either the `start` or `cursor`
parameter in a request, they are mutually exclusive.
For more information, see `Paginating Results`_ in the Amazon
CloudSearch Developer Guide .
"""
uri = '/2013-01-01/search'
params = {}
headers = {}
query_params = {}
if cursor is not None:
query_params['cursor'] = cursor
if expr is not None:
query_params['expr'] = expr
if facet is not None:
query_params['facet'] = facet
if filter_query is not None:
query_params['fq'] = filter_query
if highlight is not None:
query_params['highlight'] = highlight
if partial is not None:
query_params['partial'] = partial
if query is not None:
query_params['q'] = query
if query_options is not None:
query_params['q.options'] = query_options
if query_parser is not None:
query_params['q.parser'] = query_parser
if ret is not None:
query_params['return'] = ret
if size is not None:
query_params['size'] = size
if sort is not None:
query_params['sort'] = sort
if start is not None:
query_params['start'] = start
return self.make_request('POST', uri, expected_status=200,
data=json.dumps(params), headers=headers,
params=query_params)
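    # A search sketch (hypothetical domain endpoint, field names, and
    # facet): a structured query with a facet computed on 'genres', using
    # the JSON facet syntax described above.
    #
    #   conn = CloudSearchDomainConnection(
    #       host='search-mydomain-xxxx.us-east-1.cloudsearch.amazonaws.com')
    #   results = conn.search(query="(and title:'star' year:{,2000])",
    #                         query_parser='structured',
    #                         facet='{"genres":{"sort":"count","size":5}}',
    #                         size=10)
    #   for hit in results['hits']['hit']:
    #       print(hit['id'])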
def suggest(self, query, suggester, size=None):
"""
Retrieves autocomplete suggestions for a partial query string.
        Suggestions enable you to display likely matches before users
        finish typing. In Amazon CloudSearch, suggestions
are based on the contents of a particular text field. When you
request suggestions, Amazon CloudSearch finds all of the
documents whose values in the suggester field start with the
specified query string. The beginning of the field must match
the query string to be considered a match.
For more information about configuring suggesters and
retrieving suggestions, see `Getting Suggestions`_ in the
Amazon CloudSearch Developer Guide .
The endpoint for submitting `Suggest` requests is domain-
specific. You submit suggest requests to a domain's search
endpoint. To get the search endpoint for your domain, use the
Amazon CloudSearch configuration service `DescribeDomains`
action. A domain's endpoints are also displayed on the domain
dashboard in the Amazon CloudSearch console.
:type query: string
:param query: Specifies the string for which you want to get
suggestions.
:type suggester: string
:param suggester: Specifies the name of the suggester to use to find
suggested matches.
:type size: long
:param size: Specifies the maximum number of suggestions to return.
"""
uri = '/2013-01-01/suggest'
params = {}
headers = {}
query_params = {}
if query is not None:
query_params['q'] = query
if suggester is not None:
query_params['suggester'] = suggester
if size is not None:
query_params['size'] = size
return self.make_request('GET', uri, expected_status=200,
data=json.dumps(params), headers=headers,
params=query_params)
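    # A suggest sketch, assuming a suggester named 'title_suggester' has
    # been configured on the domain:
    #
    #   out = conn.suggest(query='sta', suggester='title_suggester', size=5)
    #   for s in out['suggest']['suggestions']:
    #       print(s['suggestion'])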
def upload_documents(self, documents, content_type):
"""
Posts a batch of documents to a search domain for indexing. A
document batch is a collection of add and delete operations
that represent the documents you want to add, update, or
delete from your domain. Batches can be described in either
JSON or XML. Each item that you want Amazon CloudSearch to
return as a search result (such as a product) is represented
as a document. Every document has a unique ID and one or more
fields that contain the data that you want to search and
return in results. Individual documents cannot contain more
than 1 MB of data. The entire batch cannot exceed 5 MB. To get
the best possible upload performance, group add and delete
        operations in batches that are close to the 5 MB limit.
Submitting a large volume of single-document batches can
overload a domain's document service.
The endpoint for submitting `UploadDocuments` requests is
domain-specific. To get the document endpoint for your domain,
use the Amazon CloudSearch configuration service
`DescribeDomains` action. A domain's endpoints are also
displayed on the domain dashboard in the Amazon CloudSearch
console.
For more information about formatting your data for Amazon
CloudSearch, see `Preparing Your Data`_ in the Amazon
CloudSearch Developer Guide . For more information about
uploading data for indexing, see `Uploading Data`_ in the
Amazon CloudSearch Developer Guide .
:type documents: blob
        :param documents: A batch of documents formatted in JSON or XML.
:type content_type: string
:param content_type:
The format of the batch you are uploading. Amazon CloudSearch supports
two document batch formats:
+ application/json
+ application/xml
"""
uri = '/2013-01-01/documents/batch'
headers = {}
query_params = {}
if content_type is not None:
headers['Content-Type'] = content_type
return self.make_request('POST', uri, expected_status=200,
data=documents, headers=headers,
params=query_params)
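    # A document-batch sketch: a JSON batch is a list of add/delete
    # operations, each add carrying an 'id' and a 'fields' map
    # (hypothetical document contents).
    #
    #   batch = json.dumps([
    #       {'type': 'add', 'id': 'doc-1',
    #        'fields': {'title': 'Star Wars', 'year': 1977}},
    #       {'type': 'delete', 'id': 'doc-2'},
    #   ])
    #   conn.upload_documents(batch, 'application/json')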
def make_request(self, verb, resource, headers=None, data='',
expected_status=None, params=None):
if headers is None:
headers = {}
response = AWSAuthConnection.make_request(
self, verb, resource, headers=headers, data=data, params=params)
body = json.loads(response.read().decode('utf-8'))
if response.status == expected_status:
return body
else:
raise JSONResponseError(response.status, response.reason, body)
# ---- end of boto/cloudsearchdomain/layer1.py (vendored) ----
# ---- boto/kinesis/layer1.py ----
import base64
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.kinesis import exceptions
from boto.compat import json
from boto.compat import six
class KinesisConnection(AWSQueryConnection):
"""
Amazon Kinesis Service API Reference
    Amazon Kinesis is a managed service that scales elastically for
    real-time processing of streaming big data.
"""
APIVersion = "2013-12-02"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "kinesis.us-east-1.amazonaws.com"
ServiceName = "Kinesis"
TargetPrefix = "Kinesis_20131202"
ResponseError = JSONResponseError
_faults = {
"ProvisionedThroughputExceededException": exceptions.ProvisionedThroughputExceededException,
"LimitExceededException": exceptions.LimitExceededException,
"ExpiredIteratorException": exceptions.ExpiredIteratorException,
"ResourceInUseException": exceptions.ResourceInUseException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"InvalidArgumentException": exceptions.InvalidArgumentException,
"SubscriptionRequiredException": exceptions.SubscriptionRequiredException
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(KinesisConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def add_tags_to_stream(self, stream_name, tags):
"""
Adds or updates tags for the specified Amazon Kinesis stream.
Each stream can have up to 10 tags.
If tags have already been assigned to the stream,
`AddTagsToStream` overwrites any existing tags that correspond
to the specified tag keys.
:type stream_name: string
:param stream_name: The name of the stream.
:type tags: map
:param tags: The set of key-value pairs to use to create the tags.
"""
params = {'StreamName': stream_name, 'Tags': tags, }
return self.make_request(action='AddTagsToStream',
body=json.dumps(params))
def create_stream(self, stream_name, shard_count):
"""
        Creates an Amazon Kinesis stream. A stream captures and
        transports data records that are continuously emitted from
        different data sources or producers. Scale-out within an
Amazon Kinesis stream is explicitly supported by means of
shards, which are uniquely identified groups of data records
in an Amazon Kinesis stream.
You specify and control the number of shards that a stream is
composed of. Each open shard can support up to 5 read
transactions per second, up to a maximum total of 2 MB of data
read per second. Each shard can support up to 1000 records
written per second, up to a maximum total of 1 MB data written
per second. You can add shards to a stream if the amount of
data input increases and you can remove shards if the amount
of data input decreases.
The stream name identifies the stream. The name is scoped to
the AWS account used by the application. It is also scoped by
region. That is, two streams in two different accounts can
have the same name, and two streams in the same account, but
in two different regions, can have the same name.
`CreateStream` is an asynchronous operation. Upon receiving a
`CreateStream` request, Amazon Kinesis immediately returns and
sets the stream status to `CREATING`. After the stream is
created, Amazon Kinesis sets the stream status to `ACTIVE`.
You should perform read and write operations only on an
`ACTIVE` stream.
You receive a `LimitExceededException` when making a
`CreateStream` request if you try to do one of the following:
+ Have more than five streams in the `CREATING` state at any
point in time.
+ Create more shards than are authorized for your account.
The default limit for an AWS account is 10 shards per stream.
If you need to create a stream with more than 10 shards,
`contact AWS Support`_ to increase the limit on your account.
You can use `DescribeStream` to check the stream status, which
is returned in `StreamStatus`.
`CreateStream` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: A name to identify the stream. The stream name is
scoped to the AWS account used by the application that creates the
stream. It is also scoped by region. That is, two streams in two
different AWS accounts can have the same name, and two streams in
the same AWS account, but in two different regions, can have the
same name.
:type shard_count: integer
:param shard_count: The number of shards that the stream will use. The
throughput of the stream is a function of the number of shards;
more shards are required for greater provisioned throughput.
**Note:** The default limit for an AWS account is 10 shards per stream.
If you need to create a stream with more than 10 shards, `contact
AWS Support`_ to increase the limit on your account.
"""
params = {
'StreamName': stream_name,
'ShardCount': shard_count,
}
return self.make_request(action='CreateStream',
body=json.dumps(params))
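    # A creation sketch: CreateStream is asynchronous, so poll
    # DescribeStream until the status flips from CREATING to ACTIVE
    # (hypothetical stream name; assumes `import time`).
    #
    #   conn = boto.kinesis.connect_to_region('us-east-1')
    #   conn.create_stream('my-stream', shard_count=2)
    #   while True:
    #       desc = conn.describe_stream('my-stream')
    #       if desc['StreamDescription']['StreamStatus'] == 'ACTIVE':
    #           break
    #       time.sleep(5)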
def delete_stream(self, stream_name):
"""
Deletes a stream and all its shards and data. You must shut
down any applications that are operating on the stream before
you delete the stream. If an application attempts to operate
on a deleted stream, it will receive the exception
`ResourceNotFoundException`.
If the stream is in the `ACTIVE` state, you can delete it.
After a `DeleteStream` request, the specified stream is in the
`DELETING` state until Amazon Kinesis completes the deletion.
**Note:** Amazon Kinesis might continue to accept data read
and write operations, such as PutRecord, PutRecords, and
GetRecords, on a stream in the `DELETING` state until the
stream deletion is complete.
When you delete a stream, any shards in that stream are also
deleted, and any tags are dissociated from the stream.
You can use the DescribeStream operation to check the state of
the stream, which is returned in `StreamStatus`.
`DeleteStream` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream to delete.
"""
params = {'StreamName': stream_name, }
return self.make_request(action='DeleteStream',
body=json.dumps(params))
def describe_stream(self, stream_name, limit=None,
exclusive_start_shard_id=None):
"""
Describes the specified stream.
The information about the stream includes its current status,
its Amazon Resource Name (ARN), and an array of shard objects.
For each shard object, there is information about the hash key
and sequence number ranges that the shard spans, and the IDs
        of any earlier shards that played a role in creating the
shard. A sequence number is the identifier associated with
every record ingested in the Amazon Kinesis stream. The
sequence number is assigned when a record is put into the
stream.
You can limit the number of returned shards using the `Limit`
parameter. The number of shards in a stream may be too large
to return from a single call to `DescribeStream`. You can
detect this by using the `HasMoreShards` flag in the returned
output. `HasMoreShards` is set to `True` when there is more
data available.
`DescribeStream` is a paginated operation. If there are more
shards available, you can request them using the shard ID of
the last shard returned. Specify this ID in the
`ExclusiveStartShardId` parameter in a subsequent request to
`DescribeStream`.
`DescribeStream` has a limit of 10 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream to describe.
:type limit: integer
:param limit: The maximum number of shards to return.
:type exclusive_start_shard_id: string
:param exclusive_start_shard_id: The shard ID of the shard to start
with.
"""
params = {'StreamName': stream_name, }
if limit is not None:
params['Limit'] = limit
if exclusive_start_shard_id is not None:
params['ExclusiveStartShardId'] = exclusive_start_shard_id
return self.make_request(action='DescribeStream',
body=json.dumps(params))
def get_records(self, shard_iterator, limit=None, b64_decode=True):
"""
Gets data records from a shard.
Specify a shard iterator using the `ShardIterator` parameter.
The shard iterator specifies the position in the shard from
which you want to start reading data records sequentially. If
there are no records available in the portion of the shard
that the iterator points to, `GetRecords` returns an empty
list. Note that it might take multiple calls to get to a
portion of the shard that contains records.
You can scale by provisioning multiple shards. Your
application should have one thread per shard, each reading
continuously from its stream. To read from a stream
continually, call `GetRecords` in a loop. Use GetShardIterator
to get the shard iterator to specify in the first `GetRecords`
call. `GetRecords` returns a new shard iterator in
`NextShardIterator`. Specify the shard iterator returned in
`NextShardIterator` in subsequent calls to `GetRecords`. Note
that if the shard has been closed, the shard iterator can't
return more data and `GetRecords` returns `null` in
`NextShardIterator`. You can terminate the loop when the shard
is closed, or when the shard iterator reaches the record with
the sequence number or other attribute that marks it as the
last record to process.
Each data record can be up to 50 KB in size, and each shard
can read up to 2 MB per second. You can ensure that your calls
don't exceed the maximum supported size or throughput by using
the `Limit` parameter to specify the maximum number of records
that `GetRecords` can return. Consider your average record
size when determining this limit. For example, if your average
record size is 40 KB, you can limit the data returned to about
1 MB per call by specifying 25 as the limit.
The size of the data returned by `GetRecords` will vary
depending on the utilization of the shard. The maximum size of
data that `GetRecords` can return is 10 MB. If a call returns
10 MB of data, subsequent calls made within the next 5 seconds
throw `ProvisionedThroughputExceededException`. If there is
insufficient provisioned throughput on the shard, subsequent
calls made within the next 1 second throw
`ProvisionedThroughputExceededException`. Note that
`GetRecords` won't return any data when it throws an
exception. For this reason, we recommend that you wait one
second between calls to `GetRecords`; however, it's possible
that the application will get exceptions for longer than 1
second.
To detect whether the application is falling behind in
processing, add a timestamp to your records and note how long
it takes to process them. You can also monitor how much data
is in a stream using the CloudWatch metrics for write
operations ( `PutRecord` and `PutRecords`). For more
information, see `Monitoring Amazon Kinesis with Amazon
CloudWatch`_ in the Amazon Kinesis Developer Guide .
:type shard_iterator: string
:param shard_iterator: The position in the shard from which you want to
start sequentially reading data records. A shard iterator specifies
this position using the sequence number of a data record in the
shard.
:type limit: integer
:param limit: The maximum number of records to return. Specify a value
of up to 10,000. If you specify a value that is greater than
10,000, `GetRecords` throws `InvalidArgumentException`.
:type b64_decode: boolean
:param b64_decode: Decode the Base64-encoded ``Data`` field of records.
"""
params = {'ShardIterator': shard_iterator, }
if limit is not None:
params['Limit'] = limit
response = self.make_request(action='GetRecords',
body=json.dumps(params))
# Base64 decode the data
if b64_decode:
for record in response.get('Records', []):
record['Data'] = base64.b64decode(
record['Data'].encode('utf-8')).decode('utf-8')
return response
def get_shard_iterator(self, stream_name, shard_id, shard_iterator_type,
starting_sequence_number=None):
"""
Gets a shard iterator. A shard iterator expires five minutes
after it is returned to the requester.
A shard iterator specifies the position in the shard from
which to start reading data records sequentially. A shard
iterator specifies this position using the sequence number of
a data record in a shard. A sequence number is the identifier
associated with every record ingested in the Amazon Kinesis
stream. The sequence number is assigned when a record is put
into the stream.
You must specify the shard iterator type. For example, you can
set the `ShardIteratorType` parameter to read exactly from the
position denoted by a specific sequence number by using the
`AT_SEQUENCE_NUMBER` shard iterator type, or right after the
sequence number by using the `AFTER_SEQUENCE_NUMBER` shard
iterator type, using sequence numbers returned by earlier
calls to PutRecord, PutRecords, GetRecords, or DescribeStream.
You can specify the shard iterator type `TRIM_HORIZON` in the
request to cause `ShardIterator` to point to the last
untrimmed record in the shard in the system, which is the
oldest data record in the shard. Or you can point to just
after the most recent record in the shard, by using the shard
iterator type `LATEST`, so that you always read the most
recent data in the shard.
When you repeatedly read from an Amazon Kinesis stream use a
        GetShardIterator request to get the first shard iterator to
use in your first `GetRecords` request and then use the shard
iterator returned by the `GetRecords` request in
`NextShardIterator` for subsequent reads. A new shard iterator
is returned by every `GetRecords` request in
`NextShardIterator`, which you use in the `ShardIterator`
parameter of the next `GetRecords` request.
If a `GetShardIterator` request is made too often, you receive
a `ProvisionedThroughputExceededException`. For more
information about throughput limits, see GetRecords.
If the shard is closed, the iterator can't return more data,
and `GetShardIterator` returns `null` for its `ShardIterator`.
A shard can be closed using SplitShard or MergeShards.
`GetShardIterator` has a limit of 5 transactions per second
per account per open shard.
:type stream_name: string
:param stream_name: The name of the stream.
:type shard_id: string
:param shard_id: The shard ID of the shard to get the iterator for.
:type shard_iterator_type: string
:param shard_iterator_type:
Determines how the shard iterator is used to start reading data records
from the shard.
The following are the valid shard iterator types:
+ AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted
by a specific sequence number.
+ AFTER_SEQUENCE_NUMBER - Start reading right after the position
denoted by a specific sequence number.
+ TRIM_HORIZON - Start reading at the last untrimmed record in the
shard in the system, which is the oldest data record in the shard.
+ LATEST - Start reading just after the most recent record in the
shard, so that you always read the most recent data in the shard.
:type starting_sequence_number: string
:param starting_sequence_number: The sequence number of the data record
            in the shard from which to start reading.
        :returns: A dictionary containing a `ShardIterator` key whose value
            is the shard iterator to pass to `get_records`.
"""
params = {
'StreamName': stream_name,
'ShardId': shard_id,
'ShardIteratorType': shard_iterator_type,
}
if starting_sequence_number is not None:
params['StartingSequenceNumber'] = starting_sequence_number
return self.make_request(action='GetShardIterator',
body=json.dumps(params))
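    # A read-loop sketch following the pattern described above: fetch one
    # iterator, then chain NextShardIterator until the shard is closed
    # (hypothetical stream and shard IDs; assumes `import time` and a
    # caller-supplied handle() function).
    #
    #   it = conn.get_shard_iterator('my-stream', 'shardId-000000000000',
    #                                'TRIM_HORIZON')['ShardIterator']
    #   while it is not None:
    #       out = conn.get_records(it, limit=100)
    #       for rec in out['Records']:
    #           handle(rec['Data'])  # Base64-decoded by default
    #       it = out['NextShardIterator']
    #       time.sleep(1)  # stay under the per-shard read limits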
def list_streams(self, limit=None, exclusive_start_stream_name=None):
"""
Lists your streams.
The number of streams may be too large to return from a single
call to `ListStreams`. You can limit the number of returned
streams using the `Limit` parameter. If you do not specify a
value for the `Limit` parameter, Amazon Kinesis uses the
default limit, which is currently 10.
You can detect if there are more streams available to list by
using the `HasMoreStreams` flag from the returned output. If
there are more streams available, you can request more streams
by using the name of the last stream returned by the
`ListStreams` request in the `ExclusiveStartStreamName`
parameter in a subsequent request to `ListStreams`. The group
of stream names returned by the subsequent request is then
added to the list. You can continue this process until all the
stream names have been collected in the list.
`ListStreams` has a limit of 5 transactions per second per
account.
:type limit: integer
:param limit: The maximum number of streams to list.
:type exclusive_start_stream_name: string
:param exclusive_start_stream_name: The name of the stream to start the
list with.
"""
params = {}
if limit is not None:
params['Limit'] = limit
if exclusive_start_stream_name is not None:
params['ExclusiveStartStreamName'] = exclusive_start_stream_name
return self.make_request(action='ListStreams',
body=json.dumps(params))
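# Illustrative pagination sketch (assumes a connected `KinesisConnection`
# named `conn`): collect all stream names using `HasMoreStreams` and
# `ExclusiveStartStreamName` as described above.
#
#   names, last = [], None
#   while True:
#       resp = conn.list_streams(exclusive_start_stream_name=last)
#       names.extend(resp['StreamNames'])
#       if not resp['HasMoreStreams']:
#           break
#       last = names[-1]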
def list_tags_for_stream(self, stream_name, exclusive_start_tag_key=None,
limit=None):
"""
Lists the tags for the specified Amazon Kinesis stream.
:type stream_name: string
:param stream_name: The name of the stream.
:type exclusive_start_tag_key: string
:param exclusive_start_tag_key: The key to use as the starting point
for the list of tags. If this parameter is set, `ListTagsForStream`
gets all tags that occur after `ExclusiveStartTagKey`.
:type limit: integer
:param limit: The number of tags to return. If this number is less than
the total number of tags associated with the stream, `HasMoreTags`
is set to `True`. To list additional tags, set
`ExclusiveStartTagKey` to the last key in the response.
"""
params = {'StreamName': stream_name, }
if exclusive_start_tag_key is not None:
params['ExclusiveStartTagKey'] = exclusive_start_tag_key
if limit is not None:
params['Limit'] = limit
return self.make_request(action='ListTagsForStream',
body=json.dumps(params))
def merge_shards(self, stream_name, shard_to_merge,
adjacent_shard_to_merge):
"""
Merges two adjacent shards in a stream and combines them into
a single shard to reduce the stream's capacity to ingest and
transport data. Two shards are considered adjacent if the
union of the hash key ranges for the two shards form a
contiguous set with no gaps. For example, if you have two
shards, one with a hash key range of 276...381 and the other
with a hash key range of 382...454, then you could merge these
two shards into a single shard that would have a hash key
range of 276...454. After the merge, the single child shard
receives data for all hash key values covered by the two
parent shards.
`MergeShards` is called when there is a need to reduce the
overall capacity of a stream because of excess capacity that
is not being used. You must specify the shard to be merged and
the adjacent shard for a stream. For more information about
merging shards, see `Merge Two Shards`_ in the Amazon Kinesis
Developer Guide.
If the stream is in the `ACTIVE` state, you can call
`MergeShards`. If a stream is in the `CREATING`, `UPDATING`,
or `DELETING` state, `MergeShards` returns a
`ResourceInUseException`. If the specified stream does not
exist, `MergeShards` returns a `ResourceNotFoundException`.
You can use DescribeStream to check the state of the stream,
which is returned in `StreamStatus`.
`MergeShards` is an asynchronous operation. Upon receiving a
`MergeShards` request, Amazon Kinesis immediately returns a
response and sets the `StreamStatus` to `UPDATING`. After the
operation is completed, Amazon Kinesis sets the `StreamStatus`
to `ACTIVE`. Read and write operations continue to work while
the stream is in the `UPDATING` state.
You use DescribeStream to determine the shard IDs that are
specified in the `MergeShards` request.
If you try to operate on too many streams in parallel using
CreateStream, DeleteStream, `MergeShards` or SplitShard, you
will receive a `LimitExceededException`.
`MergeShards` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream for the merge.
:type shard_to_merge: string
:param shard_to_merge: The shard ID of the shard to combine with the
adjacent shard for the merge.
:type adjacent_shard_to_merge: string
:param adjacent_shard_to_merge: The shard ID of the adjacent shard for
the merge.
"""
params = {
'StreamName': stream_name,
'ShardToMerge': shard_to_merge,
'AdjacentShardToMerge': adjacent_shard_to_merge,
}
return self.make_request(action='MergeShards',
body=json.dumps(params))
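# Illustrative sketch (assumes `conn` is a connected `KinesisConnection`):
# find two shards via DescribeStream and merge them. The first two shards
# are used only for illustration; verify adjacency by comparing their
# `HashKeyRange` values before merging.
#
#   shards = conn.describe_stream('my-stream')['StreamDescription']['Shards']
#   conn.merge_shards('my-stream', shards[0]['ShardId'],
#                     shards[1]['ShardId'])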
def put_record(self, stream_name, data, partition_key,
explicit_hash_key=None,
sequence_number_for_ordering=None,
exclusive_minimum_sequence_number=None,
b64_encode=True):
"""
This operation puts a data record into an Amazon Kinesis
stream from a producer. This operation must be called to send
data from the producer into the Amazon Kinesis stream for
real-time ingestion and subsequent processing. The `PutRecord`
operation requires the name of the stream that captures,
stores, and transports the data; a partition key; and the data
blob itself. The data blob could be a segment from a log file,
geographic/location data, website clickstream data, or any
other data type.
The partition key is used to distribute data across shards.
Amazon Kinesis segregates the data records that belong to a
data stream into multiple shards, using the partition key
associated with each data record to determine which shard a
given data record belongs to.
Partition keys are Unicode strings, with a maximum length
limit of 256 bytes. An MD5 hash function is used to map
partition keys to 128-bit integer values and to map associated
data records to shards using the hash key ranges of the
shards. You can override hashing the partition key to
determine the shard by explicitly specifying a hash value
using the `ExplicitHashKey` parameter. For more information,
see the `Amazon Kinesis Developer Guide`_.
`PutRecord` returns the shard ID of where the data record was
placed and the sequence number that was assigned to the data
record.
Sequence numbers generally increase over time. To guarantee
strictly increasing ordering, use the
`SequenceNumberForOrdering` parameter. For more information,
see the `Amazon Kinesis Developer Guide`_.
If a `PutRecord` request cannot be processed because of
insufficient provisioned throughput on the shard involved in
the request, `PutRecord` throws
`ProvisionedThroughputExceededException`.
Data records are accessible for only 24 hours from the time
that they are added to an Amazon Kinesis stream.
:type stream_name: string
:param stream_name: The name of the stream to put the data record into.
:type data: blob
:param data: The data blob to put into the record, which is
Base64-encoded when the blob is serialized.
The maximum size of the data blob (the payload after
Base64-decoding) is 50 kilobytes (KB).
Set `b64_encode` to ``False`` to disable automatic Base64 encoding.
:type partition_key: string
:param partition_key: Determines which shard in the stream the data
record is assigned to. Partition keys are Unicode strings with a
maximum length limit of 256 bytes. Amazon Kinesis uses the
partition key as input to a hash function that maps the partition
key and associated data to a specific shard. Specifically, an MD5
hash function is used to map partition keys to 128-bit integer
values and to map associated data records to shards. As a result of
this hashing mechanism, all data records with the same partition
key will map to the same shard within the stream.
:type explicit_hash_key: string
:param explicit_hash_key: The hash value used to explicitly determine
the shard the data record is assigned to by overriding the
partition key hash.
:type sequence_number_for_ordering: string
:param sequence_number_for_ordering: Guarantees strictly increasing
sequence numbers, for puts from the same client and to the same
partition key. Usage: set the `SequenceNumberForOrdering` of record
n to the sequence number of record n-1 (as returned in the
PutRecordResult when putting record n-1). If this parameter is not
set, records will be coarsely ordered based on arrival time.
:type b64_encode: boolean
:param b64_encode: Whether to Base64 encode `data`. Can be set to
``False`` if `data` is already encoded to prevent double encoding.
"""
params = {
'StreamName': stream_name,
'Data': data,
'PartitionKey': partition_key,
}
if explicit_hash_key is not None:
params['ExplicitHashKey'] = explicit_hash_key
if sequence_number_for_ordering is not None:
params['SequenceNumberForOrdering'] = sequence_number_for_ordering
if b64_encode:
if not isinstance(params['Data'], six.binary_type):
params['Data'] = params['Data'].encode('utf-8')
params['Data'] = base64.b64encode(params['Data']).decode('utf-8')
return self.make_request(action='PutRecord',
body=json.dumps(params))
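# Illustrative sketch (assumes `conn` is a connected `KinesisConnection`):
# put a single record; with the default b64_encode=True, `data` is
# Base64-encoded for you before the request is sent.
#
#   resp = conn.put_record('my-stream', '{"temp": 22.5}', 'sensor-42')
#   print(resp['ShardId'], resp['SequenceNumber'])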
def put_records(self, records, stream_name, b64_encode=True):
"""
Puts (writes) multiple data records from a producer into an
Amazon Kinesis stream in a single call (also referred to as a
`PutRecords` request). Use this operation to send data from a
data producer into the Amazon Kinesis stream for real-time
ingestion and processing. Each shard can support up to 1000
records written per second, up to a maximum total of 1 MB of
data written per second.
You must specify the name of the stream that captures, stores,
and transports the data; and an array of request `Records`,
with each record in the array requiring a partition key and
data blob.
The data blob can be any type of data; for example, a segment
from a log file, geographic/location data, website clickstream
data, and so on.
The partition key is used by Amazon Kinesis as input to a hash
function that maps the partition key and associated data to a
specific shard. An MD5 hash function is used to map partition
keys to 128-bit integer values and to map associated data
records to shards. As a result of this hashing mechanism, all
data records with the same partition key map to the same shard
within the stream. For more information, see `Partition Key`_
in the Amazon Kinesis Developer Guide.
Each record in the `Records` array may include an optional
parameter, `ExplicitHashKey`, which overrides the partition
key to shard mapping. This parameter allows a data producer to
determine explicitly the shard where the record is stored. For
more information, see `Adding Multiple Records with
PutRecords`_ in the Amazon Kinesis Developer Guide.
The `PutRecords` response includes an array of response
`Records`. Each record in the response array directly
correlates with a record in the request array using natural
ordering, from the top to the bottom of the request and
response. The response `Records` array always includes the
same number of records as the request array.
The response `Records` array includes both successfully and
unsuccessfully processed records. Amazon Kinesis attempts to
process all records in each `PutRecords` request. A single
record failure does not stop the processing of subsequent
records.
A successfully processed record includes `ShardId` and
`SequenceNumber` values. The `ShardId` parameter identifies
the shard in the stream where the record is stored. The
`SequenceNumber` parameter is an identifier assigned to the
put record, unique to all records in the stream.
An unsuccessfully processed record includes `ErrorCode` and
`ErrorMessage` values. `ErrorCode` reflects the type of error
and can be one of the following values:
`ProvisionedThroughputExceededException` or `InternalFailure`.
`ErrorMessage` provides more detailed information about the
`ProvisionedThroughputExceededException` exception including
the account ID, stream name, and shard ID of the record that
was throttled.
Data records are accessible for only 24 hours from the time
that they are added to an Amazon Kinesis stream.
:type records: list
:param records: The records associated with the request.
:type stream_name: string
:param stream_name: The stream name associated with the request.
:type b64_encode: boolean
:param b64_encode: Whether to Base64 encode `data`. Can be set to
``False`` if `data` is already encoded to prevent double encoding.
"""
params = {'Records': records, 'StreamName': stream_name, }
if b64_encode:
for i in range(len(params['Records'])):
data = params['Records'][i]['Data']
if not isinstance(data, six.binary_type):
data = data.encode('utf-8')
params['Records'][i]['Data'] = base64.b64encode(
data).decode('utf-8')
return self.make_request(action='PutRecords',
body=json.dumps(params))
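# Illustrative sketch: batch-put records, then retry only the failed
# subset. Assumes `conn` is a connected `KinesisConnection`; the
# `FailedRecordCount` and per-record `ErrorCode` keys follow the PutRecords
# response described above. Note that this method Base64-encodes each
# record's `Data` in place, so a retry must pass b64_encode=False to avoid
# double encoding.
#
#   records = [{'Data': 'payload-%d' % i, 'PartitionKey': 'pk-%d' % i}
#              for i in range(10)]
#   resp = conn.put_records(records, 'my-stream')
#   failed = [rec for rec, res in zip(records, resp['Records'])
#             if 'ErrorCode' in res]
#   if failed:
#       conn.put_records(failed, 'my-stream', b64_encode=False)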
def remove_tags_from_stream(self, stream_name, tag_keys):
"""
Deletes tags from the specified Amazon Kinesis stream.
If you specify a tag that does not exist, it is ignored.
:type stream_name: string
:param stream_name: The name of the stream.
:type tag_keys: list
:param tag_keys: A list of tag keys. Each corresponding tag is removed
from the stream.
"""
params = {'StreamName': stream_name, 'TagKeys': tag_keys, }
return self.make_request(action='RemoveTagsFromStream',
body=json.dumps(params))
def split_shard(self, stream_name, shard_to_split, new_starting_hash_key):
"""
Splits a shard into two new shards in the stream, to increase
the stream's capacity to ingest and transport data.
`SplitShard` is called when there is a need to increase the
overall capacity of a stream because of an expected increase in
the volume of data records being ingested.
You can also use `SplitShard` when a shard appears to be
approaching its maximum utilization, for example, when the set
of producers sending data into the specific shard are suddenly
sending more than previously anticipated. You can also call
`SplitShard` to increase stream capacity, so that more Amazon
Kinesis applications can simultaneously read data from the
stream for real-time processing.
You must specify the shard to be split and the new hash key,
which is the position in the shard where the shard gets split
in two. In many cases, the new hash key might simply be the
average of the beginning and ending hash key, but it can be
any hash key value in the range being mapped into the shard.
For more information about splitting shards, see `Split a
Shard`_ in the Amazon Kinesis Developer Guide.
You can use DescribeStream to determine the shard ID and hash
key values for the `ShardToSplit` and `NewStartingHashKey`
parameters that are specified in the `SplitShard` request.
`SplitShard` is an asynchronous operation. Upon receiving a
`SplitShard` request, Amazon Kinesis immediately returns a
response and sets the stream status to `UPDATING`. After the
operation is completed, Amazon Kinesis sets the stream status
to `ACTIVE`. Read and write operations continue to work while
the stream is in the `UPDATING` state.
You can use `DescribeStream` to check the status of the
stream, which is returned in `StreamStatus`. If the stream is
in the `ACTIVE` state, you can call `SplitShard`. If a stream
is in the `CREATING`, `UPDATING`, or `DELETING` state,
`SplitShard` returns a `ResourceInUseException`.
If the specified stream does not exist, `SplitShard`
returns a `ResourceNotFoundException`. If you try to create
more shards than are authorized for your account, you receive
a `LimitExceededException`.
The default limit for an AWS account is 10 shards per stream.
If you need to create a stream with more than 10 shards,
`contact AWS Support`_ to increase the limit on your account.
If you try to operate on too many streams in parallel using
CreateStream, DeleteStream, MergeShards or SplitShard, you
receive a `LimitExceededException`.
`SplitShard` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream for the shard split.
:type shard_to_split: string
:param shard_to_split: The shard ID of the shard to split.
:type new_starting_hash_key: string
:param new_starting_hash_key: A hash key value for the starting hash
key of one of the child shards created by the split. The hash key
range for a given shard constitutes a set of ordered contiguous
positive integers. The value for `NewStartingHashKey` must be in
the range of hash keys being mapped into the shard. The
`NewStartingHashKey` hash key value and all higher hash key values
in hash key range are distributed to one of the child shards. All
the lower hash key values in the range are distributed to the other
child shard.
"""
params = {
'StreamName': stream_name,
'ShardToSplit': shard_to_split,
'NewStartingHashKey': new_starting_hash_key,
}
return self.make_request(action='SplitShard',
body=json.dumps(params))
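# Illustrative sketch: split a shard at the midpoint of its hash key
# range. Assumes `conn` is a connected `KinesisConnection` and that
# DescribeStream reports each shard's `HashKeyRange` as decimal strings,
# per the Kinesis API.
#
#   shard = conn.describe_stream('my-stream')['StreamDescription']['Shards'][0]
#   start = int(shard['HashKeyRange']['StartingHashKey'])
#   end = int(shard['HashKeyRange']['EndingHashKey'])
#   conn.split_shard('my-stream', shard['ShardId'], str((start + end) // 2))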
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response.getheaders())
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
# (end of vendored boto/kinesis/layer1.py)
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.support import exceptions
class SupportConnection(AWSQueryConnection):
"""
AWS Support
The AWS Support API reference is intended for programmers who need
detailed information about the AWS Support operations and data
types. This service enables you to manage your AWS Support cases
programmatically. It uses HTTP methods that return results in JSON
format.
The AWS Support service also exposes a set of `Trusted Advisor`_
features. You can retrieve a list of checks and their
descriptions, get check results, specify checks to refresh, and
get the refresh status of checks.
The following list describes the AWS Support case management
operations:
+ **Service names, issue categories, and available severity
levels.** The DescribeServices and DescribeSeverityLevels
operations return AWS service names, service codes, service
categories, and problem severity levels. You use these values when
you call the CreateCase operation.
+ **Case creation, case details, and case resolution.** The
CreateCase, DescribeCases, DescribeAttachment, and ResolveCase
operations create AWS Support cases, retrieve information about
cases, and resolve cases.
+ **Case communication.** The DescribeCommunications,
AddCommunicationToCase, and AddAttachmentsToSet operations
retrieve and add communications and attachments to AWS Support
cases.
The following list describes the operations available from the AWS
Support service for Trusted Advisor:
+ DescribeTrustedAdvisorChecks returns the list of checks that run
against your AWS resources.
+ Using the `CheckId` for a specific check returned by
DescribeTrustedAdvisorChecks, you can call
DescribeTrustedAdvisorCheckResult to obtain the results for the
check you specified.
+ DescribeTrustedAdvisorCheckSummaries returns summarized results
for one or more Trusted Advisor checks.
+ RefreshTrustedAdvisorCheck requests that Trusted Advisor rerun a
specified check.
+ DescribeTrustedAdvisorCheckRefreshStatuses reports the refresh
status of one or more checks.
For authentication of requests, AWS Support uses `Signature
Version 4 Signing Process`_.
See `About the AWS Support API`_ in the AWS Support User Guide for
information about how to use this service to create and manage
your support cases, and how to call Trusted Advisor for results of
checks on your resources.
"""
APIVersion = "2013-04-15"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "support.us-east-1.amazonaws.com"
ServiceName = "Support"
TargetPrefix = "AWSSupport_20130415"
ResponseError = JSONResponseError
_faults = {
"CaseCreationLimitExceeded": exceptions.CaseCreationLimitExceeded,
"AttachmentLimitExceeded": exceptions.AttachmentLimitExceeded,
"CaseIdNotFound": exceptions.CaseIdNotFound,
"DescribeAttachmentLimitExceeded": exceptions.DescribeAttachmentLimitExceeded,
"AttachmentSetIdNotFound": exceptions.AttachmentSetIdNotFound,
"InternalServerError": exceptions.InternalServerError,
"AttachmentSetExpired": exceptions.AttachmentSetExpired,
"AttachmentIdNotFound": exceptions.AttachmentIdNotFound,
"AttachmentSetSizeLimitExceeded": exceptions.AttachmentSetSizeLimitExceeded,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(SupportConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def add_attachments_to_set(self, attachments, attachment_set_id=None):
"""
Adds one or more attachments to an attachment set. If an
`AttachmentSetId` is not specified, a new attachment set is
created, and the ID of the set is returned in the response. If
an `AttachmentSetId` is specified, the attachments are added
to the specified set, if it exists.
An attachment set is a temporary container for attachments
that are to be added to a case or case communication. The set
is available for one hour after it is created; the
`ExpiryTime` returned in the response indicates when the set
expires. The maximum number of attachments in a set is 3, and
the maximum size of any attachment in the set is 5 MB.
:type attachment_set_id: string
:param attachment_set_id: The ID of the attachment set. If an
`AttachmentSetId` is not specified, a new attachment set is
created, and the ID of the set is returned in the response. If an
`AttachmentSetId` is specified, the attachments are added to the
specified set, if it exists.
:type attachments: list
:param attachments: One or more attachments to add to the set. The
limit is 3 attachments per set, and the size limit is 5 MB per
attachment.
"""
params = {'attachments': attachments, }
if attachment_set_id is not None:
params['attachmentSetId'] = attachment_set_id
return self.make_request(action='AddAttachmentsToSet',
body=json.dumps(params))
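# Illustrative sketch (assumes `conn` is a connected `SupportConnection`;
# the attachment dict shape, a `fileName` plus Base64 `data`, follows the
# AWS Support Attachment type):
#
#   attachments = [{'fileName': 'logs.txt', 'data': 'aGVsbG8='}]
#   resp = conn.add_attachments_to_set(attachments)
#   attachment_set_id = resp['attachmentSetId']  # set is valid for one hour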
def add_communication_to_case(self, communication_body, case_id=None,
cc_email_addresses=None,
attachment_set_id=None):
"""
Adds additional customer communication to an AWS Support case.
You use the `CaseId` value to identify the case to add
communication to. You can list a set of email addresses to
copy on the communication using the `CcEmailAddresses` value.
The `CommunicationBody` value contains the text of the
communication.
The response indicates the success or failure of the request.
This operation implements a subset of the behavior on the AWS
Support `Your Support Cases`_ web form.
:type case_id: string
:param case_id: The AWS Support case ID requested or returned in the
call. The case ID is an alphanumeric string formatted as shown in
this example: case-12345678910-2013-c4c1d2bf33c5cf47
:type communication_body: string
:param communication_body: The body of an email communication to add to
the support case.
:type cc_email_addresses: list
:param cc_email_addresses: The email addresses in the CC line of an
email to be added to the support case.
:type attachment_set_id: string
:param attachment_set_id: The ID of a set of one or more attachments
for the communication to add to the case. Create the set by calling
AddAttachmentsToSet
"""
params = {'communicationBody': communication_body, }
if case_id is not None:
params['caseId'] = case_id
if cc_email_addresses is not None:
params['ccEmailAddresses'] = cc_email_addresses
if attachment_set_id is not None:
params['attachmentSetId'] = attachment_set_id
return self.make_request(action='AddCommunicationToCase',
body=json.dumps(params))
def create_case(self, subject, communication_body, service_code=None,
severity_code=None, category_code=None,
cc_email_addresses=None, language=None, issue_type=None,
attachment_set_id=None):
"""
Creates a new case in the AWS Support Center. This operation
is modeled on the behavior of the AWS Support Center `Open a
new case`_ page. Its parameters require you to specify the
following information:
#. **IssueType.** The type of issue for the case. You can
specify either "customer-service" or "technical." If you do
not indicate a value, the default is "technical."
#. **ServiceCode.** The code for an AWS service. You obtain
the `ServiceCode` by calling DescribeServices.
#. **CategoryCode.** The category for the service defined for
the `ServiceCode` value. You also obtain the category code for
a service by calling DescribeServices. Each AWS service
defines its own set of category codes.
#. **SeverityCode.** A value that indicates the urgency of the
case, which in turn determines the response time according to
your service level agreement with AWS Support. You obtain the
SeverityCode by calling DescribeSeverityLevels.
#. **Subject.** The **Subject** field on the AWS Support
Center `Open a new case`_ page.
#. **CommunicationBody.** The **Description** field on the AWS
Support Center `Open a new case`_ page.
#. **AttachmentSetId.** The ID of a set of attachments that
has been created by using AddAttachmentsToSet.
#. **Language.** The human language in which AWS Support
handles the case. English and Japanese are currently
supported.
#. **CcEmailAddresses.** The AWS Support Center **CC** field
on the `Open a new case`_ page. You can list email addresses
to be copied on any correspondence about the case. The account
that opens the case is already identified by passing the AWS
Credentials in the HTTP POST method or in a method or function
call from one of the programming languages supported by an
`AWS SDK`_.
A successful CreateCase request returns an AWS Support case
number. Case numbers are used by the DescribeCases operation
to retrieve existing AWS Support cases.
:type subject: string
:param subject: The title of the AWS Support case.
:type service_code: string
:param service_code: The code for the AWS service returned by the call
to DescribeServices.
:type severity_code: string
:param severity_code: The code for the severity level returned by the
call to DescribeSeverityLevels.
:type category_code: string
:param category_code: The category of problem for the AWS Support case.
:type communication_body: string
:param communication_body: The communication body text when you create
an AWS Support case by calling CreateCase.
:type cc_email_addresses: list
:param cc_email_addresses: A list of email addresses that AWS Support
copies on case correspondence.
:type language: string
:param language: The ISO 639-1 code for the language in which AWS
provides support. AWS Support currently supports English ("en") and
Japanese ("ja"). Language parameters must be passed explicitly for
operations that take them.
:type issue_type: string
:param issue_type: The type of issue for the case. You can specify
either "customer-service" or "technical." If you do not indicate a
value, the default is "technical."
:type attachment_set_id: string
:param attachment_set_id: The ID of a set of one or more attachments
for the case. Create the set by using AddAttachmentsToSet.
"""
params = {
'subject': subject,
'communicationBody': communication_body,
}
if service_code is not None:
params['serviceCode'] = service_code
if severity_code is not None:
params['severityCode'] = severity_code
if category_code is not None:
params['categoryCode'] = category_code
if cc_email_addresses is not None:
params['ccEmailAddresses'] = cc_email_addresses
if language is not None:
params['language'] = language
if issue_type is not None:
params['issueType'] = issue_type
if attachment_set_id is not None:
params['attachmentSetId'] = attachment_set_id
return self.make_request(action='CreateCase',
body=json.dumps(params))
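# Illustrative sketch (assumes `conn` is a connected `SupportConnection`;
# the service, category, and severity codes below are placeholders, so
# obtain real ones from describe_services() and describe_severity_levels()):
#
#   resp = conn.create_case(
#       subject='EC2 instance unreachable',
#       communication_body='Instance i-12345 stopped responding.',
#       service_code='amazon-elastic-compute-cloud-linux',
#       category_code='instance-issue',
#       severity_code='low',
#       issue_type='technical')
#   case_id = resp['caseId']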
def describe_attachment(self, attachment_id):
"""
Returns the attachment that has the specified ID. Attachment
IDs are generated by the case management system when you add
an attachment to a case or case communication. Attachment IDs
are returned in the AttachmentDetails objects that are
returned by the DescribeCommunications operation.
:type attachment_id: string
:param attachment_id: The ID of the attachment to return. Attachment
IDs are returned by the DescribeCommunications operation.
"""
params = {'attachmentId': attachment_id, }
return self.make_request(action='DescribeAttachment',
body=json.dumps(params))
def describe_cases(self, case_id_list=None, display_id=None,
after_time=None, before_time=None,
include_resolved_cases=None, next_token=None,
max_results=None, language=None,
include_communications=None):
"""
Returns a list of cases that you specify by passing one or
more case IDs. In addition, you can filter the cases by date
by setting values for the `AfterTime` and `BeforeTime` request
parameters.
Case data is available for 12 months after creation. If a case
was created more than 12 months ago, a request for data might
cause an error.
The response returns the following in JSON format:
#. One or more CaseDetails data types.
#. One or more `NextToken` values, which specify where to
paginate the returned records represented by the `CaseDetails`
objects.
:type case_id_list: list
:param case_id_list: A list of ID numbers of the support cases you want
returned. The maximum number of cases is 100.
:type display_id: string
:param display_id: The ID displayed for a case in the AWS Support
Center user interface.
:type after_time: string
:param after_time: The start date for a filtered date search on support
case communications. Case communications are available for 12
months after creation.
:type before_time: string
:param before_time: The end date for a filtered date search on support
case communications. Case communications are available for 12
months after creation.
:type include_resolved_cases: boolean
:param include_resolved_cases: Specifies whether resolved support cases
should be included in the DescribeCases results. The default is
false.
:type next_token: string
:param next_token: A resumption point for pagination.
:type max_results: integer
:param max_results: The maximum number of results to return before
paginating.
:type language: string
:param language: The ISO 639-1 code for the language in which AWS
provides support. AWS Support currently supports English ("en") and
Japanese ("ja"). Language parameters must be passed explicitly for
operations that take them.
:type include_communications: boolean
:param include_communications: Specifies whether communications should
be included in the DescribeCases results. The default is true.
"""
params = {}
if case_id_list is not None:
params['caseIdList'] = case_id_list
if display_id is not None:
params['displayId'] = display_id
if after_time is not None:
params['afterTime'] = after_time
if before_time is not None:
params['beforeTime'] = before_time
if include_resolved_cases is not None:
params['includeResolvedCases'] = include_resolved_cases
if next_token is not None:
params['nextToken'] = next_token
if max_results is not None:
params['maxResults'] = max_results
if language is not None:
params['language'] = language
if include_communications is not None:
params['includeCommunications'] = include_communications
return self.make_request(action='DescribeCases',
body=json.dumps(params))
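# Illustrative pagination sketch (assumes `conn` is a connected
# `SupportConnection`; the 'cases' and 'nextToken' keys follow the
# DescribeCases response):
#
#   cases, token = [], None
#   while True:
#       resp = conn.describe_cases(next_token=token, max_results=20)
#       cases.extend(resp['cases'])
#       token = resp.get('nextToken')
#       if not token:
#           break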
def describe_communications(self, case_id, before_time=None,
after_time=None, next_token=None,
max_results=None):
"""
Returns communications (and attachments) for one or more
support cases. You can use the `AfterTime` and `BeforeTime`
parameters to filter by date. You can use the `CaseId`
parameter to restrict the results to a particular case.
Case data is available for 12 months after creation. If a case
was created more than 12 months ago, a request for data might
cause an error.
You can use the `MaxResults` and `NextToken` parameters to
control the pagination of the result set. Set `MaxResults` to
the number of cases you want displayed on each page, and use
`NextToken` to specify the resumption of pagination.
:type case_id: string
:param case_id: The AWS Support case ID requested or returned in the
call. The case ID is an alphanumeric string formatted as shown in
this example: case-12345678910-2013-c4c1d2bf33c5cf47
:type before_time: string
:param before_time: The end date for a filtered date search on support
case communications. Case communications are available for 12
months after creation.
:type after_time: string
:param after_time: The start date for a filtered date search on support
case communications. Case communications are available for 12
months after creation.
:type next_token: string
:param next_token: A resumption point for pagination.
:type max_results: integer
:param max_results: The maximum number of results to return before
paginating.
"""
params = {'caseId': case_id, }
if before_time is not None:
params['beforeTime'] = before_time
if after_time is not None:
params['afterTime'] = after_time
if next_token is not None:
params['nextToken'] = next_token
if max_results is not None:
params['maxResults'] = max_results
return self.make_request(action='DescribeCommunications',
body=json.dumps(params))
def describe_services(self, service_code_list=None, language=None):
"""
Returns the current list of AWS services and a list of service
categories that applies to each one. You then use service
names and categories in your CreateCase requests. Each AWS
service has its own set of categories.
The service codes and category codes correspond to the values
that are displayed in the **Service** and **Category** drop-
down lists on the AWS Support Center `Open a new case`_ page.
The values in those fields, however, do not necessarily match
the service codes and categories returned by the
`DescribeServices` request. Always use the service codes and
categories obtained programmatically. This practice ensures
that you always have the most recent set of service and
category codes.
:type service_code_list: list
:param service_code_list: A JSON-formatted list of service codes
available for AWS services.
:type language: string
:param language: The ISO 639-1 code for the language in which AWS
provides support. AWS Support currently supports English ("en") and
Japanese ("ja"). Language parameters must be passed explicitly for
operations that take them.
"""
params = {}
if service_code_list is not None:
params['serviceCodeList'] = service_code_list
if language is not None:
params['language'] = language
return self.make_request(action='DescribeServices',
body=json.dumps(params))
def describe_severity_levels(self, language=None):
"""
Returns the list of severity levels that you can assign to an
AWS Support case. The severity level for a case is also a
field in the CaseDetails data type included in any CreateCase
request.
:type language: string
:param language: The ISO 639-1 code for the language in which AWS
provides support. AWS Support currently supports English ("en") and
Japanese ("ja"). Language parameters must be passed explicitly for
operations that take them.
"""
params = {}
if language is not None:
params['language'] = language
return self.make_request(action='DescribeSeverityLevels',
body=json.dumps(params))
def describe_trusted_advisor_check_refresh_statuses(self, check_ids):
"""
Returns the refresh status of the Trusted Advisor checks that
have the specified check IDs. Check IDs can be obtained by
calling DescribeTrustedAdvisorChecks.
:type check_ids: list
:param check_ids: The IDs of the Trusted Advisor checks.
"""
params = {'checkIds': check_ids, }
return self.make_request(action='DescribeTrustedAdvisorCheckRefreshStatuses',
body=json.dumps(params))
def describe_trusted_advisor_check_result(self, check_id, language=None):
"""
Returns the results of the Trusted Advisor check that has the
specified check ID. Check IDs can be obtained by calling
DescribeTrustedAdvisorChecks.
The response contains a TrustedAdvisorCheckResult object,
which contains these three objects:
+ TrustedAdvisorCategorySpecificSummary
+ TrustedAdvisorResourceDetail
+ TrustedAdvisorResourcesSummary
In addition, the response contains these fields:
+ **Status.** The alert status of the check: "ok" (green),
"warning" (yellow), "error" (red), or "not_available".
+ **Timestamp.** The time of the last refresh of the check.
+ **CheckId.** The unique identifier for the check.
:type check_id: string
:param check_id: The unique identifier for the Trusted Advisor check.
:type language: string
:param language: The ISO 639-1 code for the language in which AWS
provides support. AWS Support currently supports English ("en") and
Japanese ("ja"). Language parameters must be passed explicitly for
operations that take them.
"""
params = {'checkId': check_id, }
if language is not None:
params['language'] = language
return self.make_request(action='DescribeTrustedAdvisorCheckResult',
body=json.dumps(params))
def describe_trusted_advisor_check_summaries(self, check_ids):
"""
Returns the summaries of the results of the Trusted Advisor
checks that have the specified check IDs. Check IDs can be
obtained by calling DescribeTrustedAdvisorChecks.
The response contains an array of TrustedAdvisorCheckSummary
objects.
:type check_ids: list
:param check_ids: The IDs of the Trusted Advisor checks.
"""
params = {'checkIds': check_ids, }
return self.make_request(action='DescribeTrustedAdvisorCheckSummaries',
body=json.dumps(params))
def describe_trusted_advisor_checks(self, language):
"""
Returns information about all available Trusted Advisor
checks, including name, ID, category, description, and
metadata. You must specify a language code; English ("en") and
Japanese ("ja") are currently supported. The response contains
a TrustedAdvisorCheckDescription for each check.
:type language: string
:param language: The ISO 639-1 code for the language in which AWS
provides support. AWS Support currently supports English ("en") and
Japanese ("ja"). Language parameters must be passed explicitly for
operations that take them.
"""
params = {'language': language, }
return self.make_request(action='DescribeTrustedAdvisorChecks',
body=json.dumps(params))
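# Illustrative sketch: list all checks, then fetch the result of one.
# Assumes `conn` is a connected `SupportConnection`; the 'checks' and 'id'
# keys follow the Trusted Advisor response shape.
#
#   checks = conn.describe_trusted_advisor_checks('en')['checks']
#   result = conn.describe_trusted_advisor_check_result(checks[0]['id'])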
def refresh_trusted_advisor_check(self, check_id):
"""
Requests a refresh of the Trusted Advisor check that has the
specified check ID. Check IDs can be obtained by calling
DescribeTrustedAdvisorChecks.
The response contains a RefreshTrustedAdvisorCheckResult
object, which contains these fields:
+ **Status.** The refresh status of the check: "none",
"enqueued", "processing", "success", or "abandoned".
+ **MillisUntilNextRefreshable.** The amount of time, in
milliseconds, until the check is eligible for refresh.
+ **CheckId.** The unique identifier for the check.
:type check_id: string
:param check_id: The unique identifier for the Trusted Advisor check.
"""
params = {'checkId': check_id, }
return self.make_request(action='RefreshTrustedAdvisorCheck',
body=json.dumps(params))
def resolve_case(self, case_id=None):
"""
Takes a `CaseId` and returns the initial state of the case
along with the state of the case after the call to ResolveCase
completed.
:type case_id: string
:param case_id: The AWS Support case ID requested or returned in the
call. The case ID is an alphanumeric string formatted as shown in
this example: case-12345678910-2013-c4c1d2bf33c5cf47
"""
params = {}
if case_id is not None:
params['caseId'] = case_id
return self.make_request(action='ResolveCase',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
# (end of vendored boto/support/layer1.py)
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.cognito.identity import exceptions
class CognitoIdentityConnection(AWSQueryConnection):
"""
Amazon Cognito
Amazon Cognito is a web service that delivers scoped temporary
credentials to mobile devices and other untrusted environments.
Amazon Cognito uniquely identifies a device and supplies the user
with a consistent identity over the lifetime of an application.
Using Amazon Cognito, you can enable authentication with one or
more third-party identity providers (Facebook, Google, or Login
with Amazon), and you can also choose to support unauthenticated
access from your app. Cognito delivers a unique identifier for
each user and acts as an OpenID token provider trusted by AWS
Security Token Service (STS) to access temporary, limited-
privilege AWS credentials.
To provide end-user credentials, first make an unsigned call to
GetId. If the end user is authenticated with one of the supported
identity providers, set the `Logins` map with the identity
provider token. `GetId` returns a unique identifier for the user.
Next, make an unsigned call to GetOpenIdToken, which returns the
OpenID token necessary to call STS and retrieve AWS credentials.
This call expects the same `Logins` map as the `GetId` call, as
well as the `IdentityID` originally returned by `GetId`. The token
returned by `GetOpenIdToken` can be passed to the STS operation
`AssumeRoleWithWebIdentity`_ to retrieve AWS credentials.
"""
APIVersion = "2014-06-30"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "cognito-identity.us-east-1.amazonaws.com"
ServiceName = "CognitoIdentity"
TargetPrefix = "AWSCognitoIdentityService"
ResponseError = JSONResponseError
_faults = {
"LimitExceededException": exceptions.LimitExceededException,
"ResourceConflictException": exceptions.ResourceConflictException,
"DeveloperUserAlreadyRegisteredException": exceptions.DeveloperUserAlreadyRegisteredException,
"TooManyRequestsException": exceptions.TooManyRequestsException,
"InvalidParameterException": exceptions.InvalidParameterException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"InternalErrorException": exceptions.InternalErrorException,
"NotAuthorizedException": exceptions.NotAuthorizedException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(CognitoIdentityConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def create_identity_pool(self, identity_pool_name,
allow_unauthenticated_identities,
supported_login_providers=None,
developer_provider_name=None,
open_id_connect_provider_ar_ns=None):
"""
Creates a new identity pool. The identity pool is a store of
user identity information that is specific to your AWS
account. The limit on identity pools is 60 per account.
:type identity_pool_name: string
:param identity_pool_name: A string that you provide.
:type allow_unauthenticated_identities: boolean
:param allow_unauthenticated_identities: TRUE if the identity pool
supports unauthenticated logins.
:type supported_login_providers: map
:param supported_login_providers: Optional key:value pairs mapping
provider names to provider app IDs.
:type developer_provider_name: string
:param developer_provider_name: The "domain" by which Cognito will
refer to your users. This name acts as a placeholder that allows
your backend and the Cognito service to communicate about the
developer provider. For the `DeveloperProviderName`, you can use
letters as well as period ( `.`), underscore ( `_`), and dash (
`-`).
Once you have set a developer provider name, you cannot change it.
Please take care in setting this parameter.
:type open_id_connect_provider_ar_ns: list
:param open_id_connect_provider_ar_ns:
"""
params = {
'IdentityPoolName': identity_pool_name,
'AllowUnauthenticatedIdentities': allow_unauthenticated_identities,
}
if supported_login_providers is not None:
params['SupportedLoginProviders'] = supported_login_providers
if developer_provider_name is not None:
params['DeveloperProviderName'] = developer_provider_name
if open_id_connect_provider_ar_ns is not None:
params['OpenIdConnectProviderARNs'] = open_id_connect_provider_ar_ns
return self.make_request(action='CreateIdentityPool',
body=json.dumps(params))
def delete_identity_pool(self, identity_pool_id):
"""
Deletes an identity pool. Once a pool is deleted, users will not be
able to authenticate with the pool.
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
"""
params = {'IdentityPoolId': identity_pool_id, }
return self.make_request(action='DeleteIdentityPool',
body=json.dumps(params))
def describe_identity_pool(self, identity_pool_id):
"""
Gets details about a particular identity pool, including the
pool name, ID, description, creation date, and current number
of users.
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
"""
params = {'IdentityPoolId': identity_pool_id, }
return self.make_request(action='DescribeIdentityPool',
body=json.dumps(params))
def get_id(self, account_id, identity_pool_id, logins=None):
"""
Generates (or retrieves) a Cognito ID. Supplying multiple
logins will create an implicit linked account.
:type account_id: string
:param account_id: A standard AWS account ID (9+ digits).
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
:type logins: map
:param logins: A set of optional name-value pairs that map provider
names to provider tokens.
The available provider names for `Logins` are as follows:
+ Facebook: `graph.facebook.com`
+ Google: `accounts.google.com`
+ Amazon: `www.amazon.com`
"""
params = {
'AccountId': account_id,
'IdentityPoolId': identity_pool_id,
}
if logins is not None:
params['Logins'] = logins
return self.make_request(action='GetId',
body=json.dumps(params))
def get_open_id_token(self, identity_id, logins=None):
"""
Gets an OpenID token, using a known Cognito ID. This known
Cognito ID is returned by GetId. You can optionally add
additional logins for the identity. Supplying multiple logins
creates an implicit link.
The OpenId token is valid for 15 minutes.
:type identity_id: string
:param identity_id: A unique identifier in the format REGION:GUID.
:type logins: map
:param logins: A set of optional name-value pairs that map provider
names to provider tokens.
"""
params = {'IdentityId': identity_id, }
if logins is not None:
params['Logins'] = logins
return self.make_request(action='GetOpenIdToken',
body=json.dumps(params))
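# Illustrative sketch of the GetId -> GetOpenIdToken flow described in the
# class docstring. Assumes `conn` is a connected
# `CognitoIdentityConnection`; the account ID, pool ID, and Facebook token
# are placeholders.
#
#   logins = {'graph.facebook.com': 'fb-access-token'}
#   identity_id = conn.get_id('123456789012', 'us-east-1:pool-guid',
#                             logins=logins)['IdentityId']
#   token = conn.get_open_id_token(identity_id, logins=logins)['Token']
#   # Exchange `token` with STS AssumeRoleWithWebIdentity for credentials.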
def get_open_id_token_for_developer_identity(self, identity_pool_id,
logins, identity_id=None,
token_duration=None):
"""
Registers (or retrieves) a Cognito `IdentityId` and an OpenID
Connect token for a user authenticated by your backend
authentication process. Supplying multiple logins will create
an implicit linked account. You can only specify one developer
provider as part of the `Logins` map, which is linked to the
identity pool. The developer provider is the "domain" by which
Cognito will refer to your users.
You can use `GetOpenIdTokenForDeveloperIdentity` to create a
new identity and to link new logins (that is, user credentials
issued by a public provider or developer provider) to an
existing identity. When you want to create a new identity, the
`IdentityId` should be null. When you want to associate a new
login with an existing authenticated/unauthenticated identity,
you can do so by providing the existing `IdentityId`. This API
will create the identity in the specified `IdentityPoolId`.
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
:type identity_id: string
:param identity_id: A unique identifier in the format REGION:GUID.
:type logins: map
:param logins: A set of optional name-value pairs that map provider
names to provider tokens. Each name-value pair represents a user
from a public provider or developer provider. If the user is from a
developer provider, the name-value pair will follow the syntax
`"developer_provider_name": "developer_user_identifier"`. The
developer provider is the "domain" by which Cognito will refer to
your users; you provided this domain while creating/updating the
identity pool. The developer user identifier is an identifier from
your backend that uniquely identifies a user. When you create an
identity pool, you can specify the supported logins.
:type token_duration: long
:param token_duration: The expiration time of the token, in seconds.
You can specify a custom expiration time for the token so that you
can cache it. If you don't provide an expiration time, the token is
valid for 15 minutes. You can exchange the token with Amazon STS
for temporary AWS credentials, which are valid for a maximum of one
hour. The maximum token duration you can set is 24 hours. You
should take care in setting the expiration time for a token, as
there are significant security implications: an attacker could use
a leaked token to access your AWS resources for the token's
duration.
"""
params = {
'IdentityPoolId': identity_pool_id,
'Logins': logins,
}
if identity_id is not None:
params['IdentityId'] = identity_id
if token_duration is not None:
params['TokenDuration'] = token_duration
return self.make_request(action='GetOpenIdTokenForDeveloperIdentity',
body=json.dumps(params))
def list_identities(self, identity_pool_id, max_results, next_token=None):
"""
Lists the identities in a pool.
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
:type max_results: integer
:param max_results: The maximum number of identities to return.
:type next_token: string
:param next_token: A pagination token.
"""
params = {
'IdentityPoolId': identity_pool_id,
'MaxResults': max_results,
}
if next_token is not None:
params['NextToken'] = next_token
return self.make_request(action='ListIdentities',
body=json.dumps(params))
def list_identity_pools(self, max_results, next_token=None):
"""
Lists all of the Cognito identity pools registered for your
account.
:type max_results: integer
:param max_results: The maximum number of identities to return.
:type next_token: string
:param next_token: A pagination token.
"""
params = {'MaxResults': max_results, }
if next_token is not None:
params['NextToken'] = next_token
return self.make_request(action='ListIdentityPools',
body=json.dumps(params))
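# Illustrative pagination sketch (assumes `conn` is a connected
# `CognitoIdentityConnection`; 'IdentityPools' and 'NextToken' follow the
# ListIdentityPools response):
#
#   pools, token = [], None
#   while True:
#       resp = conn.list_identity_pools(60, next_token=token)
#       pools.extend(resp['IdentityPools'])
#       token = resp.get('NextToken')
#       if not token:
#           break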
def lookup_developer_identity(self, identity_pool_id, identity_id=None,
developer_user_identifier=None,
max_results=None, next_token=None):
"""
Retrieves the `IdentityID` associated with a
`DeveloperUserIdentifier` or the list of
`DeveloperUserIdentifier`s associated with an `IdentityId` for
an existing identity. Either `IdentityID` or
`DeveloperUserIdentifier` must not be null. If you supply only
one of these values, the other value will be searched in the
database and returned as a part of the response. If you supply
both, `DeveloperUserIdentifier` will be matched against
`IdentityID`. If the values are verified against the database,
the response returns both values and is the same as the
request. Otherwise a `ResourceConflictException` is thrown.
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
:type identity_id: string
:param identity_id: A unique identifier in the format REGION:GUID.
:type developer_user_identifier: string
:param developer_user_identifier: A unique ID used by your backend
authentication process to identify a user. Typically, a developer
identity provider would issue many developer user identifiers, in
keeping with the number of users.
:type max_results: integer
:param max_results: The maximum number of identities to return.
:type next_token: string
:param next_token: A pagination token. The first call you make will
have `NextToken` set to null. After that the service will return
`NextToken` values as needed. For example, let's say you make a
request with `MaxResults` set to 10, and there are 20 matches in
the database. The service will return a pagination token as a part
of the response. This token can be used to call the API again and
get results starting from the 11th match.
"""
params = {'IdentityPoolId': identity_pool_id, }
if identity_id is not None:
params['IdentityId'] = identity_id
if developer_user_identifier is not None:
params['DeveloperUserIdentifier'] = developer_user_identifier
if max_results is not None:
params['MaxResults'] = max_results
if next_token is not None:
params['NextToken'] = next_token
return self.make_request(action='LookupDeveloperIdentity',
body=json.dumps(params))
def merge_developer_identities(self, source_user_identifier,
destination_user_identifier,
developer_provider_name, identity_pool_id):
"""
Merges two users having different `IdentityId`s, existing in
the same identity pool, and identified by the same developer
provider. You can use this action to request that discrete
users be merged and identified as a single user in the Cognito
environment. Cognito associates the given source user (
`SourceUserIdentifier`) with the `IdentityId` of the
`DestinationUserIdentifier`. Only developer-authenticated
users can be merged. If the users to be merged are associated
with the same public provider, but as two different users, an
exception will be thrown.
:type source_user_identifier: string
:param source_user_identifier: User identifier for the source user. The
value should be a `DeveloperUserIdentifier`.
:type destination_user_identifier: string
:param destination_user_identifier: User identifier for the destination
user. The value should be a `DeveloperUserIdentifier`.
:type developer_provider_name: string
:param developer_provider_name: The "domain" by which Cognito will
refer to your users. This is a (pseudo) domain name that you
provide while creating an identity pool. This name acts as a
placeholder that allows your backend and the Cognito service to
communicate about the developer provider. For the
`DeveloperProviderName`, you can use letters as well as period (.),
underscore (_), and dash (-).
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
"""
params = {
'SourceUserIdentifier': source_user_identifier,
'DestinationUserIdentifier': destination_user_identifier,
'DeveloperProviderName': developer_provider_name,
'IdentityPoolId': identity_pool_id,
}
return self.make_request(action='MergeDeveloperIdentities',
body=json.dumps(params))
def unlink_developer_identity(self, identity_id, identity_pool_id,
developer_provider_name,
developer_user_identifier):
"""
Unlinks a `DeveloperUserIdentifier` from an existing identity.
Unlinked developer users will be considered new identities
next time they are seen. If, for a given Cognito identity, you
remove all federated identities as well as the developer user
identifier, the Cognito identity becomes inaccessible.
:type identity_id: string
:param identity_id: A unique identifier in the format REGION:GUID.
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
:type developer_provider_name: string
:param developer_provider_name: The "domain" by which Cognito will
refer to your users.
:type developer_user_identifier: string
:param developer_user_identifier: A unique ID used by your backend
authentication process to identify a user.
"""
params = {
'IdentityId': identity_id,
'IdentityPoolId': identity_pool_id,
'DeveloperProviderName': developer_provider_name,
'DeveloperUserIdentifier': developer_user_identifier,
}
return self.make_request(action='UnlinkDeveloperIdentity',
body=json.dumps(params))
def unlink_identity(self, identity_id, logins, logins_to_remove):
"""
Unlinks a federated identity from an existing account.
Unlinked logins will be considered new identities next time
they are seen. Removing the last linked login will make this
identity inaccessible.
:type identity_id: string
:param identity_id: A unique identifier in the format REGION:GUID.
:type logins: map
:param logins: A set of optional name-value pairs that map provider
names to provider tokens.
:type logins_to_remove: list
:param logins_to_remove: Provider names to unlink from this identity.
"""
params = {
'IdentityId': identity_id,
'Logins': logins,
'LoginsToRemove': logins_to_remove,
}
return self.make_request(action='UnlinkIdentity',
body=json.dumps(params))
def update_identity_pool(self, identity_pool_id, identity_pool_name,
allow_unauthenticated_identities,
supported_login_providers=None,
developer_provider_name=None,
open_id_connect_provider_ar_ns=None):
"""
        Updates an identity pool.
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
:type identity_pool_name: string
:param identity_pool_name: A string that you provide.
:type allow_unauthenticated_identities: boolean
:param allow_unauthenticated_identities: TRUE if the identity pool
supports unauthenticated logins.
:type supported_login_providers: map
:param supported_login_providers: Optional key:value pairs mapping
provider names to provider app IDs.
:type developer_provider_name: string
:param developer_provider_name: The "domain" by which Cognito will
refer to your users.
:type open_id_connect_provider_ar_ns: list
:param open_id_connect_provider_ar_ns:
"""
params = {
'IdentityPoolId': identity_pool_id,
'IdentityPoolName': identity_pool_name,
'AllowUnauthenticatedIdentities': allow_unauthenticated_identities,
}
if supported_login_providers is not None:
params['SupportedLoginProviders'] = supported_login_providers
if developer_provider_name is not None:
params['DeveloperProviderName'] = developer_provider_name
if open_id_connect_provider_ar_ns is not None:
params['OpenIdConnectProviderARNs'] = open_id_connect_provider_ar_ns
return self.make_request(action='UpdateIdentityPool',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
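# Usage sketch (editor's addition, not part of boto): a minimal, hedged
# example of the developer-identity merge call above. It assumes valid AWS
# credentials are configured and the pool exists; all IDs and the provider
# name are hypothetical.
if __name__ == '__main__':
    import boto.cognito.identity
    conn = boto.cognito.identity.connect_to_region('us-east-1')
    merged = conn.merge_developer_identities(
        source_user_identifier='user-a',
        destination_user_identifier='user-b',
        developer_provider_name='login.example.myprovider',
        identity_pool_id='us-east-1:00000000-0000-0000-0000-000000000000')
    print(merged.get('IdentityId'))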
# ---- end of boto/cognito/identity/layer1.py ----
from boto.compat import json
from boto.exception import JSONResponseError
from boto.connection import AWSAuthConnection
from boto.regioninfo import RegionInfo
from boto.cognito.sync import exceptions
class CognitoSyncConnection(AWSAuthConnection):
"""
Amazon Cognito Sync
Amazon Cognito Sync provides an AWS service and client library
that enable cross-device syncing of application-related user data.
High-level client libraries are available for both iOS and
Android. You can use these libraries to persist data locally so
that it's available even if the device is offline. Developer
credentials don't need to be stored on the mobile device to access
the service. You can use Amazon Cognito to obtain a normalized
user ID and credentials. User data is persisted in a dataset that
can store up to 1 MB of key-value pairs, and you can have up to 20
datasets per user identity.
With Amazon Cognito Sync, the data stored for each identity is
accessible only to credentials assigned to that identity. In order
to use the Cognito Sync service, you need to make API calls using
credentials retrieved with `Amazon Cognito Identity service`_.
"""
APIVersion = "2014-06-30"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "cognito-sync.us-east-1.amazonaws.com"
ResponseError = JSONResponseError
_faults = {
"LimitExceededException": exceptions.LimitExceededException,
"ResourceConflictException": exceptions.ResourceConflictException,
"InvalidConfigurationException": exceptions.InvalidConfigurationException,
"TooManyRequestsException": exceptions.TooManyRequestsException,
"InvalidParameterException": exceptions.InvalidParameterException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"InternalErrorException": exceptions.InternalErrorException,
"NotAuthorizedException": exceptions.NotAuthorizedException,
}
def __init__(self, **kwargs):
region = kwargs.get('region')
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
else:
del kwargs['region']
kwargs['host'] = region.endpoint
super(CognitoSyncConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def delete_dataset(self, identity_pool_id, identity_id, dataset_name):
"""
Deletes the specific dataset. The dataset will be deleted
permanently, and the action can't be undone. Datasets that
this dataset was merged with will no longer report the merge.
Any consequent operation on this dataset will result in a
ResourceNotFoundException.
:type identity_pool_id: string
:param identity_pool_id: A name-spaced GUID (for example, us-
east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
Cognito. GUID generation is unique within a region.
:type identity_id: string
:param identity_id: A name-spaced GUID (for example, us-
east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
Cognito. GUID generation is unique within a region.
:type dataset_name: string
:param dataset_name: A string of up to 128 characters. Allowed
characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.'
(dot).
"""
uri = '/identitypools/{0}/identities/{1}/datasets/{2}'.format(
identity_pool_id, identity_id, dataset_name)
return self.make_request('DELETE', uri, expected_status=200)
def describe_dataset(self, identity_pool_id, identity_id, dataset_name):
"""
Gets metadata about a dataset by identity and dataset name.
The credentials used to make this API call need to have access
to the identity data. With Amazon Cognito Sync, each identity
has access only to its own data. You should use Amazon Cognito
Identity service to retrieve the credentials necessary to make
this API call.
:type identity_pool_id: string
:param identity_pool_id: A name-spaced GUID (for example, us-
east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
Cognito. GUID generation is unique within a region.
:type identity_id: string
:param identity_id: A name-spaced GUID (for example, us-
east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
Cognito. GUID generation is unique within a region.
:type dataset_name: string
:param dataset_name: A string of up to 128 characters. Allowed
characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.'
(dot).
"""
uri = '/identitypools/{0}/identities/{1}/datasets/{2}'.format(
identity_pool_id, identity_id, dataset_name)
return self.make_request('GET', uri, expected_status=200)
def describe_identity_pool_usage(self, identity_pool_id):
"""
Gets usage details (for example, data storage) about a
particular identity pool.
:type identity_pool_id: string
:param identity_pool_id: A name-spaced GUID (for example, us-
east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
Cognito. GUID generation is unique within a region.
"""
uri = '/identitypools/{0}'.format(identity_pool_id)
return self.make_request('GET', uri, expected_status=200)
def describe_identity_usage(self, identity_pool_id, identity_id):
"""
Gets usage information for an identity, including number of
datasets and data usage.
:type identity_pool_id: string
:param identity_pool_id: A name-spaced GUID (for example, us-
east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
Cognito. GUID generation is unique within a region.
:type identity_id: string
:param identity_id: A name-spaced GUID (for example, us-
east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
Cognito. GUID generation is unique within a region.
"""
uri = '/identitypools/{0}/identities/{1}'.format(
identity_pool_id, identity_id)
return self.make_request('GET', uri, expected_status=200)
def get_identity_pool_configuration(self, identity_pool_id):
"""
Gets the configuration settings of an identity pool.
:type identity_pool_id: string
:param identity_pool_id: A name-spaced GUID (for example, us-
east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
Cognito. This is the ID of the pool for which to return a
configuration.
"""
uri = '/identitypools/{0}/configuration'.format(identity_pool_id)
return self.make_request('GET', uri, expected_status=200)
def list_datasets(self, identity_pool_id, identity_id, next_token=None,
max_results=None):
"""
Lists datasets for an identity. The credentials used to make
this API call need to have access to the identity data. With
Amazon Cognito Sync, each identity has access only to its own
data. You should use Amazon Cognito Identity service to
retrieve the credentials necessary to make this API call.
:type identity_pool_id: string
:param identity_pool_id: A name-spaced GUID (for example, us-
east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
Cognito. GUID generation is unique within a region.
:type identity_id: string
:param identity_id: A name-spaced GUID (for example, us-
east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
Cognito. GUID generation is unique within a region.
:type next_token: string
:param next_token: A pagination token for obtaining the next page of
results.
:type max_results: integer
:param max_results: The maximum number of results to be returned.
"""
uri = '/identitypools/{0}/identities/{1}/datasets'.format(
identity_pool_id, identity_id)
params = {}
headers = {}
query_params = {}
if next_token is not None:
query_params['nextToken'] = next_token
if max_results is not None:
query_params['maxResults'] = max_results
return self.make_request('GET', uri, expected_status=200,
data=json.dumps(params), headers=headers,
params=query_params)
def list_identity_pool_usage(self, next_token=None, max_results=None):
"""
Gets a list of identity pools registered with Cognito.
:type next_token: string
:param next_token: A pagination token for obtaining the next page of
results.
:type max_results: integer
:param max_results: The maximum number of results to be returned.
"""
uri = '/identitypools'
params = {}
headers = {}
query_params = {}
if next_token is not None:
query_params['nextToken'] = next_token
if max_results is not None:
query_params['maxResults'] = max_results
return self.make_request('GET', uri, expected_status=200,
data=json.dumps(params), headers=headers,
params=query_params)
def list_records(self, identity_pool_id, identity_id, dataset_name,
last_sync_count=None, next_token=None, max_results=None,
sync_session_token=None):
"""
Gets paginated records, optionally changed after a particular
sync count for a dataset and identity. The credentials used to
make this API call need to have access to the identity data.
With Amazon Cognito Sync, each identity has access only to its
own data. You should use Amazon Cognito Identity service to
retrieve the credentials necessary to make this API call.
:type identity_pool_id: string
:param identity_pool_id: A name-spaced GUID (for example, us-
east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
Cognito. GUID generation is unique within a region.
:type identity_id: string
:param identity_id: A name-spaced GUID (for example, us-
east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
Cognito. GUID generation is unique within a region.
:type dataset_name: string
:param dataset_name: A string of up to 128 characters. Allowed
characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.'
(dot).
:type last_sync_count: long
:param last_sync_count: The last server sync count for this record.
:type next_token: string
:param next_token: A pagination token for obtaining the next page of
results.
:type max_results: integer
:param max_results: The maximum number of results to be returned.
:type sync_session_token: string
:param sync_session_token: A token containing a session ID, identity
ID, and expiration.
"""
uri = '/identitypools/{0}/identities/{1}/datasets/{2}/records'.format(
identity_pool_id, identity_id, dataset_name)
params = {}
headers = {}
query_params = {}
if last_sync_count is not None:
query_params['lastSyncCount'] = last_sync_count
if next_token is not None:
query_params['nextToken'] = next_token
if max_results is not None:
query_params['maxResults'] = max_results
if sync_session_token is not None:
query_params['syncSessionToken'] = sync_session_token
return self.make_request('GET', uri, expected_status=200,
data=json.dumps(params), headers=headers,
params=query_params)
def register_device(self, identity_pool_id, identity_id, platform, token):
"""
Registers a device to receive push sync notifications.
:type identity_pool_id: string
:param identity_pool_id: A name-spaced GUID (for example, us-
east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
Cognito. Here, the ID of the pool that the identity belongs to.
:type identity_id: string
:param identity_id: The unique ID for this identity.
:type platform: string
        :param platform: The SNS platform type (e.g. GCM, ADM, APNS,
            APNS_SANDBOX).
:type token: string
:param token: The push token.
"""
uri = '/identitypools/{0}/identity/{1}/device'.format(
identity_pool_id, identity_id)
params = {'Platform': platform, 'Token': token, }
headers = {}
query_params = {}
return self.make_request('POST', uri, expected_status=200,
data=json.dumps(params), headers=headers,
params=query_params)
def set_identity_pool_configuration(self, identity_pool_id,
push_sync=None):
"""
Sets the necessary configuration for push sync.
:type identity_pool_id: string
:param identity_pool_id: A name-spaced GUID (for example, us-
east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
Cognito. This is the ID of the pool to modify.
:type push_sync: dict
:param push_sync: Configuration options to be applied to the identity
pool.
"""
uri = '/identitypools/{0}/configuration'.format(identity_pool_id)
params = {}
headers = {}
query_params = {}
if push_sync is not None:
params['PushSync'] = push_sync
return self.make_request('POST', uri, expected_status=200,
data=json.dumps(params), headers=headers,
params=query_params)
def subscribe_to_dataset(self, identity_pool_id, identity_id,
dataset_name, device_id):
"""
Subscribes to receive notifications when a dataset is modified
by another device.
:type identity_pool_id: string
:param identity_pool_id: A name-spaced GUID (for example, us-
east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
Cognito. The ID of the pool to which the identity belongs.
:type identity_id: string
:param identity_id: Unique ID for this identity.
:type dataset_name: string
        :param dataset_name: The name of the dataset to subscribe to.
:type device_id: string
:param device_id: The unique ID generated for this device by Cognito.
"""
uri = '/identitypools/{0}/identities/{1}/datasets/{2}/subscriptions/{3}'.format(
identity_pool_id, identity_id, dataset_name, device_id)
return self.make_request('POST', uri, expected_status=200)
def unsubscribe_from_dataset(self, identity_pool_id, identity_id,
dataset_name, device_id):
"""
        Unsubscribes from receiving notifications when a dataset is
        modified by another device.
:type identity_pool_id: string
:param identity_pool_id: A name-spaced GUID (for example, us-
east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
Cognito. The ID of the pool to which this identity belongs.
:type identity_id: string
:param identity_id: Unique ID for this identity.
:type dataset_name: string
        :param dataset_name: The name of the dataset from which to unsubscribe.
:type device_id: string
:param device_id: The unique ID generated for this device by Cognito.
"""
uri = '/identitypools/{0}/identities/{1}/datasets/{2}/subscriptions/{3}'.format(
identity_pool_id, identity_id, dataset_name, device_id)
return self.make_request('DELETE', uri, expected_status=200)
def update_records(self, identity_pool_id, identity_id, dataset_name,
sync_session_token, device_id=None,
record_patches=None, client_context=None):
"""
        Posts updates to records, and adds and deletes records, for a
        dataset and user. The credentials used to make this API call
need to have access to the identity data. With Amazon Cognito
Sync, each identity has access only to its own data. You
should use Amazon Cognito Identity service to retrieve the
credentials necessary to make this API call.
:type identity_pool_id: string
:param identity_pool_id: A name-spaced GUID (for example, us-
east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
Cognito. GUID generation is unique within a region.
:type identity_id: string
:param identity_id: A name-spaced GUID (for example, us-
east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
Cognito. GUID generation is unique within a region.
:type dataset_name: string
:param dataset_name: A string of up to 128 characters. Allowed
characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.'
(dot).
:type device_id: string
:param device_id: The unique ID generated for this device by Cognito.
:type record_patches: list
:param record_patches: A list of patch operations.
:type sync_session_token: string
:param sync_session_token: The SyncSessionToken returned by a previous
call to ListRecords for this dataset and identity.
:type client_context: string
:param client_context: Intended to supply a device ID that will
populate the `lastModifiedBy` field referenced in other methods.
The `ClientContext` field is not yet implemented.
"""
uri = '/identitypools/{0}/identities/{1}/datasets/{2}'.format(
identity_pool_id, identity_id, dataset_name)
params = {'SyncSessionToken': sync_session_token, }
headers = {}
query_params = {}
if device_id is not None:
params['DeviceId'] = device_id
if record_patches is not None:
params['RecordPatches'] = record_patches
if client_context is not None:
headers['x-amz-Client-Context'] = client_context
return self.make_request('POST', uri, expected_status=200,
data=json.dumps(params), headers=headers,
params=query_params)
def make_request(self, verb, resource, headers=None, data='',
expected_status=None, params=None):
if headers is None:
headers = {}
response = AWSAuthConnection.make_request(
self, verb, resource, headers=headers, data=data, params=params)
body = json.loads(response.read().decode('utf-8'))
if response.status == expected_status:
return body
else:
error_type = response.getheader('x-amzn-ErrorType').split(':')[0]
error_class = self._faults.get(error_type, self.ResponseError)
raise error_class(response.status, response.reason, body)
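# Usage sketch (editor's addition, not part of boto): a hedged example of the
# read/modify/write cycle above. ListRecords supplies the SyncSessionToken
# and DatasetSyncCount that UpdateRecords requires; pool, identity, and
# dataset names are hypothetical and credentials are assumed configured.
if __name__ == '__main__':
    import boto.cognito.sync
    conn = boto.cognito.sync.connect_to_region('us-east-1')
    pool = 'us-east-1:00000000-0000-0000-0000-000000000000'
    ident = 'us-east-1:11111111-1111-1111-1111-111111111111'
    listed = conn.list_records(pool, ident, 'profile')
    conn.update_records(
        pool, ident, 'profile', listed['SyncSessionToken'],
        record_patches=[{'Op': 'replace', 'Key': 'theme', 'Value': 'dark',
                         'SyncCount': listed['DatasetSyncCount']}])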
# ---- end of boto/cognito/sync/layer1.py ----
import xml.sax
import threading
import boto
from boto import handler
from boto.connection import AWSQueryConnection
from boto.sdb.domain import Domain, DomainMetaData
from boto.sdb.item import Item
from boto.sdb.regioninfo import SDBRegionInfo
from boto.exception import SDBResponseError
class ItemThread(threading.Thread):
"""
A threaded :class:`Item <boto.sdb.item.Item>` retriever utility class.
Retrieved :class:`Item <boto.sdb.item.Item>` objects are stored in the
``items`` instance variable after :py:meth:`run() <run>` is called.
.. tip:: The item retrieval will not start until
the :func:`run() <boto.sdb.connection.ItemThread.run>` method is called.
"""
def __init__(self, name, domain_name, item_names):
"""
:param str name: A thread name. Used for identification.
:param str domain_name: The name of a SimpleDB
:class:`Domain <boto.sdb.domain.Domain>`
:type item_names: string or list of strings
:param item_names: The name(s) of the items to retrieve from the specified
:class:`Domain <boto.sdb.domain.Domain>`.
:ivar list items: A list of items retrieved. Starts as empty list.
"""
super(ItemThread, self).__init__(name=name)
#print 'starting %s with %d items' % (name, len(item_names))
self.domain_name = domain_name
self.conn = SDBConnection()
self.item_names = item_names
self.items = []
def run(self):
"""
Start the threaded retrieval of items. Populates the
``items`` list with :class:`Item <boto.sdb.item.Item>` objects.
"""
for item_name in self.item_names:
item = self.conn.get_attributes(self.domain_name, item_name)
self.items.append(item)
#boto.set_stream_logger('sdb')
class SDBConnection(AWSQueryConnection):
"""
This class serves as a gateway to your SimpleDB region (defaults to
us-east-1). Methods within allow access to SimpleDB
:class:`Domain <boto.sdb.domain.Domain>` objects and their associated
:class:`Item <boto.sdb.item.Item>` objects.
.. tip::
While you may instantiate this class directly, it may be easier to
go through :py:func:`boto.connect_sdb`.
"""
DefaultRegionName = 'us-east-1'
DefaultRegionEndpoint = 'sdb.us-east-1.amazonaws.com'
APIVersion = '2009-04-15'
ResponseError = SDBResponseError
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
converter=None, security_token=None, validate_certs=True,
profile_name=None):
"""
For any keywords that aren't documented, refer to the parent class,
:py:class:`boto.connection.AWSAuthConnection`. You can avoid having
to worry about these keyword arguments by instantiating these objects
via :py:func:`boto.connect_sdb`.
:type region: :class:`boto.sdb.regioninfo.SDBRegionInfo`
:keyword region: Explicitly specify a region. Defaults to ``us-east-1``
if not specified. You may also specify the region in your ``boto.cfg``:
.. code-block:: cfg
[SDB]
region = eu-west-1
"""
if not region:
region_name = boto.config.get('SDB', 'region', self.DefaultRegionName)
for reg in boto.sdb.regions():
if reg.name == region_name:
region = reg
break
self.region = region
super(SDBConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy,
proxy_port, proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
security_token=security_token,
validate_certs=validate_certs,
profile_name=profile_name)
self.box_usage = 0.0
self.converter = converter
self.item_cls = Item
def _required_auth_capability(self):
return ['sdb']
def set_item_cls(self, cls):
"""
While the default item class is :py:class:`boto.sdb.item.Item`, this
default may be overridden. Use this method to change a connection's
item class.
:param object cls: The new class to set as this connection's item
class. See the default item class for inspiration as to what your
replacement should/could look like.
"""
self.item_cls = cls
def _build_name_value_list(self, params, attributes, replace=False,
label='Attribute'):
keys = sorted(attributes.keys())
i = 1
for key in keys:
value = attributes[key]
if isinstance(value, list):
for v in value:
params['%s.%d.Name' % (label, i)] = key
if self.converter:
v = self.converter.encode(v)
params['%s.%d.Value' % (label, i)] = v
if replace:
params['%s.%d.Replace' % (label, i)] = 'true'
i += 1
else:
params['%s.%d.Name' % (label, i)] = key
if self.converter:
value = self.converter.encode(value)
params['%s.%d.Value' % (label, i)] = value
if replace:
params['%s.%d.Replace' % (label, i)] = 'true'
i += 1
def _build_expected_value(self, params, expected_value):
params['Expected.1.Name'] = expected_value[0]
if expected_value[1] is True:
params['Expected.1.Exists'] = 'true'
elif expected_value[1] is False:
params['Expected.1.Exists'] = 'false'
else:
params['Expected.1.Value'] = expected_value[1]
def _build_batch_list(self, params, items, replace=False):
item_names = items.keys()
i = 0
for item_name in item_names:
params['Item.%d.ItemName' % i] = item_name
j = 0
item = items[item_name]
if item is not None:
attr_names = item.keys()
for attr_name in attr_names:
value = item[attr_name]
if isinstance(value, list):
for v in value:
if self.converter:
v = self.converter.encode(v)
params['Item.%d.Attribute.%d.Name' % (i, j)] = attr_name
params['Item.%d.Attribute.%d.Value' % (i, j)] = v
if replace:
params['Item.%d.Attribute.%d.Replace' % (i, j)] = 'true'
j += 1
else:
params['Item.%d.Attribute.%d.Name' % (i, j)] = attr_name
if self.converter:
value = self.converter.encode(value)
params['Item.%d.Attribute.%d.Value' % (i, j)] = value
if replace:
params['Item.%d.Attribute.%d.Replace' % (i, j)] = 'true'
j += 1
i += 1
def _build_name_list(self, params, attribute_names):
i = 1
attribute_names.sort()
for name in attribute_names:
params['Attribute.%d.Name' % i] = name
i += 1
def get_usage(self):
"""
Returns the BoxUsage (in USD) accumulated on this specific SDBConnection
instance.
.. tip:: This can be out of date, and should only be treated as a
rough estimate. Also note that this estimate only applies to the
requests made on this specific connection instance. It is by
no means an account-wide estimate.
:rtype: float
:return: The accumulated BoxUsage of all requests made on the connection.
"""
return self.box_usage
def print_usage(self):
"""
Print the BoxUsage and approximate costs of all requests made on
this specific SDBConnection instance.
.. tip:: This can be out of date, and should only be treated as a
rough estimate. Also note that this estimate only applies to the
requests made on this specific connection instance. It is by
no means an account-wide estimate.
"""
print('Total Usage: %f compute seconds' % self.box_usage)
cost = self.box_usage * 0.14
print('Approximate Cost: $%f' % cost)
def get_domain(self, domain_name, validate=True):
"""
Retrieves a :py:class:`boto.sdb.domain.Domain` object whose name
matches ``domain_name``.
:param str domain_name: The name of the domain to retrieve
:keyword bool validate: When ``True``, check to see if the domain
actually exists. If ``False``, blindly return a
:py:class:`Domain <boto.sdb.domain.Domain>` object with the
specified name set.
:raises:
:py:class:`boto.exception.SDBResponseError` if ``validate`` is
``True`` and no match could be found.
:rtype: :py:class:`boto.sdb.domain.Domain`
:return: The requested domain
"""
domain = Domain(self, domain_name)
if validate:
self.select(domain, """select * from `%s` limit 1""" % domain_name)
return domain
def lookup(self, domain_name, validate=True):
"""
Lookup an existing SimpleDB domain. This differs from
:py:meth:`get_domain` in that ``None`` is returned if ``validate`` is
``True`` and no match was found (instead of raising an exception).
:param str domain_name: The name of the domain to retrieve
:param bool validate: If ``True``, a ``None`` value will be returned
if the specified domain can't be found. If ``False``, a
:py:class:`Domain <boto.sdb.domain.Domain>` object will be dumbly
returned, regardless of whether it actually exists.
:rtype: :class:`boto.sdb.domain.Domain` object or ``None``
:return: The Domain object or ``None`` if the domain does not exist.
"""
try:
domain = self.get_domain(domain_name, validate)
except:
domain = None
return domain
def get_all_domains(self, max_domains=None, next_token=None):
"""
Returns a :py:class:`boto.resultset.ResultSet` containing
all :py:class:`boto.sdb.domain.Domain` objects associated with
this connection's Access Key ID.
:keyword int max_domains: Limit the returned
:py:class:`ResultSet <boto.resultset.ResultSet>` to the specified
number of members.
:keyword str next_token: A token string that was returned in an
earlier call to this method as the ``next_token`` attribute
on the returned :py:class:`ResultSet <boto.resultset.ResultSet>`
            object. This attribute is set if there are more domains than
            the value specified in the ``max_domains`` keyword. Pass the
            ``next_token`` value from your earlier query in this keyword to
get the next 'page' of domains.
"""
params = {}
if max_domains:
params['MaxNumberOfDomains'] = max_domains
if next_token:
params['NextToken'] = next_token
return self.get_list('ListDomains', params, [('DomainName', Domain)])
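    # Usage sketch (editor's addition, hedged): paging through every domain
    # via the ``next_token`` attribute on the returned ResultSet, with
    # ``conn`` an SDBConnection instance:
    #
    #   rs = conn.get_all_domains(max_domains=100)
    #   while rs is not None:
    #       for domain in rs:
    #           print(domain.name)
    #       token = getattr(rs, 'next_token', None)
    #       rs = conn.get_all_domains(100, token) if token else None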
def create_domain(self, domain_name):
"""
Create a SimpleDB domain.
:type domain_name: string
:param domain_name: The name of the new domain
:rtype: :class:`boto.sdb.domain.Domain` object
:return: The newly created domain
"""
params = {'DomainName': domain_name}
d = self.get_object('CreateDomain', params, Domain)
d.name = domain_name
return d
def get_domain_and_name(self, domain_or_name):
"""
Given a ``str`` or :class:`boto.sdb.domain.Domain`, return a
``tuple`` with the following members (in order):
        * An instance of :class:`boto.sdb.domain.Domain` for the requested
domain
* The domain's name as a ``str``
:type domain_or_name: ``str`` or :class:`boto.sdb.domain.Domain`
:param domain_or_name: The domain or domain name to get the domain
and name for.
:raises: :class:`boto.exception.SDBResponseError` when an invalid
domain name is specified.
:rtype: tuple
:return: A ``tuple`` with contents outlined as per above.
"""
if (isinstance(domain_or_name, Domain)):
return (domain_or_name, domain_or_name.name)
else:
return (self.get_domain(domain_or_name), domain_or_name)
def delete_domain(self, domain_or_name):
"""
Delete a SimpleDB domain.
.. caution:: This will delete the domain and all items within the domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:rtype: bool
:return: True if successful
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName': domain_name}
return self.get_status('DeleteDomain', params)
def domain_metadata(self, domain_or_name):
"""
Get the Metadata for a SimpleDB domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:rtype: :class:`boto.sdb.domain.DomainMetaData` object
:return: The newly created domain metadata object
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName': domain_name}
d = self.get_object('DomainMetadata', params, DomainMetaData)
d.domain = domain
return d
def put_attributes(self, domain_or_name, item_name, attributes,
replace=True, expected_value=None):
"""
Store attributes for a given item in a domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:type item_name: string
:param item_name: The name of the item whose attributes are being
stored.
        :type attributes: dict or dict-like object
        :param attributes: The name/value pairs to store as attributes
:type expected_value: list
:param expected_value: If supplied, this is a list or tuple consisting
of a single attribute name and expected value. The list can be
of the form:
* ['name', 'value']
In which case the call will first verify that the attribute "name"
of this item has a value of "value". If it does, the delete
will proceed, otherwise a ConditionalCheckFailed error will be
returned. The list can also be of the form:
* ['name', True|False]
which will simply check for the existence (True) or
non-existence (False) of the attribute.
:type replace: bool
:param replace: Whether the attribute values passed in will replace
            existing values or will be added as additional values.
Defaults to True.
:rtype: bool
:return: True if successful
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName': domain_name,
'ItemName': item_name}
self._build_name_value_list(params, attributes, replace)
if expected_value:
self._build_expected_value(params, expected_value)
return self.get_status('PutAttributes', params)
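    # Usage sketch (editor's addition, hedged): a conditional write using
    # expected_value as described above -- update "state" only if the item's
    # "version" attribute currently equals "3":
    #
    #   conn.put_attributes(domain, 'item1', {'state': 'active'},
    #                       expected_value=['version', '3'])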
def batch_put_attributes(self, domain_or_name, items, replace=True):
"""
Store attributes for multiple items in a domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:type items: dict or dict-like object
:param items: A dictionary-like object. The keys of the dictionary are
the item names and the values are themselves dictionaries
of attribute names/values, exactly the same as the
attribute_names parameter of the scalar put_attributes
call.
:type replace: bool
:param replace: Whether the attribute values passed in will replace
            existing values or will be added as additional values.
Defaults to True.
:rtype: bool
:return: True if successful
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName': domain_name}
self._build_batch_list(params, items, replace)
return self.get_status('BatchPutAttributes', params, verb='POST')
def get_attributes(self, domain_or_name, item_name, attribute_names=None,
consistent_read=False, item=None):
"""
Retrieve attributes for a given item in a domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:type item_name: string
:param item_name: The name of the item whose attributes are
being retrieved.
:type attribute_names: string or list of strings
:param attribute_names: An attribute name or list of attribute names.
This parameter is optional. If not supplied, all attributes will
be retrieved for the item.
:type consistent_read: bool
:param consistent_read: When set to true, ensures that the most recent
data is returned.
:type item: :class:`boto.sdb.item.Item`
:keyword item: Instead of instantiating a new Item object, you may
specify one to update.
:rtype: :class:`boto.sdb.item.Item`
:return: An Item with the requested attribute name/values set on it
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName': domain_name,
'ItemName': item_name}
if consistent_read:
params['ConsistentRead'] = 'true'
if attribute_names:
if not isinstance(attribute_names, list):
attribute_names = [attribute_names]
self.build_list_params(params, attribute_names, 'AttributeName')
response = self.make_request('GetAttributes', params)
body = response.read()
if response.status == 200:
if item is None:
item = self.item_cls(domain, item_name)
h = handler.XmlHandler(item, self)
xml.sax.parseString(body, h)
return item
else:
raise SDBResponseError(response.status, response.reason, body)
def delete_attributes(self, domain_or_name, item_name, attr_names=None,
expected_value=None):
"""
Delete attributes from a given item in a domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:type item_name: string
:param item_name: The name of the item whose attributes are being
deleted.
        :type attr_names: dict, list or :class:`boto.sdb.item.Item`
        :param attr_names: Either a list containing attribute names which
will cause all values associated with that attribute
name to be deleted or a dict or Item containing the
attribute names and keys and list of values to
delete as the value. If no value is supplied,
all attribute name/values for the item will be
deleted.
:type expected_value: list
:param expected_value: If supplied, this is a list or tuple consisting
of a single attribute name and expected value. The list can be
of the form:
* ['name', 'value']
In which case the call will first verify that the attribute "name"
of this item has a value of "value". If it does, the delete
will proceed, otherwise a ConditionalCheckFailed error will be
returned. The list can also be of the form:
* ['name', True|False]
which will simply check for the existence (True) or
non-existence (False) of the attribute.
:rtype: bool
:return: True if successful
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName': domain_name,
'ItemName': item_name}
if attr_names:
if isinstance(attr_names, list):
self._build_name_list(params, attr_names)
elif isinstance(attr_names, dict) or isinstance(attr_names, self.item_cls):
self._build_name_value_list(params, attr_names)
if expected_value:
self._build_expected_value(params, expected_value)
return self.get_status('DeleteAttributes', params)
def batch_delete_attributes(self, domain_or_name, items):
"""
Delete multiple items in a domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:type items: dict or dict-like object
:param items: A dictionary-like object. The keys of the dictionary are
the item names and the values are either:
* dictionaries of attribute names/values, exactly the
same as the attribute_names parameter of the scalar
put_attributes call. The attribute name/value pairs
will only be deleted if they match the name/value
pairs passed in.
* None which means that all attributes associated
with the item should be deleted.
:return: True if successful
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName': domain_name}
self._build_batch_list(params, items, False)
return self.get_status('BatchDeleteAttributes', params, verb='POST')
def select(self, domain_or_name, query='', next_token=None,
consistent_read=False):
"""
Returns a set of Attributes for item names within domain_name that
match the query. The query must be expressed in using the SELECT
style syntax rather than the original SimpleDB query language.
Even though the select request does not require a domain object,
a domain object must be passed into this method so the Item objects
returned can point to the appropriate domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object
:param domain_or_name: Either the name of a domain or a Domain object
:type query: string
:param query: The SimpleDB query to be performed.
:type consistent_read: bool
:param consistent_read: When set to true, ensures that the most recent
data is returned.
:rtype: ResultSet
:return: An iterator containing the results.
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'SelectExpression': query}
if consistent_read:
params['ConsistentRead'] = 'true'
if next_token:
params['NextToken'] = next_token
try:
return self.get_list('Select', params, [('Item', self.item_cls)],
parent=domain)
except SDBResponseError as e:
e.body = "Query: %s\n%s" % (query, e.body)
raise e
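# Usage sketch (editor's addition, not part of boto): a minimal, hedged
# end-to-end run of the connection above. It assumes AWS credentials are
# configured; the domain name is hypothetical.
if __name__ == '__main__':
    conn = SDBConnection()
    dom = conn.create_domain('editor_demo')
    conn.put_attributes(dom, 'item1', {'color': 'red', 'size': ['S', 'M']})
    for item in conn.select(dom, "select * from `editor_demo`"):
        print(item.name, dict(item))
    conn.delete_domain(dom)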
# ---- end of boto/sdb/connection.py ----
import base64
class Item(dict):
"""
A ``dict`` sub-class that serves as an object representation of a
SimpleDB item. An item in SDB is similar to a row in a relational
database. Items belong to a :py:class:`Domain <boto.sdb.domain.Domain>`,
which is similar to a table in a relational database.
The keys on instances of this object correspond to attributes that are
stored on the SDB item.
.. tip:: While it is possible to instantiate this class directly, you may
want to use the convenience methods on :py:class:`boto.sdb.domain.Domain`
for that purpose. For example, :py:meth:`boto.sdb.domain.Domain.get_item`.
"""
def __init__(self, domain, name='', active=False):
"""
:type domain: :py:class:`boto.sdb.domain.Domain`
:param domain: The domain that this item belongs to.
:param str name: The name of this item. This name will be used when
querying for items using methods like
:py:meth:`boto.sdb.domain.Domain.get_item`
"""
dict.__init__(self)
self.domain = domain
self.name = name
self.active = active
self.request_id = None
self.encoding = None
self.in_attribute = False
self.converter = self.domain.connection.converter
def startElement(self, name, attrs, connection):
if name == 'Attribute':
self.in_attribute = True
self.encoding = attrs.get('encoding', None)
return None
def decode_value(self, value):
if self.encoding == 'base64':
self.encoding = None
            # base64.decodestring was removed in Python 3.9; prefer the
            # Python 3 name when it exists.
            decode = getattr(base64, 'decodebytes', None) or base64.decodestring
            return decode(value)
else:
return value
def endElement(self, name, value, connection):
if name == 'ItemName':
self.name = self.decode_value(value)
elif name == 'Name':
if self.in_attribute:
self.last_key = self.decode_value(value)
else:
self.name = self.decode_value(value)
elif name == 'Value':
if self.last_key in self:
if not isinstance(self[self.last_key], list):
self[self.last_key] = [self[self.last_key]]
value = self.decode_value(value)
if self.converter:
value = self.converter.decode(value)
self[self.last_key].append(value)
else:
value = self.decode_value(value)
if self.converter:
value = self.converter.decode(value)
self[self.last_key] = value
elif name == 'BoxUsage':
try:
connection.box_usage += float(value)
except:
pass
elif name == 'RequestId':
self.request_id = value
elif name == 'Attribute':
self.in_attribute = False
else:
setattr(self, name, value)
def load(self):
"""
Loads or re-loads this item's attributes from SDB.
.. warning::
If you have changed attribute values on an Item instance,
this method will over-write the values if they are different in
SDB. For any local attributes that don't yet exist in SDB,
they will be safe.
"""
self.domain.get_attributes(self.name, item=self)
def save(self, replace=True):
"""
Saves this item to SDB.
:param bool replace: If ``True``, delete any attributes on the remote
SDB item that have a ``None`` value on this object.
"""
self.domain.put_attributes(self.name, self, replace)
# Delete any attributes set to "None"
if replace:
del_attrs = []
for name in self:
if self[name] is None:
del_attrs.append(name)
if len(del_attrs) > 0:
self.domain.delete_attributes(self.name, del_attrs)
def add_value(self, key, value):
"""
Helps set or add to attributes on this item. If you are adding a new
attribute that has yet to be set, it will simply create an attribute
named ``key`` with your given ``value`` as its value. If you are
adding a value to an existing attribute, this method will convert the
attribute to a list (if it isn't already) and append your new value
to said list.
For clarification, consider the following interactive session:
.. code-block:: python
>>> item = some_domain.get_item('some_item')
>>> item.has_key('some_attr')
False
>>> item.add_value('some_attr', 1)
>>> item['some_attr']
1
>>> item.add_value('some_attr', 2)
>>> item['some_attr']
[1, 2]
:param str key: The attribute to add a value to.
:param object value: The value to set or append to the attribute.
"""
if key in self:
# We already have this key on the item.
if not isinstance(self[key], list):
# The key isn't already a list, take its current value and
# convert it to a list with the only member being the
# current value.
self[key] = [self[key]]
# Add the new value to the list.
self[key].append(value)
else:
# This is a new attribute, just set it.
self[key] = value
def delete(self):
"""
Deletes this item in SDB.
.. note:: This local Python object remains in its current state
after deletion, this only deletes the remote item in SDB.
"""
self.domain.delete_item(self)
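# Usage sketch (editor's addition, not part of boto): hedged illustration of
# the add_value/save semantics above. Assumes a configured SimpleDB account
# and an existing domain; names are hypothetical.
if __name__ == '__main__':
    import boto
    sdb = boto.connect_sdb()
    domain = sdb.get_domain('editor_demo')
    item = domain.get_item('item1') or domain.new_item('item1')
    item.add_value('tags', 'sale')   # becomes a list on repeated adds
    item['status'] = None            # deleted on save() when replace=True
    item.save()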
# ---- end of boto/sdb/item.py ----
from __future__ import print_function
"""
Represents an SDB Domain
"""
from boto.sdb.queryresultset import SelectResultSet
from boto.compat import six
class Domain(object):
def __init__(self, connection=None, name=None):
self.connection = connection
self.name = name
self._metadata = None
def __repr__(self):
return 'Domain:%s' % self.name
def __iter__(self):
return iter(self.select("SELECT * FROM `%s`" % self.name))
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'DomainName':
self.name = value
else:
setattr(self, name, value)
def get_metadata(self):
if not self._metadata:
self._metadata = self.connection.domain_metadata(self)
return self._metadata
def put_attributes(self, item_name, attributes,
replace=True, expected_value=None):
"""
Store attributes for a given item.
:type item_name: string
:param item_name: The name of the item whose attributes are being stored.
        :type attributes: dict or dict-like object
        :param attributes: The name/value pairs to store as attributes
:type expected_value: list
:param expected_value: If supplied, this is a list or tuple consisting
of a single attribute name and expected value. The list can be
of the form:
* ['name', 'value']
In which case the call will first verify that the attribute
"name" of this item has a value of "value". If it does, the delete
will proceed, otherwise a ConditionalCheckFailed error will be
returned. The list can also be of the form:
* ['name', True|False]
which will simply check for the existence (True) or non-existence
(False) of the attribute.
:type replace: bool
:param replace: Whether the attribute values passed in will replace
            existing values or will be added as additional values.
Defaults to True.
:rtype: bool
:return: True if successful
"""
return self.connection.put_attributes(self, item_name, attributes,
replace, expected_value)
def batch_put_attributes(self, items, replace=True):
"""
Store attributes for multiple items.
:type items: dict or dict-like object
:param items: A dictionary-like object. The keys of the dictionary are
the item names and the values are themselves dictionaries
of attribute names/values, exactly the same as the
attribute_names parameter of the scalar put_attributes
call.
:type replace: bool
:param replace: Whether the attribute values passed in will replace
            existing values or will be added as additional values.
Defaults to True.
:rtype: bool
:return: True if successful
"""
return self.connection.batch_put_attributes(self, items, replace)
def get_attributes(self, item_name, attribute_name=None,
consistent_read=False, item=None):
"""
Retrieve attributes for a given item.
:type item_name: string
:param item_name: The name of the item whose attributes are being retrieved.
        :type attribute_name: string or list of strings
        :param attribute_name: An attribute name or list of attribute names. This
parameter is optional. If not supplied, all attributes
will be retrieved for the item.
:rtype: :class:`boto.sdb.item.Item`
:return: An Item mapping type containing the requested attribute name/values
"""
return self.connection.get_attributes(self, item_name, attribute_name,
consistent_read, item)
def delete_attributes(self, item_name, attributes=None,
expected_values=None):
"""
Delete attributes from a given item.
:type item_name: string
:param item_name: The name of the item whose attributes are being deleted.
:type attributes: dict, list or :class:`boto.sdb.item.Item`
:param attributes: Either a list containing attribute names which will cause
all values associated with that attribute name to be deleted or
a dict or Item containing the attribute names and keys and list
of values to delete as the value. If no value is supplied,
all attribute name/values for the item will be deleted.
:type expected_value: list
:param expected_value: If supplied, this is a list or tuple consisting
of a single attribute name and expected value. The list can be of
the form:
* ['name', 'value']
In which case the call will first verify that the attribute "name"
of this item has a value of "value". If it does, the delete
will proceed, otherwise a ConditionalCheckFailed error will be
returned. The list can also be of the form:
* ['name', True|False]
which will simply check for the existence (True) or
non-existence (False) of the attribute.
:rtype: bool
:return: True if successful
"""
return self.connection.delete_attributes(self, item_name, attributes,
expected_values)
def batch_delete_attributes(self, items):
"""
Delete multiple items in this domain.
:type items: dict or dict-like object
:param items: A dictionary-like object. The keys of the dictionary are
the item names and the values are either:
* dictionaries of attribute names/values, exactly the
same as the attribute_names parameter of the scalar
put_attributes call. The attribute name/value pairs
will only be deleted if they match the name/value
pairs passed in.
* None which means that all attributes associated
with the item should be deleted.
:rtype: bool
:return: True if successful
"""
return self.connection.batch_delete_attributes(self, items)
def select(self, query='', next_token=None, consistent_read=False, max_items=None):
"""
Returns a set of Attributes for item names within domain_name that match the query.
The query must be expressed in using the SELECT style syntax rather than the
original SimpleDB query language.
:type query: string
:param query: The SimpleDB query to be performed.
:rtype: iter
:return: An iterator containing the results. This is actually a generator
function that will iterate across all search results, not just the
first page.
"""
return SelectResultSet(self, query, max_items=max_items, next_token=next_token,
consistent_read=consistent_read)
def get_item(self, item_name, consistent_read=False):
"""
Retrieves an item from the domain, along with all of its attributes.
:param string item_name: The name of the item to retrieve.
:rtype: :class:`boto.sdb.item.Item` or ``None``
:keyword bool consistent_read: When set to true, ensures that the most
recent data is returned.
:return: The requested item, or ``None`` if there was no match found
"""
item = self.get_attributes(item_name, consistent_read=consistent_read)
if item:
item.domain = self
return item
else:
return None
def new_item(self, item_name):
return self.connection.item_cls(self, item_name)
def delete_item(self, item):
self.delete_attributes(item.name)
def to_xml(self, f=None):
"""Get this domain as an XML DOM Document
:param f: Optional File to dump directly to
:type f: File or Stream
:return: File object where the XML has been dumped to
:rtype: file
"""
if not f:
from tempfile import TemporaryFile
f = TemporaryFile()
print('<?xml version="1.0" encoding="UTF-8"?>', file=f)
print('<Domain id="%s">' % self.name, file=f)
for item in self:
print('\t<Item id="%s">' % item.name, file=f)
for k in item:
print('\t\t<attribute id="%s">' % k, file=f)
values = item[k]
if not isinstance(values, list):
values = [values]
for value in values:
print('\t\t\t<value><![CDATA[', end=' ', file=f)
if isinstance(value, six.text_type):
value = value.encode('utf-8', 'replace')
else:
value = six.text_type(value, errors='replace').encode('utf-8', 'replace')
f.write(value)
print(']]></value>', file=f)
print('\t\t</attribute>', file=f)
print('\t</Item>', file=f)
print('</Domain>', file=f)
f.flush()
f.seek(0)
return f
def from_xml(self, doc):
"""Load this domain based on an XML document"""
import xml.sax
handler = DomainDumpParser(self)
xml.sax.parse(doc, handler)
return handler
def delete(self):
"""
Delete this domain, and all items under it
"""
return self.connection.delete_domain(self)
class DomainMetaData(object):
def __init__(self, domain=None):
self.domain = domain
self.item_count = None
self.item_names_size = None
self.attr_name_count = None
self.attr_names_size = None
self.attr_value_count = None
self.attr_values_size = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'ItemCount':
self.item_count = int(value)
elif name == 'ItemNamesSizeBytes':
self.item_names_size = int(value)
elif name == 'AttributeNameCount':
self.attr_name_count = int(value)
elif name == 'AttributeNamesSizeBytes':
self.attr_names_size = int(value)
elif name == 'AttributeValueCount':
self.attr_value_count = int(value)
elif name == 'AttributeValuesSizeBytes':
self.attr_values_size = int(value)
elif name == 'Timestamp':
self.timestamp = value
else:
setattr(self, name, value)
import sys
from xml.sax.handler import ContentHandler
class DomainDumpParser(ContentHandler):
"""
SAX parser for a domain that has been dumped
"""
def __init__(self, domain):
self.uploader = UploaderThread(domain)
self.item_id = None
self.attrs = {}
self.attribute = None
self.value = ""
self.domain = domain
def startElement(self, name, attrs):
if name == "Item":
self.item_id = attrs['id']
self.attrs = {}
elif name == "attribute":
self.attribute = attrs['id']
elif name == "value":
self.value = ""
def characters(self, ch):
self.value += ch
def endElement(self, name):
if name == "value":
if self.value and self.attribute:
value = self.value.strip()
attr_name = self.attribute.strip()
if attr_name in self.attrs:
self.attrs[attr_name].append(value)
else:
self.attrs[attr_name] = [value]
elif name == "Item":
self.uploader.items[self.item_id] = self.attrs
# Every 20 items we spawn off the uploader
if len(self.uploader.items) >= 20:
self.uploader.start()
self.uploader = UploaderThread(self.domain)
elif name == "Domain":
# If we're done, spawn off our last Uploader Thread
self.uploader.start()
from threading import Thread
class UploaderThread(Thread):
"""Uploader Thread"""
def __init__(self, domain):
self.db = domain
self.items = {}
super(UploaderThread, self).__init__()
def run(self):
try:
self.db.batch_put_attributes(self.items)
except:
print("Exception using batch put, trying regular put instead")
for item_name in self.items:
self.db.put_attributes(item_name, self.items[item_name])
print(".", end=' ')
sys.stdout.flush()
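# Usage sketch (editor's addition, not part of boto): a hedged example of the
# XML dump/restore pair above. Domain names are hypothetical; note this
# vendored dump code is Python 2 era, so treat the sketch accordingly.
if __name__ == '__main__':
    import boto
    sdb = boto.connect_sdb()
    src = sdb.get_domain('editor_src')
    src.to_xml(open('dump.xml', 'w'))           # dump every item as XML
    dst = sdb.create_domain('editor_dst')
    dst.from_xml(open('dump.xml'))              # batch-upload via the parser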
# ---- end of boto/sdb/domain.py ----
import boto
from boto.utils import find_class, Password
from boto.sdb.db.key import Key
from boto.sdb.db.model import Model
from boto.compat import six, encodebytes
from datetime import datetime
from xml.dom.minidom import getDOMImplementation, parse, parseString, Node
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
class XMLConverter(object):
"""
Responsible for converting base Python types to format compatible with underlying
database. For SimpleDB, that means everything needs to be converted to a string
when stored in SimpleDB and from a string when retrieved.
To convert a value, pass it to the encode or decode method. The encode method
will take a Python native value and convert to DB format. The decode method will
take a DB format value and convert it to Python native format. To find the appropriate
method to call, the generic encode/decode methods will look for the type-specific
method by searching for a method called "encode_<type name>" or "decode_<type name>".
"""
def __init__(self, manager):
self.manager = manager
self.type_map = { bool : (self.encode_bool, self.decode_bool),
int : (self.encode_int, self.decode_int),
Model : (self.encode_reference, self.decode_reference),
Key : (self.encode_reference, self.decode_reference),
Password : (self.encode_password, self.decode_password),
datetime : (self.encode_datetime, self.decode_datetime)}
if six.PY2:
self.type_map[long] = (self.encode_long, self.decode_long)
def get_text_value(self, parent_node):
value = ''
for node in parent_node.childNodes:
if node.nodeType == node.TEXT_NODE:
value += node.data
return value
def encode(self, item_type, value):
if item_type in self.type_map:
encode = self.type_map[item_type][0]
return encode(value)
return value
def decode(self, item_type, value):
if item_type in self.type_map:
decode = self.type_map[item_type][1]
return decode(value)
else:
value = self.get_text_value(value)
return value
def encode_prop(self, prop, value):
if isinstance(value, list):
if hasattr(prop, 'item_type'):
new_value = []
for v in value:
item_type = getattr(prop, "item_type")
if Model in item_type.mro():
item_type = Model
new_value.append(self.encode(item_type, v))
return new_value
else:
return value
else:
return self.encode(prop.data_type, value)
def decode_prop(self, prop, value):
if prop.data_type == list:
if hasattr(prop, 'item_type'):
item_type = getattr(prop, "item_type")
if Model in item_type.mro():
item_type = Model
values = []
for item_node in value.getElementsByTagName('item'):
value = self.decode(item_type, item_node)
values.append(value)
return values
else:
return self.get_text_value(value)
else:
return self.decode(prop.data_type, value)
def encode_int(self, value):
value = int(value)
return '%d' % value
def decode_int(self, value):
value = self.get_text_value(value)
if value:
value = int(value)
else:
value = None
return value
def encode_long(self, value):
value = long(value)
return '%d' % value
def decode_long(self, value):
value = self.get_text_value(value)
return long(value)
def encode_bool(self, value):
if value == True:
return 'true'
else:
return 'false'
def decode_bool(self, value):
value = self.get_text_value(value)
        return value.lower() == 'true'
def encode_datetime(self, value):
return value.strftime(ISO8601)
def decode_datetime(self, value):
value = self.get_text_value(value)
try:
return datetime.strptime(value, ISO8601)
        except ValueError:
return None
def encode_reference(self, value):
if isinstance(value, six.string_types):
return value
if value is None:
return ''
else:
val_node = self.manager.doc.createElement("object")
val_node.setAttribute('id', value.id)
val_node.setAttribute('class', '%s.%s' % (value.__class__.__module__, value.__class__.__name__))
return val_node
def decode_reference(self, value):
if not value:
return None
try:
value = value.childNodes[0]
class_name = value.getAttribute("class")
id = value.getAttribute("id")
cls = find_class(class_name)
return cls.get_by_ids(id)
        except Exception:
return None
def encode_password(self, value):
if value and len(value) > 0:
return str(value)
else:
return None
def decode_password(self, value):
value = self.get_text_value(value)
return Password(value)
class XMLManager(object):
def __init__(self, cls, db_name, db_user, db_passwd,
db_host, db_port, db_table, ddl_dir, enable_ssl):
self.cls = cls
if not db_name:
db_name = cls.__name__.lower()
self.db_name = db_name
self.db_user = db_user
self.db_passwd = db_passwd
self.db_host = db_host
self.db_port = db_port
self.db_table = db_table
self.ddl_dir = ddl_dir
self.s3 = None
self.converter = XMLConverter(self)
self.impl = getDOMImplementation()
self.doc = self.impl.createDocument(None, 'objects', None)
self.connection = None
self.enable_ssl = enable_ssl
self.auth_header = None
if self.db_user:
            creds = ('%s:%s' % (self.db_user, self.db_passwd)).encode('utf-8')
            base64string = encodebytes(creds).decode('utf-8')[:-1]
authheader = "Basic %s" % base64string
self.auth_header = authheader
def _connect(self):
if self.db_host:
if self.enable_ssl:
                from six.moves.http_client import HTTPSConnection as Connection
            else:
                from six.moves.http_client import HTTPConnection as Connection
self.connection = Connection(self.db_host, self.db_port)
def _make_request(self, method, url, post_data=None, body=None):
"""
Make a request on this connection
"""
if not self.connection:
self._connect()
try:
self.connection.close()
        except Exception:
pass
self.connection.connect()
headers = {}
if self.auth_header:
headers["Authorization"] = self.auth_header
self.connection.request(method, url, body, headers)
resp = self.connection.getresponse()
return resp
def new_doc(self):
return self.impl.createDocument(None, 'objects', None)
def _object_lister(self, cls, doc):
for obj_node in doc.getElementsByTagName('object'):
if not cls:
class_name = obj_node.getAttribute('class')
cls = find_class(class_name)
id = obj_node.getAttribute('id')
obj = cls(id)
for prop_node in obj_node.getElementsByTagName('property'):
prop_name = prop_node.getAttribute('name')
prop = obj.find_property(prop_name)
if prop:
if hasattr(prop, 'item_type'):
value = self.get_list(prop_node, prop.item_type)
else:
value = self.decode_value(prop, prop_node)
value = prop.make_value_from_datastore(value)
setattr(obj, prop.name, value)
yield obj
def reset(self):
self._connect()
def get_doc(self):
return self.doc
def encode_value(self, prop, value):
return self.converter.encode_prop(prop, value)
def decode_value(self, prop, value):
return self.converter.decode_prop(prop, value)
def get_s3_connection(self):
if not self.s3:
self.s3 = boto.connect_s3(self.aws_access_key_id, self.aws_secret_access_key)
return self.s3
def get_list(self, prop_node, item_type):
values = []
try:
items_node = prop_node.getElementsByTagName('items')[0]
        except IndexError:
return []
for item_node in items_node.getElementsByTagName('item'):
value = self.converter.decode(item_type, item_node)
values.append(value)
return values
def get_object_from_doc(self, cls, id, doc):
obj_node = doc.getElementsByTagName('object')[0]
if not cls:
class_name = obj_node.getAttribute('class')
cls = find_class(class_name)
if not id:
id = obj_node.getAttribute('id')
obj = cls(id)
for prop_node in obj_node.getElementsByTagName('property'):
prop_name = prop_node.getAttribute('name')
prop = obj.find_property(prop_name)
value = self.decode_value(prop, prop_node)
value = prop.make_value_from_datastore(value)
if value is not None:
try:
setattr(obj, prop.name, value)
                except Exception:
                    # Some generated properties reject assignment; skip them.
                    pass
return obj
def get_props_from_doc(self, cls, id, doc):
"""
        Pull the properties out of this document.
        Returns a tuple of the class, the properties as a dict, and the id if
        provided.
:return: (cls, props, id)
"""
obj_node = doc.getElementsByTagName('object')[0]
if not cls:
class_name = obj_node.getAttribute('class')
cls = find_class(class_name)
if not id:
id = obj_node.getAttribute('id')
props = {}
for prop_node in obj_node.getElementsByTagName('property'):
prop_name = prop_node.getAttribute('name')
prop = cls.find_property(prop_name)
value = self.decode_value(prop, prop_node)
value = prop.make_value_from_datastore(value)
if value is not None:
props[prop.name] = value
return (cls, props, id)
def get_object(self, cls, id):
if not self.connection:
self._connect()
if not self.connection:
raise NotImplementedError("Can't query without a database connection")
url = "/%s/%s" % (self.db_name, id)
resp = self._make_request('GET', url)
if resp.status == 200:
doc = parse(resp)
else:
raise Exception("Error: %s" % resp.status)
return self.get_object_from_doc(cls, id, doc)
def query(self, cls, filters, limit=None, order_by=None):
if not self.connection:
self._connect()
if not self.connection:
raise NotImplementedError("Can't query without a database connection")
        from six.moves.urllib.parse import urlencode
query = str(self._build_query(cls, filters, limit, order_by))
if query:
url = "/%s?%s" % (self.db_name, urlencode({"query": query}))
else:
url = "/%s" % self.db_name
resp = self._make_request('GET', url)
if resp.status == 200:
doc = parse(resp)
else:
raise Exception("Error: %s" % resp.status)
return self._object_lister(cls, doc)
def _build_query(self, cls, filters, limit, order_by):
if len(filters) > 4:
raise Exception('Too many filters, max is 4')
parts = []
properties = cls.properties(hidden=False)
for filter, value in filters:
name, op = filter.strip().split()
found = False
for property in properties:
if property.name == name:
found = True
                    if isinstance(value, list):
filter_parts = []
for val in value:
val = self.encode_value(property, val)
filter_parts.append("'%s' %s '%s'" % (name, op, val))
parts.append("[%s]" % " OR ".join(filter_parts))
else:
value = self.encode_value(property, value)
parts.append("['%s' %s '%s']" % (name, op, value))
if not found:
raise Exception('%s is not a valid field' % name)
if order_by:
if order_by.startswith("-"):
key = order_by[1:]
type = "desc"
else:
key = order_by
type = "asc"
parts.append("['%s' starts-with ''] sort '%s' %s" % (key, key, type))
return ' intersection '.join(parts)
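    # For illustration, a filter list like [('name =', 'foo'), ('age >', 21)]
    # with order_by='-age' would produce a query string along these lines
    # (hedged; the exact values depend on the property encodings):
    #
    #     ['name' = 'foo'] intersection ['age' > '21'] intersection
    #     ['age' starts-with ''] sort 'age' desc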
def query_gql(self, query_string, *args, **kwds):
raise NotImplementedError("GQL queries not supported in XML")
def save_list(self, doc, items, prop_node):
items_node = doc.createElement('items')
prop_node.appendChild(items_node)
for item in items:
item_node = doc.createElement('item')
items_node.appendChild(item_node)
if isinstance(item, Node):
item_node.appendChild(item)
else:
text_node = doc.createTextNode(item)
item_node.appendChild(text_node)
def save_object(self, obj, expected_value=None):
"""
Marshal the object and do a PUT
"""
doc = self.marshal_object(obj)
if obj.id:
url = "/%s/%s" % (self.db_name, obj.id)
else:
url = "/%s" % (self.db_name)
resp = self._make_request("PUT", url, body=doc.toxml())
new_obj = self.get_object_from_doc(obj.__class__, None, parse(resp))
obj.id = new_obj.id
for prop in obj.properties():
try:
propname = prop.name
except AttributeError:
propname = None
if propname:
value = getattr(new_obj, prop.name)
if value:
setattr(obj, prop.name, value)
return obj
def marshal_object(self, obj, doc=None):
if not doc:
doc = self.new_doc()
if not doc:
doc = self.doc
obj_node = doc.createElement('object')
if obj.id:
obj_node.setAttribute('id', obj.id)
obj_node.setAttribute('class', '%s.%s' % (obj.__class__.__module__,
obj.__class__.__name__))
root = doc.documentElement
root.appendChild(obj_node)
for property in obj.properties(hidden=False):
prop_node = doc.createElement('property')
prop_node.setAttribute('name', property.name)
prop_node.setAttribute('type', property.type_name)
value = property.get_value_for_datastore(obj)
if value is not None:
value = self.encode_value(property, value)
if isinstance(value, list):
self.save_list(doc, value, prop_node)
elif isinstance(value, Node):
prop_node.appendChild(value)
else:
                    # createTextNode expects text, so strip non-ascii
                    # characters but keep the result as a text string.
                    text = six.text_type(value).encode('ascii', 'ignore').decode('ascii')
                    text_node = doc.createTextNode(text)
prop_node.appendChild(text_node)
obj_node.appendChild(prop_node)
return doc
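    # marshal_object produces a DOM document shaped roughly like this
    # (illustrative only; property names and types depend on the model):
    #
    #     <objects>
    #       <object id="..." class="mymodule.MyModel">
    #         <property name="name" type="String">some value</property>
    #         <property name="tags" type="List"><items>...</items></property>
    #       </object>
    #     </objects>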
def unmarshal_object(self, fp, cls=None, id=None):
if isinstance(fp, six.string_types):
doc = parseString(fp)
else:
doc = parse(fp)
return self.get_object_from_doc(cls, id, doc)
def unmarshal_props(self, fp, cls=None, id=None):
"""
        Same as unmarshalling an object, except it returns the result of
        "get_props_from_doc".
"""
if isinstance(fp, six.string_types):
doc = parseString(fp)
else:
doc = parse(fp)
return self.get_props_from_doc(cls, id, doc)
def delete_object(self, obj):
url = "/%s/%s" % (self.db_name, obj.id)
return self._make_request("DELETE", url)
def set_key_value(self, obj, name, value):
self.domain.put_attributes(obj.id, {name: value}, replace=True)
def delete_key_value(self, obj, name):
self.domain.delete_attributes(obj.id, name)
def get_key_value(self, obj, name):
a = self.domain.get_attributes(obj.id, name)
if name in a:
return a[name]
else:
return None
def get_raw_item(self, obj):
return self.domain.get_item(obj.id)
def set_property(self, prop, obj, name, value):
pass
def get_property(self, prop, obj, name):
pass
def load_object(self, obj):
if not obj._loaded:
obj = obj.get_by_id(obj.id)
obj._loaded = True
return obj
# ---- end of boto/sdb/db/manager/xmlmanager.py ----
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.datapipeline import exceptions
class DataPipelineConnection(AWSQueryConnection):
"""
This is the AWS Data Pipeline API Reference . This guide provides
descriptions and samples of the AWS Data Pipeline API.
AWS Data Pipeline is a web service that configures and manages a
data-driven workflow called a pipeline. AWS Data Pipeline handles
the details of scheduling and ensuring that data dependencies are
met so your application can focus on processing the data.
The AWS Data Pipeline API implements two main sets of
functionality. The first set of actions configure the pipeline in
the web service. You call these actions to create a pipeline and
define data sources, schedules, dependencies, and the transforms
to be performed on the data.
The second set of actions are used by a task runner application
that calls the AWS Data Pipeline API to receive the next task
ready for processing. The logic for performing the task, such as
querying the data, running data analysis, or converting the data
from one format to another, is contained within the task runner.
The task runner performs the task assigned to it by the web
service, reporting progress to the web service as it does so. When
the task is done, the task runner reports the final success or
failure of the task to the web service.
AWS Data Pipeline provides an open-source implementation of a task
runner called AWS Data Pipeline Task Runner. AWS Data Pipeline
Task Runner provides logic for common data management scenarios,
such as performing database queries and running data analysis
using Amazon Elastic MapReduce (Amazon EMR). You can use AWS Data
Pipeline Task Runner as your task runner, or you can write your
own task runner to provide custom data management.
The AWS Data Pipeline API uses the Signature Version 4 protocol
for signing requests. For more information about how to sign a
request with this protocol, see `Signature Version 4 Signing
Process`_. In the code examples in this reference, the Signature
Version 4 Request parameters are represented as AuthParams.
"""
APIVersion = "2012-10-29"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "datapipeline.us-east-1.amazonaws.com"
ServiceName = "DataPipeline"
TargetPrefix = "DataPipeline"
ResponseError = JSONResponseError
_faults = {
"PipelineDeletedException": exceptions.PipelineDeletedException,
"InvalidRequestException": exceptions.InvalidRequestException,
"TaskNotFoundException": exceptions.TaskNotFoundException,
"PipelineNotFoundException": exceptions.PipelineNotFoundException,
"InternalServiceError": exceptions.InternalServiceError,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
kwargs['host'] = region.endpoint
super(DataPipelineConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def activate_pipeline(self, pipeline_id):
"""
Validates a pipeline and initiates processing. If the pipeline
does not pass validation, activation fails.
Call this action to start processing pipeline tasks of a
pipeline you've created using the CreatePipeline and
PutPipelineDefinition actions. A pipeline cannot be modified
after it has been successfully activated.
:type pipeline_id: string
:param pipeline_id: The identifier of the pipeline to activate.
"""
params = {'pipelineId': pipeline_id, }
return self.make_request(action='ActivatePipeline',
body=json.dumps(params))
def create_pipeline(self, name, unique_id, description=None):
"""
Creates a new empty pipeline. When this action succeeds, you
can then use the PutPipelineDefinition action to populate the
pipeline.
:type name: string
:param name: The name of the new pipeline. You can use the same name
for multiple pipelines associated with your AWS account, because
AWS Data Pipeline assigns each new pipeline a unique pipeline
identifier.
:type unique_id: string
:param unique_id: A unique identifier that you specify. This identifier
is not the same as the pipeline identifier assigned by AWS Data
Pipeline. You are responsible for defining the format and ensuring
the uniqueness of this identifier. You use this parameter to ensure
idempotency during repeated calls to CreatePipeline. For example,
if the first call to CreatePipeline does not return a clear
success, you can pass in the same unique identifier and pipeline
name combination on a subsequent call to CreatePipeline.
CreatePipeline ensures that if a pipeline already exists with the
same name and unique identifier, a new pipeline will not be
created. Instead, you'll receive the pipeline identifier from the
previous attempt. The uniqueness of the name and unique identifier
combination is scoped to the AWS account or IAM user credentials.
:type description: string
:param description: The description of the new pipeline.
"""
params = {'name': name, 'uniqueId': unique_id, }
if description is not None:
params['description'] = description
return self.make_request(action='CreatePipeline',
body=json.dumps(params))
def delete_pipeline(self, pipeline_id):
"""
Permanently deletes a pipeline, its pipeline definition and
its run history. You cannot query or restore a deleted
pipeline. AWS Data Pipeline will attempt to cancel instances
associated with the pipeline that are currently being
processed by task runners. Deleting a pipeline cannot be
undone.
To temporarily pause a pipeline instead of deleting it, call
SetStatus with the status set to Pause on individual
components. Components that are paused by SetStatus can be
resumed.
:type pipeline_id: string
:param pipeline_id: The identifier of the pipeline to be deleted.
"""
params = {'pipelineId': pipeline_id, }
return self.make_request(action='DeletePipeline',
body=json.dumps(params))
def describe_objects(self, object_ids, pipeline_id, marker=None,
evaluate_expressions=None):
"""
Returns the object definitions for a set of objects associated
with the pipeline. Object definitions are composed of a set of
fields that define the properties of the object.
:type pipeline_id: string
:param pipeline_id: Identifier of the pipeline that contains the object
definitions.
:type object_ids: list
:param object_ids: Identifiers of the pipeline objects that contain the
definitions to be described. You can pass as many as 25 identifiers
in a single call to DescribeObjects.
:type evaluate_expressions: boolean
:param evaluate_expressions: Indicates whether any expressions in the
object should be evaluated when the object descriptions are
returned.
:type marker: string
:param marker: The starting point for the results to be returned. The
first time you call DescribeObjects, this value should be empty. As
long as the action returns `HasMoreResults` as `True`, you can call
DescribeObjects again and pass the marker value from the response
to retrieve the next set of results.
"""
params = {
'pipelineId': pipeline_id,
'objectIds': object_ids,
}
if evaluate_expressions is not None:
params['evaluateExpressions'] = evaluate_expressions
if marker is not None:
params['marker'] = marker
return self.make_request(action='DescribeObjects',
body=json.dumps(params))
def describe_pipelines(self, pipeline_ids):
"""
Retrieve metadata about one or more pipelines. The information
retrieved includes the name of the pipeline, the pipeline
identifier, its current state, and the user account that owns
the pipeline. Using account credentials, you can retrieve
metadata about pipelines that you or your IAM users have
created. If you are using an IAM user account, you can
retrieve metadata about only those pipelines you have read
permission for.
To retrieve the full pipeline definition instead of metadata
about the pipeline, call the GetPipelineDefinition action.
:type pipeline_ids: list
:param pipeline_ids: Identifiers of the pipelines to describe. You can
pass as many as 25 identifiers in a single call to
DescribePipelines. You can obtain pipeline identifiers by calling
ListPipelines.
"""
params = {'pipelineIds': pipeline_ids, }
return self.make_request(action='DescribePipelines',
body=json.dumps(params))
def evaluate_expression(self, pipeline_id, expression, object_id):
"""
Evaluates a string in the context of a specified object. A
task runner can use this action to evaluate SQL queries stored
in Amazon S3.
:type pipeline_id: string
:param pipeline_id: The identifier of the pipeline.
:type object_id: string
:param object_id: The identifier of the object.
:type expression: string
:param expression: The expression to evaluate.
"""
params = {
'pipelineId': pipeline_id,
'objectId': object_id,
'expression': expression,
}
return self.make_request(action='EvaluateExpression',
body=json.dumps(params))
def get_pipeline_definition(self, pipeline_id, version=None):
"""
Returns the definition of the specified pipeline. You can call
GetPipelineDefinition to retrieve the pipeline definition you
provided using PutPipelineDefinition.
:type pipeline_id: string
:param pipeline_id: The identifier of the pipeline.
:type version: string
:param version: The version of the pipeline definition to retrieve.
This parameter accepts the values `latest` (default) and `active`.
Where `latest` indicates the last definition saved to the pipeline
and `active` indicates the last definition of the pipeline that was
activated.
"""
params = {'pipelineId': pipeline_id, }
if version is not None:
params['version'] = version
return self.make_request(action='GetPipelineDefinition',
body=json.dumps(params))
def list_pipelines(self, marker=None):
"""
Returns a list of pipeline identifiers for all active
pipelines. Identifiers are returned only for pipelines you
have permission to access.
:type marker: string
:param marker: The starting point for the results to be returned. The
first time you call ListPipelines, this value should be empty. As
long as the action returns `HasMoreResults` as `True`, you can call
ListPipelines again and pass the marker value from the response to
retrieve the next set of results.
"""
params = {}
if marker is not None:
params['marker'] = marker
return self.make_request(action='ListPipelines',
body=json.dumps(params))
def poll_for_task(self, worker_group, hostname=None,
instance_identity=None):
"""
Task runners call this action to receive a task to perform
from AWS Data Pipeline. The task runner specifies which tasks
it can perform by setting a value for the workerGroup
parameter of the PollForTask call. The task returned by
PollForTask may come from any of the pipelines that match the
workerGroup value passed in by the task runner and that was
launched using the IAM user credentials specified by the task
runner.
If tasks are ready in the work queue, PollForTask returns a
response immediately. If no tasks are available in the queue,
PollForTask uses long-polling and holds on to a poll
        connection for up to 90 seconds, during which time the first
        newly scheduled task is handed to the task runner. To
        accommodate this, set the socket timeout in your task runner to
90 seconds. The task runner should not call PollForTask again
on the same `workerGroup` until it receives a response, and
this may take up to 90 seconds.
:type worker_group: string
:param worker_group: Indicates the type of task the task runner is
configured to accept and process. The worker group is set as a
field on objects in the pipeline when they are created. You can
only specify a single value for `workerGroup` in the call to
PollForTask. There are no wildcard values permitted in
`workerGroup`, the string must be an exact, case-sensitive, match.
:type hostname: string
:param hostname: The public DNS name of the calling task runner.
:type instance_identity: dict
:param instance_identity: Identity information for the Amazon EC2
instance that is hosting the task runner. You can get this value by
calling the URI, `http://169.254.169.254/latest/meta-data/instance-
id`, from the EC2 instance. For more information, go to `Instance
Metadata`_ in the Amazon Elastic Compute Cloud User Guide. Passing
in this value proves that your task runner is running on an EC2
instance, and ensures the proper AWS Data Pipeline service charges
are applied to your pipeline.
"""
params = {'workerGroup': worker_group, }
if hostname is not None:
params['hostname'] = hostname
if instance_identity is not None:
params['instanceIdentity'] = instance_identity
return self.make_request(action='PollForTask',
body=json.dumps(params))
def put_pipeline_definition(self, pipeline_objects, pipeline_id):
"""
Adds tasks, schedules, and preconditions that control the
behavior of the pipeline. You can use PutPipelineDefinition to
populate a new pipeline or to update an existing pipeline that
has not yet been activated.
PutPipelineDefinition also validates the configuration as it
adds it to the pipeline. Changes to the pipeline are saved
unless one of the following three validation errors exists in
the pipeline.
#. An object is missing a name or identifier field.
#. A string or reference field is empty.
#. The number of objects in the pipeline exceeds the maximum
allowed objects.
Pipeline object definitions are passed to the
PutPipelineDefinition action and returned by the
GetPipelineDefinition action.
:type pipeline_id: string
:param pipeline_id: The identifier of the pipeline to be configured.
:type pipeline_objects: list
:param pipeline_objects: The objects that define the pipeline. These
will overwrite the existing pipeline definition.
"""
params = {
'pipelineId': pipeline_id,
'pipelineObjects': pipeline_objects,
}
return self.make_request(action='PutPipelineDefinition',
body=json.dumps(params))
def query_objects(self, pipeline_id, sphere, marker=None, query=None,
limit=None):
"""
Queries a pipeline for the names of objects that match a
specified set of conditions.
The objects returned by QueryObjects are paginated and then
filtered by the value you set for query. This means the action
may return an empty result set with a value set for marker. If
`HasMoreResults` is set to `True`, you should continue to call
QueryObjects, passing in the returned value for marker, until
`HasMoreResults` returns `False`.
:type pipeline_id: string
:param pipeline_id: Identifier of the pipeline to be queried for object
names.
:type query: dict
:param query: Query that defines the objects to be returned. The Query
object can contain a maximum of ten selectors. The conditions in
the query are limited to top-level String fields in the object.
These filters can be applied to components, instances, and
attempts.
:type sphere: string
:param sphere: Specifies whether the query applies to components or
instances. Allowable values: `COMPONENT`, `INSTANCE`, `ATTEMPT`.
:type marker: string
:param marker: The starting point for the results to be returned. The
first time you call QueryObjects, this value should be empty. As
long as the action returns `HasMoreResults` as `True`, you can call
QueryObjects again and pass the marker value from the response to
retrieve the next set of results.
:type limit: integer
:param limit: Specifies the maximum number of object names that
QueryObjects will return in a single call. The default value is
100.
"""
params = {'pipelineId': pipeline_id, 'sphere': sphere, }
if query is not None:
params['query'] = query
if marker is not None:
params['marker'] = marker
if limit is not None:
params['limit'] = limit
return self.make_request(action='QueryObjects',
body=json.dumps(params))
def report_task_progress(self, task_id):
"""
Updates the AWS Data Pipeline service on the progress of the
calling task runner. When the task runner is assigned a task,
it should call ReportTaskProgress to acknowledge that it has
        the task within 2 minutes. If the web service does not receive
this acknowledgement within the 2 minute window, it will
assign the task in a subsequent PollForTask call. After this
initial acknowledgement, the task runner only needs to report
progress every 15 minutes to maintain its ownership of the
task. You can change this reporting time from 15 minutes by
specifying a `reportProgressTimeout` field in your pipeline.
If a task runner does not report its status after 5 minutes,
AWS Data Pipeline will assume that the task runner is unable
to process the task and will reassign the task in a subsequent
response to PollForTask. task runners should call
ReportTaskProgress every 60 seconds.
:type task_id: string
:param task_id: Identifier of the task assigned to the task runner.
This value is provided in the TaskObject that the service returns
with the response for the PollForTask action.
"""
params = {'taskId': task_id, }
return self.make_request(action='ReportTaskProgress',
body=json.dumps(params))
def report_task_runner_heartbeat(self, taskrunner_id, worker_group=None,
hostname=None):
"""
Task runners call ReportTaskRunnerHeartbeat every 15 minutes
to indicate that they are operational. In the case of AWS Data
Pipeline Task Runner launched on a resource managed by AWS
Data Pipeline, the web service can use this call to detect
when the task runner application has failed and restart a new
instance.
:type taskrunner_id: string
:param taskrunner_id: The identifier of the task runner. This value
should be unique across your AWS account. In the case of AWS Data
Pipeline Task Runner launched on a resource managed by AWS Data
Pipeline, the web service provides a unique identifier when it
launches the application. If you have written a custom task runner,
you should assign a unique identifier for the task runner.
:type worker_group: string
:param worker_group: Indicates the type of task the task runner is
configured to accept and process. The worker group is set as a
field on objects in the pipeline when they are created. You can
only specify a single value for `workerGroup` in the call to
ReportTaskRunnerHeartbeat. There are no wildcard values permitted
in `workerGroup`, the string must be an exact, case-sensitive,
match.
:type hostname: string
:param hostname: The public DNS name of the calling task runner.
"""
params = {'taskrunnerId': taskrunner_id, }
if worker_group is not None:
params['workerGroup'] = worker_group
if hostname is not None:
params['hostname'] = hostname
return self.make_request(action='ReportTaskRunnerHeartbeat',
body=json.dumps(params))
def set_status(self, object_ids, status, pipeline_id):
"""
Requests that the status of an array of physical or logical
pipeline objects be updated in the pipeline. This update may
not occur immediately, but is eventually consistent. The
status that can be set depends on the type of object.
:type pipeline_id: string
:param pipeline_id: Identifies the pipeline that contains the objects.
:type object_ids: list
:param object_ids: Identifies an array of objects. The corresponding
objects can be either physical or components, but not a mix of both
types.
:type status: string
:param status: Specifies the status to be set on all the objects in
`objectIds`. For components, this can be either `PAUSE` or
`RESUME`. For instances, this can be either `CANCEL`, `RERUN`, or
`MARK_FINISHED`.
"""
params = {
'pipelineId': pipeline_id,
'objectIds': object_ids,
'status': status,
}
return self.make_request(action='SetStatus',
body=json.dumps(params))
def set_task_status(self, task_id, task_status, error_id=None,
error_message=None, error_stack_trace=None):
"""
Notifies AWS Data Pipeline that a task is completed and
provides information about the final status. The task runner
calls this action regardless of whether the task was
        successful. The task runner does not need to call SetTaskStatus
for tasks that are canceled by the web service during a call
to ReportTaskProgress.
:type task_id: string
:param task_id: Identifies the task assigned to the task runner. This
value is set in the TaskObject that is returned by the PollForTask
action.
:type task_status: string
:param task_status: If `FINISHED`, the task successfully completed. If
`FAILED` the task ended unsuccessfully. The `FALSE` value is used
by preconditions.
:type error_id: string
:param error_id: If an error occurred during the task, this value
specifies an id value that represents the error. This value is set
on the physical attempt object. It is used to display error
information to the user. It should not start with string "Service_"
which is reserved by the system.
:type error_message: string
:param error_message: If an error occurred during the task, this value
specifies a text description of the error. This value is set on the
physical attempt object. It is used to display error information to
the user. The web service does not parse this value.
:type error_stack_trace: string
:param error_stack_trace: If an error occurred during the task, this
value specifies the stack trace associated with the error. This
value is set on the physical attempt object. It is used to display
error information to the user. The web service does not parse this
value.
"""
params = {'taskId': task_id, 'taskStatus': task_status, }
if error_id is not None:
params['errorId'] = error_id
if error_message is not None:
params['errorMessage'] = error_message
if error_stack_trace is not None:
params['errorStackTrace'] = error_stack_trace
return self.make_request(action='SetTaskStatus',
body=json.dumps(params))
def validate_pipeline_definition(self, pipeline_objects, pipeline_id):
"""
Tests the pipeline definition with a set of validation checks
to ensure that it is well formed and can run without error.
:type pipeline_id: string
:param pipeline_id: Identifies the pipeline whose definition is to be
validated.
:type pipeline_objects: list
:param pipeline_objects: A list of objects that define the pipeline
changes to validate against the pipeline.
"""
params = {
'pipelineId': pipeline_id,
'pipelineObjects': pipeline_objects,
}
return self.make_request(action='ValidatePipelineDefinition',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
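# A minimal usage sketch (hypothetical pipeline name, token, and
# 'pipeline_objects' list; assumes AWS credentials are configured in the
# environment or boto config):
#
#     conn = DataPipelineConnection()
#     created = conn.create_pipeline('my-pipeline', 'my-unique-token')
#     pipeline_id = created['pipelineId']
#     conn.put_pipeline_definition(pipeline_objects, pipeline_id)
#     conn.activate_pipeline(pipeline_id)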
# ---- end of boto/datapipeline/layer1.py ----
class ResultEntry(dict):
"""
The result (successful or unsuccessful) of a single
message within a send_message_batch request.
In the case of a successful result, this dict-like
object will contain the following items:
:ivar id: A string containing the user-supplied ID of the message.
:ivar message_id: A string containing the SQS ID of the new message.
:ivar message_md5: A string containing the MD5 hash of the message body.
In the case of an error, this object will contain the following
items:
:ivar id: A string containing the user-supplied ID of the message.
    :ivar sender_fault: A boolean value indicating whether the error was the
        sender's fault.
:ivar error_code: A string containing a short description of the error.
:ivar error_message: A string containing a description of the error.
"""
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Id':
self['id'] = value
elif name == 'MessageId':
self['message_id'] = value
elif name == 'MD5OfMessageBody':
self['message_md5'] = value
elif name == 'SenderFault':
self['sender_fault'] = value
elif name == 'Code':
self['error_code'] = value
elif name == 'Message':
self['error_message'] = value
class BatchResults(object):
"""
A container for the results of a send_message_batch request.
:ivar results: A list of successful results. Each item in the
list will be an instance of :class:`ResultEntry`.
:ivar errors: A list of unsuccessful results. Each item in the
list will be an instance of :class:`ResultEntry`.
"""
def __init__(self, parent):
self.parent = parent
self.results = []
self.errors = []
def startElement(self, name, attrs, connection):
if name.endswith('MessageBatchResultEntry'):
entry = ResultEntry()
self.results.append(entry)
return entry
if name == 'BatchResultErrorEntry':
entry = ResultEntry()
self.errors.append(entry)
return entry
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
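# Typical usage (hypothetical; 'queue' is a boto.sqs Queue and 'messages' a
# list of (id, body, delay) tuples accepted by write_batch):
#
#     results = queue.write_batch(messages)
#     for entry in results.results:
#         print(entry['id'], entry['message_id'])
#     for entry in results.errors:
#         print(entry['id'], entry['error_code'], entry['error_message'])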
# ---- end of boto/sqs/batchresults.py ----
import base64
import boto
from boto.compat import StringIO
from boto.compat import six
from boto.sqs.attributes import Attributes
from boto.sqs.messageattributes import MessageAttributes
from boto.exception import SQSDecodeError
class RawMessage(object):
"""
Base class for SQS messages. RawMessage does not encode the message
in any way. Whatever you store in the body of the message is what
will be written to SQS and whatever is returned from SQS is stored
directly into the body of the message.
"""
def __init__(self, queue=None, body=''):
self.queue = queue
self.set_body(body)
self.id = None
self.receipt_handle = None
self.md5 = None
self.attributes = Attributes(self)
self.message_attributes = MessageAttributes(self)
self.md5_message_attributes = None
def __len__(self):
return len(self.encode(self._body))
def startElement(self, name, attrs, connection):
if name == 'Attribute':
return self.attributes
if name == 'MessageAttribute':
return self.message_attributes
return None
def endElement(self, name, value, connection):
if name == 'Body':
self.set_body(value)
elif name == 'MessageId':
self.id = value
elif name == 'ReceiptHandle':
self.receipt_handle = value
elif name == 'MD5OfBody':
self.md5 = value
elif name == 'MD5OfMessageAttributes':
self.md5_message_attributes = value
else:
setattr(self, name, value)
def endNode(self, connection):
self.set_body(self.decode(self.get_body()))
def encode(self, value):
"""Transform body object into serialized byte array format."""
return value
def decode(self, value):
"""Transform seralized byte array into any object."""
return value
def set_body(self, body):
"""Override the current body for this object, using decoded format."""
self._body = body
def get_body(self):
return self._body
def get_body_encoded(self):
"""
This method is really a semi-private method used by the Queue.write
method when writing the contents of the message to SQS.
You probably shouldn't need to call this method in the normal course of events.
"""
return self.encode(self.get_body())
def delete(self):
if self.queue:
return self.queue.delete_message(self)
def change_visibility(self, visibility_timeout):
if self.queue:
self.queue.connection.change_message_visibility(self.queue,
self.receipt_handle,
visibility_timeout)
class Message(RawMessage):
"""
The default Message class used for SQS queues. This class automatically
encodes/decodes the message body using Base64 encoding to avoid any
illegal characters in the message body. See:
https://forums.aws.amazon.com/thread.jspa?threadID=13067
for details on why this is a good idea. The encode/decode is meant to
be transparent to the end-user.
"""
def encode(self, value):
if not isinstance(value, six.binary_type):
value = value.encode('utf-8')
return base64.b64encode(value).decode('utf-8')
def decode(self, value):
try:
value = base64.b64decode(value.encode('utf-8')).decode('utf-8')
        except Exception:
boto.log.warning('Unable to decode message')
return value
return value
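# The base64 round trip performed by Message is equivalent to this sketch:
#
#     encoded = base64.b64encode('hello'.encode('utf-8')).decode('utf-8')
#     decoded = base64.b64decode(encoded.encode('utf-8')).decode('utf-8')
#     assert decoded == 'hello'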
class MHMessage(Message):
"""
The MHMessage class provides a message that provides RFC821-like
headers like this:
HeaderName: HeaderValue
The encoding/decoding of this is handled automatically and after
the message body has been read, the message instance can be treated
like a mapping object, i.e. m['HeaderName'] would return 'HeaderValue'.
"""
def __init__(self, queue=None, body=None, xml_attrs=None):
if body is None or body == '':
body = {}
super(MHMessage, self).__init__(queue, body)
def decode(self, value):
try:
msg = {}
fp = StringIO(value)
line = fp.readline()
while line:
delim = line.find(':')
key = line[0:delim]
value = line[delim+1:].strip()
msg[key.strip()] = value.strip()
line = fp.readline()
        except Exception:
raise SQSDecodeError('Unable to decode message', self)
return msg
def encode(self, value):
s = ''
for item in value.items():
s = s + '%s: %s\n' % (item[0], item[1])
return s
def __contains__(self, key):
return key in self._body
def __getitem__(self, key):
if key in self._body:
return self._body[key]
else:
raise KeyError(key)
def __setitem__(self, key, value):
self._body[key] = value
self.set_body(self._body)
def keys(self):
return self._body.keys()
def values(self):
return self._body.values()
def items(self):
return self._body.items()
def has_key(self, key):
return key in self._body
def update(self, d):
self._body.update(d)
self.set_body(self._body)
def get(self, key, default=None):
return self._body.get(key, default)
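# MHMessage round trip, for illustration (hypothetical values; header order
# in the encoded form follows dict iteration order):
#
#     m = MHMessage(body={'From': 'me', 'Subject': 'hi'})
#     wire = m.encode(m.get_body())   # 'From: me\nSubject: hi\n'
#     m.decode(wire)                  # -> {'From': 'me', 'Subject': 'hi'}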
class EncodedMHMessage(MHMessage):
"""
The EncodedMHMessage class provides a message that provides RFC821-like
headers like this:
HeaderName: HeaderValue
This variation encodes/decodes the body of the message in base64 automatically.
The message instance can be treated like a mapping object,
i.e. m['HeaderName'] would return 'HeaderValue'.
"""
def decode(self, value):
try:
value = base64.b64decode(value.encode('utf-8')).decode('utf-8')
        except Exception:
raise SQSDecodeError('Unable to decode message', self)
return super(EncodedMHMessage, self).decode(value)
def encode(self, value):
value = super(EncodedMHMessage, self).encode(value)
return base64.b64encode(value.encode('utf-8')).decode('utf-8')
# ---- end of boto/sqs/message.py ----
import uuid
import boto
from boto.sqs.message import RawMessage
from boto.exception import SQSDecodeError
class BigMessage(RawMessage):
"""
The BigMessage class provides large payloads (up to 5GB)
by storing the payload itself in S3 and then placing a reference
to the S3 object in the actual SQS message payload.
To create a BigMessage, you should create a BigMessage object
and pass in a file-like object as the ``body`` param and also
    pass in an S3 URL specifying the bucket in which to store
the message body::
import boto.sqs
from boto.sqs.bigmessage import BigMessage
sqs = boto.sqs.connect_to_region('us-west-2')
queue = sqs.get_queue('myqueue')
fp = open('/path/to/bigmessage/data')
msg = BigMessage(queue, fp, 's3://mybucket')
queue.write(msg)
Passing in a fully-qualified S3 URL (e.g. s3://mybucket/foo)
is interpreted to mean that the body of the message is already
    stored in S3 and that S3 URL is then used directly with no
content uploaded by BigMessage.
"""
def __init__(self, queue=None, body=None, s3_url=None):
self.s3_url = s3_url
super(BigMessage, self).__init__(queue, body)
def _get_bucket_key(self, s3_url):
bucket_name = key_name = None
if s3_url:
if s3_url.startswith('s3://'):
# We need to split out the bucket from the key (if
# supplied). We also have to be aware that someone
# may provide a trailing '/' character as in:
# s3://foo/ and we want to handle that.
s3_components = s3_url[5:].split('/', 1)
bucket_name = s3_components[0]
if len(s3_components) > 1:
if s3_components[1]:
key_name = s3_components[1]
else:
msg = 's3_url parameter should start with s3://'
raise SQSDecodeError(msg, self)
return bucket_name, key_name
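    # _get_bucket_key splits an S3 URL as follows (illustrative):
    #
    #     's3://mybucket/path/to/key' -> ('mybucket', 'path/to/key')
    #     's3://mybucket/'            -> ('mybucket', None)
    #     's3://mybucket'             -> ('mybucket', None)
    #     'mybucket'                  -> raises SQSDecodeError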
def encode(self, value):
"""
:type value: file-like object
:param value: A file-like object containing the content
of the message. The actual content will be stored
in S3 and a link to the S3 object will be stored in
the message body.
"""
bucket_name, key_name = self._get_bucket_key(self.s3_url)
if bucket_name and key_name:
return self.s3_url
        key_name = str(uuid.uuid4())
s3_conn = boto.connect_s3()
s3_bucket = s3_conn.get_bucket(bucket_name)
key = s3_bucket.new_key(key_name)
key.set_contents_from_file(value)
self.s3_url = 's3://%s/%s' % (bucket_name, key_name)
return self.s3_url
def _get_s3_object(self, s3_url):
bucket_name, key_name = self._get_bucket_key(s3_url)
if bucket_name and key_name:
s3_conn = boto.connect_s3()
s3_bucket = s3_conn.get_bucket(bucket_name)
key = s3_bucket.get_key(key_name)
return key
else:
msg = 'Unable to decode S3 URL: %s' % s3_url
raise SQSDecodeError(msg, self)
def decode(self, value):
self.s3_url = value
key = self._get_s3_object(value)
return key.get_contents_as_string()
def delete(self):
# Delete the object in S3 first, then delete the SQS message
if self.s3_url:
key = self._get_s3_object(self.s3_url)
key.delete()
super(BigMessage, self).delete()
# ---- end of boto/sqs/bigmessage.py ----
# this is here for backward compatibility
# originally, the IAMConnection class was defined here
from boto.iam.connection import IAMConnection
from boto.regioninfo import RegionInfo, get_regions
from boto.regioninfo import connect
class IAMRegionInfo(RegionInfo):
def connect(self, **kw_params):
"""
        Connect to this Region's endpoint. Returns a connection
object pointing to the endpoint associated with this region.
You may pass any of the arguments accepted by the connection
class's constructor as keyword arguments and they will be
passed along to the connection object.
:rtype: Connection object
        :return: The connection to this region's endpoint
"""
if self.connection_cls:
return self.connection_cls(host=self.endpoint, **kw_params)
def regions():
"""
Get all available regions for the IAM service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo` instances
"""
regions = get_regions(
'iam',
region_cls=IAMRegionInfo,
connection_cls=IAMConnection
)
# For historical reasons, we had a "universal" endpoint as well.
regions.append(
IAMRegionInfo(
name='universal',
endpoint='iam.amazonaws.com',
connection_cls=IAMConnection
)
)
return regions
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.iam.connection.IAMConnection`.
    :type region_name: str
:param region_name: The name of the region to connect to.
:rtype: :class:`boto.iam.connection.IAMConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
if region_name == 'universal':
region = IAMRegionInfo(
name='universal',
endpoint='iam.amazonaws.com',
connection_cls=IAMConnection
)
return region.connect(**kw_params)
return connect('iam', region_name, region_cls=IAMRegionInfo,
connection_cls=IAMConnection, **kw_params)
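# Example (hypothetical; assumes credentials are available to boto):
#
#     import boto.iam
#     conn = boto.iam.connect_to_region('universal')
#     # 'universal' maps to iam.amazonaws.com; any region name returned by
#     # regions() works the same way.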
# ---- end of boto/iam/__init__.py ----
from boto.rds.dbsecuritygroup import DBSecurityGroup
from boto.resultset import ResultSet
class OptionGroup(object):
"""
Represents an RDS option group
Properties reference available from the AWS documentation at
http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_OptionGroup.html
:ivar connection: :py:class:`boto.rds.RDSConnection` associated with the
current object
:ivar name: Name of the option group
:ivar description: The description of the option group
:ivar engine_name: The name of the database engine to use
:ivar major_engine_version: The major version number of the engine to use
:ivar allow_both_vpc_and_nonvpc: Indicates whether this option group can be
applied to both VPC and non-VPC instances.
The value ``True`` indicates the option
group can be applied to both VPC and
non-VPC instances.
:ivar vpc_id: If AllowsVpcAndNonVpcInstanceMemberships is 'false', this
field is blank. If AllowsVpcAndNonVpcInstanceMemberships is
``True`` and this field is blank, then this option group can
be applied to both VPC and non-VPC instances. If this field
contains a value, then this option group can only be applied
to instances that are in the VPC indicated by this field.
:ivar options: The list of :py:class:`boto.rds.optiongroup.Option` objects
associated with the group
"""
def __init__(self, connection=None, name=None, engine_name=None,
major_engine_version=None, description=None,
allow_both_vpc_and_nonvpc=False, vpc_id=None):
        self.connection = connection
        self.name = name
self.engine_name = engine_name
self.major_engine_version = major_engine_version
self.description = description
self.allow_both_vpc_and_nonvpc = allow_both_vpc_and_nonvpc
self.vpc_id = vpc_id
self.options = []
def __repr__(self):
return 'OptionGroup:%s' % self.name
def startElement(self, name, attrs, connection):
if name == 'Options':
self.options = ResultSet([
('Options', Option)
])
else:
return None
def endElement(self, name, value, connection):
if name == 'OptionGroupName':
self.name = value
elif name == 'EngineName':
self.engine_name = value
elif name == 'MajorEngineVersion':
self.major_engine_version = value
elif name == 'OptionGroupDescription':
self.description = value
elif name == 'AllowsVpcAndNonVpcInstanceMemberships':
            self.allow_both_vpc_and_nonvpc = (value.lower() == 'true')
elif name == 'VpcId':
self.vpc_id = value
else:
setattr(self, name, value)
def delete(self):
return self.connection.delete_option_group(self.name)
class Option(object):
"""
Describes a Option for use in an OptionGroup
:ivar name: The name of the option
:ivar description: The description of the option.
:ivar permanent: Indicate if this option is permanent.
:ivar persistent: Indicate if this option is persistent.
:ivar port: If required, the port configured for this option to use.
:ivar settings: The option settings for this option.
:ivar db_security_groups: If the option requires access to a port, then
this DB Security Group allows access to the port.
:ivar vpc_security_groups: If the option requires access to a port, then
this VPC Security Group allows access to the
port.
"""
def __init__(self, name=None, description=None, permanent=False,
persistent=False, port=None, settings=None,
db_security_groups=None, vpc_security_groups=None):
self.name = name
self.description = description
self.permanent = permanent
self.persistent = persistent
self.port = port
self.settings = settings
self.db_security_groups = db_security_groups
self.vpc_security_groups = vpc_security_groups
if self.settings is None:
self.settings = []
if self.db_security_groups is None:
self.db_security_groups = []
if self.vpc_security_groups is None:
self.vpc_security_groups = []
def __repr__(self):
return 'Option:%s' % self.name
def startElement(self, name, attrs, connection):
if name == 'OptionSettings':
self.settings = ResultSet([
('OptionSettings', OptionSetting)
])
elif name == 'DBSecurityGroupMemberships':
self.db_security_groups = ResultSet([
('DBSecurityGroupMemberships', DBSecurityGroup)
])
elif name == 'VpcSecurityGroupMemberships':
self.vpc_security_groups = ResultSet([
('VpcSecurityGroupMemberships', VpcSecurityGroup)
])
else:
return None
def endElement(self, name, value, connection):
if name == 'OptionName':
self.name = value
elif name == 'OptionDescription':
self.description = value
        elif name == 'Permanent':
            # The attribute is 'permanent' (was misspelled 'permenant').
            self.permanent = (value.lower() == 'true')
        elif name == 'Persistent':
            self.persistent = (value.lower() == 'true')
elif name == 'Port':
self.port = int(value)
else:
setattr(self, name, value)
class OptionSetting(object):
"""
Describes a OptionSetting for use in an Option
:ivar name: The name of the option that has settings that you can set.
:ivar description: The description of the option setting.
:ivar value: The current value of the option setting.
:ivar default_value: The default value of the option setting.
:ivar allowed_values: The allowed values of the option setting.
:ivar data_type: The data type of the option setting.
:ivar apply_type: The DB engine specific parameter type.
:ivar is_modifiable: A Boolean value that, when true, indicates the option
setting can be modified from the default.
:ivar is_collection: Indicates if the option setting is part of a
collection.
"""
def __init__(self, name=None, description=None, value=None,
default_value=False, allowed_values=None, data_type=None,
apply_type=None, is_modifiable=False, is_collection=False):
self.name = name
self.description = description
self.value = value
self.default_value = default_value
self.allowed_values = allowed_values
self.data_type = data_type
self.apply_type = apply_type
self.is_modifiable = is_modifiable
self.is_collection = is_collection
def __repr__(self):
return 'OptionSetting:%s' % self.name
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Name':
self.name = value
elif name == 'Description':
self.description = value
elif name == 'Value':
self.value = value
elif name == 'DefaultValue':
self.default_value = value
elif name == 'AllowedValues':
self.allowed_values = value
elif name == 'DataType':
self.data_type = value
elif name == 'ApplyType':
self.apply_type = value
        elif name == 'IsModifiable':
            self.is_modifiable = (value.lower() == 'true')
        elif name == 'IsCollection':
            self.is_collection = (value.lower() == 'true')
else:
setattr(self, name, value)
class VpcSecurityGroup(object):
"""
Describes a VPC security group for use in a OptionGroup
"""
def __init__(self, vpc_id=None, status=None):
self.vpc_id = vpc_id
self.status = status
def __repr__(self):
return 'VpcSecurityGroup:%s' % self.vpc_id
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'VpcSecurityGroupId':
self.vpc_id = value
elif name == 'Status':
self.status = value
else:
setattr(self, name, value)
class OptionGroupOption(object):
"""
Describes a OptionGroupOption for use in an OptionGroup
:ivar name: The name of the option
:ivar description: The description of the option.
:ivar engine_name: Engine name that this option can be applied to.
:ivar major_engine_version: Indicates the major engine version that the
option is available for.
:ivar min_minor_engine_version: The minimum required engine version for the
option to be applied.
:ivar permanent: Indicate if this option is permanent.
:ivar persistent: Indicate if this option is persistent.
:ivar port_required: Specifies whether the option requires a port.
:ivar default_port: If the option requires a port, specifies the default
port for the option.
:ivar settings: The option settings for this option.
:ivar depends_on: List of all options that are prerequisites for this
option.
"""
def __init__(self, name=None, description=None, engine_name=None,
major_engine_version=None, min_minor_engine_version=None,
permanent=False, persistent=False, port_required=False,
default_port=None, settings=None, depends_on=None):
self.name = name
self.description = description
self.engine_name = engine_name
self.major_engine_version = major_engine_version
self.min_minor_engine_version = min_minor_engine_version
self.permanent = permanent
self.persistent = persistent
self.port_required = port_required
self.default_port = default_port
self.settings = settings
self.depends_on = depends_on
if self.settings is None:
self.settings = []
if self.depends_on is None:
self.depends_on = []
def __repr__(self):
return 'OptionGroupOption:%s' % self.name
def startElement(self, name, attrs, connection):
if name == 'OptionGroupOptionSettings':
self.settings = ResultSet([
('OptionGroupOptionSettings', OptionGroupOptionSetting)
])
elif name == 'OptionsDependedOn':
self.depends_on = []
else:
return None
def endElement(self, name, value, connection):
if name == 'Name':
self.name = value
elif name == 'Description':
self.description = value
elif name == 'EngineName':
self.engine_name = value
elif name == 'MajorEngineVersion':
self.major_engine_version = value
elif name == 'MinimumRequiredMinorEngineVersion':
self.min_minor_engine_version = value
        elif name == 'Permanent':
            # The attribute is 'permanent' (was misspelled 'permenant').
            self.permanent = (value.lower() == 'true')
        elif name == 'Persistent':
            self.persistent = (value.lower() == 'true')
        elif name == 'PortRequired':
            self.port_required = (value.lower() == 'true')
elif name == 'DefaultPort':
self.default_port = int(value)
else:
setattr(self, name, value)
class OptionGroupOptionSetting(object):
"""
Describes a OptionGroupOptionSetting for use in an OptionGroupOption.
:ivar name: The name of the option that has settings that you can set.
:ivar description: The description of the option setting.
:ivar value: The current value of the option setting.
:ivar default_value: The default value of the option setting.
:ivar allowed_values: The allowed values of the option setting.
:ivar data_type: The data type of the option setting.
:ivar apply_type: The DB engine specific parameter type.
:ivar is_modifiable: A Boolean value that, when true, indicates the option
setting can be modified from the default.
:ivar is_collection: Indicates if the option setting is part of a
collection.
"""
def __init__(self, name=None, description=None, default_value=False,
allowed_values=None, apply_type=None, is_modifiable=False):
self.name = name
self.description = description
self.default_value = default_value
self.allowed_values = allowed_values
self.apply_type = apply_type
self.is_modifiable = is_modifiable
def __repr__(self):
return 'OptionGroupOptionSetting:%s' % self.name
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'SettingName':
self.name = value
elif name == 'SettingDescription':
self.description = value
elif name == 'DefaultValue':
self.default_value = value
elif name == 'AllowedValues':
self.allowed_values = value
elif name == 'ApplyType':
self.apply_type = value
        elif name == 'IsModifiable':
            self.is_modifiable = (value.lower() == 'true')
else:
setattr(self, name, value)
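# --- Usage sketch (not part of the vendored source) -------------------------
# These result objects are normally populated by boto's SAX parsing of RDS
# API responses, but they can also be built directly, e.g. in tests. All
# names and values below are illustrative placeholders.
#
#     opt = OptionGroupOption(name='MEMCACHED', engine_name='mysql',
#                             port_required=True, default_port=11211)
#     opt.settings.append(OptionGroupOptionSetting(name='CHUNK_SIZE',
#                                                  default_value='48'))
#     for setting in opt.settings:
#         print(opt.name, opt.default_port, setting.name,
#               setting.default_value)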
# ===== end of boto/rds/optiongroup.py =====
from boto.rds.dbsecuritygroup import DBSecurityGroup
from boto.rds.parametergroup import ParameterGroup
from boto.rds.statusinfo import StatusInfo
from boto.rds.dbsubnetgroup import DBSubnetGroup
from boto.rds.vpcsecuritygroupmembership import VPCSecurityGroupMembership
from boto.resultset import ResultSet
class DBInstance(object):
"""
Represents a RDS DBInstance
Properties reference available from the AWS documentation at
http://goo.gl/sC2Kn
:ivar connection: connection
:ivar id: The name and identifier of the DBInstance
:ivar create_time: The date and time of creation
:ivar engine: The database engine being used
    :ivar status: The current status of the database, e.g. "available".
:ivar allocated_storage: The size of the disk in gigabytes (int).
:ivar auto_minor_version_upgrade: Indicates that minor version patches
are applied automatically.
:ivar endpoint: A tuple that describes the hostname and port of
the instance. This is only available when the database is
in status "available".
:ivar instance_class: Contains the name of the compute and memory
capacity class of the DB Instance.
:ivar master_username: The username that is set as master username
at creation time.
:ivar parameter_groups: Provides the list of DB Parameter Groups
applied to this DB Instance.
:ivar security_groups: Provides List of DB Security Group elements
containing only DBSecurityGroup.Name and DBSecurityGroup.Status
subelements.
:ivar availability_zone: Specifies the name of the Availability Zone
the DB Instance is located in.
:ivar backup_retention_period: Specifies the number of days for
which automatic DB Snapshots are retained.
:ivar preferred_backup_window: Specifies the daily time range during
which automated backups are created if automated backups are
enabled, as determined by the backup_retention_period.
:ivar preferred_maintenance_window: Specifies the weekly time
range (in UTC) during which system maintenance can occur. (string)
:ivar latest_restorable_time: Specifies the latest time to which
a database can be restored with point-in-time restore. (string)
:ivar multi_az: Boolean that specifies if the DB Instance is a
Multi-AZ deployment.
:ivar iops: The current number of provisioned IOPS for the DB Instance.
Can be None if this is a standard instance.
:ivar vpc_security_groups: List of VPC Security Group Membership elements
containing only VpcSecurityGroupMembership.VpcSecurityGroupId and
VpcSecurityGroupMembership.Status subelements.
:ivar pending_modified_values: Specifies that changes to the
DB Instance are pending. This element is only included when changes
are pending. Specific changes are identified by subelements.
:ivar read_replica_dbinstance_identifiers: List of read replicas
associated with this DB instance.
    :ivar status_infos: The status of a Read Replica. If the instance is not
        a read replica, this will be blank.
:ivar character_set_name: If present, specifies the name of the character
set that this instance is associated with.
:ivar subnet_group: Specifies information on the subnet group associated
with the DB instance, including the name, description, and subnets
in the subnet group.
:ivar engine_version: Indicates the database engine version.
:ivar license_model: License model information for this DB instance.
"""
def __init__(self, connection=None, id=None):
self.connection = connection
self.id = id
self.create_time = None
self.engine = None
self.status = None
self.allocated_storage = None
self.auto_minor_version_upgrade = None
self.endpoint = None
self.instance_class = None
self.master_username = None
self.parameter_groups = []
self.security_groups = []
self.read_replica_dbinstance_identifiers = []
self.availability_zone = None
self.backup_retention_period = None
self.preferred_backup_window = None
self.preferred_maintenance_window = None
self.latest_restorable_time = None
self.multi_az = False
self.iops = None
self.vpc_security_groups = None
self.pending_modified_values = None
self._in_endpoint = False
self._port = None
self._address = None
self.status_infos = None
self.character_set_name = None
self.subnet_group = None
self.engine_version = None
self.license_model = None
def __repr__(self):
return 'DBInstance:%s' % self.id
def startElement(self, name, attrs, connection):
if name == 'Endpoint':
self._in_endpoint = True
elif name == 'DBParameterGroups':
self.parameter_groups = ResultSet([('DBParameterGroup',
ParameterGroup)])
return self.parameter_groups
elif name == 'DBSecurityGroups':
self.security_groups = ResultSet([('DBSecurityGroup',
DBSecurityGroup)])
return self.security_groups
elif name == 'VpcSecurityGroups':
self.vpc_security_groups = ResultSet([('VpcSecurityGroupMembership',
VPCSecurityGroupMembership)])
return self.vpc_security_groups
elif name == 'PendingModifiedValues':
self.pending_modified_values = PendingModifiedValues()
return self.pending_modified_values
elif name == 'ReadReplicaDBInstanceIdentifiers':
self.read_replica_dbinstance_identifiers = \
ReadReplicaDBInstanceIdentifiers()
return self.read_replica_dbinstance_identifiers
elif name == 'StatusInfos':
self.status_infos = ResultSet([
('DBInstanceStatusInfo', StatusInfo)
])
return self.status_infos
elif name == 'DBSubnetGroup':
self.subnet_group = DBSubnetGroup()
return self.subnet_group
return None
def endElement(self, name, value, connection):
if name == 'DBInstanceIdentifier':
self.id = value
elif name == 'DBInstanceStatus':
self.status = value
elif name == 'InstanceCreateTime':
self.create_time = value
elif name == 'Engine':
self.engine = value
elif name == 'DBInstanceStatus':
self.status = value
elif name == 'AllocatedStorage':
self.allocated_storage = int(value)
elif name == 'AutoMinorVersionUpgrade':
self.auto_minor_version_upgrade = value.lower() == 'true'
elif name == 'DBInstanceClass':
self.instance_class = value
elif name == 'MasterUsername':
self.master_username = value
elif name == 'Port':
if self._in_endpoint:
self._port = int(value)
elif name == 'Address':
if self._in_endpoint:
self._address = value
elif name == 'Endpoint':
self.endpoint = (self._address, self._port)
self._in_endpoint = False
elif name == 'AvailabilityZone':
self.availability_zone = value
elif name == 'BackupRetentionPeriod':
self.backup_retention_period = int(value)
elif name == 'LatestRestorableTime':
self.latest_restorable_time = value
elif name == 'PreferredMaintenanceWindow':
self.preferred_maintenance_window = value
elif name == 'PreferredBackupWindow':
self.preferred_backup_window = value
elif name == 'MultiAZ':
if value.lower() == 'true':
self.multi_az = True
elif name == 'Iops':
self.iops = int(value)
elif name == 'CharacterSetName':
self.character_set_name = value
elif name == 'EngineVersion':
self.engine_version = value
elif name == 'LicenseModel':
self.license_model = value
else:
setattr(self, name, value)
@property
def security_group(self):
"""
Provide backward compatibility for previous security_group
attribute.
"""
if len(self.security_groups) > 0:
return self.security_groups[-1]
else:
return None
@property
def parameter_group(self):
"""
Provide backward compatibility for previous parameter_group
attribute.
"""
if len(self.parameter_groups) > 0:
return self.parameter_groups[-1]
else:
return None
def snapshot(self, snapshot_id):
"""
Create a new DB snapshot of this DBInstance.
        :type snapshot_id: string
        :param snapshot_id: The identifier for the DBSnapshot
:rtype: :class:`boto.rds.dbsnapshot.DBSnapshot`
:return: The newly created DBSnapshot
"""
return self.connection.create_dbsnapshot(snapshot_id, self.id)
def reboot(self):
"""
Reboot this DBInstance
        :rtype: :class:`boto.rds.dbinstance.DBInstance`
        :return: The rebooted DB instance
"""
return self.connection.reboot_dbinstance(self.id)
def update(self, validate=False):
"""
Update the DB instance's status information by making a call to fetch
the current instance attributes from the service.
:type validate: bool
        :param validate: By default, if the service returns no data about
                         the instance the update method returns quietly. If
                         the validate param is True, however, it will raise
                         a ValueError exception if no data is returned.
"""
rs = self.connection.get_all_dbinstances(self.id)
if len(rs) > 0:
for i in rs:
if i.id == self.id:
self.__dict__.update(i.__dict__)
elif validate:
raise ValueError('%s is not a valid Instance ID' % self.id)
return self.status
def stop(self, skip_final_snapshot=False, final_snapshot_id=''):
"""
Delete this DBInstance.
:type skip_final_snapshot: bool
:param skip_final_snapshot: This parameter determines whether
a final db snapshot is created before the instance is
deleted. If True, no snapshot is created. If False, a
snapshot is created before deleting the instance.
:type final_snapshot_id: str
:param final_snapshot_id: If a final snapshot is requested, this
is the identifier used for that snapshot.
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The deleted db instance.
"""
return self.connection.delete_dbinstance(self.id,
skip_final_snapshot,
final_snapshot_id)
def modify(self, param_group=None, security_groups=None,
preferred_maintenance_window=None,
master_password=None, allocated_storage=None,
instance_class=None,
backup_retention_period=None,
preferred_backup_window=None,
multi_az=False,
iops=None,
vpc_security_groups=None,
apply_immediately=False,
new_instance_id=None):
"""
Modify this DBInstance.
:type param_group: str
:param param_group: Name of DBParameterGroup to associate with
this DBInstance.
:type security_groups: list of str or list of DBSecurityGroup objects
:param security_groups: List of names of DBSecurityGroup to
authorize on this DBInstance.
:type preferred_maintenance_window: str
:param preferred_maintenance_window: The weekly time range (in
UTC) during which maintenance can occur. Default is
Sun:05:00-Sun:09:00
:type master_password: str
:param master_password: Password of master user for the DBInstance.
Must be 4-15 alphanumeric characters.
:type allocated_storage: int
:param allocated_storage: The new allocated storage size, in GBs.
Valid values are [5-1024]
:type instance_class: str
:param instance_class: The compute and memory capacity of the
DBInstance. Changes will be applied at next maintenance
window unless apply_immediately is True.
Valid values are:
* db.m1.small
* db.m1.large
* db.m1.xlarge
* db.m2.xlarge
* db.m2.2xlarge
* db.m2.4xlarge
:type apply_immediately: bool
:param apply_immediately: If true, the modifications will be
applied as soon as possible rather than waiting for the
next preferred maintenance window.
:type new_instance_id: str
:param new_instance_id: The new DB instance identifier.
:type backup_retention_period: int
:param backup_retention_period: The number of days for which
automated backups are retained. Setting this to zero
disables automated backups.
:type preferred_backup_window: str
:param preferred_backup_window: The daily time range during
which automated backups are created (if enabled). Must be
in h24:mi-hh24:mi format (UTC).
:type multi_az: bool
:param multi_az: If True, specifies the DB Instance will be
deployed in multiple availability zones.
:type iops: int
        :param iops: The amount of IOPS (input/output operations per
            second) provisioned for the DB Instance. Can be
            modified at a later date.
            Must scale linearly. For every 1000 IOPS provisioned, you
            must allocate 100 GB of storage space. This scales up to
            1 TB / 10,000 IOPS for MySQL and Oracle. MSSQL is limited
            to 700 GB / 7,000 IOPS.
            If you specify a value, it must be at least 1000 IOPS and
            you must allocate 100 GB of storage.
:type vpc_security_groups: list
        :param vpc_security_groups: List of VPCSecurityGroupMembership
            that this DBInstance is a member of.
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The modified db instance.
"""
return self.connection.modify_dbinstance(self.id,
param_group,
security_groups,
preferred_maintenance_window,
master_password,
allocated_storage,
instance_class,
backup_retention_period,
preferred_backup_window,
multi_az,
apply_immediately,
iops,
vpc_security_groups,
new_instance_id)
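# --- Usage sketch (not part of the vendored source) -------------------------
# A typical round trip with a DBInstance. The region, instance ID, snapshot
# name, and storage size are placeholders; credentials are assumed to come
# from the boto config or environment.
#
#     import boto.rds
#     conn = boto.rds.connect_to_region('us-east-1')
#     db = conn.get_all_dbinstances('mydb')[0]
#     db.snapshot('mydb-before-resize')
#     db.modify(allocated_storage=100, apply_immediately=True)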
class PendingModifiedValues(dict):
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name != 'PendingModifiedValues':
self[name] = value
class ReadReplicaDBInstanceIdentifiers(list):
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'ReadReplicaDBInstanceIdentifier':
self.append(value)
# ===== end of boto/rds/dbinstance.py =====
from boto.ec2.securitygroup import SecurityGroup
class DBSecurityGroup(object):
"""
Represents an RDS database security group
Properties reference available from the AWS documentation at
http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DeleteDBSecurityGroup.html
    :ivar Status: The current status of the security group. Possible values
        include 'active'; the reference documentation does not enumerate the
        full set.
:ivar connection: :py:class:`boto.rds.RDSConnection` associated with the current object
:ivar description: The description of the security group
:ivar ec2_groups: List of :py:class:`EC2 Security Group
<boto.ec2.securitygroup.SecurityGroup>` objects that this security
group PERMITS
:ivar ip_ranges: List of :py:class:`boto.rds.dbsecuritygroup.IPRange`
objects (containing CIDR addresses) that this security group PERMITS
:ivar name: Name of the security group
:ivar owner_id: ID of the owner of the security group. Can be 'None'
"""
def __init__(self, connection=None, owner_id=None,
name=None, description=None):
self.connection = connection
self.owner_id = owner_id
self.name = name
self.description = description
self.ec2_groups = []
self.ip_ranges = []
def __repr__(self):
return 'DBSecurityGroup:%s' % self.name
def startElement(self, name, attrs, connection):
if name == 'IPRange':
cidr = IPRange(self)
self.ip_ranges.append(cidr)
return cidr
elif name == 'EC2SecurityGroup':
ec2_grp = EC2SecurityGroup(self)
self.ec2_groups.append(ec2_grp)
return ec2_grp
else:
return None
def endElement(self, name, value, connection):
if name == 'OwnerId':
self.owner_id = value
elif name == 'DBSecurityGroupName':
self.name = value
elif name == 'DBSecurityGroupDescription':
self.description = value
elif name == 'IPRanges':
pass
else:
setattr(self, name, value)
def delete(self):
return self.connection.delete_dbsecurity_group(self.name)
def authorize(self, cidr_ip=None, ec2_group=None):
"""
Add a new rule to this DBSecurity group.
You need to pass in either a CIDR block to authorize or
and EC2 SecurityGroup.
:type cidr_ip: string
:param cidr_ip: A valid CIDR IP range to authorize
:type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup`
:param ec2_group: An EC2 security group to authorize
:rtype: bool
:return: True if successful.
"""
if isinstance(ec2_group, SecurityGroup):
group_name = ec2_group.name
group_owner_id = ec2_group.owner_id
else:
group_name = None
group_owner_id = None
return self.connection.authorize_dbsecurity_group(self.name,
cidr_ip,
group_name,
group_owner_id)
def revoke(self, cidr_ip=None, ec2_group=None):
"""
Revoke access to a CIDR range or EC2 SecurityGroup.
You need to pass in either a CIDR block or
an EC2 SecurityGroup from which to revoke access.
:type cidr_ip: string
:param cidr_ip: A valid CIDR IP range to revoke
:type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup`
:param ec2_group: An EC2 security group to revoke
:rtype: bool
:return: True if successful.
"""
if isinstance(ec2_group, SecurityGroup):
group_name = ec2_group.name
group_owner_id = ec2_group.owner_id
return self.connection.revoke_dbsecurity_group(
self.name,
ec2_security_group_name=group_name,
ec2_security_group_owner_id=group_owner_id)
# Revoking by CIDR IP range
return self.connection.revoke_dbsecurity_group(
self.name, cidr_ip=cidr_ip)
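# --- Usage sketch (not part of the vendored source) -------------------------
# Opening and then closing a CIDR range on a DB security group; the group
# name, description, and CIDR are placeholders.
#
#     import boto.rds
#     conn = boto.rds.connect_to_region('us-east-1')
#     sg = conn.create_dbsecurity_group('web-db', 'web tier access')
#     sg.authorize(cidr_ip='203.0.113.0/24')
#     sg.revoke(cidr_ip='203.0.113.0/24')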
class IPRange(object):
"""
Describes a CIDR address range for use in a DBSecurityGroup
:ivar cidr_ip: IP Address range
"""
def __init__(self, parent=None):
self.parent = parent
self.cidr_ip = None
self.status = None
def __repr__(self):
return 'IPRange:%s' % self.cidr_ip
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'CIDRIP':
self.cidr_ip = value
elif name == 'Status':
self.status = value
else:
setattr(self, name, value)
class EC2SecurityGroup(object):
"""
Describes an EC2 security group for use in a DBSecurityGroup
"""
def __init__(self, parent=None):
self.parent = parent
self.name = None
self.owner_id = None
def __repr__(self):
return 'EC2SecurityGroup:%s' % self.name
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'EC2SecurityGroupName':
self.name = value
elif name == 'EC2SecurityGroupOwnerId':
self.owner_id = value
else:
setattr(self, name, value)
# ===== end of boto/rds/dbsecuritygroup.py =====
class DBSnapshot(object):
"""
Represents a RDS DB Snapshot
Properties reference available from the AWS documentation at http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DBSnapshot.html
:ivar engine_version: Specifies the version of the database engine
:ivar license_model: License model information for the restored DB instance
:ivar allocated_storage: Specifies the allocated storage size in gigabytes (GB)
:ivar availability_zone: Specifies the name of the Availability Zone the DB Instance was located in at the time of the DB Snapshot
:ivar connection: boto.rds.RDSConnection associated with the current object
:ivar engine: Specifies the name of the database engine
:ivar id: Specifies the identifier for the DB Snapshot (DBSnapshotIdentifier)
:ivar instance_create_time: Specifies the time (UTC) when the snapshot was taken
    :ivar instance_id: Specifies the DBInstanceIdentifier of the DB Instance this DB Snapshot was created from (DBInstanceIdentifier)
:ivar master_username: Provides the master username for the DB Instance
:ivar port: Specifies the port that the database engine was listening on at the time of the snapshot
:ivar snapshot_create_time: Provides the time (UTC) when the snapshot was taken
:ivar status: Specifies the status of this DB Snapshot. Possible values are [ available, backing-up, creating, deleted, deleting, failed, modifying, rebooting, resetting-master-credentials ]
:ivar iops: Specifies the Provisioned IOPS (I/O operations per second) value of the DB instance at the time of the snapshot.
:ivar option_group_name: Provides the option group name for the DB snapshot.
:ivar percent_progress: The percentage of the estimated data that has been transferred.
:ivar snapshot_type: Provides the type of the DB snapshot.
:ivar source_region: The region that the DB snapshot was created in or copied from.
:ivar vpc_id: Provides the Vpc Id associated with the DB snapshot.
"""
def __init__(self, connection=None, id=None):
self.connection = connection
self.id = id
self.engine = None
self.engine_version = None
self.snapshot_create_time = None
self.instance_create_time = None
self.port = None
self.status = None
self.availability_zone = None
self.master_username = None
self.allocated_storage = None
self.instance_id = None
self.license_model = None
self.iops = None
self.option_group_name = None
self.percent_progress = None
self.snapshot_type = None
self.source_region = None
self.vpc_id = None
def __repr__(self):
return 'DBSnapshot:%s' % self.id
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Engine':
self.engine = value
elif name == 'EngineVersion':
self.engine_version = value
elif name == 'InstanceCreateTime':
self.instance_create_time = value
elif name == 'SnapshotCreateTime':
self.snapshot_create_time = value
elif name == 'DBInstanceIdentifier':
self.instance_id = value
elif name == 'DBSnapshotIdentifier':
self.id = value
elif name == 'Port':
self.port = int(value)
elif name == 'Status':
self.status = value
elif name == 'AvailabilityZone':
self.availability_zone = value
elif name == 'MasterUsername':
self.master_username = value
elif name == 'AllocatedStorage':
self.allocated_storage = int(value)
elif name == 'SnapshotTime':
self.time = value
elif name == 'LicenseModel':
self.license_model = value
elif name == 'Iops':
self.iops = int(value)
elif name == 'OptionGroupName':
self.option_group_name = value
elif name == 'PercentProgress':
self.percent_progress = int(value)
elif name == 'SnapshotType':
self.snapshot_type = value
elif name == 'SourceRegion':
self.source_region = value
elif name == 'VpcId':
self.vpc_id = value
else:
setattr(self, name, value)
def update(self, validate=False):
"""
Update the DB snapshot's status information by making a call to fetch
the current snapshot attributes from the service.
:type validate: bool
        :param validate: By default, if the service returns no data about
                         the snapshot the update method returns quietly. If
                         the validate param is True, however, it will
                         raise a ValueError exception if no data is
                         returned.
"""
rs = self.connection.get_all_dbsnapshots(self.id)
if len(rs) > 0:
for i in rs:
if i.id == self.id:
self.__dict__.update(i.__dict__)
elif validate:
raise ValueError('%s is not a valid Snapshot ID' % self.id)
return self.status
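# --- Usage sketch (not part of the vendored source) -------------------------
# Creating a snapshot and polling update() until it becomes available; the
# identifiers and sleep interval are placeholders.
#
#     import time
#     import boto.rds
#     conn = boto.rds.connect_to_region('us-east-1')
#     snap = conn.create_dbsnapshot('mydb-snap', 'mydb')
#     while snap.update(validate=True) != 'available':
#         time.sleep(30)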
# ===== end of boto/rds/dbsnapshot.py =====
class ParameterGroup(dict):
def __init__(self, connection=None):
dict.__init__(self)
self.connection = connection
self.name = None
self.description = None
self.engine = None
self._current_param = None
def __repr__(self):
return 'ParameterGroup:%s' % self.name
def startElement(self, name, attrs, connection):
if name == 'Parameter':
if self._current_param:
self[self._current_param.name] = self._current_param
self._current_param = Parameter(self)
return self._current_param
def endElement(self, name, value, connection):
if name == 'DBParameterGroupName':
self.name = value
elif name == 'Description':
self.description = value
elif name == 'Engine':
self.engine = value
else:
setattr(self, name, value)
def modifiable(self):
mod = []
for key in self:
p = self[key]
if p.is_modifiable:
mod.append(p)
return mod
def get_params(self):
pg = self.connection.get_all_dbparameters(self.name)
self.update(pg)
    def add_param(self, name, value, apply_method):
        param = Parameter()
        param.name = name
        param.value = value
        param.apply_method = apply_method
        # ParameterGroup subclasses dict and has no ``params`` attribute;
        # store the new parameter under its name.
        self[name] = param
class Parameter(object):
"""
Represents a RDS Parameter
"""
ValidTypes = {'integer' : int,
'string' : str,
'boolean' : bool}
ValidSources = ['user', 'system', 'engine-default']
ValidApplyTypes = ['static', 'dynamic']
ValidApplyMethods = ['immediate', 'pending-reboot']
def __init__(self, group=None, name=None):
self.group = group
self.name = name
self._value = None
self.type = 'string'
self.source = None
self.is_modifiable = True
self.description = None
self.apply_method = None
self.allowed_values = None
def __repr__(self):
return 'Parameter:%s' % self.name
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'ParameterName':
self.name = value
elif name == 'ParameterValue':
self._value = value
elif name == 'DataType':
if value in self.ValidTypes:
self.type = value
elif name == 'Source':
if value in self.ValidSources:
self.source = value
elif name == 'IsModifiable':
if value.lower() == 'true':
self.is_modifiable = True
else:
self.is_modifiable = False
elif name == 'Description':
self.description = value
elif name == 'ApplyType':
if value in self.ValidApplyTypes:
self.apply_type = value
elif name == 'AllowedValues':
self.allowed_values = value
else:
setattr(self, name, value)
def merge(self, d, i):
prefix = 'Parameters.member.%d.' % i
if self.name:
d[prefix+'ParameterName'] = self.name
if self._value is not None:
d[prefix+'ParameterValue'] = self._value
        # Guard on apply_method, which is what is actually sent.
        if self.apply_method:
            d[prefix+'ApplyMethod'] = self.apply_method
def _set_string_value(self, value):
if not isinstance(value, basestring):
raise ValueError('value must be of type str')
if self.allowed_values:
choices = self.allowed_values.split(',')
if value not in choices:
raise ValueError('value must be in %s' % self.allowed_values)
self._value = value
def _set_integer_value(self, value):
if isinstance(value, basestring):
value = int(value)
if isinstance(value, int) or isinstance(value, long):
if self.allowed_values:
min, max = self.allowed_values.split('-')
if value < int(min) or value > int(max):
raise ValueError('range is %s' % self.allowed_values)
self._value = value
else:
raise ValueError('value must be integer')
def _set_boolean_value(self, value):
if isinstance(value, bool):
self._value = value
elif isinstance(value, basestring):
if value.lower() == 'true':
self._value = True
else:
self._value = False
else:
raise ValueError('value must be boolean')
def set_value(self, value):
if self.type == 'string':
self._set_string_value(value)
elif self.type == 'integer':
self._set_integer_value(value)
elif self.type == 'boolean':
self._set_boolean_value(value)
else:
raise TypeError('unknown type (%s)' % self.type)
def get_value(self):
if self._value is None:
return self._value
if self.type == 'string':
return self._value
elif self.type == 'integer':
if not isinstance(self._value, int) and not isinstance(self._value, long):
self._set_integer_value(self._value)
return self._value
elif self.type == 'boolean':
if not isinstance(self._value, bool):
self._set_boolean_value(self._value)
return self._value
else:
raise TypeError('unknown type (%s)' % self.type)
value = property(get_value, set_value, 'The value of the parameter')
def apply(self, immediate=False):
if immediate:
self.apply_method = 'immediate'
else:
self.apply_method = 'pending-reboot'
self.group.connection.modify_parameter_group(self.group.name, [self])
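# --- Usage sketch (not part of the vendored source) -------------------------
# Changing one parameter in a DB parameter group; the group and parameter
# names and the value are placeholders. Note that get_all_dbparameters
# returns one page of parameters, so in real use the parameter you want may
# require paging via the Marker argument.
#
#     import boto.rds
#     conn = boto.rds.connect_to_region('us-east-1')
#     pg = conn.get_all_dbparameters('my-param-group')
#     param = pg['max_connections']
#     param.value = 250
#     param.apply(immediate=True)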
# ===== end of boto/rds/parametergroup.py =====
from boto.ec2.ec2object import TaggedEC2Object
from boto.resultset import ResultSet
class Icmp(object):
"""
Defines the ICMP code and type.
"""
def __init__(self, connection=None):
self.code = None
self.type = None
def __repr__(self):
        return 'Icmp:(code:%s, type:%s)' % (self.code, self.type)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'code':
self.code = value
elif name == 'type':
self.type = value
class NetworkAcl(TaggedEC2Object):
def __init__(self, connection=None):
super(NetworkAcl, self).__init__(connection)
self.id = None
self.vpc_id = None
self.network_acl_entries = []
self.associations = []
def __repr__(self):
return 'NetworkAcl:%s' % self.id
def startElement(self, name, attrs, connection):
result = super(NetworkAcl, self).startElement(name, attrs, connection)
if result is not None:
# Parent found an interested element, just return it
return result
if name == 'entrySet':
self.network_acl_entries = ResultSet([('item', NetworkAclEntry)])
return self.network_acl_entries
elif name == 'associationSet':
self.associations = ResultSet([('item', NetworkAclAssociation)])
return self.associations
else:
return None
def endElement(self, name, value, connection):
if name == 'networkAclId':
self.id = value
elif name == 'vpcId':
self.vpc_id = value
else:
setattr(self, name, value)
class NetworkAclEntry(object):
def __init__(self, connection=None):
self.rule_number = None
self.protocol = None
self.rule_action = None
self.egress = None
self.cidr_block = None
self.port_range = PortRange()
self.icmp = Icmp()
def __repr__(self):
return 'Acl:%s' % self.rule_number
def startElement(self, name, attrs, connection):
if name == 'portRange':
return self.port_range
elif name == 'icmpTypeCode':
return self.icmp
else:
return None
def endElement(self, name, value, connection):
if name == 'cidrBlock':
self.cidr_block = value
elif name == 'egress':
self.egress = value
elif name == 'protocol':
self.protocol = value
elif name == 'ruleAction':
self.rule_action = value
elif name == 'ruleNumber':
self.rule_number = value
class NetworkAclAssociation(object):
def __init__(self, connection=None):
self.id = None
self.subnet_id = None
self.network_acl_id = None
def __repr__(self):
return 'NetworkAclAssociation:%s' % self.id
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'networkAclAssociationId':
self.id = value
elif name == 'networkAclId':
self.network_acl_id = value
elif name == 'subnetId':
self.subnet_id = value
class PortRange(object):
"""
Define the port range for the ACL entry if it is tcp / udp
"""
def __init__(self, connection=None):
self.from_port = None
self.to_port = None
def __repr__(self):
return 'PortRange:(%s-%s)' % ( self.from_port, self.to_port)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'from':
self.from_port = value
elif name == 'to':
self.to_port = value
# ===== end of boto/vpc/networkacl.py =====
import boto
from datetime import datetime
from boto.resultset import ResultSet
"""
Represents a VPN Connectionn
"""
from boto.ec2.ec2object import TaggedEC2Object
class VpnConnectionOptions(object):
"""
Represents VPN connection options
:ivar static_routes_only: Indicates whether the VPN connection uses static
routes only. Static routes must be used for devices that don't support
BGP.
"""
def __init__(self, static_routes_only=None):
self.static_routes_only = static_routes_only
def __repr__(self):
return 'VpnConnectionOptions'
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'staticRoutesOnly':
self.static_routes_only = True if value == 'true' else False
else:
setattr(self, name, value)
class VpnStaticRoute(object):
"""
Represents a static route for a VPN connection.
:ivar destination_cidr_block: The CIDR block associated with the local
subnet of the customer data center.
:ivar source: Indicates how the routes were provided.
:ivar state: The current state of the static route.
"""
    def __init__(self, destination_cidr_block=None, source=None, state=None):
        self.destination_cidr_block = destination_cidr_block
        self.source = source
        # The documented ivar (and the one endElement sets) is ``state``;
        # the original assignment to ``available`` was a bug.
        self.state = state
def __repr__(self):
return 'VpnStaticRoute: %s' % self.destination_cidr_block
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'destinationCidrBlock':
self.destination_cidr_block = value
elif name == 'source':
self.source = value
elif name == 'state':
self.state = value
else:
setattr(self, name, value)
class VpnTunnel(object):
"""
Represents telemetry for a VPN tunnel
:ivar outside_ip_address: The Internet-routable IP address of the
virtual private gateway's outside interface.
:ivar status: The status of the VPN tunnel. Valid values: UP | DOWN
:ivar last_status_change: The date and time of the last change in status.
:ivar status_message: If an error occurs, a description of the error.
:ivar accepted_route_count: The number of accepted routes.
"""
def __init__(self, outside_ip_address=None, status=None, last_status_change=None,
status_message=None, accepted_route_count=None):
self.outside_ip_address = outside_ip_address
self.status = status
self.last_status_change = last_status_change
self.status_message = status_message
self.accepted_route_count = accepted_route_count
def __repr__(self):
return 'VpnTunnel: %s' % self.outside_ip_address
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'outsideIpAddress':
self.outside_ip_address = value
elif name == 'status':
self.status = value
elif name == 'lastStatusChange':
self.last_status_change = datetime.strptime(value,
'%Y-%m-%dT%H:%M:%S.%fZ')
elif name == 'statusMessage':
self.status_message = value
elif name == 'acceptedRouteCount':
try:
value = int(value)
except ValueError:
                boto.log.warning(
                    'Error converting acceptedRouteCount (%s) to int' % value)
self.accepted_route_count = value
else:
setattr(self, name, value)
class VpnConnection(TaggedEC2Object):
"""
Represents a VPN Connection
:ivar id: The ID of the VPN connection.
:ivar state: The current state of the VPN connection.
Valid values: pending | available | deleting | deleted
:ivar customer_gateway_configuration: The configuration information for the
VPN connection's customer gateway (in the native XML format). This
element is always present in the
:class:`boto.vpc.VPCConnection.create_vpn_connection` response;
however, it's present in the
:class:`boto.vpc.VPCConnection.get_all_vpn_connections` response only
if the VPN connection is in the pending or available state.
:ivar type: The type of VPN connection (ipsec.1).
:ivar customer_gateway_id: The ID of the customer gateway at your end of
the VPN connection.
:ivar vpn_gateway_id: The ID of the virtual private gateway
at the AWS side of the VPN connection.
:ivar tunnels: A list of the vpn tunnels (always 2)
:ivar options: The option set describing the VPN connection.
:ivar static_routes: A list of static routes associated with a VPN
connection.
"""
def __init__(self, connection=None):
super(VpnConnection, self).__init__(connection)
self.id = None
self.state = None
self.customer_gateway_configuration = None
self.type = None
self.customer_gateway_id = None
self.vpn_gateway_id = None
self.tunnels = []
self.options = None
self.static_routes = []
def __repr__(self):
return 'VpnConnection:%s' % self.id
def startElement(self, name, attrs, connection):
retval = super(VpnConnection, self).startElement(name, attrs, connection)
if retval is not None:
return retval
if name == 'vgwTelemetry':
self.tunnels = ResultSet([('item', VpnTunnel)])
return self.tunnels
elif name == 'routes':
self.static_routes = ResultSet([('item', VpnStaticRoute)])
return self.static_routes
elif name == 'options':
self.options = VpnConnectionOptions()
return self.options
return None
def endElement(self, name, value, connection):
if name == 'vpnConnectionId':
self.id = value
elif name == 'state':
self.state = value
elif name == 'customerGatewayConfiguration':
self.customer_gateway_configuration = value
elif name == 'type':
self.type = value
elif name == 'customerGatewayId':
self.customer_gateway_id = value
elif name == 'vpnGatewayId':
self.vpn_gateway_id = value
else:
setattr(self, name, value)
def delete(self, dry_run=False):
return self.connection.delete_vpn_connection(
self.id,
dry_run=dry_run
)
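# --- Usage sketch (not part of the vendored source) -------------------------
# Inspecting tunnel telemetry for each VPN connection. The region is a
# placeholder, and get_all_vpn_connections is assumed to be available on
# the VPC connection class (it is defined elsewhere in boto.vpc).
#
#     import boto.vpc
#     conn = boto.vpc.connect_to_region('us-east-1')
#     for vpn in conn.get_all_vpn_connections():
#         for tunnel in vpn.tunnels:
#             print(vpn.id, tunnel.outside_ip_address, tunnel.status)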
# ===== end of boto/vpc/vpnconnection.py =====
from boto.ec2.ec2object import TaggedEC2Object
class VPC(TaggedEC2Object):
def __init__(self, connection=None):
"""
Represents a VPC.
:ivar id: The unique ID of the VPC.
:ivar dhcp_options_id: The ID of the set of DHCP options you've associated with the VPC
(or default if the default options are associated with the VPC).
:ivar state: The current state of the VPC.
:ivar cidr_block: The CIDR block for the VPC.
:ivar is_default: Indicates whether the VPC is the default VPC.
:ivar instance_tenancy: The allowed tenancy of instances launched into the VPC.
:ivar classic_link_enabled: Indicates whether ClassicLink is enabled.
"""
super(VPC, self).__init__(connection)
self.id = None
self.dhcp_options_id = None
self.state = None
self.cidr_block = None
self.is_default = None
self.instance_tenancy = None
self.classic_link_enabled = None
def __repr__(self):
return 'VPC:%s' % self.id
def endElement(self, name, value, connection):
if name == 'vpcId':
self.id = value
elif name == 'dhcpOptionsId':
self.dhcp_options_id = value
elif name == 'state':
self.state = value
elif name == 'cidrBlock':
self.cidr_block = value
elif name == 'isDefault':
self.is_default = True if value == 'true' else False
elif name == 'instanceTenancy':
self.instance_tenancy = value
elif name == 'classicLinkEnabled':
self.classic_link_enabled = value
else:
setattr(self, name, value)
def delete(self):
return self.connection.delete_vpc(self.id)
def _update(self, updated):
self.__dict__.update(updated.__dict__)
def _get_status_then_update_vpc(self, get_status_method, validate=False,
dry_run=False):
vpc_list = get_status_method(
[self.id],
dry_run=dry_run
)
if len(vpc_list):
updated_vpc = vpc_list[0]
self._update(updated_vpc)
elif validate:
raise ValueError('%s is not a valid VPC ID' % (self.id,))
def update(self, validate=False, dry_run=False):
self._get_status_then_update_vpc(
self.connection.get_all_vpcs,
validate=validate,
dry_run=dry_run
)
return self.state
def update_classic_link_enabled(self, validate=False, dry_run=False):
"""
        Updates the VPC's classic_link_enabled attribute.
:rtype: bool
:return: self.classic_link_enabled after update has occurred.
"""
self._get_status_then_update_vpc(
self.connection.get_all_classic_link_vpcs,
validate=validate,
dry_run=dry_run
)
return self.classic_link_enabled
def disable_classic_link(self, dry_run=False):
"""
Disables ClassicLink for a VPC. You cannot disable ClassicLink for a
VPC that has EC2-Classic instances linked to it.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
return self.connection.disable_vpc_classic_link(self.id,
dry_run=dry_run)
def enable_classic_link(self, dry_run=False):
"""
Enables a VPC for ClassicLink. You can then link EC2-Classic instances
to your ClassicLink-enabled VPC to allow communication over private IP
addresses. You cannot enable your VPC for ClassicLink if any of your
VPC's route tables have existing routes for address ranges within the
10.0.0.0/8 IP address range, excluding local routes for VPCs in the
10.0.0.0/16 and 10.1.0.0/16 IP address ranges.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
return self.connection.enable_vpc_classic_link(self.id,
dry_run=dry_run)
def attach_classic_instance(self, instance_id, groups, dry_run=False):
"""
Links an EC2-Classic instance to a ClassicLink-enabled VPC through one
or more of the VPC's security groups. You cannot link an EC2-Classic
instance to more than one VPC at a time. You can only link an instance
that's in the running state. An instance is automatically unlinked from
a VPC when it's stopped. You can link it to the VPC again when you
restart it.
After you've linked an instance, you cannot change the VPC security
groups that are associated with it. To change the security groups, you
must first unlink the instance, and then link it again.
Linking your instance to a VPC is sometimes referred to as attaching
your instance.
        :type instance_id: str
        :param instance_id: The ID of the EC2-Classic instance to link.
        :type groups: list
        :param groups: The ID of one or more of the VPC's security groups.
You cannot specify security groups from a different VPC. The
members of the list can be
:class:`boto.ec2.securitygroup.SecurityGroup` objects or
strings of the id's of the security groups.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
return self.connection.attach_classic_link_vpc(
vpc_id=self.id,
instance_id=instance_id,
groups=groups,
dry_run=dry_run
)
def detach_classic_instance(self, instance_id, dry_run=False):
"""
Unlinks a linked EC2-Classic instance from a VPC. After the instance
has been unlinked, the VPC security groups are no longer associated
with it. An instance is automatically unlinked from a VPC when
it's stopped.
        :type instance_id: str
        :param instance_id: The ID of the EC2-Classic instance to unlink.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
return self.connection.detach_classic_link_vpc(
vpc_id=self.id,
instance_id=instance_id,
dry_run=dry_run
)
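# --- Usage sketch (not part of the vendored source) -------------------------
# Linking an EC2-Classic instance to a ClassicLink-enabled VPC; the VPC ID,
# instance ID, and security group ID are placeholders.
#
#     import boto.vpc
#     conn = boto.vpc.connect_to_region('us-east-1')
#     vpc = conn.get_all_vpcs(['vpc-12345678'])[0]
#     vpc.enable_classic_link()
#     vpc.attach_classic_instance('i-0abcd1234', groups=['sg-11223344'])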
# ===== end of boto/vpc/vpc.py =====
from boto.ec2.connection import EC2Connection
from boto.resultset import ResultSet
from boto.vpc.vpc import VPC
from boto.vpc.customergateway import CustomerGateway
from boto.vpc.networkacl import NetworkAcl
from boto.vpc.routetable import RouteTable
from boto.vpc.internetgateway import InternetGateway
from boto.vpc.vpngateway import VpnGateway, Attachment
from boto.vpc.dhcpoptions import DhcpOptions
from boto.vpc.subnet import Subnet
from boto.vpc.vpnconnection import VpnConnection
from boto.vpc.vpc_peering_connection import VpcPeeringConnection
from boto.ec2 import RegionData
from boto.regioninfo import RegionInfo, get_regions
from boto.regioninfo import connect
def regions(**kw_params):
"""
Get all available regions for the EC2 service.
You may pass any of the arguments accepted by the VPCConnection
object's constructor as keyword arguments and they will be
passed along to the VPCConnection object.
:rtype: list
:return: A list of :class:`boto.ec2.regioninfo.RegionInfo`
"""
return get_regions('ec2', connection_cls=VPCConnection)
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.vpc.VPCConnection`.
Any additional parameters after the region_name are passed on to
the connect method of the region object.
:type: str
:param region_name: The name of the region to connect to.
:rtype: :class:`boto.vpc.VPCConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
return connect('ec2', region_name, connection_cls=VPCConnection,
**kw_params)
class VPCConnection(EC2Connection):
# VPC methods
def get_all_vpcs(self, vpc_ids=None, filters=None, dry_run=False):
"""
Retrieve information about your VPCs. You can filter results to
return information only about those VPCs that match your search
parameters. Otherwise, all VPCs associated with your account
are returned.
:type vpc_ids: list
:param vpc_ids: A list of strings with the desired VPC ID's
:type filters: list of tuples or dict
:param filters: A list of tuples or dict containing filters. Each tuple
or dict item consists of a filter key and a filter value.
Possible filter keys are:
* *state* - a list of states of the VPC (pending or available)
* *cidrBlock* - a list CIDR blocks of the VPC
* *dhcpOptionsId* - a list of IDs of a set of DHCP options
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: list
:return: A list of :class:`boto.vpc.vpc.VPC`
"""
params = {}
if vpc_ids:
self.build_list_params(params, vpc_ids, 'VpcId')
if filters:
self.build_filter_params(params, filters)
if dry_run:
params['DryRun'] = 'true'
return self.get_list('DescribeVpcs', params, [('item', VPC)])
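    # --- Usage sketch (not part of the vendored source) ----------------------
    # Filtering VPCs by state and CIDR block; the region and values are
    # placeholders.
    #
    #     conn = boto.vpc.connect_to_region('us-east-1')
    #     available = conn.get_all_vpcs(
    #         filters={'state': 'available', 'cidrBlock': '10.0.0.0/16'})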
def create_vpc(self, cidr_block, instance_tenancy=None, dry_run=False):
"""
Create a new Virtual Private Cloud.
:type cidr_block: str
:param cidr_block: A valid CIDR block
:type instance_tenancy: str
:param instance_tenancy: The supported tenancy options for instances
launched into the VPC. Valid values are 'default' and 'dedicated'.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: The newly created VPC
:return: A :class:`boto.vpc.vpc.VPC` object
"""
params = {'CidrBlock': cidr_block}
if instance_tenancy:
params['InstanceTenancy'] = instance_tenancy
if dry_run:
params['DryRun'] = 'true'
return self.get_object('CreateVpc', params, VPC)
def delete_vpc(self, vpc_id, dry_run=False):
"""
Delete a Virtual Private Cloud.
:type vpc_id: str
:param vpc_id: The ID of the vpc to be deleted.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
params = {'VpcId': vpc_id}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('DeleteVpc', params)
def modify_vpc_attribute(self, vpc_id,
enable_dns_support=None,
enable_dns_hostnames=None, dry_run=False):
"""
Modifies the specified attribute of the specified VPC.
You can only modify one attribute at a time.
:type vpc_id: str
:param vpc_id: The ID of the vpc to be deleted.
:type enable_dns_support: bool
:param enable_dns_support: Specifies whether the DNS server
provided by Amazon is enabled for the VPC.
:type enable_dns_hostnames: bool
:param enable_dns_hostnames: Specifies whether DNS hostnames are
provided for the instances launched in this VPC. You can only
set this attribute to ``true`` if EnableDnsSupport
is also ``true``.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
"""
params = {'VpcId': vpc_id}
if enable_dns_support is not None:
if enable_dns_support:
params['EnableDnsSupport.Value'] = 'true'
else:
params['EnableDnsSupport.Value'] = 'false'
if enable_dns_hostnames is not None:
if enable_dns_hostnames:
params['EnableDnsHostnames.Value'] = 'true'
else:
params['EnableDnsHostnames.Value'] = 'false'
if dry_run:
params['DryRun'] = 'true'
return self.get_status('ModifyVpcAttribute', params)
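    # --- Usage sketch (not part of the vendored source) ----------------------
    # Creating a VPC and enabling DNS support and hostnames. Because only one
    # attribute can be modified per call, two calls are needed; the CIDR
    # block is a placeholder.
    #
    #     conn = boto.vpc.connect_to_region('us-east-1')
    #     vpc = conn.create_vpc('10.0.0.0/16')
    #     conn.modify_vpc_attribute(vpc.id, enable_dns_support=True)
    #     conn.modify_vpc_attribute(vpc.id, enable_dns_hostnames=True)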
# Route Tables
def get_all_route_tables(self, route_table_ids=None, filters=None,
dry_run=False):
"""
Retrieve information about your routing tables. You can filter results
to return information only about those route tables that match your
search parameters. Otherwise, all route tables associated with your
account are returned.
:type route_table_ids: list
:param route_table_ids: A list of strings with the desired route table
IDs.
:type filters: list of tuples or dict
:param filters: A list of tuples or dict containing filters. Each tuple
or dict item consists of a filter key and a filter value.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: list
:return: A list of :class:`boto.vpc.routetable.RouteTable`
"""
params = {}
if route_table_ids:
self.build_list_params(params, route_table_ids, "RouteTableId")
if filters:
self.build_filter_params(params, filters)
if dry_run:
params['DryRun'] = 'true'
return self.get_list('DescribeRouteTables', params,
[('item', RouteTable)])
def associate_route_table(self, route_table_id, subnet_id, dry_run=False):
"""
Associates a route table with a specific subnet.
:type route_table_id: str
:param route_table_id: The ID of the route table to associate.
:type subnet_id: str
:param subnet_id: The ID of the subnet to associate with.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: str
:return: The ID of the association created
"""
params = {
'RouteTableId': route_table_id,
'SubnetId': subnet_id
}
if dry_run:
params['DryRun'] = 'true'
result = self.get_object('AssociateRouteTable', params, ResultSet)
return result.associationId
def disassociate_route_table(self, association_id, dry_run=False):
"""
Removes an association from a route table. This will cause all subnets
that would've used this association to now use the main routing
association instead.
:type association_id: str
:param association_id: The ID of the association to disassociate.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
params = {'AssociationId': association_id}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('DisassociateRouteTable', params)
def create_route_table(self, vpc_id, dry_run=False):
"""
Creates a new route table.
:type vpc_id: str
:param vpc_id: The VPC ID to associate this route table with.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: The newly created route table
:return: A :class:`boto.vpc.routetable.RouteTable` object
"""
params = {'VpcId': vpc_id}
if dry_run:
params['DryRun'] = 'true'
return self.get_object('CreateRouteTable', params, RouteTable)
def delete_route_table(self, route_table_id, dry_run=False):
"""
Delete a route table.
:type route_table_id: str
:param route_table_id: The ID of the route table to delete.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
params = {'RouteTableId': route_table_id}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('DeleteRouteTable', params)
def _replace_route_table_association(self, association_id,
route_table_id, dry_run=False):
"""
Helper function for replace_route_table_association and
replace_route_table_association_with_assoc. Should not be used directly.
:type association_id: str
:param association_id: The ID of the existing association to replace.
:type route_table_id: str
:param route_table_id: The route table to ID to be used in the
association.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
        :rtype: ResultSet
        :return: ResultSet of the Amazon response
"""
params = {
'AssociationId': association_id,
'RouteTableId': route_table_id
}
if dry_run:
params['DryRun'] = 'true'
return self.get_object('ReplaceRouteTableAssociation', params,
ResultSet)
def replace_route_table_assocation(self, association_id,
route_table_id, dry_run=False):
"""
Replaces a route association with a new route table. This can be
used to replace the 'main' route table by using the main route
table association instead of the more common subnet type
association.
NOTE: It may be better to use replace_route_table_association_with_assoc
instead of this function; this function does not return the new
association ID. This function is retained for backwards compatibility.
:type association_id: str
:param association_id: The ID of the existing association to replace.
:type route_table_id: str
:param route_table_id: The route table to ID to be used in the
association.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
return self._replace_route_table_association(
association_id, route_table_id, dry_run=dry_run).status
def replace_route_table_association_with_assoc(self, association_id,
route_table_id,
dry_run=False):
"""
Replaces a route association with a new route table. This can be
used to replace the 'main' route table by using the main route
table association instead of the more common subnet type
association. Returns the new association ID.
:type association_id: str
:param association_id: The ID of the existing association to replace.
:type route_table_id: str
:param route_table_id: The route table to ID to be used in the
association.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: str
:return: New association ID
"""
return self._replace_route_table_association(
association_id, route_table_id, dry_run=dry_run).newAssociationId
def create_route(self, route_table_id, destination_cidr_block,
gateway_id=None, instance_id=None, interface_id=None,
vpc_peering_connection_id=None,
dry_run=False):
"""
Creates a new route in the route table within a VPC. The route's target
can be either a gateway attached to the VPC or a NAT instance in the
VPC.
:type route_table_id: str
:param route_table_id: The ID of the route table for the route.
:type destination_cidr_block: str
:param destination_cidr_block: The CIDR address block used for the
destination match.
:type gateway_id: str
:param gateway_id: The ID of the gateway attached to your VPC.
:type instance_id: str
:param instance_id: The ID of a NAT instance in your VPC.
:type interface_id: str
:param interface_id: Allows routing to network interface attachments.
:type vpc_peering_connection_id: str
:param vpc_peering_connection_id: Allows routing to VPC peering
connection.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
params = {
'RouteTableId': route_table_id,
'DestinationCidrBlock': destination_cidr_block
}
if gateway_id is not None:
params['GatewayId'] = gateway_id
elif instance_id is not None:
params['InstanceId'] = instance_id
elif interface_id is not None:
params['NetworkInterfaceId'] = interface_id
elif vpc_peering_connection_id is not None:
params['VpcPeeringConnectionId'] = vpc_peering_connection_id
if dry_run:
params['DryRun'] = 'true'
return self.get_status('CreateRoute', params)
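    # --- Usage sketch (not part of the vendored source) ----------------------
    # Wiring a subnet to the internet: a new route table, an association, and
    # a default route through an internet gateway. The IDs are placeholders;
    # create_internet_gateway and attach_internet_gateway are assumed to be
    # the gateway methods defined elsewhere on this connection class.
    #
    #     conn = boto.vpc.connect_to_region('us-east-1')
    #     rt = conn.create_route_table('vpc-12345678')
    #     conn.associate_route_table(rt.id, 'subnet-12345678')
    #     igw = conn.create_internet_gateway()
    #     conn.attach_internet_gateway(igw.id, 'vpc-12345678')
    #     conn.create_route(rt.id, '0.0.0.0/0', gateway_id=igw.id)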
def replace_route(self, route_table_id, destination_cidr_block,
gateway_id=None, instance_id=None, interface_id=None,
vpc_peering_connection_id=None,
dry_run=False):
"""
Replaces an existing route within a route table in a VPC.
:type route_table_id: str
:param route_table_id: The ID of the route table for the route.
:type destination_cidr_block: str
:param destination_cidr_block: The CIDR address block used for the
destination match.
:type gateway_id: str
:param gateway_id: The ID of the gateway attached to your VPC.
:type instance_id: str
:param instance_id: The ID of a NAT instance in your VPC.
:type interface_id: str
:param interface_id: Allows routing to network interface attachments.
:type vpc_peering_connection_id: str
:param vpc_peering_connection_id: Allows routing to VPC peering
connection.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
params = {
'RouteTableId': route_table_id,
'DestinationCidrBlock': destination_cidr_block
}
if gateway_id is not None:
params['GatewayId'] = gateway_id
elif instance_id is not None:
params['InstanceId'] = instance_id
elif interface_id is not None:
params['NetworkInterfaceId'] = interface_id
elif vpc_peering_connection_id is not None:
params['VpcPeeringConnectionId'] = vpc_peering_connection_id
if dry_run:
params['DryRun'] = 'true'
return self.get_status('ReplaceRoute', params)
def delete_route(self, route_table_id, destination_cidr_block,
dry_run=False):
"""
Deletes a route from a route table within a VPC.
:type route_table_id: str
:param route_table_id: The ID of the route table with the route.
:type destination_cidr_block: str
:param destination_cidr_block: The CIDR address block used for
destination match.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
params = {
'RouteTableId': route_table_id,
'DestinationCidrBlock': destination_cidr_block
}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('DeleteRoute', params)
#Network ACLs
def get_all_network_acls(self, network_acl_ids=None, filters=None):
"""
Retrieve information about your network acls. You can filter results
to return information only about those network acls that match your
search parameters. Otherwise, all network acls associated with your
account are returned.
:type network_acl_ids: list
:param network_acl_ids: A list of strings with the desired network ACL
IDs.
:type filters: list of tuples or dict
:param filters: A list of tuples or dict containing filters. Each tuple
or dict item consists of a filter key and a filter value.
:rtype: list
:return: A list of :class:`boto.vpc.networkacl.NetworkAcl`
"""
params = {}
if network_acl_ids:
self.build_list_params(params, network_acl_ids, "NetworkAclId")
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeNetworkAcls', params,
[('item', NetworkAcl)])
def associate_network_acl(self, network_acl_id, subnet_id):
"""
Associates a network acl with a specific subnet.
:type network_acl_id: str
:param network_acl_id: The ID of the network ACL to associate.
:type subnet_id: str
:param subnet_id: The ID of the subnet to associate with.
:rtype: str
:return: The ID of the association created
"""
acl = self.get_all_network_acls(filters=[('association.subnet-id', subnet_id)])[0]
association = [ association for association in acl.associations if association.subnet_id == subnet_id ][0]
params = {
'AssociationId': association.id,
'NetworkAclId': network_acl_id
}
result = self.get_object('ReplaceNetworkAclAssociation', params, ResultSet)
return result.newAssociationId
def disassociate_network_acl(self, subnet_id, vpc_id=None):
"""
Figures out what the default ACL is for the VPC, and associates
        the subnet with the VPC's default network ACL.
:type subnet_id: str
:param subnet_id: The ID of the subnet to which the ACL belongs.
:type vpc_id: str
:param vpc_id: The ID of the VPC to which the ACL/subnet belongs. Queries EC2 if omitted.
:rtype: str
:return: The ID of the association created
"""
if not vpc_id:
vpc_id = self.get_all_subnets([subnet_id])[0].vpc_id
acls = self.get_all_network_acls(filters=[('vpc-id', vpc_id), ('default', 'true')])
default_acl_id = acls[0].id
return self.associate_network_acl(default_acl_id, subnet_id)
def create_network_acl(self, vpc_id):
"""
Creates a new network ACL.
:type vpc_id: str
:param vpc_id: The VPC ID to associate this network ACL with.
:rtype: The newly created network ACL
:return: A :class:`boto.vpc.networkacl.NetworkAcl` object
"""
params = {'VpcId': vpc_id}
return self.get_object('CreateNetworkAcl', params, NetworkAcl)
def delete_network_acl(self, network_acl_id):
"""
Delete a network ACL
:type network_acl_id: str
:param network_acl_id: The ID of the network_acl to delete.
:rtype: bool
:return: True if successful
"""
params = {'NetworkAclId': network_acl_id}
return self.get_status('DeleteNetworkAcl', params)
def create_network_acl_entry(self, network_acl_id, rule_number, protocol, rule_action,
cidr_block, egress=None, icmp_code=None, icmp_type=None,
port_range_from=None, port_range_to=None):
"""
Creates a new network ACL entry in a network ACL within a VPC.
:type network_acl_id: str
:param network_acl_id: The ID of the network ACL for this network ACL entry.
:type rule_number: int
:param rule_number: The rule number to assign to the entry (for example, 100).
:type protocol: int
:param protocol: Valid values: -1 or a protocol number
(http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)
:type rule_action: str
:param rule_action: Indicates whether to allow or deny traffic that matches the rule.
:type cidr_block: str
:param cidr_block: The CIDR range to allow or deny, in CIDR notation (for example,
172.16.0.0/24).
:type egress: bool
:param egress: Indicates whether this rule applies to egress traffic from the subnet (true)
or ingress traffic to the subnet (false).
:type icmp_type: int
:param icmp_type: For the ICMP protocol, the ICMP type. You can use -1 to specify
all ICMP types.
:type icmp_code: int
:param icmp_code: For the ICMP protocol, the ICMP code. You can use -1 to specify
all ICMP codes for the given ICMP type.
:type port_range_from: int
:param port_range_from: The first port in the range.
:type port_range_to: int
:param port_range_to: The last port in the range.
:rtype: bool
:return: True if successful
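Example (illustrative sketch; allows inbound SSH from anywhere on a
hypothetical ACL; protocol 6 is TCP)::

    conn.create_network_acl_entry('acl-5fb85d36', 100, 6, 'allow',
                                  '0.0.0.0/0', egress=False,
                                  port_range_from=22, port_range_to=22)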
"""
params = {
'NetworkAclId': network_acl_id,
'RuleNumber': rule_number,
'Protocol': protocol,
'RuleAction': rule_action,
'CidrBlock': cidr_block
}
if egress is not None:
if isinstance(egress, bool):
egress = str(egress).lower()
params['Egress'] = egress
if icmp_code is not None:
params['Icmp.Code'] = icmp_code
if icmp_type is not None:
params['Icmp.Type'] = icmp_type
if port_range_from is not None:
params['PortRange.From'] = port_range_from
if port_range_to is not None:
params['PortRange.To'] = port_range_to
return self.get_status('CreateNetworkAclEntry', params)
def replace_network_acl_entry(self, network_acl_id, rule_number, protocol, rule_action,
cidr_block, egress=None, icmp_code=None, icmp_type=None,
port_range_from=None, port_range_to=None):
"""
Replaces an existing network ACL entry in a network ACL within a VPC.
:type network_acl_id: str
:param network_acl_id: The ID of the network ACL that contains the entry to replace.
:type rule_number: int
:param rule_number: The rule number of the entry to replace (for example, 100).
:type protocol: int
:param protocol: Valid values: -1 or a protocol number
(http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)
:type rule_action: str
:param rule_action: Indicates whether to allow or deny traffic that matches the rule.
:type cidr_block: str
:param cidr_block: The CIDR range to allow or deny, in CIDR notation (for example,
172.16.0.0/24).
:type egress: bool
:param egress: Indicates whether this rule applies to egress traffic from the subnet (true)
or ingress traffic to the subnet (false).
:type icmp_type: int
:param icmp_type: For the ICMP protocol, the ICMP type. You can use -1 to specify
all ICMP types.
:type icmp_code: int
:param icmp_code: For the ICMP protocol, the ICMP code. You can use -1 to specify
all ICMP codes for the given ICMP type.
:type port_range_from: int
:param port_range_from: The first port in the range.
:type port_range_to: int
:param port_range_to: The last port in the range.
:rtype: bool
:return: True if successful
"""
params = {
'NetworkAclId': network_acl_id,
'RuleNumber': rule_number,
'Protocol': protocol,
'RuleAction': rule_action,
'CidrBlock': cidr_block
}
if egress is not None:
if isinstance(egress, bool):
egress = str(egress).lower()
params['Egress'] = egress
if icmp_code is not None:
params['Icmp.Code'] = icmp_code
if icmp_type is not None:
params['Icmp.Type'] = icmp_type
if port_range_from is not None:
params['PortRange.From'] = port_range_from
if port_range_to is not None:
params['PortRange.To'] = port_range_to
return self.get_status('ReplaceNetworkAclEntry', params)
def delete_network_acl_entry(self, network_acl_id, rule_number, egress=None):
"""
Deletes a network ACL entry from a network ACL within a VPC.
:type network_acl_id: str
:param network_acl_id: The ID of the network ACL with the network ACL entry.
:type rule_number: int
:param rule_number: The rule number for the entry to delete.
:type egress: bool
:param egress: Specifies whether the rule to delete is an egress rule (true)
or ingress rule (false).
:rtype: bool
:return: True if successful
"""
params = {
'NetworkAclId': network_acl_id,
'RuleNumber': rule_number
}
if egress is not None:
if isinstance(egress, bool):
egress = str(egress).lower()
params['Egress'] = egress
return self.get_status('DeleteNetworkAclEntry', params)
# Internet Gateways
def get_all_internet_gateways(self, internet_gateway_ids=None,
filters=None, dry_run=False):
"""
Get a list of internet gateways. You can filter results to return information
about only those gateways that you're interested in.
:type internet_gateway_ids: list
:param internet_gateway_ids: A list of strings with the desired gateway IDs.
:type filters: list of tuples or dict
:param filters: A list of tuples or dict containing filters. Each tuple
or dict item consists of a filter key and a filter value.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
"""
params = {}
if internet_gateway_ids:
self.build_list_params(params, internet_gateway_ids,
'InternetGatewayId')
if filters:
self.build_filter_params(params, filters)
if dry_run:
params['DryRun'] = 'true'
return self.get_list('DescribeInternetGateways', params,
[('item', InternetGateway)])
def create_internet_gateway(self, dry_run=False):
"""
Creates an internet gateway for your VPC.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: The newly created internet gateway.
:return: A :class:`boto.vpc.internetgateway.InternetGateway` object
"""
params = {}
if dry_run:
params['DryRun'] = 'true'
return self.get_object('CreateInternetGateway', params, InternetGateway)
def delete_internet_gateway(self, internet_gateway_id, dry_run=False):
"""
Deletes an internet gateway from the VPC.
:type internet_gateway_id: str
:param internet_gateway_id: The ID of the internet gateway to delete.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: Bool
:return: True if successful
"""
params = {'InternetGatewayId': internet_gateway_id}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('DeleteInternetGateway', params)
def attach_internet_gateway(self, internet_gateway_id, vpc_id,
dry_run=False):
"""
Attach an internet gateway to a specific VPC.
:type internet_gateway_id: str
:param internet_gateway_id: The ID of the internet gateway to attach.
:type vpc_id: str
:param vpc_id: The ID of the VPC to attach to.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: Bool
:return: True if successful
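Example (illustrative sketch; creates a gateway and attaches it to a
hypothetical VPC)::

    igw = conn.create_internet_gateway()
    conn.attach_internet_gateway(igw.id, 'vpc-1a2b3c4d')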
"""
params = {
'InternetGatewayId': internet_gateway_id,
'VpcId': vpc_id
}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('AttachInternetGateway', params)
def detach_internet_gateway(self, internet_gateway_id, vpc_id,
dry_run=False):
"""
Detach an internet gateway from a specific VPC.
:type internet_gateway_id: str
:param internet_gateway_id: The ID of the internet gateway to detach.
:type vpc_id: str
:param vpc_id: The ID of the VPC to detach from.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: Bool
:return: True if successful
"""
params = {
'InternetGatewayId': internet_gateway_id,
'VpcId': vpc_id
}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('DetachInternetGateway', params)
# Customer Gateways
def get_all_customer_gateways(self, customer_gateway_ids=None,
filters=None, dry_run=False):
"""
Retrieve information about your CustomerGateways. You can filter
results to return information only about those CustomerGateways that
match your search parameters. Otherwise, all CustomerGateways
associated with your account are returned.
:type customer_gateway_ids: list
:param customer_gateway_ids: A list of strings with the desired
CustomerGateway IDs.
:type filters: list of tuples or dict
:param filters: A list of tuples or dict containing filters. Each tuple
or dict item consists of a filter key and a filter value.
Possible filter keys are:
- *state*, the state of the CustomerGateway
(pending,available,deleting,deleted)
- *type*, the type of customer gateway (ipsec.1)
- *ipAddress*, the IP address of the customer gateway's
internet-routable external interface
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: list
:return: A list of :class:`boto.vpc.customergateway.CustomerGateway`
"""
params = {}
if customer_gateway_ids:
self.build_list_params(params, customer_gateway_ids,
'CustomerGatewayId')
if filters:
self.build_filter_params(params, filters)
if dry_run:
params['DryRun'] = 'true'
return self.get_list('DescribeCustomerGateways', params,
[('item', CustomerGateway)])
def create_customer_gateway(self, type, ip_address, bgp_asn, dry_run=False):
"""
Create a new Customer Gateway
:type type: str
:param type: Type of VPN Connection. Only valid value currently is 'ipsec.1'
:type ip_address: str
:param ip_address: Internet-routable IP address for customer's gateway.
Must be a static address.
:type bgp_asn: int
:param bgp_asn: Customer gateway's Border Gateway Protocol (BGP)
Autonomous System Number (ASN)
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: The newly created CustomerGateway
:return: A :class:`boto.vpc.customergateway.CustomerGateway` object
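Example (illustrative sketch; the address and ASN are placeholders)::

    cgw = conn.create_customer_gateway('ipsec.1', '203.0.113.12', 65000)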
"""
params = {'Type': type,
'IpAddress': ip_address,
'BgpAsn': bgp_asn}
if dry_run:
params['DryRun'] = 'true'
return self.get_object('CreateCustomerGateway', params, CustomerGateway)
def delete_customer_gateway(self, customer_gateway_id, dry_run=False):
"""
Delete a Customer Gateway.
:type customer_gateway_id: str
:param customer_gateway_id: The ID of the customer_gateway to be deleted.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
params = {'CustomerGatewayId': customer_gateway_id}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('DeleteCustomerGateway', params)
# VPN Gateways
def get_all_vpn_gateways(self, vpn_gateway_ids=None, filters=None,
dry_run=False):
"""
Retrieve information about your VpnGateways. You can filter results to
return information only about those VpnGateways that match your search
parameters. Otherwise, all VpnGateways associated with your account
are returned.
:type vpn_gateway_ids: list
:param vpn_gateway_ids: A list of strings with the desired VpnGateway IDs.
:type filters: list of tuples or dict
:param filters: A list of tuples or dict containing filters. Each tuple
or dict item consists of a filter key and a filter value.
Possible filter keys are:
- *state*, a list of states of the VpnGateway
(pending,available,deleting,deleted)
- *type*, a list of types of customer gateway (ipsec.1)
- *availabilityZone*, a list of Availability zones the
VPN gateway is in.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: list
:return: A list of :class:`boto.vpc.vpngateway.VpnGateway`
"""
params = {}
if vpn_gateway_ids:
self.build_list_params(params, vpn_gateway_ids, 'VpnGatewayId')
if filters:
self.build_filter_params(params, filters)
if dry_run:
params['DryRun'] = 'true'
return self.get_list('DescribeVpnGateways', params,
[('item', VpnGateway)])
def create_vpn_gateway(self, type, availability_zone=None, dry_run=False):
"""
Create a new Vpn Gateway
:type type: str
:param type: Type of VPN Connection. Only valid value currently is 'ipsec.1'
:type availability_zone: str
:param availability_zone: The Availability Zone where you want the VPN gateway.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: The newly created VpnGateway
:return: A :class:`boto.vpc.vpngateway.VpnGateway` object
"""
params = {'Type': type}
if availability_zone:
params['AvailabilityZone'] = availability_zone
if dry_run:
params['DryRun'] = 'true'
return self.get_object('CreateVpnGateway', params, VpnGateway)
def delete_vpn_gateway(self, vpn_gateway_id, dry_run=False):
"""
Delete a Vpn Gateway.
:type vpn_gateway_id: str
:param vpn_gateway_id: The ID of the vpn_gateway to be deleted.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
params = {'VpnGatewayId': vpn_gateway_id}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('DeleteVpnGateway', params)
def attach_vpn_gateway(self, vpn_gateway_id, vpc_id, dry_run=False):
"""
Attaches a VPN gateway to a VPC.
:type vpn_gateway_id: str
:param vpn_gateway_id: The ID of the vpn_gateway to attach
:type vpc_id: str
:param vpc_id: The ID of the VPC you want to attach the gateway to.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: An attachment
:return: a :class:`boto.vpc.vpngateway.Attachment`
"""
params = {'VpnGatewayId': vpn_gateway_id,
'VpcId': vpc_id}
if dry_run:
params['DryRun'] = 'true'
return self.get_object('AttachVpnGateway', params, Attachment)
def detach_vpn_gateway(self, vpn_gateway_id, vpc_id, dry_run=False):
"""
Detaches a VPN gateway from a VPC.
:type vpn_gateway_id: str
:param vpn_gateway_id: The ID of the vpn_gateway to detach
:type vpc_id: str
:param vpc_id: The ID of the VPC you want to detach the gateway from.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
params = {'VpnGatewayId': vpn_gateway_id,
'VpcId': vpc_id}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('DetachVpnGateway', params)
# Subnets
def get_all_subnets(self, subnet_ids=None, filters=None, dry_run=False):
"""
Retrieve information about your Subnets. You can filter results to
return information only about those Subnets that match your search
parameters. Otherwise, all Subnets associated with your account
are returned.
:type subnet_ids: list
:param subnet_ids: A list of strings with the desired Subnet IDs.
:type filters: list of tuples or dict
:param filters: A list of tuples or dict containing filters. Each tuple
or dict item consists of a filter key and a filter value.
Possible filter keys are:
- *state*, a list of states of the Subnet
(pending,available)
- *vpcId*, a list of IDs of the VPC that the subnet is in.
- *cidrBlock*, a list of CIDR blocks of the subnet
- *availabilityZone*, list of the Availability Zones
the subnet is in.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: list
:return: A list of :class:`boto.vpc.subnet.Subnet`
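Example (illustrative sketch; restricts results to a hypothetical
VPC using the *vpcId* filter listed above)::

    subnets = conn.get_all_subnets(filters={'vpcId': 'vpc-1a2b3c4d'})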
"""
params = {}
if subnet_ids:
self.build_list_params(params, subnet_ids, 'SubnetId')
if filters:
self.build_filter_params(params, filters)
if dry_run:
params['DryRun'] = 'true'
return self.get_list('DescribeSubnets', params, [('item', Subnet)])
def create_subnet(self, vpc_id, cidr_block, availability_zone=None,
dry_run=False):
"""
Create a new Subnet
:type vpc_id: str
:param vpc_id: The ID of the VPC where you want to create the subnet.
:type cidr_block: str
:param cidr_block: The CIDR block you want the subnet to cover.
:type availability_zone: str
:param availability_zone: The AZ you want the subnet in
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: The newly created Subnet
:return: A :class:`boto.vpc.subnet.Subnet` object
"""
params = {'VpcId': vpc_id,
'CidrBlock': cidr_block}
if availability_zone:
params['AvailabilityZone'] = availability_zone
if dry_run:
params['DryRun'] = 'true'
return self.get_object('CreateSubnet', params, Subnet)
def delete_subnet(self, subnet_id, dry_run=False):
"""
Delete a subnet.
:type subnet_id: str
:param subnet_id: The ID of the subnet to be deleted.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
params = {'SubnetId': subnet_id}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('DeleteSubnet', params)
# DHCP Options
def get_all_dhcp_options(self, dhcp_options_ids=None, filters=None, dry_run=False):
"""
Retrieve information about your DhcpOptions.
:type dhcp_options_ids: list
:param dhcp_options_ids: A list of strings with the desired DhcpOptions IDs.
:type filters: list of tuples or dict
:param filters: A list of tuples or dict containing filters. Each tuple
or dict item consists of a filter key and a filter value.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: list
:return: A list of :class:`boto.vpc.dhcpoptions.DhcpOptions`
"""
params = {}
if dhcp_options_ids:
self.build_list_params(params, dhcp_options_ids, 'DhcpOptionsId')
if filters:
self.build_filter_params(params, filters)
if dry_run:
params['DryRun'] = 'true'
return self.get_list('DescribeDhcpOptions', params,
[('item', DhcpOptions)])
def create_dhcp_options(self, domain_name=None, domain_name_servers=None,
ntp_servers=None, netbios_name_servers=None,
netbios_node_type=None, dry_run=False):
"""
Create a new DhcpOption
This corresponds to
http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-CreateDhcpOptions.html
:type domain_name: str
:param domain_name: A domain name of your choice (for example,
example.com)
:type domain_name_servers: list of strings
:param domain_name_servers: The IP address of a domain name server. You
can specify up to four addresses.
:type ntp_servers: list of strings
:param ntp_servers: The IP address of a Network Time Protocol (NTP)
server. You can specify up to four addresses.
:type netbios_name_servers: list of strings
:param netbios_name_servers: The IP address of a NetBIOS name server.
You can specify up to four addresses.
:type netbios_node_type: str
:param netbios_node_type: The NetBIOS node type (1, 2, 4, or 8). For
more information about the values, see RFC 2132. We recommend you
only use 2 at this time (broadcast and multicast are currently not
supported).
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: The newly created DhcpOptions
:return: A :class:`boto.vpc.dhcpoptions.DhcpOptions` object
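Example (illustrative sketch; the domain and server addresses are
placeholders)::

    dhcp = conn.create_dhcp_options(
        domain_name='example.com',
        domain_name_servers=['10.0.0.2', '10.0.0.3'])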
"""
key_counter = 1
params = {}
def insert_option(params, name, value):
params['DhcpConfiguration.%d.Key' % (key_counter,)] = name
if isinstance(value, (list, tuple)):
for idx, value in enumerate(value, 1):
key_name = 'DhcpConfiguration.%d.Value.%d' % (
key_counter, idx)
params[key_name] = value
else:
key_name = 'DhcpConfiguration.%d.Value.1' % (key_counter,)
params[key_name] = value
return key_counter + 1
if domain_name:
key_counter = insert_option(params,
'domain-name', domain_name)
if domain_name_servers:
key_counter = insert_option(params,
'domain-name-servers', domain_name_servers)
if ntp_servers:
key_counter = insert_option(params,
'ntp-servers', ntp_servers)
if netbios_name_servers:
key_counter = insert_option(params,
'netbios-name-servers', netbios_name_servers)
if netbios_node_type:
key_counter = insert_option(params,
'netbios-node-type', netbios_node_type)
if dry_run:
params['DryRun'] = 'true'
return self.get_object('CreateDhcpOptions', params, DhcpOptions)
def delete_dhcp_options(self, dhcp_options_id, dry_run=False):
"""
Delete a DHCP Options
:type dhcp_options_id: str
:param dhcp_options_id: The ID of the DHCP Options to be deleted.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
params = {'DhcpOptionsId': dhcp_options_id}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('DeleteDhcpOptions', params)
def associate_dhcp_options(self, dhcp_options_id, vpc_id, dry_run=False):
"""
Associate a set of Dhcp Options with a VPC.
:type dhcp_options_id: str
:param dhcp_options_id: The ID of the Dhcp Options
:type vpc_id: str
:param vpc_id: The ID of the VPC.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
params = {'DhcpOptionsId': dhcp_options_id,
'VpcId': vpc_id}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('AssociateDhcpOptions', params)
# VPN Connection
def get_all_vpn_connections(self, vpn_connection_ids=None, filters=None,
dry_run=False):
"""
Retrieve information about your VPN connections. You can filter
results to return information only about those VPN connections that
match your search parameters. Otherwise, all VPN connections
associated with your account are returned.
:type vpn_connection_ids: list
:param vpn_connection_ids: A list of strings with the desired VPN connection IDs.
:type filters: list of tuples or dict
:param filters: A list of tuples or dict containing filters. Each tuple
or dict item consists of a filter key and a filter value.
Possible filter keys are:
- *state*, a list of states of the VPN connection
(pending,available,deleting,deleted)
- *type*, a list of types of connection, currently 'ipsec.1'
- *customerGatewayId*, a list of IDs of the customer gateway
associated with the VPN
- *vpnGatewayId*, a list of IDs of the VPN gateway associated
with the VPN connection
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: list
:return: A list of :class:`boto.vpc.vpnconnection.VpnConnection`
"""
params = {}
if vpn_connection_ids:
self.build_list_params(params, vpn_connection_ids,
'VpnConnectionId')
if filters:
self.build_filter_params(params, filters)
if dry_run:
params['DryRun'] = 'true'
return self.get_list('DescribeVpnConnections', params,
[('item', VpnConnection)])
def create_vpn_connection(self, type, customer_gateway_id, vpn_gateway_id,
static_routes_only=None, dry_run=False):
"""
Create a new VPN Connection.
:type type: str
:param type: The type of VPN Connection. Currently only 'ipsec.1'
is supported
:type customer_gateway_id: str
:param customer_gateway_id: The ID of the customer gateway.
:type vpn_gateway_id: str
:param vpn_gateway_id: The ID of the VPN gateway.
:type static_routes_only: bool
:param static_routes_only: Indicates whether the VPN connection
requires static routes. If you are creating a VPN connection
for a device that does not support BGP, you must specify true.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: The newly created VpnConnection
:return: A :class:`boto.vpc.vpnconnection.VpnConnection` object
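Example (illustrative sketch; the gateway IDs are hypothetical)::

    vpn = conn.create_vpn_connection('ipsec.1', 'cgw-b4dc3961',
                                     'vgw-8db04f81',
                                     static_routes_only=True)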
"""
params = {'Type': type,
'CustomerGatewayId': customer_gateway_id,
'VpnGatewayId': vpn_gateway_id}
if static_routes_only is not None:
if isinstance(static_routes_only, bool):
static_routes_only = str(static_routes_only).lower()
params['Options.StaticRoutesOnly'] = static_routes_only
if dry_run:
params['DryRun'] = 'true'
return self.get_object('CreateVpnConnection', params, VpnConnection)
def delete_vpn_connection(self, vpn_connection_id, dry_run=False):
"""
Delete a VPN Connection.
:type vpn_connection_id: str
:param vpn_connection_id: The ID of the vpn_connection to be deleted.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
params = {'VpnConnectionId': vpn_connection_id}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('DeleteVpnConnection', params)
def disable_vgw_route_propagation(self, route_table_id, gateway_id,
dry_run=False):
"""
Disables a virtual private gateway (VGW) from propagating routes to the
routing tables of an Amazon VPC.
:type route_table_id: str
:param route_table_id: The ID of the routing table.
:type gateway_id: str
:param gateway_id: The ID of the virtual private gateway.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
params = {
'RouteTableId': route_table_id,
'GatewayId': gateway_id,
}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('DisableVgwRoutePropagation', params)
def enable_vgw_route_propagation(self, route_table_id, gateway_id,
dry_run=False):
"""
Enables a virtual private gateway (VGW) to propagate routes to the
routing tables of an Amazon VPC.
:type route_table_id: str
:param route_table_id: The ID of the routing table.
:type gateway_id: str
:param gateway_id: The ID of the virtual private gateway.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
params = {
'RouteTableId': route_table_id,
'GatewayId': gateway_id,
}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('EnableVgwRoutePropagation', params)
def create_vpn_connection_route(self, destination_cidr_block,
vpn_connection_id, dry_run=False):
"""
Creates a new static route associated with a VPN connection between an
existing virtual private gateway and a VPN customer gateway. The static
route allows traffic to be routed from the virtual private gateway to
the VPN customer gateway.
:type destination_cidr_block: str
:param destination_cidr_block: The CIDR block associated with the local
subnet of the customer data center.
:type vpn_connection_id: str
:param vpn_connection_id: The ID of the VPN connection.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
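Example (illustrative sketch; the CIDR and connection ID are
placeholders)::

    conn.create_vpn_connection_route('11.12.0.0/16', 'vpn-83ad48ea')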
"""
params = {
'DestinationCidrBlock': destination_cidr_block,
'VpnConnectionId': vpn_connection_id,
}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('CreateVpnConnectionRoute', params)
def delete_vpn_connection_route(self, destination_cidr_block,
vpn_connection_id, dry_run=False):
"""
Deletes a static route associated with a VPN connection between an
existing virtual private gateway and a VPN customer gateway. The static
route allows traffic to be routed from the virtual private gateway to
the VPN customer gateway.
:type destination_cidr_block: str
:param destination_cidr_block: The CIDR block associated with the local
subnet of the customer data center.
:type vpn_connection_id: str
:param vpn_connection_id: The ID of the VPN connection.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
params = {
'DestinationCidrBlock': destination_cidr_block,
'VpnConnectionId': vpn_connection_id,
}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('DeleteVpnConnectionRoute', params)
def get_all_vpc_peering_connections(self, vpc_peering_connection_ids=None,
filters=None, dry_run=False):
"""
Retrieve information about your VPC peering connections. You
can filter results to return information only about those VPC
peering connections that match your search parameters.
Otherwise, all VPC peering connections associated with your
account are returned.
:type vpc_peering_connection_ids: list
:param vpc_peering_connection_ids: A list of strings with the desired VPC
peering connection IDs.
:type filters: list of tuples
:param filters: A list of tuples containing filters. Each tuple
consists of a filter key and a filter value.
Possible filter keys are:
* *accepter-vpc-info.cidr-block* - The CIDR block of the peer VPC.
* *accepter-vpc-info.owner-id* - The AWS account ID of the owner
of the peer VPC.
* *accepter-vpc-info.vpc-id* - The ID of the peer VPC.
* *expiration-time* - The expiration date and time for the VPC
peering connection.
* *requester-vpc-info.cidr-block* - The CIDR block of the
requester's VPC.
* *requester-vpc-info.owner-id* - The AWS account ID of the
owner of the requester VPC.
* *requester-vpc-info.vpc-id* - The ID of the requester VPC.
* *status-code* - The status of the VPC peering connection.
* *status-message* - A message that provides more information
about the status of the VPC peering connection, if applicable.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: list
:return: A list of :class:`boto.vpc.vpc_peering_connection.VpcPeeringConnection`
"""
params = {}
if vpc_peering_connection_ids:
self.build_list_params(params, vpc_peering_connection_ids, 'VpcPeeringConnectionId')
if filters:
self.build_filter_params(params, dict(filters))
if dry_run:
params['DryRun'] = 'true'
return self.get_list('DescribeVpcPeeringConnections', params, [('item', VpcPeeringConnection)])
def create_vpc_peering_connection(self, vpc_id, peer_vpc_id,
peer_owner_id=None, dry_run=False):
"""
Create a new VPC peering connection.
:type vpc_id: str
:param vpc_id: The ID of the requester VPC.
:type peer_vpc_id: str
:param peer_vpc_id: The ID of the VPC with which you are creating the peering connection.
:type peer_owner_id: str
:param peer_owner_id: The AWS account ID of the owner of the peer VPC.
:rtype: The newly created VpcPeeringConnection
:return: A :class:`boto.vpc.vpc_peering_connection.VpcPeeringConnection` object
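Example (illustrative sketch; both VPC IDs are hypothetical)::

    pcx = conn.create_vpc_peering_connection('vpc-1a2b3c4d',
                                             'vpc-a1b2c3d4')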
"""
params = {'VpcId': vpc_id,
'PeerVpcId': peer_vpc_id }
if peer_owner_id is not None:
params['PeerOwnerId'] = peer_owner_id
if dry_run:
params['DryRun'] = 'true'
return self.get_object('CreateVpcPeeringConnection', params,
VpcPeeringConnection)
def delete_vpc_peering_connection(self, vpc_peering_connection_id, dry_run=False):
"""
Deletes a VPC peering connection. Either the owner of the requester
VPC or the owner of the peer VPC can delete the VPC peering connection
if it's in the active state. The owner of the requester VPC can delete
a VPC peering connection in the pending-acceptance state.
:type vpc_peering_connection_id: str
:param vpc_peering_connection_id: The ID of the VPC peering connection.
:rtype: bool
:return: True if successful
"""
params = {
'VpcPeeringConnectionId': vpc_peering_connection_id
}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('DeleteVpcPeeringConnection', params)
def reject_vpc_peering_connection(self, vpc_peering_connection_id, dry_run=False):
"""
Rejects a VPC peering connection request. The VPC peering connection
must be in the pending-acceptance state.
:type vpc_peering_connection_id: str
:param vpc_peering_connection_id: The ID of the VPC peering connection.
:rtype: bool
:return: True if successful
"""
params = {
'VpcPeeringConnectionId': vpc_peering_connection_id
}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('RejectVpcPeeringConnection', params)
def accept_vpc_peering_connection(self, vpc_peering_connection_id, dry_run=False):
"""
Accepts a VPC peering connection request. The VPC peering connection
must be in the pending-acceptance state.
:type vpc_peering_connection_id: str
:param vpc_peering_connection_id: The ID of the VPC peering connection.
:rtype: Accepted VpcPeeringConnection
:return: A :class:`boto.vpc.vpc_peering_connection.VpcPeeringConnection` object
"""
params = {
'VpcPeeringConnectionId': vpc_peering_connection_id
}
if dry_run:
params['DryRun'] = 'true'
return self.get_object('AcceptVpcPeeringConnection', params,
VpcPeeringConnection)
def get_all_classic_link_vpcs(self, vpc_ids=None, filters=None,
dry_run=False):
"""
Describes the ClassicLink status of one or more VPCs.
:type vpc_ids: list
:param vpc_ids: A list of strings with the desired VPC IDs.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:type filters: list of tuples or dict
:param filters: A list of tuples or dict containing filters. Each tuple
or dict item consists of a filter key and a filter value.
:rtype: list
:return: A list of :class:`boto.vpc.vpc.VPC`
"""
params = {}
if vpc_ids:
self.build_list_params(params, vpc_ids, 'VpcId')
if filters:
self.build_filter_params(params, filters)
if dry_run:
params['DryRun'] = 'true'
return self.get_list('DescribeVpcClassicLink', params, [('item', VPC)],
verb='POST')
def attach_classic_link_vpc(self, vpc_id, instance_id, groups,
dry_run=False):
"""
Links an EC2-Classic instance to a ClassicLink-enabled VPC through one
or more of the VPC's security groups. You cannot link an EC2-Classic
instance to more than one VPC at a time. You can only link an instance
that's in the running state. An instance is automatically unlinked from
a VPC when it's stopped. You can link it to the VPC again when you
restart it.
After you've linked an instance, you cannot change the VPC security
groups that are associated with it. To change the security groups, you
must first unlink the instance, and then link it again.
Linking your instance to a VPC is sometimes referred to as attaching
your instance.
:type vpc_id: str
:param vpc_id: The ID of a ClassicLink-enabled VPC.
:type instance_id: str
:param instance_id: The ID of the EC2-Classic instance to link to the VPC.
:type groups: list
:param groups: The IDs of one or more of the VPC's security groups.
You cannot specify security groups from a different VPC. The
members of the list can be
:class:`boto.ec2.securitygroup.SecurityGroup` objects or
strings of the IDs of the security groups.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
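Example (illustrative sketch; the VPC, instance, and security group
IDs are hypothetical)::

    conn.attach_classic_link_vpc('vpc-1a2b3c4d', 'i-ab12cd34',
                                 groups=['sg-12345678'])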
"""
params = {'VpcId': vpc_id, 'InstanceId': instance_id}
if dry_run:
params['DryRun'] = 'true'
group_ids = []
for group in groups:
    if hasattr(group, 'id'):
        group_ids.append(group.id)
    else:
        group_ids.append(group)
self.build_list_params(params, group_ids, 'SecurityGroupId')
return self.get_status('AttachClassicLinkVpc', params)
def detach_classic_link_vpc(self, vpc_id, instance_id, dry_run=False):
"""
Unlinks a linked EC2-Classic instance from a VPC. After the instance
has been unlinked, the VPC security groups are no longer associated
with it. An instance is automatically unlinked from a VPC when
it's stopped.
:type vpc_id: str
:param vpc_id: The ID of the VPC to which the instance is linked.
:type instance_id: str
:param instance_id: The ID of the instance to unlink from the VPC.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
params = {'VpcId': vpc_id, 'InstanceId': instance_id}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('DetachClassicLinkVpc', params)
def disable_vpc_classic_link(self, vpc_id, dry_run=False):
"""
Disables ClassicLink for a VPC. You cannot disable ClassicLink for a
VPC that has EC2-Classic instances linked to it.
:type vpc_id: str
:param vpc_id: The ID of the VPC.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
params = {'VpcId': vpc_id}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('DisableVpcClassicLink', params)
def enable_vpc_classic_link(self, vpc_id, dry_run=False):
"""
Enables a VPC for ClassicLink. You can then link EC2-Classic instances
to your ClassicLink-enabled VPC to allow communication over private IP
addresses. You cannot enable your VPC for ClassicLink if any of your
VPC's route tables have existing routes for address ranges within the
10.0.0.0/8 IP address range, excluding local routes for VPCs in the
10.0.0.0/16 and 10.1.0.0/16 IP address ranges.
:type vpc_id: str
:param vpc_id: The ID of the VPC.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
params = {'VpcId': vpc_id}
if dry_run:
params['DryRun'] = 'true'
return self.get_status('EnableVpcClassicLink', params)
# ---- boto/vpc/__init__.py ----
import os
import datetime
import boto.utils
from boto.compat import json
class Credentials(object):
"""
:ivar access_key: The AccessKeyID.
:ivar secret_key: The SecretAccessKey.
:ivar session_token: The session token that must be passed with
requests to use the temporary credentials
:ivar expiration: The timestamp for when the credentials will expire
"""
def __init__(self, parent=None):
self.parent = parent
self.access_key = None
self.secret_key = None
self.session_token = None
self.expiration = None
self.request_id = None
@classmethod
def from_json(cls, json_doc):
"""
Create and return a new Session Token based on the contents
of a JSON document.
:type json_doc: str
:param json_doc: A string containing a JSON document with a
previously saved Credentials object.
"""
d = json.loads(json_doc)
token = cls()
token.__dict__.update(d)
return token
@classmethod
def load(cls, file_path):
"""
Create and return a new Session Token based on the contents
of a previously saved JSON-format file.
:type file_path: str
:param file_path: The fully qualified path to the JSON-format
file containing the previously saved Session Token information.
"""
with open(file_path) as fp:
    json_doc = fp.read()
return cls.from_json(json_doc)
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'AccessKeyId':
self.access_key = value
elif name == 'SecretAccessKey':
self.secret_key = value
elif name == 'SessionToken':
self.session_token = value
elif name == 'Expiration':
self.expiration = value
elif name == 'RequestId':
self.request_id = value
else:
pass
def to_dict(self):
"""
Return a Python dict containing the important information
about this Session Token.
"""
return {'access_key': self.access_key,
'secret_key': self.secret_key,
'session_token': self.session_token,
'expiration': self.expiration,
'request_id': self.request_id}
def save(self, file_path):
"""
Persist a Session Token to a file in JSON format.
:type file_path: str
:param file_path: The fully qualified path to the file where the
Session Token data should be written. Any previous
data in the file will be overwritten. To help protect
the credentials contained in the file, the permissions
of the file will be set to readable/writable by owner only.
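Example (illustrative sketch; the path is a placeholder)::

    creds.save('/tmp/session_token.json')
    restored = Credentials.load('/tmp/session_token.json')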
"""
with open(file_path, 'w') as fp:
    json.dump(self.to_dict(), fp)
os.chmod(file_path, 0o600)
def is_expired(self, time_offset_seconds=0):
"""
Checks to see if the Session Token is expired or not. By default
it will check to see if the Session Token is expired as of the
moment the method is called. However, you can supply an
optional parameter which is the number of seconds of offset
into the future for the check. For example, if you supply
a value of 5, this method returns True if the Session
Token will be expired 5 seconds from now.
:type time_offset_seconds: int
:param time_offset_seconds: The number of seconds into the future
to test the Session Token for expiration.
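Example (illustrative sketch; treats the token as unusable when it
has less than five minutes left; ``refresh_credentials`` is a
hypothetical helper)::

    if creds.is_expired(time_offset_seconds=300):
        creds = refresh_credentials()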
"""
now = datetime.datetime.utcnow()
if time_offset_seconds:
now = now + datetime.timedelta(seconds=time_offset_seconds)
ts = boto.utils.parse_ts(self.expiration)
delta = ts - now
return delta.total_seconds() <= 0
class FederationToken(object):
"""
:ivar credentials: A Credentials object containing the credentials.
:ivar federated_user_arn: ARN specifying federated user using credentials.
:ivar federated_user_id: The ID of the federated user using credentials.
:ivar packed_policy_size: A percentage value indicating the size of
the policy in packed form
"""
def __init__(self, parent=None):
self.parent = parent
self.credentials = None
self.federated_user_arn = None
self.federated_user_id = None
self.packed_policy_size = None
self.request_id = None
def startElement(self, name, attrs, connection):
if name == 'Credentials':
self.credentials = Credentials()
return self.credentials
else:
return None
def endElement(self, name, value, connection):
if name == 'Arn':
self.federated_user_arn = value
elif name == 'FederatedUserId':
self.federated_user_id = value
elif name == 'PackedPolicySize':
self.packed_policy_size = int(value)
elif name == 'RequestId':
self.request_id = value
else:
pass
class AssumedRole(object):
"""
:ivar user: The assumed role user.
:ivar credentials: A Credentials object containing the credentials.
"""
def __init__(self, connection=None, credentials=None, user=None):
self._connection = connection
self.credentials = credentials
self.user = user
def startElement(self, name, attrs, connection):
if name == 'Credentials':
self.credentials = Credentials()
return self.credentials
elif name == 'AssumedRoleUser':
self.user = User()
return self.user
def endElement(self, name, value, connection):
pass
class User(object):
"""
:ivar arn: The arn of the user assuming the role.
:ivar assume_role_id: The identifier of the assumed role.
"""
def __init__(self, arn=None, assume_role_id=None):
self.arn = arn
self.assume_role_id = assume_role_id
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Arn':
self.arn = value
elif name == 'AssumedRoleId':
self.assume_role_id = value
class DecodeAuthorizationMessage(object):
"""
:ivar request_id: The request ID.
:ivar decoded_message: The decoded authorization message (may be JSON).
"""
def __init__(self, request_id=None, decoded_message=None):
self.request_id = request_id
self.decoded_message = decoded_message
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'requestId':
self.request_id = value
elif name == 'DecodedMessage':
self.decoded_message = value
# ---- boto/sts/credentials.py ----
import boto
from boto.cloudformation.stack import Stack, StackSummary, StackEvent
from boto.cloudformation.stack import StackResource, StackResourceSummary
from boto.cloudformation.template import Template
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.compat import json
class CloudFormationConnection(AWSQueryConnection):
"""
AWS CloudFormation
AWS CloudFormation enables you to create and manage AWS
infrastructure deployments predictably and repeatedly. AWS
CloudFormation helps you leverage AWS products such as Amazon EC2,
EBS, Amazon SNS, ELB, and Auto Scaling to build highly reliable,
highly scalable, cost-effective applications without worrying
about creating and configuring the underlying AWS infrastructure.
With AWS CloudFormation, you declare all of your resources and
dependencies in a template file. The template defines a collection
of resources as a single unit called a stack. AWS CloudFormation
creates and deletes all member resources of the stack together and
manages all dependencies between the resources for you.
For more information about this product, go to the `CloudFormation
Product Page`_.
Amazon CloudFormation makes use of other AWS products. If you need
additional technical information about a specific AWS product, you
can find the product's technical documentation at
`http://aws.amazon.com/documentation/`_.
"""
APIVersion = boto.config.get('Boto', 'cfn_version', '2010-05-15')
DefaultRegionName = boto.config.get('Boto', 'cfn_region_name', 'us-east-1')
DefaultRegionEndpoint = boto.config.get('Boto', 'cfn_region_endpoint',
'cloudformation.us-east-1.amazonaws.com')
valid_states = (
'CREATE_IN_PROGRESS', 'CREATE_FAILED', 'CREATE_COMPLETE',
'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'ROLLBACK_COMPLETE',
'DELETE_IN_PROGRESS', 'DELETE_FAILED', 'DELETE_COMPLETE',
'UPDATE_IN_PROGRESS', 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS',
'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_IN_PROGRESS',
'UPDATE_ROLLBACK_FAILED',
'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS',
'UPDATE_ROLLBACK_COMPLETE')
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
converter=None, security_token=None, validate_certs=True,
profile_name=None):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint, CloudFormationConnection)
self.region = region
super(CloudFormationConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
security_token,
validate_certs=validate_certs,
profile_name=profile_name)
def _required_auth_capability(self):
return ['hmac-v4']
def encode_bool(self, v):
v = bool(v)
return {True: "true", False: "false"}[v]
def _build_create_or_update_params(self, stack_name, template_body,
template_url, parameters, disable_rollback, timeout_in_minutes,
notification_arns, capabilities, on_failure, stack_policy_body,
stack_policy_url, tags, use_previous_template=None,
stack_policy_during_update_body=None,
stack_policy_during_update_url=None):
"""
Helper that creates JSON parameters needed by a Stack Create or
Stack Update call.
:type stack_name: string
:param stack_name:
The name associated with the stack. The name must be unique within your
AWS account.
Must contain only alphanumeric characters (case sensitive) and start
with an alpha character. Maximum length of the name is 255
characters.
:type template_body: string
:param template_body: Structure containing the template body. (For more
information, go to `Template Anatomy`_ in the AWS CloudFormation
User Guide.)
Conditional: You must pass either `UsePreviousTemplate` or one of
`TemplateBody` or `TemplateUrl`. If both `TemplateBody` and
`TemplateUrl` are passed, only `TemplateBody` is used.
:type template_url: string
:param template_url: Location of file containing the template body. The
URL must point to a template (max size: 307,200 bytes) located in
an S3 bucket in the same region as the stack. For more information,
go to the `Template Anatomy`_ in the AWS CloudFormation User Guide.
Conditional: You must pass either `UsePreviousTemplate` or one of
`TemplateBody` or `TemplateUrl`. If both `TemplateBody` and
`TemplateUrl` are passed, only `TemplateBody` is used.
:type parameters: list
:param parameters: A list of key/value tuples that specify input
parameters for the stack. A 3-tuple (key, value, bool) may be used to
specify the `UsePreviousValue` option.
:type disable_rollback: boolean
:param disable_rollback: Set to `True` to disable rollback of the stack
if stack creation failed. You can specify either `DisableRollback`
or `OnFailure`, but not both.
Default: `False`
:type timeout_in_minutes: integer
:param timeout_in_minutes: The amount of time that can pass before the
stack status becomes CREATE_FAILED; if `DisableRollback` is not set
or is set to `False`, the stack will be rolled back.
:type notification_arns: list
:param notification_arns: The Simple Notification Service (SNS) topic
ARNs to publish stack related events. You can find your SNS topic
ARNs using the `SNS console`_ or your Command Line Interface (CLI).
:type capabilities: list
:param capabilities: The list of capabilities that you want to allow in
the stack. If your template contains certain resources, you must
specify the CAPABILITY_IAM value for this parameter; otherwise,
this action returns an InsufficientCapabilities error. The
following resources require you to specify the capabilities
parameter: `AWS::CloudFormation::Stack`_, `AWS::IAM::AccessKey`_,
`AWS::IAM::Group`_, `AWS::IAM::InstanceProfile`_,
`AWS::IAM::Policy`_, `AWS::IAM::Role`_, `AWS::IAM::User`_, and
`AWS::IAM::UserToGroupAddition`_.
:type on_failure: string
:param on_failure: Determines what action will be taken if stack
creation fails. This must be one of: DO_NOTHING, ROLLBACK, or
DELETE. You can specify either `OnFailure` or `DisableRollback`,
but not both.
Default: `ROLLBACK`
:type stack_policy_body: string
:param stack_policy_body: Structure containing the stack policy body.
(For more information, go to ` Prevent Updates to Stack Resources`_
in the AWS CloudFormation User Guide.)
If you pass `StackPolicyBody` and `StackPolicyURL`, only
`StackPolicyBody` is used.
:type stack_policy_url: string
:param stack_policy_url: Location of a file containing the stack
policy. The URL must point to a policy (max size: 16KB) located in
an S3 bucket in the same region as the stack. If you pass
`StackPolicyBody` and `StackPolicyURL`, only `StackPolicyBody` is
used.
:type tags: dict
:param tags: A set of user-defined `Tags` to associate with this stack,
represented by key/value pairs. Tags defined for the stack are
propagated to EC2 resources that are created as part of the stack.
A maximum number of 10 tags can be specified.
:type use_previous_template: boolean
:param use_previous_template: Set to `True` to use the previous
template instead of uploading a new one via `TemplateBody` or
`TemplateURL`.
Conditional: You must pass either `UsePreviousTemplate` or one of
`TemplateBody` or `TemplateUrl`.
:type stack_policy_during_update_body: string
:param stack_policy_during_update_body: Structure containing the
temporary overriding stack policy body. If you pass
`StackPolicyDuringUpdateBody` and `StackPolicyDuringUpdateURL`,
only `StackPolicyDuringUpdateBody` is used.
If you want to update protected resources, specify a temporary
overriding stack policy during this update. If you do not specify a
stack policy, the current policy that associated with the stack
will be used.
:type stack_policy_during_update_url: string
:param stack_policy_during_update_url: Location of a file containing
the temporary overriding stack policy. The URL must point to a
policy (max size: 16KB) located in an S3 bucket in the same region
as the stack. If you pass `StackPolicyDuringUpdateBody` and
`StackPolicyDuringUpdateURL`, only `StackPolicyDuringUpdateBody` is
used.
If you want to update protected resources, specify a temporary
overriding stack policy during this update. If you do not specify a
stack policy, the current policy that is associated with the stack
will be used.
:rtype: dict
:return: JSON parameters represented as a Python dict.
"""
params = {'ContentType': "JSON", 'StackName': stack_name,
'DisableRollback': self.encode_bool(disable_rollback)}
if template_body:
params['TemplateBody'] = template_body
if template_url:
params['TemplateURL'] = template_url
if use_previous_template is not None:
params['UsePreviousTemplate'] = self.encode_bool(use_previous_template)
if template_body and template_url:
boto.log.warning("If both TemplateBody and TemplateURL are"
" specified, only TemplateBody will be honored by the API")
if parameters and len(parameters) > 0:
for i, parameter_tuple in enumerate(parameters):
key, value = parameter_tuple[:2]
use_previous = (parameter_tuple[2]
if len(parameter_tuple) > 2 else False)
params['Parameters.member.%d.ParameterKey' % (i + 1)] = key
if use_previous:
params['Parameters.member.%d.UsePreviousValue'
% (i + 1)] = self.encode_bool(use_previous)
else:
params['Parameters.member.%d.ParameterValue' % (i + 1)] = value
if capabilities:
for i, value in enumerate(capabilities):
params['Capabilities.member.%d' % (i + 1)] = value
if tags:
for i, (key, value) in enumerate(tags.items()):
params['Tags.member.%d.Key' % (i + 1)] = key
params['Tags.member.%d.Value' % (i + 1)] = value
if notification_arns and len(notification_arns) > 0:
self.build_list_params(params, notification_arns,
"NotificationARNs.member")
if timeout_in_minutes:
params['TimeoutInMinutes'] = int(timeout_in_minutes)
if disable_rollback is not None:
params['DisableRollback'] = str(
disable_rollback).lower()
if on_failure is not None:
params['OnFailure'] = on_failure
if stack_policy_body is not None:
params['StackPolicyBody'] = stack_policy_body
if stack_policy_url is not None:
params['StackPolicyURL'] = stack_policy_url
if stack_policy_during_update_body is not None:
params['StackPolicyDuringUpdateBody'] = stack_policy_during_update_body
if stack_policy_during_update_url is not None:
params['StackPolicyDuringUpdateURL'] = stack_policy_during_update_url
return params
def _do_request(self, call, params, path, method):
"""
Do a request via ``self.make_request`` and parse the JSON response.
:type call: string
:param call: Call name, e.g. ``CreateStack``
:type params: dict
:param params: Dictionary of call parameters
:type path: string
:param path: Server path
:type method: string
:param method: HTTP method to use
:rtype: dict
:return: Parsed JSON response data
"""
response = self.make_request(call, params, path, method)
body = response.read().decode('utf-8')
if response.status == 200:
body = json.loads(body)
return body
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body=body)
def create_stack(self, stack_name, template_body=None, template_url=None,
parameters=None, notification_arns=None, disable_rollback=None,
timeout_in_minutes=None, capabilities=None, tags=None,
on_failure=None, stack_policy_body=None, stack_policy_url=None):
"""
Creates a stack as specified in the template. After the call
completes successfully, the stack creation starts. You can
check the status of the stack via the DescribeStacks API.
Currently, the limit for stacks is 20 stacks per account per
region.
:type stack_name: string
:param stack_name:
The name associated with the stack. The name must be unique within your
AWS account.
Must contain only alphanumeric characters (case sensitive) and start
with an alpha character. Maximum length of the name is 255
characters.
:type template_body: string
:param template_body: Structure containing the template body. (For more
information, go to `Template Anatomy`_ in the AWS CloudFormation
User Guide.)
Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are
passed, only `TemplateBody` is used.
:type template_url: string
:param template_url: Location of file containing the template body. The
URL must point to a template (max size: 307,200 bytes) located in
an S3 bucket in the same region as the stack. For more information,
go to the `Template Anatomy`_ in the AWS CloudFormation User Guide.
Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
passed, only `TemplateBody` is used.
:type parameters: list
:param parameters: A list of key/value tuples that specify input
parameters for the stack.
:type disable_rollback: boolean
:param disable_rollback: Set to `True` to disable rollback of the stack
if stack creation failed. You can specify either `DisableRollback`
or `OnFailure`, but not both.
Default: `False`
:type timeout_in_minutes: integer
:param timeout_in_minutes: The amount of time that can pass before the
stack status becomes CREATE_FAILED; if `DisableRollback` is not set
or is set to `False`, the stack will be rolled back.
:type notification_arns: list
:param notification_arns: The Simple Notification Service (SNS) topic
ARNs to publish stack related events. You can find your SNS topic
ARNs using the `SNS console`_ or your Command Line Interface (CLI).
:type capabilities: list
:param capabilities: The list of capabilities that you want to allow in
the stack. If your template contains certain resources, you must
specify the CAPABILITY_IAM value for this parameter; otherwise,
this action returns an InsufficientCapabilities error. The
following resources require you to specify the capabilities
parameter: `AWS::CloudFormation::Stack`_, `AWS::IAM::AccessKey`_,
`AWS::IAM::Group`_, `AWS::IAM::InstanceProfile`_,
`AWS::IAM::Policy`_, `AWS::IAM::Role`_, `AWS::IAM::User`_, and
`AWS::IAM::UserToGroupAddition`_.
:type on_failure: string
:param on_failure: Determines what action will be taken if stack
creation fails. This must be one of: DO_NOTHING, ROLLBACK, or
DELETE. You can specify either `OnFailure` or `DisableRollback`,
but not both.
Default: `ROLLBACK`
:type stack_policy_body: string
:param stack_policy_body: Structure containing the stack policy body.
(For more information, go to ` Prevent Updates to Stack Resources`_
in the AWS CloudFormation User Guide.)
If you pass `StackPolicyBody` and `StackPolicyURL`, only
`StackPolicyBody` is used.
:type stack_policy_url: string
:param stack_policy_url: Location of a file containing the stack
policy. The URL must point to a policy (max size: 16KB) located in
an S3 bucket in the same region as the stack. If you pass
`StackPolicyBody` and `StackPolicyURL`, only `StackPolicyBody` is
used.
:type tags: dict
:param tags: A set of user-defined `Tags` to associate with this stack,
represented by key/value pairs. Tags defined for the stack are
propagated to EC2 resources that are created as part of the stack.
A maximum number of 10 tags can be specified.
:rtype: string
:return: The unique Stack ID.
"""
params = self._build_create_or_update_params(stack_name, template_body,
template_url, parameters, disable_rollback, timeout_in_minutes,
notification_arns, capabilities, on_failure, stack_policy_body,
stack_policy_url, tags)
body = self._do_request('CreateStack', params, '/', 'POST')
return body['CreateStackResponse']['CreateStackResult']['StackId']
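# Usage sketch (illustrative; `cfn` stands for a configured instance of
# this module's connection class, and the template path, parameter names,
# and tag values are hypothetical):
#
#   with open('my_template.json') as fp:
#       stack_id = cfn.create_stack(
#           'my-stack',
#           template_body=fp.read(),
#           parameters=[('KeyName', 'my-keypair')],
#           capabilities=['CAPABILITY_IAM'],
#           tags={'Environment': 'test'})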
def update_stack(self, stack_name, template_body=None, template_url=None,
parameters=None, notification_arns=None, disable_rollback=False,
timeout_in_minutes=None, capabilities=None, tags=None,
use_previous_template=None,
stack_policy_during_update_body=None,
stack_policy_during_update_url=None,
stack_policy_body=None, stack_policy_url=None):
"""
Updates a stack as specified in the template. After the call
completes successfully, the stack update starts. You can check
the status of the stack via the DescribeStacks action.
**Note:** You cannot update `AWS::S3::Bucket`_ resources, for
example, to add or modify tags.
To get a copy of the template for an existing stack, you can
use the GetTemplate action.
Tags that were associated with this stack during creation time
will still be associated with the stack after an `UpdateStack`
operation.
For more information about creating an update template,
updating a stack, and monitoring the progress of the update,
see `Updating a Stack`_.
:type stack_name: string
:param stack_name:
The name or stack ID of the stack to update.
Must contain only alphanumeric characters (case sensitive) and start
with an alpha character. Maximum length of the name is 255
characters.
:type template_body: string
:param template_body: Structure containing the template body. (For more
information, go to `Template Anatomy`_ in the AWS CloudFormation
User Guide.)
Conditional: You must pass either `UsePreviousTemplate` or one of
`TemplateBody` or `TemplateUrl`. If both `TemplateBody` and
`TemplateUrl` are passed, only `TemplateBody` is used.
:type template_url: string
:param template_url: Location of file containing the template body. The
URL must point to a template (max size: 307,200 bytes) located in
an S3 bucket in the same region as the stack. For more information,
go to the `Template Anatomy`_ in the AWS CloudFormation User Guide.
Conditional: You must pass either `UsePreviousTemplate` or one of
`TemplateBody` or `TemplateUrl`. If both `TemplateBody` and
`TemplateUrl` are passed, only `TemplateBody` is used.
:type use_previous_template: boolean
:param use_previous_template: Set to `True` to use the previous
template instead of uploading a new one via `TemplateBody` or
`TemplateURL`.
Conditional: You must pass either `UsePreviousTemplate` or one of
`TemplateBody` or `TemplateUrl`.
:type parameters: list
:param parameters: A list of key/value tuples that specify input
parameters for the stack. A 3-tuple (key, value, bool) may be used to
specify the `UsePreviousValue` option.
:type notification_arns: list
:param notification_arns: The Simple Notification Service (SNS) topic
ARNs to publish stack related events. You can find your SNS topic
ARNs using the `SNS console`_ or your Command Line Interface (CLI).
:type disable_rollback: bool
:param disable_rollback: Indicates whether or not to rollback on
failure.
:type timeout_in_minutes: integer
:param timeout_in_minutes: The amount of time that can pass before the
stack status becomes CREATE_FAILED; if `DisableRollback` is not set
or is set to `False`, the stack will be rolled back.
:type capabilities: list
:param capabilities: The list of capabilities you want to allow in
the stack. Currently, the only valid capability is
'CAPABILITY_IAM'.
:type tags: dict
:param tags: A set of user-defined `Tags` to associate with this stack,
represented by key/value pairs. Tags defined for the stack are
propagated to EC2 resources that are created as part of the stack.
A maximum number of 10 tags can be specified.
:type stack_policy_during_update_body: string
:param stack_policy_during_update_body: Structure containing the
temporary overriding stack policy body. If you pass
`StackPolicyDuringUpdateBody` and `StackPolicyDuringUpdateURL`,
only `StackPolicyDuringUpdateBody` is used.
If you want to update protected resources, specify a temporary
overriding stack policy during this update. If you do not specify a
stack policy, the current policy that associated with the stack
will be used.
:type stack_policy_during_update_url: string
:param stack_policy_during_update_url: Location of a file containing
the temporary overriding stack policy. The URL must point to a
policy (max size: 16KB) located in an S3 bucket in the same region
as the stack. If you pass `StackPolicyDuringUpdateBody` and
`StackPolicyDuringUpdateURL`, only `StackPolicyDuringUpdateBody` is
used.
If you want to update protected resources, specify a temporary
overriding stack policy during this update. If you do not specify a
stack policy, the current policy that is associated with the stack
will be used.
:type stack_policy_body: string
:param stack_policy_body: Structure containing a new stack policy body
to apply to the stack. If you pass `StackPolicyBody` and
`StackPolicyURL`, only `StackPolicyBody` is used.
:type stack_policy_url: string
:param stack_policy_url: Location of a file containing the updated
stack policy. The URL must point to a policy (max size: 16KB) located
in an S3 bucket in the same region as the stack. If you pass
`StackPolicyBody` and `StackPolicyURL`, only `StackPolicyBody` is used.
:rtype: string
:return: The unique Stack ID.
"""
params = self._build_create_or_update_params(stack_name, template_body,
template_url, parameters, disable_rollback, timeout_in_minutes,
notification_arns, capabilities, None, stack_policy_body,
stack_policy_url, tags, use_previous_template,
stack_policy_during_update_body, stack_policy_during_update_url)
body = self._do_request('UpdateStack', params, '/', 'POST')
return body['UpdateStackResponse']['UpdateStackResult']['StackId']
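# Usage sketch (illustrative): reuse the currently deployed template and
# keep one parameter's previous value via the 3-tuple form described in
# the docstring above; the stack and parameter names are hypothetical.
#
#   stack_id = cfn.update_stack(
#       'my-stack',
#       use_previous_template=True,
#       parameters=[('KeyName', '', True),
#                   ('InstanceType', 'm1.small')])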
def delete_stack(self, stack_name_or_id):
"""
Deletes a specified stack. Once the call completes
successfully, stack deletion starts. Deleted stacks do not
show up in the DescribeStacks API if the deletion has been
completed successfully.
:type stack_name_or_id: string
:param stack_name_or_id: The name or the unique identifier associated
with the stack.
"""
params = {'ContentType': "JSON", 'StackName': stack_name_or_id}
return self._do_request('DeleteStack', params, '/', 'GET')
def describe_stack_events(self, stack_name_or_id=None, next_token=None):
"""
Returns all stack related events for a specified stack. For
more information about a stack's event history, go to
`Stacks`_ in the AWS CloudFormation User Guide.
Events are returned, even if the stack never existed or has
been successfully deleted.
:type stack_name_or_id: string
:param stack_name_or_id: The name or the unique identifier associated
with the stack.
Default: There is no default value.
:type next_token: string
:param next_token: String that identifies the start of the next list of
events, if there is one.
Default: There is no default value.
"""
params = {}
if stack_name_or_id:
params['StackName'] = stack_name_or_id
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribeStackEvents', params, [('member',
StackEvent)])
def describe_stack_resource(self, stack_name_or_id, logical_resource_id):
"""
Returns a description of the specified resource in the
specified stack.
For deleted stacks, DescribeStackResource returns resource
information for up to 90 days after the stack has been
deleted.
:type stack_name_or_id: string
:param stack_name_or_id: The name or the unique identifier associated
with the stack.
Default: There is no default value.
:type logical_resource_id: string
:param logical_resource_id: The logical name of the resource as
specified in the template.
Default: There is no default value.
"""
params = {'ContentType': "JSON", 'StackName': stack_name_or_id,
'LogicalResourceId': logical_resource_id}
return self._do_request('DescribeStackResource', params, '/', 'GET')
def describe_stack_resources(self, stack_name_or_id=None,
logical_resource_id=None,
physical_resource_id=None):
"""
Returns AWS resource descriptions for running and deleted
stacks. If `StackName` is specified, all the associated
resources that are part of the stack are returned. If
`PhysicalResourceId` is specified, the associated resources of
the stack that the resource belongs to are returned.
Only the first 100 resources will be returned. If your stack
has more resources than this, you should use
`ListStackResources` instead.
For deleted stacks, `DescribeStackResources` returns resource
information for up to 90 days after the stack has been
deleted.
You must specify either `StackName` or `PhysicalResourceId`,
but not both. In addition, you can specify `LogicalResourceId`
to filter the returned result. For more information about
resources, the `LogicalResourceId` and `PhysicalResourceId`,
go to the `AWS CloudFormation User Guide`_.
A `ValidationError` is returned if you specify both
`StackName` and `PhysicalResourceId` in the same request.
:type stack_name_or_id: string
:param stack_name_or_id: The name or the unique identifier associated
with the stack.
Required: Conditional. If you do not specify `StackName`, you must
specify `PhysicalResourceId`.
Default: There is no default value.
:type logical_resource_id: string
:param logical_resource_id: The logical name of the resource as
specified in the template.
Default: There is no default value.
:type physical_resource_id: string
:param physical_resource_id: The name or unique identifier that
corresponds to a physical instance ID of a resource supported by
AWS CloudFormation.
For example, for an Amazon Elastic Compute Cloud (EC2) instance,
`PhysicalResourceId` corresponds to the `InstanceId`. You can pass
the EC2 `InstanceId` to `DescribeStackResources` to find which
stack the instance belongs to and what other resources are part of
the stack.
Required: Conditional. If you do not specify `PhysicalResourceId`, you
must specify `StackName`.
Default: There is no default value.
"""
params = {}
if stack_name_or_id:
params['StackName'] = stack_name_or_id
if logical_resource_id:
params['LogicalResourceId'] = logical_resource_id
if physical_resource_id:
params['PhysicalResourceId'] = physical_resource_id
return self.get_list('DescribeStackResources', params,
[('member', StackResource)])
def describe_stacks(self, stack_name_or_id=None, next_token=None):
"""
Returns the description for the specified stack; if no stack
name was specified, then it returns the description for all
the stacks created.
:type stack_name_or_id: string
:param stack_name_or_id: The name or the unique identifier associated
with the stack.
Default: There is no default value.
:type next_token: string
:param next_token: String that identifies the start of the next list of
stacks, if there is one.
"""
params = {}
if stack_name_or_id:
params['StackName'] = stack_name_or_id
if next_token is not None:
params['NextToken'] = next_token
return self.get_list('DescribeStacks', params, [('member', Stack)])
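# Usage sketch (illustrative):
#
#   for stack in cfn.describe_stacks():
#       print('%s: %s' % (stack.stack_name, stack.stack_status))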
def get_template(self, stack_name_or_id):
"""
Returns the template body for a specified stack. You can get
the template for running or deleted stacks.
For deleted stacks, GetTemplate returns the template for up to
90 days after the stack has been deleted.
If the template does not exist, a `ValidationError` is
returned.
:type stack_name_or_id: string
:param stack_name_or_id: The name or the unique identifier associated
with the stack, which are not always interchangeable:
+ Running stacks: You can specify either the stack's name or its unique
stack ID.
+ Deleted stacks: You must specify the unique stack ID.
Default: There is no default value.
"""
params = {'ContentType': "JSON", 'StackName': stack_name_or_id}
return self._do_request('GetTemplate', params, '/', 'GET')
def list_stack_resources(self, stack_name_or_id, next_token=None):
"""
Returns descriptions of all resources of the specified stack.
For deleted stacks, ListStackResources returns resource
information for up to 90 days after the stack has been
deleted.
:type stack_name_or_id: string
:param stack_name_or_id: The name or the unique identifier associated
with the stack, which are not always interchangeable:
+ Running stacks: You can specify either the stack's name or its unique
stack ID.
+ Deleted stacks: You must specify the unique stack ID.
Default: There is no default value.
:type next_token: string
:param next_token: String that identifies the start of the next list of
stack resource summaries, if there is one.
Default: There is no default value.
"""
params = {'StackName': stack_name_or_id}
if next_token:
params['NextToken'] = next_token
return self.get_list('ListStackResources', params,
[('member', StackResourceSummary)])
def list_stacks(self, stack_status_filters=None, next_token=None):
"""
Returns the summary information for stacks whose status
matches the specified StackStatusFilter. Summary information
for stacks that have been deleted is kept for 90 days after
the stack is deleted. If no StackStatusFilter is specified,
summary information for all stacks is returned (including
existing stacks and stacks that have been deleted).
:type next_token: string
:param next_token: String that identifies the start of the next list of
stacks, if there is one.
Default: There is no default value.
:type stack_status_filters: list
:param stack_status_filters: Stack status to use as a filter. Specify
one or more stack status codes to list only stacks with the
specified status codes. For a complete list of stack status codes,
see the `StackStatus` parameter of the Stack data type.
"""
params = {}
if next_token:
params['NextToken'] = next_token
if stack_status_filters and len(stack_status_filters) > 0:
self.build_list_params(params, stack_status_filters,
"StackStatusFilter.member")
return self.get_list('ListStacks', params,
[('member', StackSummary)])
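# Usage sketch (illustrative): restrict the listing to healthy stacks.
#
#   summaries = cfn.list_stacks(
#       stack_status_filters=['CREATE_COMPLETE', 'UPDATE_COMPLETE'])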
def validate_template(self, template_body=None, template_url=None):
"""
Validates a specified template.
:type template_body: string
:param template_body: String containing the template body. (For more
information, go to `Template Anatomy`_ in the AWS CloudFormation
User Guide.)
Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
passed, only `TemplateBody` is used.
:type template_url: string
:param template_url: Location of file containing the template body. The
URL must point to a template (max size: 307,200 bytes) located in
an S3 bucket in the same region as the stack. For more information,
go to `Template Anatomy`_ in the AWS CloudFormation User Guide.
Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
passed, only `TemplateBody` is used.
"""
params = {}
if template_body:
params['TemplateBody'] = template_body
if template_url:
params['TemplateURL'] = template_url
if template_body and template_url:
boto.log.warning("If both TemplateBody and TemplateURL are"
" specified, only TemplateBody will be honored by the API")
return self.get_object('ValidateTemplate', params, Template,
verb="POST")
def cancel_update_stack(self, stack_name_or_id=None):
"""
Cancels an update on the specified stack. If the call
completes successfully, the stack will roll back the update
and revert to the previous stack configuration.
Only stacks that are in the UPDATE_IN_PROGRESS state can be
canceled.
:type stack_name_or_id: string
:param stack_name_or_id: The name or the unique identifier associated with
the stack.
"""
params = {}
if stack_name_or_id:
params['StackName'] = stack_name_or_id
return self.get_status('CancelUpdateStack', params)
def estimate_template_cost(self, template_body=None, template_url=None,
parameters=None):
"""
Returns the estimated monthly cost of a template. The return
value is an AWS Simple Monthly Calculator URL with a query
string that describes the resources required to run the
template.
:type template_body: string
:param template_body: Structure containing the template body. (For more
information, go to `Template Anatomy`_ in the AWS CloudFormation
User Guide.)
Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are
passed, only `TemplateBody` is used.
:type template_url: string
:param template_url: Location of file containing the template body. The
URL must point to a template located in an S3 bucket in the same
region as the stack. For more information, go to `Template
Anatomy`_ in the AWS CloudFormation User Guide.
Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
passed, only `TemplateBody` is used.
:type parameters: list
:param parameters: A list of key/value tuples that specify input
parameters for the template.
:rtype: string
:returns: URL to pre-filled cost calculator
"""
params = {'ContentType': "JSON"}
if template_body is not None:
params['TemplateBody'] = template_body
if template_url is not None:
params['TemplateURL'] = template_url
if parameters and len(parameters) > 0:
for i, (key, value) in enumerate(parameters):
params['Parameters.member.%d.ParameterKey' % (i + 1)] = key
params['Parameters.member.%d.ParameterValue' % (i + 1)] = value
response = self._do_request('EstimateTemplateCost', params, '/', 'POST')
return response['EstimateTemplateCostResponse']\
['EstimateTemplateCostResult']\
['Url']
def get_stack_policy(self, stack_name_or_id):
"""
Returns the stack policy for a specified stack. If a stack
doesn't have a policy, a null value is returned.
:type stack_name_or_id: string
:param stack_name_or_id: The name or stack ID that is associated with
the stack whose policy you want to get.
:rtype: string
:return: The policy JSON document
"""
params = {'ContentType': "JSON", 'StackName': stack_name_or_id, }
response = self._do_request('GetStackPolicy', params, '/', 'POST')
return response['GetStackPolicyResponse']\
['GetStackPolicyResult']\
['StackPolicyBody']
def set_stack_policy(self, stack_name_or_id, stack_policy_body=None,
stack_policy_url=None):
"""
Sets a stack policy for a specified stack.
:type stack_name_or_id: string
:param stack_name_or_id: The name or stack ID that you want to
associate a policy with.
:type stack_policy_body: string
:param stack_policy_body: Structure containing the stack policy body.
(For more information, go to `Prevent Updates to Stack Resources`_
in the AWS CloudFormation User Guide.)
You must pass `StackPolicyBody` or `StackPolicyURL`. If both are
passed, only `StackPolicyBody` is used.
:type stack_policy_url: string
:param stack_policy_url: Location of a file containing the stack
policy. The URL must point to a policy (max size: 16KB) located in
an S3 bucket in the same region as the stack. You must pass
`StackPolicyBody` or `StackPolicyURL`. If both are passed, only
`StackPolicyBody` is used.
"""
params = {'ContentType': "JSON", 'StackName': stack_name_or_id, }
if stack_policy_body is not None:
params['StackPolicyBody'] = stack_policy_body
if stack_policy_url is not None:
params['StackPolicyURL'] = stack_policy_url
response = self._do_request('SetStackPolicy', params, '/', 'POST')
return response['SetStackPolicyResponse']
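# A minimal end-to-end sketch of the stack-policy calls above. The region,
# stack name, and policy document are illustrative placeholders;
# `boto.cloudformation.connect_to_region` is the stock boto entry point
# for obtaining an instance of this module's connection class.
def _example_stack_policy_roundtrip():  # pragma: no cover - illustrative
    import json
    import boto.cloudformation
    cfn = boto.cloudformation.connect_to_region('us-east-1')
    policy = json.dumps({
        'Statement': [{
            'Effect': 'Deny',
            'Action': 'Update:*',
            'Principal': '*',
            'Resource': 'LogicalResourceId/ProductionDatabase',
        }]
    })
    # Attach the policy, then read it back.
    cfn.set_stack_policy('my-stack', stack_policy_body=policy)
    return cfn.get_stack_policy('my-stack')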
# --- boto/cloudformation/connection.py ends here; boto/cloudformation/stack.py follows. ---
from datetime import datetime
from boto.resultset import ResultSet
class Stack(object):
def __init__(self, connection=None):
self.connection = connection
self.creation_time = None
self.description = None
self.disable_rollback = None
self.notification_arns = []
self.outputs = []
self.parameters = []
self.capabilities = []
self.tags = []
self.stack_id = None
self.stack_status = None
self.stack_status_reason = None
self.stack_name = None
self.timeout_in_minutes = None
@property
def stack_name_reason(self):
return self.stack_status_reason
@stack_name_reason.setter
def stack_name_reason(self, value):
self.stack_status_reason = value
def startElement(self, name, attrs, connection):
if name == "Parameters":
self.parameters = ResultSet([('member', Parameter)])
return self.parameters
elif name == "Outputs":
self.outputs = ResultSet([('member', Output)])
return self.outputs
elif name == "Capabilities":
self.capabilities = ResultSet([('member', Capability)])
return self.capabilities
elif name == "Tags":
self.tags = Tag()
return self.tags
elif name == 'NotificationARNs':
self.notification_arns = ResultSet([('member', NotificationARN)])
return self.notification_arns
else:
return None
def endElement(self, name, value, connection):
if name == 'CreationTime':
try:
self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
elif name == "Description":
self.description = value
elif name == "DisableRollback":
if str(value).lower() == 'true':
self.disable_rollback = True
else:
self.disable_rollback = False
elif name == 'StackId':
self.stack_id = value
elif name == 'StackName':
self.stack_name = value
elif name == 'StackStatus':
self.stack_status = value
elif name == "StackStatusReason":
self.stack_status_reason = value
elif name == "TimeoutInMinutes":
self.timeout_in_minutes = int(value)
elif name == "member":
pass
else:
setattr(self, name, value)
def delete(self):
return self.connection.delete_stack(stack_name_or_id=self.stack_id)
def describe_events(self, next_token=None):
return self.connection.describe_stack_events(
stack_name_or_id=self.stack_id,
next_token=next_token
)
def describe_resource(self, logical_resource_id):
return self.connection.describe_stack_resource(
stack_name_or_id=self.stack_id,
logical_resource_id=logical_resource_id
)
def describe_resources(self, logical_resource_id=None,
physical_resource_id=None):
return self.connection.describe_stack_resources(
stack_name_or_id=self.stack_id,
logical_resource_id=logical_resource_id,
physical_resource_id=physical_resource_id
)
def list_resources(self, next_token=None):
return self.connection.list_stack_resources(
stack_name_or_id=self.stack_id,
next_token=next_token
)
def update(self):
rs = self.connection.describe_stacks(self.stack_id)
if len(rs) == 1 and rs[0].stack_id == self.stack_id:
self.__dict__.update(rs[0].__dict__)
else:
raise ValueError("%s is not a valid Stack ID or Name" %
self.stack_id)
def get_template(self):
return self.connection.get_template(stack_name_or_id=self.stack_id)
def get_policy(self):
"""
Returns the stack policy for this stack. If it has no policy
then, a null value is returned.
"""
return self.connection.get_stack_policy(self.stack_id)
def set_policy(self, stack_policy_body=None, stack_policy_url=None):
"""
Sets a stack policy for this stack.
:type stack_policy_body: string
:param stack_policy_body: Structure containing the stack policy body.
(For more information, go to `Prevent Updates to Stack Resources`_
in the AWS CloudFormation User Guide.)
You must pass `StackPolicyBody` or `StackPolicyURL`. If both are
passed, only `StackPolicyBody` is used.
:type stack_policy_url: string
:param stack_policy_url: Location of a file containing the stack
policy. The URL must point to a policy (max size: 16KB) located in
an S3 bucket in the same region as the stack. You must pass
`StackPolicyBody` or `StackPolicyURL`. If both are passed, only
`StackPolicyBody` is used.
"""
return self.connection.set_stack_policy(self.stack_id,
stack_policy_body=stack_policy_body,
stack_policy_url=stack_policy_url)
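# Illustrative helper combining the Stack convenience methods above;
# assumes `connection` is an instance of the connection class from
# connection.py and that the named stack exists.
def _example_inspect_stack(connection, stack_name):  # pragma: no cover
    stack = connection.describe_stacks(stack_name)[0]
    for output in stack.outputs:
        print('output %s = %s' % (output.key, output.value))
    for resource in stack.list_resources():
        print('%s is %s' % (resource.logical_resource_id,
                            resource.resource_status))
    return stack.get_template()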
class StackSummary(object):
def __init__(self, connection=None):
self.connection = connection
self.stack_id = None
self.stack_status = None
self.stack_name = None
self.creation_time = None
self.deletion_time = None
self.template_description = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'StackId':
self.stack_id = value
elif name == 'StackStatus':
self.stack_status = value
elif name == 'StackName':
self.stack_name = value
elif name == 'CreationTime':
try:
self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
elif name == "DeletionTime":
try:
self.deletion_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
self.deletion_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
elif name == 'TemplateDescription':
self.template_description = value
elif name == "member":
pass
else:
setattr(self, name, value)
class Parameter(object):
def __init__(self, connection=None):
self.connection = connection
self.key = None
self.value = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "ParameterKey":
self.key = value
elif name == "ParameterValue":
self.value = value
else:
setattr(self, name, value)
def __repr__(self):
return "Parameter:\"%s\"=\"%s\"" % (self.key, self.value)
class Output(object):
def __init__(self, connection=None):
self.connection = connection
self.description = None
self.key = None
self.value = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "Description":
self.description = value
elif name == "OutputKey":
self.key = value
elif name == "OutputValue":
self.value = value
else:
setattr(self, name, value)
def __repr__(self):
return "Output:\"%s\"=\"%s\"" % (self.key, self.value)
class Capability(object):
def __init__(self, connection=None):
self.connection = connection
self.value = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
self.value = value
def __repr__(self):
return "Capability:\"%s\"" % (self.value)
class Tag(dict):
def __init__(self, connection=None):
dict.__init__(self)
self.connection = connection
self._current_key = None
self._current_value = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "Key":
self._current_key = value
elif name == "Value":
self._current_value = value
else:
setattr(self, name, value)
if self._current_key and self._current_value:
self[self._current_key] = self._current_value
self._current_key = None
self._current_value = None
class NotificationARN(object):
def __init__(self, connection=None):
self.connection = connection
self.value = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
self.value = value
def __repr__(self):
return "NotificationARN:\"%s\"" % (self.value)
class StackResource(object):
def __init__(self, connection=None):
self.connection = connection
self.description = None
self.logical_resource_id = None
self.physical_resource_id = None
self.resource_status = None
self.resource_status_reason = None
self.resource_type = None
self.stack_id = None
self.stack_name = None
self.timestamp = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "Description":
self.description = value
elif name == "LogicalResourceId":
self.logical_resource_id = value
elif name == "PhysicalResourceId":
self.physical_resource_id = value
elif name == "ResourceStatus":
self.resource_status = value
elif name == "ResourceStatusReason":
self.resource_status_reason = value
elif name == "ResourceType":
self.resource_type = value
elif name == "StackId":
self.stack_id = value
elif name == "StackName":
self.stack_name = value
elif name == "Timestamp":
try:
self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
else:
setattr(self, name, value)
def __repr__(self):
return "StackResource:%s (%s)" % (self.logical_resource_id,
self.resource_type)
class StackResourceSummary(object):
def __init__(self, connection=None):
self.connection = connection
self.last_updated_time = None
self.logical_resource_id = None
self.physical_resource_id = None
self.resource_status = None
self.resource_status_reason = None
self.resource_type = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "LastUpdatedTime":
try:
self.last_updated_time = datetime.strptime(
value,
'%Y-%m-%dT%H:%M:%SZ'
)
except ValueError:
self.last_updated_time = datetime.strptime(
value,
'%Y-%m-%dT%H:%M:%S.%fZ'
)
elif name == "LogicalResourceId":
self.logical_resource_id = value
elif name == "PhysicalResourceId":
self.physical_resource_id = value
elif name == "ResourceStatus":
self.resource_status = value
elif name == "ResourceStatusReason":
self.resource_status_reason = value
elif name == "ResourceType":
self.resource_type = value
else:
setattr(self, name, value)
def __repr__(self):
return "StackResourceSummary:%s (%s)" % (self.logical_resource_id,
self.resource_type)
class StackEvent(object):
valid_states = ("CREATE_IN_PROGRESS", "CREATE_FAILED", "CREATE_COMPLETE",
"DELETE_IN_PROGRESS", "DELETE_FAILED", "DELETE_COMPLETE")
def __init__(self, connection=None):
self.connection = connection
self.event_id = None
self.logical_resource_id = None
self.physical_resource_id = None
self.resource_properties = None
self.resource_status = None
self.resource_status_reason = None
self.resource_type = None
self.stack_id = None
self.stack_name = None
self.timestamp = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "EventId":
self.event_id = value
elif name == "LogicalResourceId":
self.logical_resource_id = value
elif name == "PhysicalResourceId":
self.physical_resource_id = value
elif name == "ResourceProperties":
self.resource_properties = value
elif name == "ResourceStatus":
self.resource_status = value
elif name == "ResourceStatusReason":
self.resource_status_reason = value
elif name == "ResourceType":
self.resource_type = value
elif name == "StackId":
self.stack_id = value
elif name == "StackName":
self.stack_name = value
elif name == "Timestamp":
try:
self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
else:
setattr(self, name, value)
def __repr__(self):
return "StackEvent %s %s %s" % (self.resource_type,
self.logical_resource_id, self.resource_status)
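# Illustrative polling loop built on the classes above; the 15-second
# interval and the `_IN_PROGRESS` terminal-state test are arbitrary
# choices for this sketch, not library behavior.
def _example_wait_for_stack(connection, stack_name):  # pragma: no cover
    import time
    while True:
        stack = connection.describe_stacks(stack_name)[0]
        if not stack.stack_status.endswith('_IN_PROGRESS'):
            return stack.stack_status
        # Show the three most recent events while waiting.
        for event in connection.describe_stack_events(stack_name)[:3]:
            print('%s %s %s' % (event.resource_type,
                                event.logical_resource_id,
                                event.resource_status))
        time.sleep(15)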
# --- boto/cloudformation/stack.py ends here; boto/fps/exception.py follows. ---
from boto.exception import BotoServerError
class ResponseErrorFactory(BotoServerError):
def __new__(cls, *args, **kw):
error = BotoServerError(*args, **kw)
newclass = globals().get(error.error_code, ResponseError)
obj = newclass.__new__(newclass, *args, **kw)
obj.__dict__.update(error.__dict__)
return obj
class ResponseError(BotoServerError):
"""Undefined response error.
"""
retry = False
def __repr__(self):
return '{0}({1}, {2},\n\t{3})'.format(self.__class__.__name__,
self.status, self.reason,
self.error_message)
def __str__(self):
return 'FPS Response Error: {0.status} {0.__class__.__name__} {1}\n' \
'{2}\n' \
'{0.error_message}'.format(self,
self.retry and '(Retriable)' or '',
self.__doc__.strip())
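# Illustrative retry wrapper keyed off the `retry` flag defined above;
# the attempt count and exponential backoff are assumptions of this
# sketch, not part of the FPS API.
def _example_call_with_retry(fps_call, *args, **kwargs):  # pragma: no cover
    import time
    for attempt in range(3):
        try:
            return fps_call(*args, **kwargs)
        except ResponseError as error:
            if not error.retry or attempt == 2:
                raise
            time.sleep(2 ** attempt)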
class RetriableResponseError(ResponseError):
retry = True
class AccessFailure(RetriableResponseError):
"""Account cannot be accessed.
"""
class AccountClosed(RetriableResponseError):
"""Account is not active.
"""
class AccountLimitsExceeded(RetriableResponseError):
"""The spending or receiving limit on the account is exceeded.
"""
class AmountOutOfRange(ResponseError):
"""The transaction amount is more than the allowed range.
"""
class AuthFailure(RetriableResponseError):
"""AWS was not able to validate the provided access credentials.
"""
class ConcurrentModification(RetriableResponseError):
"""A retriable error can happen when two processes try to modify the
same data at the same time.
"""
class DuplicateRequest(ResponseError):
"""A different request associated with this caller reference already
exists.
"""
class InactiveInstrument(ResponseError):
"""Payment instrument is inactive.
"""
class IncompatibleTokens(ResponseError):
"""The transaction could not be completed because the tokens have
incompatible payment instructions.
"""
class InstrumentAccessDenied(ResponseError):
"""The external calling application is not the recipient for this
postpaid or prepaid instrument.
"""
class InstrumentExpired(ResponseError):
"""The prepaid or the postpaid instrument has expired.
"""
class InsufficientBalance(RetriableResponseError):
"""The sender, caller, or recipient's account balance has
insufficient funds to complete the transaction.
"""
class InternalError(RetriableResponseError):
"""A retriable error that happens due to some transient problem in
the system.
"""
class InvalidAccountState(RetriableResponseError):
"""The account is either suspended or closed.
"""
class InvalidAccountState_Caller(RetriableResponseError):
"""The developer account cannot participate in the transaction.
"""
class InvalidAccountState_Recipient(RetriableResponseError):
"""Recipient account cannot participate in the transaction.
"""
class InvalidAccountState_Sender(RetriableResponseError):
"""Sender account cannot participate in the transaction.
"""
class InvalidCallerReference(ResponseError):
"""The Caller Reference does not have a token associated with it.
"""
class InvalidClientTokenId(ResponseError):
"""The AWS Access Key Id you provided does not exist in our records.
"""
class InvalidDateRange(ResponseError):
"""The end date specified is before the start date or the start date
is in the future.
"""
class InvalidParams(ResponseError):
"""One or more parameters in the request is invalid.
"""
class InvalidPaymentInstrument(ResponseError):
"""The payment method used in the transaction is invalid.
"""
class InvalidPaymentMethod(ResponseError):
"""Specify correct payment method.
"""
class InvalidRecipientForCCTransaction(ResponseError):
"""This account cannot receive credit card payments.
"""
class InvalidSenderRoleForAccountType(ResponseError):
"""This token cannot be used for this operation.
"""
class InvalidTokenId(ResponseError):
"""You did not install the token that you are trying to cancel.
"""
class InvalidTokenId_Recipient(ResponseError):
"""The recipient token specified is either invalid or canceled.
"""
class InvalidTokenId_Sender(ResponseError):
"""The sender token specified is either invalid or canceled or the
token is not active.
"""
class InvalidTokenType(ResponseError):
"""An invalid operation was performed on the token, for example,
getting the token usage information on a single use token.
"""
class InvalidTransactionId(ResponseError):
"""The specified transaction could not be found or the caller did not
execute the transaction or this is not a Pay or Reserve call.
"""
class InvalidTransactionState(ResponseError):
"""The transaction is not complete, or it has temporarily failed.
"""
class NotMarketplaceApp(RetriableResponseError):
"""This is not an marketplace application or the caller does not
match either the sender or the recipient.
"""
class OriginalTransactionFailed(ResponseError):
"""The original transaction has failed.
"""
class OriginalTransactionIncomplete(RetriableResponseError):
"""The original transaction is still in progress.
"""
class PaymentInstrumentNotCC(ResponseError):
"""The payment method specified in the transaction is not a credit
card. You can only use a credit card for this transaction.
"""
class PaymentMethodNotDefined(ResponseError):
"""Payment method is not defined in the transaction.
"""
class PrepaidFundingLimitExceeded(RetriableResponseError):
"""An attempt has been made to fund the prepaid instrument
at a level greater than its recharge limit.
"""
class RefundAmountExceeded(ResponseError):
"""The refund amount is more than the refundable amount.
"""
class SameSenderAndRecipient(ResponseError):
"""The sender and receiver are identical, which is not allowed.
"""
class SameTokenIdUsedMultipleTimes(ResponseError):
"""This token is already used in earlier transactions.
"""
class SenderNotOriginalRecipient(ResponseError):
"""The sender in the refund transaction is not
the recipient of the original transaction.
"""
class SettleAmountGreaterThanDebt(ResponseError):
"""The amount being settled or written off is
greater than the current debt.
"""
class SettleAmountGreaterThanReserveAmount(ResponseError):
"""The amount being settled is greater than the reserved amount.
"""
class SignatureDoesNotMatch(ResponseError):
"""The request signature calculated by Amazon does not match the
signature you provided.
"""
class TokenAccessDenied(ResponseError):
"""Permission to cancel the token is denied.
"""
class TokenNotActive(ResponseError):
"""The token is canceled.
"""
class TokenNotActive_Recipient(ResponseError):
"""The recipient token is canceled.
"""
class TokenNotActive_Sender(ResponseError):
"""The sender token is canceled.
"""
class TokenUsageError(ResponseError):
"""The token usage limit is exceeded.
"""
class TransactionDenied(ResponseError):
"""The transaction is not allowed.
"""
class TransactionFullyRefundedAlready(ResponseError):
"""The transaction has already been completely refunded.
"""
class TransactionTypeNotRefundable(ResponseError):
"""You cannot refund this transaction.
"""
class UnverifiedAccount_Recipient(ResponseError):
"""The recipient's account must have a verified bank account or a
credit card before this transaction can be initiated.
"""
class UnverifiedAccount_Sender(ResponseError):
"""The sender's account must have a verified U.S. credit card or
a verified U.S. bank account before this transaction can be
initiated.
"""
class UnverifiedBankAccount(ResponseError):
"""A verified bank account should be used for this transaction.
"""
class UnverifiedEmailAddress_Caller(ResponseError):
"""The caller account must have a verified email address.
"""
class UnverifiedEmailAddress_Recipient(ResponseError):
"""The recipient account must have a verified
email address for receiving payments.
"""
class UnverifiedEmailAddress_Sender(ResponseError):
"""The sender account must have a verified
email address for this payment.
"""
# --- boto/fps/exception.py ends here; boto/rds2/connection.py follows. ---
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.rds2 import exceptions
from boto.compat import json
class RDSConnection(AWSQueryConnection):
"""
Amazon Relational Database Service
Amazon Relational Database Service (Amazon RDS) is a web service
that makes it easier to set up, operate, and scale a relational
database in the cloud. It provides cost-efficient, resizable
capacity for an industry-standard relational database and manages
common database administration tasks, freeing up developers to
focus on what makes their applications and businesses unique.
Amazon RDS gives you access to the capabilities of a familiar
MySQL or Oracle database server. This means the code,
applications, and tools you already use today with your existing
MySQL or Oracle databases work with Amazon RDS without
modification. Amazon RDS automatically backs up your database and
maintains the database software that powers your DB instance.
Amazon RDS is flexible: you can scale your database instance's
compute resources and storage capacity to meet your application's
demand. As with all Amazon Web Services, there are no up-front
investments, and you pay only for the resources you use.
This is the Amazon RDS API Reference . It contains a comprehensive
description of all Amazon RDS Query APIs and data types. Note that
this API is asynchronous and some actions may require polling to
determine when an action has been applied. See the parameter
description to determine if a change is applied immediately or on
the next instance reboot or during the maintenance window. For
more information on Amazon RDS concepts and usage scenarios, go to
the `Amazon RDS User Guide`_.
"""
APIVersion = "2013-09-09"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "rds.us-east-1.amazonaws.com"
ResponseError = JSONResponseError
_faults = {
"InvalidSubnet": exceptions.InvalidSubnet,
"DBParameterGroupQuotaExceeded": exceptions.DBParameterGroupQuotaExceeded,
"DBSubnetGroupAlreadyExists": exceptions.DBSubnetGroupAlreadyExists,
"DBSubnetGroupQuotaExceeded": exceptions.DBSubnetGroupQuotaExceeded,
"InstanceQuotaExceeded": exceptions.InstanceQuotaExceeded,
"InvalidRestore": exceptions.InvalidRestore,
"InvalidDBParameterGroupState": exceptions.InvalidDBParameterGroupState,
"AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceeded,
"DBSecurityGroupAlreadyExists": exceptions.DBSecurityGroupAlreadyExists,
"InsufficientDBInstanceCapacity": exceptions.InsufficientDBInstanceCapacity,
"ReservedDBInstanceQuotaExceeded": exceptions.ReservedDBInstanceQuotaExceeded,
"DBSecurityGroupNotFound": exceptions.DBSecurityGroupNotFound,
"DBInstanceAlreadyExists": exceptions.DBInstanceAlreadyExists,
"ReservedDBInstanceNotFound": exceptions.ReservedDBInstanceNotFound,
"DBSubnetGroupDoesNotCoverEnoughAZs": exceptions.DBSubnetGroupDoesNotCoverEnoughAZs,
"InvalidDBSecurityGroupState": exceptions.InvalidDBSecurityGroupState,
"InvalidVPCNetworkState": exceptions.InvalidVPCNetworkState,
"ReservedDBInstancesOfferingNotFound": exceptions.ReservedDBInstancesOfferingNotFound,
"SNSTopicArnNotFound": exceptions.SNSTopicArnNotFound,
"SNSNoAuthorization": exceptions.SNSNoAuthorization,
"SnapshotQuotaExceeded": exceptions.SnapshotQuotaExceeded,
"OptionGroupQuotaExceeded": exceptions.OptionGroupQuotaExceeded,
"DBParameterGroupNotFound": exceptions.DBParameterGroupNotFound,
"SNSInvalidTopic": exceptions.SNSInvalidTopic,
"InvalidDBSubnetGroupState": exceptions.InvalidDBSubnetGroupState,
"DBSubnetGroupNotFound": exceptions.DBSubnetGroupNotFound,
"InvalidOptionGroupState": exceptions.InvalidOptionGroupState,
"SourceNotFound": exceptions.SourceNotFound,
"SubscriptionCategoryNotFound": exceptions.SubscriptionCategoryNotFound,
"EventSubscriptionQuotaExceeded": exceptions.EventSubscriptionQuotaExceeded,
"DBSecurityGroupNotSupported": exceptions.DBSecurityGroupNotSupported,
"InvalidEventSubscriptionState": exceptions.InvalidEventSubscriptionState,
"InvalidDBSubnetState": exceptions.InvalidDBSubnetState,
"InvalidDBSnapshotState": exceptions.InvalidDBSnapshotState,
"SubscriptionAlreadyExist": exceptions.SubscriptionAlreadyExist,
"DBSecurityGroupQuotaExceeded": exceptions.DBSecurityGroupQuotaExceeded,
"ProvisionedIopsNotAvailableInAZ": exceptions.ProvisionedIopsNotAvailableInAZ,
"AuthorizationNotFound": exceptions.AuthorizationNotFound,
"OptionGroupAlreadyExists": exceptions.OptionGroupAlreadyExists,
"SubscriptionNotFound": exceptions.SubscriptionNotFound,
"DBUpgradeDependencyFailure": exceptions.DBUpgradeDependencyFailure,
"PointInTimeRestoreNotEnabled": exceptions.PointInTimeRestoreNotEnabled,
"AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExists,
"DBSubnetQuotaExceeded": exceptions.DBSubnetQuotaExceeded,
"OptionGroupNotFound": exceptions.OptionGroupNotFound,
"DBParameterGroupAlreadyExists": exceptions.DBParameterGroupAlreadyExists,
"DBInstanceNotFound": exceptions.DBInstanceNotFound,
"ReservedDBInstanceAlreadyExists": exceptions.ReservedDBInstanceAlreadyExists,
"InvalidDBInstanceState": exceptions.InvalidDBInstanceState,
"DBSnapshotNotFound": exceptions.DBSnapshotNotFound,
"DBSnapshotAlreadyExists": exceptions.DBSnapshotAlreadyExists,
"StorageQuotaExceeded": exceptions.StorageQuotaExceeded,
"SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(RDSConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
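# Usage sketch (illustrative): `boto.rds2.connect_to_region` is the
# usual entry point and returns an instance of this class.
#
#   import boto.rds2
#   rds = boto.rds2.connect_to_region('us-east-1')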
def add_source_identifier_to_subscription(self, subscription_name,
source_identifier):
"""
Adds a source identifier to an existing RDS event notification
subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to add a source identifier to.
:type source_identifier: string
:param source_identifier:
The identifier of the event source to be added. An identifier must
begin with a letter and must contain only ASCII letters, digits,
and hyphens; it cannot end with a hyphen or contain two consecutive
hyphens.
Constraints:
+ If the source type is a DB instance, then a `DBInstanceIdentifier`
must be supplied.
+ If the source type is a DB security group, a `DBSecurityGroupName`
must be supplied.
+ If the source type is a DB parameter group, a `DBParameterGroupName`
must be supplied.
+ If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be
supplied.
"""
params = {
'SubscriptionName': subscription_name,
'SourceIdentifier': source_identifier,
}
return self._make_request(
action='AddSourceIdentifierToSubscription',
verb='POST',
path='/', params=params)
def add_tags_to_resource(self, resource_name, tags):
"""
Adds metadata tags to an Amazon RDS resource. These tags can
also be used with cost allocation reporting to track cost
associated with Amazon RDS resources, or used in Condition
statement in IAM policy for Amazon RDS.
For an overview on tagging Amazon RDS resources, see `Tagging
Amazon RDS Resources`_.
:type resource_name: string
:param resource_name: The Amazon RDS resource the tags will be added
to. This value is an Amazon Resource Name (ARN). For information
about creating an ARN, see ` Constructing an RDS Amazon Resource
Name (ARN)`_.
:type tags: list
:param tags: The tags to be assigned to the Amazon RDS resource.
Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
"""
params = {'ResourceName': resource_name, }
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='AddTagsToResource',
verb='POST',
path='/', params=params)
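# Usage sketch (illustrative ARN, account id, and tag values):
#
#   rds.add_tags_to_resource(
#       'arn:aws:rds:us-east-1:123456789012:db:mydbinstance',
#       [('Environment', 'test'), ('Owner', 'data-team')])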
def authorize_db_security_group_ingress(self, db_security_group_name,
cidrip=None,
ec2_security_group_name=None,
ec2_security_group_id=None,
ec2_security_group_owner_id=None):
"""
Enables ingress to a DBSecurityGroup using one of two forms of
authorization. First, EC2 or VPC security groups can be added
to the DBSecurityGroup if the application using the database
is running on EC2 or VPC instances. Second, IP ranges are
available if the application accessing your database is
running on the Internet. Required parameters for this API are
one of CIDR range, EC2SecurityGroupId for VPC, or
(EC2SecurityGroupOwnerId and either EC2SecurityGroupName or
EC2SecurityGroupId) for non-VPC.
You cannot authorize ingress from an EC2 security group in one
Region to an Amazon RDS DB instance in another. You cannot
authorize ingress from a VPC security group in one VPC to an
Amazon RDS DB instance in another.
For an overview of CIDR ranges, go to the `Wikipedia
Tutorial`_.
:type db_security_group_name: string
:param db_security_group_name: The name of the DB security group to add
authorization to.
:type cidrip: string
:param cidrip: The IP range to authorize.
:type ec2_security_group_name: string
:param ec2_security_group_name: Name of the EC2 security group to
authorize. For VPC DB security groups, `EC2SecurityGroupId` must be
provided. Otherwise, EC2SecurityGroupOwnerId and either
`EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
:type ec2_security_group_id: string
:param ec2_security_group_id: Id of the EC2 security group to
authorize. For VPC DB security groups, `EC2SecurityGroupId` must be
provided. Otherwise, EC2SecurityGroupOwnerId and either
`EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: AWS Account Number of the owner of
the EC2 security group specified in the EC2SecurityGroupName
parameter. The AWS Access Key ID is not an acceptable value. For
VPC DB security groups, `EC2SecurityGroupId` must be provided.
Otherwise, EC2SecurityGroupOwnerId and either
`EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
"""
params = {'DBSecurityGroupName': db_security_group_name, }
if cidrip is not None:
params['CIDRIP'] = cidrip
if ec2_security_group_name is not None:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_id is not None:
params['EC2SecurityGroupId'] = ec2_security_group_id
if ec2_security_group_owner_id is not None:
params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
return self._make_request(
action='AuthorizeDBSecurityGroupIngress',
verb='POST',
path='/', params=params)
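# Usage sketch (illustrative): authorize an IP range for a classic
# (non-VPC) DB security group.
#
#   rds.authorize_db_security_group_ingress(
#       'mydbsecuritygroup', cidrip='203.0.113.0/24')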
def copy_db_snapshot(self, source_db_snapshot_identifier,
target_db_snapshot_identifier, tags=None):
"""
Copies the specified DBSnapshot. The source DBSnapshot must be
in the "available" state.
:type source_db_snapshot_identifier: string
:param source_db_snapshot_identifier: The identifier for the source DB
snapshot.
Constraints:
+ Must be the identifier for a valid system snapshot in the "available"
state.
Example: `rds:mydb-2012-04-02-00-01`
:type target_db_snapshot_identifier: string
:param target_db_snapshot_identifier: The identifier for the copied
snapshot.
Constraints:
+ Cannot be null, empty, or blank
+ Must contain from 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `my-db-snapshot`
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
"""
params = {
'SourceDBSnapshotIdentifier': source_db_snapshot_identifier,
'TargetDBSnapshotIdentifier': target_db_snapshot_identifier,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CopyDBSnapshot',
verb='POST',
path='/', params=params)
def create_db_instance(self, db_instance_identifier, allocated_storage,
db_instance_class, engine, master_username,
master_user_password, db_name=None,
db_security_groups=None,
vpc_security_group_ids=None,
availability_zone=None, db_subnet_group_name=None,
preferred_maintenance_window=None,
db_parameter_group_name=None,
backup_retention_period=None,
preferred_backup_window=None, port=None,
multi_az=None, engine_version=None,
auto_minor_version_upgrade=None,
license_model=None, iops=None,
option_group_name=None, character_set_name=None,
publicly_accessible=None, tags=None):
"""
Creates a new DB instance.
:type db_name: string
:param db_name: The meaning of this parameter differs according to the
database engine you use.
**MySQL**
The name of the database to create when the DB instance is created. If
this parameter is not specified, no database is created in the DB
instance.
Constraints:
+ Must contain 1 to 64 alphanumeric characters
+ Cannot be a word reserved by the specified database engine
Type: String
**Oracle**
The Oracle System ID (SID) of the created DB instance.
Default: `ORCL`
Constraints:
+ Cannot be longer than 8 characters
**SQL Server**
Not applicable. Must be null.
:type db_instance_identifier: string
:param db_instance_identifier: The DB instance identifier. This
parameter is stored as a lowercase string.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15
for SQL Server).
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
Example: `mydbinstance`
:type allocated_storage: integer
:param allocated_storage: The amount of storage (in gigabytes) to be
initially allocated for the database instance.
**MySQL**
Constraints: Must be an integer from 5 to 1024.
Type: Integer
**Oracle**
Constraints: Must be an integer from 10 to 1024.
**SQL Server**
Constraints: Must be an integer from 200 to 1024 (Standard Edition and
Enterprise Edition) or from 30 to 1024 (Express Edition and Web
Edition)
:type db_instance_class: string
:param db_instance_class: The compute and memory capacity of the DB
instance.
Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge`
:type engine: string
:param engine: The name of the database engine to be used for this
instance.
Valid Values: `MySQL` | `oracle-se1` | `oracle-se` | `oracle-ee` |
`sqlserver-ee` | `sqlserver-se` | `sqlserver-ex` | `sqlserver-web`
:type master_username: string
:param master_username:
The name of master user for the client DB instance.
**MySQL**
Constraints:
+ Must be 1 to 16 alphanumeric characters.
+ First character must be a letter.
+ Cannot be a reserved word for the chosen database engine.
Type: String
**Oracle**
Constraints:
+ Must be 1 to 30 alphanumeric characters.
+ First character must be a letter.
+ Cannot be a reserved word for the chosen database engine.
**SQL Server**
Constraints:
+ Must be 1 to 128 alphanumeric characters.
+ First character must be a letter.
+ Cannot be a reserved word for the chosen database engine.
:type master_user_password: string
:param master_user_password: The password for the master database user.
Can be any printable ASCII character except "/", '"', or "@".
Type: String
**MySQL**
Constraints: Must contain from 8 to 41 characters.
**Oracle**
Constraints: Must contain from 8 to 30 characters.
**SQL Server**
Constraints: Must contain from 8 to 128 characters.
:type db_security_groups: list
:param db_security_groups: A list of DB security groups to associate
with this DB instance.
Default: The default DB security group for the database engine.
:type vpc_security_group_ids: list
:param vpc_security_group_ids: A list of EC2 VPC security groups to
associate with this DB instance.
Default: The default EC2 VPC security group for the DB subnet group's
VPC.
:type availability_zone: string
:param availability_zone: The EC2 Availability Zone that the database
instance will be created in.
Default: A random, system-chosen Availability Zone in the endpoint's
region.
Example: `us-east-1d`
Constraint: The AvailabilityZone parameter cannot be specified if the
MultiAZ parameter is set to `True`. The specified Availability Zone
must be in the same region as the current endpoint.
:type db_subnet_group_name: string
:param db_subnet_group_name: A DB subnet group to associate with this
DB instance.
If there is no DB subnet group, then it is a non-VPC DB instance.
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which system maintenance can occur.
Format: `ddd:hh24:mi-ddd:hh24:mi`
Default: A 30-minute window selected at random from an 8-hour block of
time per region, occurring on a random day of the week. To see the
time blocks available, see ` Adjusting the Preferred Maintenance
Window`_ in the Amazon RDS User Guide.
Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun
Constraints: Minimum 30-minute window.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group to associate with this DB instance.
If this argument is omitted, the default DBParameterGroup for the
specified engine will be used.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type backup_retention_period: integer
:param backup_retention_period:
The number of days for which automated backups are retained. Setting
this parameter to a positive number enables backups. Setting this
parameter to 0 disables automated backups.
Default: 1
Constraints:
+ Must be a value from 0 to 8
+ Cannot be set to 0 if the DB instance is a master instance with read
replicas
:type preferred_backup_window: string
:param preferred_backup_window: The daily time range during which
automated backups are created if automated backups are enabled,
using the `BackupRetentionPeriod` parameter.
Default: A 30-minute window selected at random from an 8-hour block of
time per region. See the Amazon RDS User Guide for the time blocks
for each region from which the default backup windows are assigned.
Constraints: Must be in the format `hh24:mi-hh24:mi`. Times should be
Universal Time Coordinated (UTC). Must not conflict with the
preferred maintenance window. Must be at least 30 minutes.
:type port: integer
:param port: The port number on which the database accepts connections.
**MySQL**
Default: `3306`
Valid Values: `1150-65535`
Type: Integer
**Oracle**
Default: `1521`
Valid Values: `1150-65535`
**SQL Server**
Default: `1433`
Valid Values: `1150-65535` except for `1434` and `3389`.
:type multi_az: boolean
:param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
You cannot set the AvailabilityZone parameter if the MultiAZ
parameter is set to true.
:type engine_version: string
:param engine_version: The version number of the database engine to
use.
**MySQL**
Example: `5.1.42`
Type: String
**Oracle**
Example: `11.2.0.2.v2`
Type: String
**SQL Server**
Example: `10.50.2789.0.v1`
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor engine upgrades
will be applied automatically to the DB instance during the
maintenance window.
Default: `True`
:type license_model: string
:param license_model: License model information for this DB instance.
Valid values: `license-included` | `bring-your-own-license` | `general-
public-license`
:type iops: integer
:param iops: The amount of Provisioned IOPS (input/output operations
per second) to be initially allocated for the DB instance.
Constraints: Must be an integer greater than 1000.
:type option_group_name: string
:param option_group_name: Indicates that the DB instance should be
associated with the specified option group.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with one.
:type character_set_name: string
:param character_set_name: For supported engines, indicates that the DB
instance should be associated with the specified CharacterSet.
:type publicly_accessible: boolean
:param publicly_accessible: Specifies the accessibility options for the
DB instance. A value of true specifies an Internet-facing instance
with a publicly resolvable DNS name, which resolves to a public IP
address. A value of false specifies an internal instance with a DNS
name that resolves to a private IP address.
Default: The default behavior varies depending on whether a VPC has
been requested or not. The following list shows the default
behavior in each case.
+ **Default VPC:**true
+ **VPC:**false
If no DB subnet group has been specified as part of the request and the
PubliclyAccessible value has not been set, the DB instance will be
publicly accessible. If a specific DB subnet group has been
specified as part of the request and the PubliclyAccessible value
has not been set, the DB instance will be private.
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
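Example (a minimal usage sketch; ``conn`` is assumed to be an RDS
connection object created elsewhere, e.g. via
``boto.rds2.connect_to_region``, and all argument values are
illustrative)::

    conn.create_db_instance(
        db_instance_identifier='mydbinstance',
        allocated_storage=10,
        db_instance_class='db.m1.small',
        engine='MySQL',
        master_username='admin',
        master_user_password='mysecretpassword')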
"""
params = {
'DBInstanceIdentifier': db_instance_identifier,
'AllocatedStorage': allocated_storage,
'DBInstanceClass': db_instance_class,
'Engine': engine,
'MasterUsername': master_username,
'MasterUserPassword': master_user_password,
}
if db_name is not None:
params['DBName'] = db_name
if db_security_groups is not None:
self.build_list_params(params,
db_security_groups,
'DBSecurityGroups.member')
if vpc_security_group_ids is not None:
self.build_list_params(params,
vpc_security_group_ids,
'VpcSecurityGroupIds.member')
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if db_parameter_group_name is not None:
params['DBParameterGroupName'] = db_parameter_group_name
if backup_retention_period is not None:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window is not None:
params['PreferredBackupWindow'] = preferred_backup_window
if port is not None:
params['Port'] = port
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if engine_version is not None:
params['EngineVersion'] = engine_version
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if license_model is not None:
params['LicenseModel'] = license_model
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if character_set_name is not None:
params['CharacterSetName'] = character_set_name
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBInstance',
verb='POST',
path='/', params=params)
def create_db_instance_read_replica(self, db_instance_identifier,
source_db_instance_identifier,
db_instance_class=None,
availability_zone=None, port=None,
auto_minor_version_upgrade=None,
iops=None, option_group_name=None,
publicly_accessible=None, tags=None):
"""
Creates a DB instance that acts as a read replica of a source
DB instance.
All read replica DB instances are created as Single-AZ
deployments with backups disabled. All other DB instance
attributes (including DB security groups and DB parameter
groups) are inherited from the source DB instance, except as
specified below.
The source DB instance must have backup retention enabled.
:type db_instance_identifier: string
:param db_instance_identifier: The DB instance identifier of the read
replica. This is the unique key that identifies a DB instance. This
parameter is stored as a lowercase string.
:type source_db_instance_identifier: string
:param source_db_instance_identifier: The identifier of the DB instance
that will act as the source for the read replica. Each DB instance
can have up to five read replicas.
Constraints: Must be the identifier of an existing DB instance that is
not already a read replica DB instance.
:type db_instance_class: string
:param db_instance_class: The compute and memory capacity of the read
replica.
Valid Values: `db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge
| db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge`
Default: Inherits from the source DB instance.
:type availability_zone: string
:param availability_zone: The Amazon EC2 Availability Zone that the
read replica will be created in.
Default: A random, system-chosen Availability Zone in the endpoint's
region.
Example: `us-east-1d`
:type port: integer
:param port: The port number that the DB instance uses for connections.
Default: Inherits from the source DB instance
Valid Values: `1150-65535`
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor engine upgrades
will be applied automatically to the read replica during the
maintenance window.
Default: Inherits from the source DB instance
:type iops: integer
:param iops: The amount of Provisioned IOPS (input/output operations
per second) to be initially allocated for the DB instance.
:type option_group_name: string
:param option_group_name: The option group the DB instance will be
associated with. If omitted, the default option group for the
engine specified will be used.
:type publicly_accessible: boolean
:param publicly_accessible: Specifies the accessibility options for the
DB instance. A value of true specifies an Internet-facing instance
with a publicly resolvable DNS name, which resolves to a public IP
address. A value of false specifies an internal instance with a DNS
name that resolves to a private IP address.
Default: The default behavior varies depending on whether a VPC has
been requested or not. The following list shows the default
behavior in each case.
+ **Default VPC:**true
+ **VPC:**false
If no DB subnet group has been specified as part of the request and the
PubliclyAccessible value has not been set, the DB instance will be
publicly accessible. If a specific DB subnet group has been
specified as part of the request and the PubliclyAccessible value
has not been set, the DB instance will be private.
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
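Example (illustrative values; ``conn`` is an assumed RDS connection
object, as in the `create_db_instance` example)::

    conn.create_db_instance_read_replica(
        db_instance_identifier='mydbinstance-replica',
        source_db_instance_identifier='mydbinstance')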
"""
params = {
'DBInstanceIdentifier': db_instance_identifier,
'SourceDBInstanceIdentifier': source_db_instance_identifier,
}
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if port is not None:
params['Port'] = port
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBInstanceReadReplica',
verb='POST',
path='/', params=params)
def create_db_parameter_group(self, db_parameter_group_name,
db_parameter_group_family, description,
tags=None):
"""
Creates a new DB parameter group.
A DB parameter group is initially created with the default
parameters for the database engine used by the DB instance. To
provide custom values for any of the parameters, you must
modify the group after creating it using
ModifyDBParameterGroup . Once you've created a DB parameter
group, you need to associate it with your DB instance using
ModifyDBInstance . When you associate a new DB parameter group
with a running DB instance, you need to reboot the DB Instance
for the new DB parameter group and associated settings to take
effect.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
This value is stored as a lower-case string.
:type db_parameter_group_family: string
:param db_parameter_group_family: The DB parameter group family name. A
DB parameter group can be associated with one and only one DB
parameter group family, and can be applied only to a DB instance
running a database engine and engine version compatible with that
DB parameter group family.
:type description: string
:param description: The description for the DB parameter group.
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
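Example (illustrative values; ``conn`` is an assumed RDS connection
object, and the family name is an assumed value for a MySQL 5.5
engine)::

    conn.create_db_parameter_group(
        db_parameter_group_name='myparamgroup',
        db_parameter_group_family='mysql5.5',
        description='My custom parameter group')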
"""
params = {
'DBParameterGroupName': db_parameter_group_name,
'DBParameterGroupFamily': db_parameter_group_family,
'Description': description,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBParameterGroup',
verb='POST',
path='/', params=params)
def create_db_security_group(self, db_security_group_name,
db_security_group_description, tags=None):
"""
Creates a new DB security group. DB security groups control
access to a DB instance.
:type db_security_group_name: string
:param db_security_group_name: The name for the DB security group. This
value is stored as a lowercase string.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
+ Must not be "Default"
+ May not contain spaces
Example: `mysecuritygroup`
:type db_security_group_description: string
:param db_security_group_description: The description for the DB
security group.
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
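Example (illustrative values; ``conn`` is an assumed RDS connection
object)::

    conn.create_db_security_group(
        db_security_group_name='mysecuritygroup',
        db_security_group_description='My DB security group')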
"""
params = {
'DBSecurityGroupName': db_security_group_name,
'DBSecurityGroupDescription': db_security_group_description,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBSecurityGroup',
verb='POST',
path='/', params=params)
def create_db_snapshot(self, db_snapshot_identifier,
db_instance_identifier, tags=None):
"""
Creates a DBSnapshot. The source DBInstance must be in
"available" state.
:type db_snapshot_identifier: string
:param db_snapshot_identifier: The identifier for the DB snapshot.
Constraints:
+ Cannot be null, empty, or blank
+ Must contain from 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `my-snapshot-id`
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier. This is the unique key that identifies a DB
instance. This parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
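Example (illustrative values; ``conn`` is an assumed RDS connection
object)::

    conn.create_db_snapshot(
        db_snapshot_identifier='my-snapshot-id',
        db_instance_identifier='mydbinstance')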
"""
params = {
'DBSnapshotIdentifier': db_snapshot_identifier,
'DBInstanceIdentifier': db_instance_identifier,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBSnapshot',
verb='POST',
path='/', params=params)
def create_db_subnet_group(self, db_subnet_group_name,
db_subnet_group_description, subnet_ids,
tags=None):
"""
Creates a new DB subnet group. DB subnet groups must contain
at least one subnet in at least two AZs in the region.
:type db_subnet_group_name: string
:param db_subnet_group_name: The name for the DB subnet group. This
value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters or
hyphens. Must not be "Default".
Example: `mySubnetgroup`
:type db_subnet_group_description: string
:param db_subnet_group_description: The description for the DB subnet
group.
:type subnet_ids: list
:param subnet_ids: The EC2 Subnet IDs for the DB subnet group.
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
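Example (illustrative values; ``conn`` is an assumed RDS connection
object, and the subnet IDs are placeholders for subnets in at least
two Availability Zones)::

    conn.create_db_subnet_group(
        db_subnet_group_name='mysubnetgroup',
        db_subnet_group_description='Subnets for my DB instance',
        subnet_ids=['subnet-11111111', 'subnet-22222222'])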
"""
params = {
'DBSubnetGroupName': db_subnet_group_name,
'DBSubnetGroupDescription': db_subnet_group_description,
}
self.build_list_params(params,
subnet_ids,
'SubnetIds.member')
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBSubnetGroup',
verb='POST',
path='/', params=params)
def create_event_subscription(self, subscription_name, sns_topic_arn,
source_type=None, event_categories=None,
source_ids=None, enabled=None, tags=None):
"""
Creates an RDS event notification subscription. This action
requires a topic ARN (Amazon Resource Name) created by either
the RDS console, the SNS console, or the SNS API. To obtain an
ARN with SNS, you must create a topic in Amazon SNS and
subscribe to the topic. The ARN is displayed in the SNS
console.
You can specify the type of source (SourceType) you want to be
notified of, provide a list of RDS sources (SourceIds) that
triggers the events, and provide a list of event categories
(EventCategories) for events you want to be notified of. For
example, you can specify SourceType = db-instance, SourceIds =
mydbinstance1, mydbinstance2 and EventCategories =
Availability, Backup.
If you specify both the SourceType and SourceIds, such as
SourceType = db-instance and SourceIdentifier = myDBInstance1,
you will be notified of all the db-instance events for the
specified source. If you specify a SourceType but do not
specify a SourceIdentifier, you will receive notice of the
events for that source type for all your RDS sources. If you
do not specify either the SourceType or the SourceIdentifier,
you will be notified of events generated from all RDS sources
belonging to your customer account.
:type subscription_name: string
:param subscription_name: The name of the subscription.
Constraints: The name must be less than 255 characters.
:type sns_topic_arn: string
:param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic
created for event notification. The ARN is created by Amazon SNS
when you create a topic and subscribe to it.
:type source_type: string
:param source_type: The type of source that will be generating the
events. For example, if you want to be notified of events generated
by a DB instance, you would set this parameter to db-instance. If
this value is not specified, all events are returned.
Valid values: db-instance | db-parameter-group | db-security-group |
db-snapshot
:type event_categories: list
:param event_categories: A list of event categories for a SourceType
that you want to subscribe to. You can see a list of the categories
for a given SourceType in the `Events`_ topic in the Amazon RDS
User Guide or by using the **DescribeEventCategories** action.
:type source_ids: list
:param source_ids:
The list of identifiers of the event sources for which events will be
returned. If not specified, then all sources are included in the
response. An identifier must begin with a letter and must contain
only ASCII letters, digits, and hyphens; it cannot end with a
hyphen or contain two consecutive hyphens.
Constraints:
+ If SourceIds are supplied, SourceType must also be provided.
+ If the source type is a DB instance, then a `DBInstanceIdentifier`
must be supplied.
+ If the source type is a DB security group, a `DBSecurityGroupName`
must be supplied.
+ If the source type is a DB parameter group, a `DBParameterGroupName`
must be supplied.
+ If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be
supplied.
:type enabled: boolean
:param enabled: A Boolean value; set to **true** to activate the
subscription, set to **false** to create the subscription but not
activate it.
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
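Example (illustrative values; ``conn`` is an assumed RDS connection
object, and the topic ARN and category names are placeholders)::

    conn.create_event_subscription(
        subscription_name='my-subscription',
        sns_topic_arn='arn:aws:sns:us-east-1:123456789012:my-topic',
        source_type='db-instance',
        event_categories=['availability', 'backup'],
        source_ids=['mydbinstance'],
        enabled=True)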
"""
params = {
'SubscriptionName': subscription_name,
'SnsTopicArn': sns_topic_arn,
}
if source_type is not None:
params['SourceType'] = source_type
if event_categories is not None:
self.build_list_params(params,
event_categories,
'EventCategories.member')
if source_ids is not None:
self.build_list_params(params,
source_ids,
'SourceIds.member')
if enabled is not None:
params['Enabled'] = str(
enabled).lower()
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateEventSubscription',
verb='POST',
path='/', params=params)
def create_option_group(self, option_group_name, engine_name,
major_engine_version, option_group_description,
tags=None):
"""
Creates a new option group. You can create up to 20 option
groups.
:type option_group_name: string
:param option_group_name: Specifies the name of the option group to be
created.
Constraints:
+ Must be 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `myoptiongroup`
:type engine_name: string
:param engine_name: Specifies the name of the engine that this option
group should be associated with.
:type major_engine_version: string
:param major_engine_version: Specifies the major version of the engine
that this option group should be associated with.
:type option_group_description: string
:param option_group_description: The description of the option group.
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
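Example (illustrative values; ``conn`` is an assumed RDS connection
object, and the engine name and major version are placeholders)::

    conn.create_option_group(
        option_group_name='myoptiongroup',
        engine_name='oracle-ee',
        major_engine_version='11.2',
        option_group_description='Options for my Oracle instances')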
"""
params = {
'OptionGroupName': option_group_name,
'EngineName': engine_name,
'MajorEngineVersion': major_engine_version,
'OptionGroupDescription': option_group_description,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateOptionGroup',
verb='POST',
path='/', params=params)
def delete_db_instance(self, db_instance_identifier,
skip_final_snapshot=None,
final_db_snapshot_identifier=None):
"""
The DeleteDBInstance action deletes a previously provisioned
DB instance. A successful response from the web service
indicates the request was received correctly. When you delete
a DB instance, all automated backups for that instance are
deleted and cannot be recovered. Manual DB snapshots of the DB
instance to be deleted are not deleted.
If a final DB snapshot is requested, the status of the RDS
instance will be "deleting" until the DB snapshot is created.
The API action `DescribeDBInstances` can be used to monitor the
status of this operation. The action cannot be canceled or
reverted once submitted.
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier for the DB instance to be deleted. This
parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type skip_final_snapshot: boolean
:param skip_final_snapshot: Determines whether a final DB snapshot is
created before the DB instance is deleted. If `True` is specified,
no DB snapshot is created. If `False` is specified, a DB snapshot is
created before the DB instance is deleted.
The FinalDBSnapshotIdentifier parameter must be specified if
SkipFinalSnapshot is `False`.
Default: `False`
:type final_db_snapshot_identifier: string
:param final_db_snapshot_identifier:
The DBSnapshotIdentifier of the new DBSnapshot created when
SkipFinalSnapshot is set to `False`.
Specifying this parameter and also setting the SkipFinalSnapshot
parameter to `True` results in an error.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
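Example (illustrative values; ``conn`` is an assumed RDS connection
object; a final snapshot is taken here because SkipFinalSnapshot is
`False`)::

    conn.delete_db_instance(
        db_instance_identifier='mydbinstance',
        skip_final_snapshot=False,
        final_db_snapshot_identifier='mydbinstance-final')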
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if skip_final_snapshot is not None:
params['SkipFinalSnapshot'] = str(
skip_final_snapshot).lower()
if final_db_snapshot_identifier is not None:
params['FinalDBSnapshotIdentifier'] = final_db_snapshot_identifier
return self._make_request(
action='DeleteDBInstance',
verb='POST',
path='/', params=params)
def delete_db_parameter_group(self, db_parameter_group_name):
"""
Deletes a specified DBParameterGroup. The DB parameter group
to be deleted cannot be associated with any DB instances.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group.
Constraints:
+ Must be the name of an existing DB parameter group
+ You cannot delete a default DB parameter group
+ Cannot be associated with any DB instances
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
return self._make_request(
action='DeleteDBParameterGroup',
verb='POST',
path='/', params=params)
def delete_db_security_group(self, db_security_group_name):
"""
Deletes a DB security group.
The specified DB security group must not be associated with
any DB instances.
:type db_security_group_name: string
:param db_security_group_name:
The name of the DB security group to delete.
You cannot delete the default DB security group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
+ Must not be "Default"
+ May not contain spaces
"""
params = {'DBSecurityGroupName': db_security_group_name, }
return self._make_request(
action='DeleteDBSecurityGroup',
verb='POST',
path='/', params=params)
def delete_db_snapshot(self, db_snapshot_identifier):
"""
Deletes a DBSnapshot.
The DBSnapshot must be in the `available` state to be deleted.
:type db_snapshot_identifier: string
:param db_snapshot_identifier: The DBSnapshot identifier.
Constraints: Must be the name of an existing DB snapshot in the
`available` state.
"""
params = {'DBSnapshotIdentifier': db_snapshot_identifier, }
return self._make_request(
action='DeleteDBSnapshot',
verb='POST',
path='/', params=params)
def delete_db_subnet_group(self, db_subnet_group_name):
"""
Deletes a DB subnet group.
The specified database subnet group must not be associated
with any DB instances.
:type db_subnet_group_name: string
:param db_subnet_group_name:
The name of the database subnet group to delete.
You cannot delete the default subnet group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
"""
params = {'DBSubnetGroupName': db_subnet_group_name, }
return self._make_request(
action='DeleteDBSubnetGroup',
verb='POST',
path='/', params=params)
def delete_event_subscription(self, subscription_name):
"""
Deletes an RDS event notification subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to delete.
"""
params = {'SubscriptionName': subscription_name, }
return self._make_request(
action='DeleteEventSubscription',
verb='POST',
path='/', params=params)
def delete_option_group(self, option_group_name):
"""
Deletes an existing option group.
:type option_group_name: string
:param option_group_name:
The name of the option group to be deleted.
You cannot delete default option groups.
"""
params = {'OptionGroupName': option_group_name, }
return self._make_request(
action='DeleteOptionGroup',
verb='POST',
path='/', params=params)
def describe_db_engine_versions(self, engine=None, engine_version=None,
db_parameter_group_family=None,
max_records=None, marker=None,
default_only=None,
list_supported_character_sets=None):
"""
Returns a list of the available DB engines.
:type engine: string
:param engine: The database engine to return.
:type engine_version: string
:param engine_version: The database engine version to return.
Example: `5.1.49`
:type db_parameter_group_family: string
:param db_parameter_group_family:
The name of a specific DB parameter group family to return details for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more than the `MaxRecords` value is available, a
pagination token called a marker is included in the response so
that the following results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
:type default_only: boolean
:param default_only: Indicates that only the default version of the
specified engine or engine and major version combination is
returned.
:type list_supported_character_sets: boolean
:param list_supported_character_sets: If this parameter is specified,
and if the requested engine supports the CharacterSetName parameter
for CreateDBInstance, the response includes a list of supported
character sets for each engine version.
"""
params = {}
if engine is not None:
params['Engine'] = engine
if engine_version is not None:
params['EngineVersion'] = engine_version
if db_parameter_group_family is not None:
params['DBParameterGroupFamily'] = db_parameter_group_family
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
if default_only is not None:
params['DefaultOnly'] = str(
default_only).lower()
if list_supported_character_sets is not None:
params['ListSupportedCharacterSets'] = str(
list_supported_character_sets).lower()
return self._make_request(
action='DescribeDBEngineVersions',
verb='POST',
path='/', params=params)
def describe_db_instances(self, db_instance_identifier=None,
filters=None, max_records=None, marker=None):
"""
Returns information about provisioned RDS instances. This API
supports pagination.
:type db_instance_identifier: string
:param db_instance_identifier:
The user-supplied instance identifier. If this parameter is specified,
information from only the specific DB instance is returned. This
parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeDBInstances request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords` .
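Example (illustrative values; ``conn`` is an assumed RDS connection
object; pagination, if needed, is driven by `max_records` and the
marker returned in the previous response)::

    conn.describe_db_instances(
        db_instance_identifier='mydbinstance')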
"""
params = {}
if db_instance_identifier is not None:
params['DBInstanceIdentifier'] = db_instance_identifier
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBInstances',
verb='POST',
path='/', params=params)
def describe_db_log_files(self, db_instance_identifier,
filename_contains=None, file_last_written=None,
file_size=None, max_records=None, marker=None):
"""
Returns a list of DB log files for the DB instance.
:type db_instance_identifier: string
:param db_instance_identifier:
The customer-assigned name of the DB instance that contains the log
files you want to list.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type filename_contains: string
:param filename_contains: Filters the available log files for log file
names that contain the specified string.
:type file_last_written: long
:param file_last_written: Filters the available log files for files
written since the specified date, in POSIX timestamp format.
:type file_size: long
:param file_size: Filters the available log files for files larger than
the specified size.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified MaxRecords
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
:type marker: string
:param marker: The pagination token provided in the previous request.
If this parameter is specified, the response includes only records
beyond the marker, up to MaxRecords.
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if filename_contains is not None:
params['FilenameContains'] = filename_contains
if file_last_written is not None:
params['FileLastWritten'] = file_last_written
if file_size is not None:
params['FileSize'] = file_size
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBLogFiles',
verb='POST',
path='/', params=params)
def describe_db_parameter_groups(self, db_parameter_group_name=None,
filters=None, max_records=None,
marker=None):
"""
Returns a list of `DBParameterGroup` descriptions. If a
`DBParameterGroupName` is specified, the list will contain
only the description of the specified DB parameter group.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of a specific DB parameter group to return details for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeDBParameterGroups` request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords`.
"""
params = {}
if db_parameter_group_name is not None:
params['DBParameterGroupName'] = db_parameter_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBParameterGroups',
verb='POST',
path='/', params=params)
def describe_db_parameters(self, db_parameter_group_name, source=None,
max_records=None, marker=None):
"""
Returns the detailed parameter list for a particular DB
parameter group.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of a specific DB parameter group to return details for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type source: string
:param source: The parameter types to return.
Default: All parameter types returned
Valid Values: `user | system | engine-default`
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeDBParameters` request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
if source is not None:
params['Source'] = source
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBParameters',
verb='POST',
path='/', params=params)
def describe_db_security_groups(self, db_security_group_name=None,
filters=None, max_records=None,
marker=None):
"""
Returns a list of `DBSecurityGroup` descriptions. If a
`DBSecurityGroupName` is specified, the list will contain only
the descriptions of the specified DB security group.
:type db_security_group_name: string
:param db_security_group_name: The name of the DB security group to
return details for.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeDBSecurityGroups request. If this parameter is specified,
the response includes only records beyond the marker, up to the
value specified by `MaxRecords`.
"""
params = {}
if db_security_group_name is not None:
params['DBSecurityGroupName'] = db_security_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBSecurityGroups',
verb='POST',
path='/', params=params)
def describe_db_snapshots(self, db_instance_identifier=None,
db_snapshot_identifier=None,
snapshot_type=None, filters=None,
max_records=None, marker=None):
"""
Returns information about DB snapshots. This API supports
pagination.
:type db_instance_identifier: string
:param db_instance_identifier:
A DB instance identifier to retrieve the list of DB snapshots for.
Cannot be used in conjunction with `DBSnapshotIdentifier`. This
parameter is not case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type db_snapshot_identifier: string
:param db_snapshot_identifier:
A specific DB snapshot identifier to describe. Cannot be used in
conjunction with `DBInstanceIdentifier`. This value is stored as a
lowercase string.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
+ If this is the identifier of an automated snapshot, the
`SnapshotType` parameter must also be specified.
:type snapshot_type: string
:param snapshot_type: The type of snapshots that will be returned.
Values can be "automated" or "manual." If not specified, the
returned results will include all snapshot types.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeDBSnapshots` request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {}
if db_instance_identifier is not None:
params['DBInstanceIdentifier'] = db_instance_identifier
if db_snapshot_identifier is not None:
params['DBSnapshotIdentifier'] = db_snapshot_identifier
if snapshot_type is not None:
params['SnapshotType'] = snapshot_type
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBSnapshots',
verb='POST',
path='/', params=params)
def describe_db_subnet_groups(self, db_subnet_group_name=None,
filters=None, max_records=None,
marker=None):
"""
Returns a list of DBSubnetGroup descriptions. If a
DBSubnetGroupName is specified, the list will contain only the
descriptions of the specified DBSubnetGroup.
For an overview of CIDR ranges, go to the `Wikipedia
Tutorial`_.
:type db_subnet_group_name: string
:param db_subnet_group_name: The name of the DB subnet group to return
details for.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeDBSubnetGroups request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {}
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBSubnetGroups',
verb='POST',
path='/', params=params)
def describe_engine_default_parameters(self, db_parameter_group_family,
max_records=None, marker=None):
"""
Returns the default engine and system parameter information
for the specified database engine.
:type db_parameter_group_family: string
:param db_parameter_group_family: The name of the DB parameter group
family.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeEngineDefaultParameters` request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords`.
"""
params = {
'DBParameterGroupFamily': db_parameter_group_family,
}
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEngineDefaultParameters',
verb='POST',
path='/', params=params)
def describe_event_categories(self, source_type=None):
"""
Displays a list of categories for all event source types, or,
if specified, for a specified source type. You can see a list
of the event categories and source types in the `Events`_
topic in the Amazon RDS User Guide.
:type source_type: string
:param source_type: The type of source that will be generating the
events.
Valid values: db-instance | db-parameter-group | db-security-group |
db-snapshot
"""
params = {}
if source_type is not None:
params['SourceType'] = source_type
return self._make_request(
action='DescribeEventCategories',
verb='POST',
path='/', params=params)
def describe_event_subscriptions(self, subscription_name=None,
filters=None, max_records=None,
marker=None):
"""
Lists all the subscription descriptions for a customer
account. The description for a subscription includes
SubscriptionName, SNSTopicARN, CustomerID, SourceType,
SourceID, CreationTime, and Status.
If you specify a SubscriptionName, lists the description for
that subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to describe.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeEventSubscriptions request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords` .
"""
params = {}
if subscription_name is not None:
params['SubscriptionName'] = subscription_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEventSubscriptions',
verb='POST',
path='/', params=params)
def describe_events(self, source_identifier=None, source_type=None,
start_time=None, end_time=None, duration=None,
event_categories=None, max_records=None, marker=None):
"""
Returns events related to DB instances, DB security groups, DB
snapshots, and DB parameter groups for the past 14 days.
Events specific to a particular DB instance, DB security
group, database snapshot, or DB parameter group can be
obtained by providing the name as a parameter. By default, the
events of the past hour are returned.
:type source_identifier: string
:param source_identifier:
The identifier of the event source for which events will be returned.
If not specified, then all sources are included in the response.
Constraints:
+ If SourceIdentifier is supplied, SourceType must also be provided.
+ If the source type is `DBInstance`, then a `DBInstanceIdentifier`
must be supplied.
+ If the source type is `DBSecurityGroup`, a `DBSecurityGroupName` must
be supplied.
+ If the source type is `DBParameterGroup`, a `DBParameterGroupName`
must be supplied.
+ If the source type is `DBSnapshot`, a `DBSnapshotIdentifier` must be
supplied.
+ Cannot end with a hyphen or contain two consecutive hyphens.
:type source_type: string
:param source_type: The event source to retrieve events for. If no
value is specified, all events are returned.
:type start_time: timestamp
:param start_time: The beginning of the time interval to retrieve
events for, specified in ISO 8601 format. For more information
about ISO 8601, go to the `ISO8601 Wikipedia page.`_
Example: 2009-07-08T18:00Z
:type end_time: timestamp
:param end_time: The end of the time interval for which to retrieve
events, specified in ISO 8601 format. For more information about
ISO 8601, go to the `ISO8601 Wikipedia page.`_
Example: 2009-07-08T18:00Z
:type duration: integer
:param duration: The number of minutes to retrieve events for.
Default: 60
:type event_categories: list
:param event_categories: A list of event categories that trigger
notifications for an event notification subscription.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeEvents request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
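Example (illustrative values; ``conn`` is an assumed RDS connection
object; this requests the last two hours of events for one
instance)::

    conn.describe_events(
        source_identifier='mydbinstance',
        source_type='db-instance',
        duration=120)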
"""
params = {}
if source_identifier is not None:
params['SourceIdentifier'] = source_identifier
if source_type is not None:
params['SourceType'] = source_type
if start_time is not None:
params['StartTime'] = start_time
if end_time is not None:
params['EndTime'] = end_time
if duration is not None:
params['Duration'] = duration
if event_categories is not None:
self.build_list_params(params,
event_categories,
'EventCategories.member')
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEvents',
verb='POST',
path='/', params=params)
def describe_option_group_options(self, engine_name,
major_engine_version=None,
max_records=None, marker=None):
"""
Describes all available options.
:type engine_name: string
:param engine_name: A required parameter. Options available for the
given Engine name will be described.
:type major_engine_version: string
:param major_engine_version: If specified, filters the results to
include only options for the specified major engine version.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
"""
params = {'EngineName': engine_name, }
if major_engine_version is not None:
params['MajorEngineVersion'] = major_engine_version
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeOptionGroupOptions',
verb='POST',
path='/', params=params)
def describe_option_groups(self, option_group_name=None, filters=None,
marker=None, max_records=None,
engine_name=None, major_engine_version=None):
"""
Describes the available option groups.
:type option_group_name: string
:param option_group_name: The name of the option group to describe.
Cannot be supplied together with EngineName or MajorEngineVersion.
:type filters: list
:param filters:
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeOptionGroups request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type engine_name: string
:param engine_name: Filters the list of option groups to only include
groups associated with a specific database engine.
:type major_engine_version: string
:param major_engine_version: Filters the list of option groups to only
include groups associated with a specific database engine version.
If specified, then EngineName must also be specified.
"""
params = {}
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if marker is not None:
params['Marker'] = marker
if max_records is not None:
params['MaxRecords'] = max_records
if engine_name is not None:
params['EngineName'] = engine_name
if major_engine_version is not None:
params['MajorEngineVersion'] = major_engine_version
return self._make_request(
action='DescribeOptionGroups',
verb='POST',
path='/', params=params)
def describe_orderable_db_instance_options(self, engine,
engine_version=None,
db_instance_class=None,
license_model=None, vpc=None,
max_records=None, marker=None):
"""
Returns a list of orderable DB instance options for the
specified engine.
:type engine: string
:param engine: The name of the engine to retrieve DB instance options
for.
:type engine_version: string
:param engine_version: The engine version filter value. Specify this
parameter to show only the available offerings matching the
specified engine version.
:type db_instance_class: string
:param db_instance_class: The DB instance class filter value. Specify
this parameter to show only the available offerings matching the
specified DB instance class.
:type license_model: string
:param license_model: The license model filter value. Specify this
parameter to show only the available offerings matching the
specified license model.
:type vpc: boolean
:param vpc: The VPC filter value. Specify this parameter to show only
the available VPC or non-VPC offerings.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeOrderableDBInstanceOptions request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords` .
"""
params = {'Engine': engine, }
if engine_version is not None:
params['EngineVersion'] = engine_version
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if license_model is not None:
params['LicenseModel'] = license_model
if vpc is not None:
params['Vpc'] = str(
vpc).lower()
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeOrderableDBInstanceOptions',
verb='POST',
path='/', params=params)
def describe_reserved_db_instances(self, reserved_db_instance_id=None,
reserved_db_instances_offering_id=None,
db_instance_class=None, duration=None,
product_description=None,
offering_type=None, multi_az=None,
filters=None, max_records=None,
marker=None):
"""
Returns information about reserved DB instances for this
account, or about a specified reserved DB instance.
:type reserved_db_instance_id: string
:param reserved_db_instance_id: The reserved DB instance identifier
filter value. Specify this parameter to show only the reservation
that matches the specified reservation ID.
:type reserved_db_instances_offering_id: string
:param reserved_db_instances_offering_id: The offering identifier
filter value. Specify this parameter to show only purchased
reservations matching the specified offering identifier.
:type db_instance_class: string
:param db_instance_class: The DB instance class filter value. Specify
this parameter to show only those reservations matching the
specified DB instances class.
:type duration: string
:param duration: The duration filter value, specified in years or
seconds. Specify this parameter to show only reservations for this
duration.
Valid Values: `1 | 3 | 31536000 | 94608000`
:type product_description: string
:param product_description: The product description filter value.
Specify this parameter to show only those reservations matching the
specified product description.
:type offering_type: string
:param offering_type: The offering type filter value. Specify this
parameter to show only the available offerings matching the
specified offering type.
Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy
Utilization" `
:type multi_az: boolean
:param multi_az: The Multi-AZ filter value. Specify this parameter to
show only those reservations matching the specified Multi-AZ
parameter.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more than the `MaxRecords` value is available, a
pagination token called a marker is included in the response so
that the following results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
"""
params = {}
if reserved_db_instance_id is not None:
params['ReservedDBInstanceId'] = reserved_db_instance_id
if reserved_db_instances_offering_id is not None:
params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if duration is not None:
params['Duration'] = duration
if product_description is not None:
params['ProductDescription'] = product_description
if offering_type is not None:
params['OfferingType'] = offering_type
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedDBInstances',
verb='POST',
path='/', params=params)
def describe_reserved_db_instances_offerings(self,
reserved_db_instances_offering_id=None,
db_instance_class=None,
duration=None,
product_description=None,
offering_type=None,
multi_az=None,
max_records=None,
marker=None):
"""
Lists available reserved DB instance offerings.
:type reserved_db_instances_offering_id: string
:param reserved_db_instances_offering_id: The offering identifier
filter value. Specify this parameter to show only the available
offering that matches the specified reservation identifier.
Example: `438012d3-4052-4cc7-b2e3-8d3372e0e706`
:type db_instance_class: string
:param db_instance_class: The DB instance class filter value. Specify
this parameter to show only the available offerings matching the
specified DB instance class.
:type duration: string
:param duration: Duration filter value, specified in years or seconds.
Specify this parameter to show only reservations for this duration.
Valid Values: `1 | 3 | 31536000 | 94608000`
:type product_description: string
:param product_description: Product description filter value. Specify
this parameter to show only the available offerings matching the
specified product description.
:type offering_type: string
:param offering_type: The offering type filter value. Specify this
parameter to show only the available offerings matching the
specified offering type.
Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy
Utilization" `
:type multi_az: boolean
:param multi_az: The Multi-AZ filter value. Specify this parameter to
show only the available offerings matching the specified Multi-AZ
parameter.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more than the `MaxRecords` value is available, a
pagination token called a marker is included in the response so
that the following results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
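Example (illustrative sketch; `conn` is assumed to be a connection
returned by `boto.rds2.connect_to_region`)::
    offerings = conn.describe_reserved_db_instances_offerings(
        product_description='mysql',
        offering_type='Medium Utilization',
        duration='31536000')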
"""
params = {}
if reserved_db_instances_offering_id is not None:
params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if duration is not None:
params['Duration'] = duration
if product_description is not None:
params['ProductDescription'] = product_description
if offering_type is not None:
params['OfferingType'] = offering_type
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedDBInstancesOfferings',
verb='POST',
path='/', params=params)
def download_db_log_file_portion(self, db_instance_identifier,
log_file_name, marker=None,
number_of_lines=None):
"""
Downloads all or a portion of the specified log file.
:type db_instance_identifier: string
:param db_instance_identifier:
The customer-assigned name of the DB instance that contains the log
files you want to list.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type log_file_name: string
:param log_file_name: The name of the log file to be downloaded.
:type marker: string
:param marker: The pagination token provided in the previous request.
If this parameter is specified, the response includes only records
beyond the marker, up to MaxRecords.
:type number_of_lines: integer
:param number_of_lines: The number of lines of the log file to download.
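Example (illustrative sketch; the instance and log file names are
hypothetical, and `conn` is assumed to be an RDS connection)::
    portion = conn.download_db_log_file_portion(
        'mydbinstance', 'error/mysql-error.log', number_of_lines=100)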
"""
params = {
'DBInstanceIdentifier': db_instance_identifier,
'LogFileName': log_file_name,
}
if marker is not None:
params['Marker'] = marker
if number_of_lines is not None:
params['NumberOfLines'] = number_of_lines
return self._make_request(
action='DownloadDBLogFilePortion',
verb='POST',
path='/', params=params)
def list_tags_for_resource(self, resource_name):
"""
Lists all tags on an Amazon RDS resource.
For an overview on tagging an Amazon RDS resource, see
`Tagging Amazon RDS Resources`_.
:type resource_name: string
:param resource_name: The Amazon RDS resource with tags to be listed.
This value is an Amazon Resource Name (ARN). For information about
creating an ARN, see ` Constructing an RDS Amazon Resource Name
(ARN)`_.
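Example (illustrative sketch; the ARN below is a made-up example and
`conn` is assumed to be an RDS connection)::
    tags = conn.list_tags_for_resource(
        'arn:aws:rds:us-east-1:123456789012:db:mydbinstance')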
"""
params = {'ResourceName': resource_name, }
return self._make_request(
action='ListTagsForResource',
verb='POST',
path='/', params=params)
def modify_db_instance(self, db_instance_identifier,
allocated_storage=None, db_instance_class=None,
db_security_groups=None,
vpc_security_group_ids=None,
apply_immediately=None, master_user_password=None,
db_parameter_group_name=None,
backup_retention_period=None,
preferred_backup_window=None,
preferred_maintenance_window=None, multi_az=None,
engine_version=None,
allow_major_version_upgrade=None,
auto_minor_version_upgrade=None, iops=None,
option_group_name=None,
new_db_instance_identifier=None):
"""
Modify settings for a DB instance. You can change one or more
database configuration parameters by specifying these
parameters and the new values in the request.
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier. This value is stored as a lowercase string.
Constraints:
+ Must be the identifier for an existing DB instance
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type allocated_storage: integer
:param allocated_storage: The new storage capacity of the RDS instance.
Changing this parameter does not result in an outage and the change
is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request.
**MySQL**
Default: Uses existing setting
Valid Values: 5-1024
Constraints: Value supplied must be at least 10% greater than the
current value. Values that are not at least 10% greater than the
existing value are rounded up so that they are 10% greater than the
current value.
Type: Integer
**Oracle**
Default: Uses existing setting
Valid Values: 10-1024
Constraints: Value supplied must be at least 10% greater than the
current value. Values that are not at least 10% greater than the
existing value are rounded up so that they are 10% greater than the
current value.
**SQL Server**
Cannot be modified.
If you choose to migrate your DB instance from using standard storage
to using Provisioned IOPS, or from using Provisioned IOPS to using
standard storage, the process can take time. The duration of the
migration depends on several factors such as database load, storage
size, storage type (standard or Provisioned IOPS), amount of IOPS
provisioned (if any), and the number of prior scale storage
operations. Typical migration times are under 24 hours, but the
process can take up to several days in some cases. During the
migration, the DB instance will be available for use, but may
experience performance degradation. While the migration takes
place, nightly backups for the instance will be suspended. No other
Amazon RDS operations can take place for the instance, including
modifying the instance, rebooting the instance, deleting the
instance, creating a read replica for the instance, and creating a
DB snapshot of the instance.
:type db_instance_class: string
:param db_instance_class: The new compute and memory capacity of the DB
instance. To determine the instance classes that are available for
a particular DB engine, use the DescribeOrderableDBInstanceOptions
action.
Passing a value for this parameter causes an outage during the change
and is applied during the next maintenance window, unless the
`ApplyImmediately` parameter is specified as `True` for this
request.
Default: Uses existing setting
Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge`
:type db_security_groups: list
:param db_security_groups:
A list of DB security groups to authorize on this DB instance. Changing
this parameter does not result in an outage and the change is
asynchronously applied as soon as possible.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type vpc_security_group_ids: list
:param vpc_security_group_ids:
A list of EC2 VPC security groups to authorize on this DB instance.
This change is asynchronously applied as soon as possible.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type apply_immediately: boolean
:param apply_immediately: Specifies whether or not the modifications in
this request and any pending modifications are asynchronously
applied as soon as possible, regardless of the
`PreferredMaintenanceWindow` setting for the DB instance.
If this parameter is passed as `False`, changes to the DB instance are
applied on the next call to RebootDBInstance, the next maintenance
reboot, or the next failure reboot, whichever occurs first. See
each parameter to determine when a change is applied.
Default: `False`
:type master_user_password: string
:param master_user_password:
The new password for the DB instance master user. Can be any printable
ASCII character except "/", '"', or "@".
Changing this parameter does not result in an outage and the change is
asynchronously applied as soon as possible. Between the time of the
request and the completion of the request, the `MasterUserPassword`
element exists in the `PendingModifiedValues` element of the
operation response.
Default: Uses existing setting
Constraints: Must be 8 to 41 alphanumeric characters (MySQL), 8 to 30
alphanumeric characters (Oracle), or 8 to 128 alphanumeric
characters (SQL Server).
Amazon RDS API actions never return the password, so this action
provides a way to regain access to a master instance user if the
password is lost.
:type db_parameter_group_name: string
:param db_parameter_group_name: The name of the DB parameter group to
apply to this DB instance. Changing this parameter does not result
in an outage and the change is applied during the next maintenance
window unless the `ApplyImmediately` parameter is set to `True` for
this request.
Default: Uses existing setting
Constraints: The DB parameter group must be in the same DB parameter
group family as this DB instance.
:type backup_retention_period: integer
:param backup_retention_period:
The number of days to retain automated backups. Setting this parameter
to a positive number enables backups. Setting this parameter to 0
disables automated backups.
Changing this parameter can result in an outage if you change from 0 to
a non-zero value or from a non-zero value to 0. These changes are
applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request. If
you change the parameter from one non-zero value to another non-
zero value, the change is asynchronously applied as soon as
possible.
Default: Uses existing setting
Constraints:
+ Must be a value from 0 to 8
+ Cannot be set to 0 if the DB instance is a master instance with read
replicas or if the DB instance is a read replica
:type preferred_backup_window: string
:param preferred_backup_window:
The daily time range during which automated backups are created if
automated backups are enabled, as determined by the
`BackupRetentionPeriod`. Changing this parameter does not result in
an outage and the change is asynchronously applied as soon as
possible.
Constraints:
+ Must be in the format hh24:mi-hh24:mi
+ Times should be Universal Time Coordinated (UTC)
+ Must not conflict with the preferred maintenance window
+ Must be at least 30 minutes
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which system maintenance can occur, which may result in an
outage. Changing this parameter does not result in an outage,
except in the following situation, and the change is asynchronously
applied as soon as possible. If there are pending actions that
cause a reboot, and the maintenance window is changed to include
the current time, then changing this parameter will cause a reboot
of the DB instance. If moving this window to the current time,
there must be at least 30 minutes between the current time and end
of the window to ensure pending changes are applied.
Default: Uses existing setting
Format: ddd:hh24:mi-ddd:hh24:mi
Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
Constraints: Must be at least 30 minutes
:type multi_az: boolean
:param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
Changing this parameter does not result in an outage and the change
is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request.
Constraints: Cannot be specified if the DB instance is a read replica.
:type engine_version: string
:param engine_version: The version number of the database engine to
upgrade to. Changing this parameter results in an outage and the
change is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request.
For major version upgrades, if a non-default DB parameter group is
currently in use, a new DB parameter group in the DB parameter
group family for the new engine version must be specified. The new
DB parameter group can be the default for that DB parameter group
family.
Example: `5.1.42`
:type allow_major_version_upgrade: boolean
:param allow_major_version_upgrade: Indicates that major version
upgrades are allowed. Changing this parameter does not result in an
outage and the change is asynchronously applied as soon as
possible.
Constraints: This parameter must be set to true when specifying a value
for the EngineVersion parameter that is a different major version
than the DB instance's current version.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor version
upgrades will be applied automatically to the DB instance during
the maintenance window. Changing this parameter does not result in
an outage except in the following case and the change is
asynchronously applied as soon as possible. An outage will result
if this parameter is set to `True` during the maintenance window,
and a newer minor version is available, and RDS has enabled auto
patching for that engine version.
:type iops: integer
:param iops: The new Provisioned IOPS (I/O operations per second) value
for the RDS instance. Changing this parameter does not result in an
outage and the change is applied during the next maintenance window
unless the `ApplyImmediately` parameter is set to `True` for this
request.
Default: Uses existing setting
Constraints: Value supplied must be at least 10% greater than the
current value. Values that are not at least 10% greater than the
existing value are rounded up so that they are 10% greater than the
current value.
Type: Integer
If you choose to migrate your DB instance from using standard storage
to using Provisioned IOPS, or from using Provisioned IOPS to using
standard storage, the process can take time. The duration of the
migration depends on several factors such as database load, storage
size, storage type (standard or Provisioned IOPS), amount of IOPS
provisioned (if any), and the number of prior scale storage
operations. Typical migration times are under 24 hours, but the
process can take up to several days in some cases. During the
migration, the DB instance will be available for use, but may
experience performance degradation. While the migration takes
place, nightly backups for the instance will be suspended. No other
Amazon RDS operations can take place for the instance, including
modifying the instance, rebooting the instance, deleting the
instance, creating a read replica for the instance, and creating a
DB snapshot of the instance.
:type option_group_name: string
:param option_group_name: Indicates that the DB instance should be
associated with the specified option group. Changing this parameter
does not result in an outage except in the following case and the
change is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request. If
the parameter change results in an option group that enables OEM,
this change can cause a brief (sub-second) period during which new
connections are rejected but existing connections are not
interrupted.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance.
:type new_db_instance_identifier: string
:param new_db_instance_identifier:
The new DB instance identifier for the DB instance when renaming a DB
Instance. This value is stored as a lowercase string.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
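Example (illustrative sketch; the values are hypothetical and `conn`
is assumed to be an RDS connection)::
    conn.modify_db_instance(
        'mydbinstance',
        allocated_storage=20,
        backup_retention_period=7,
        apply_immediately=True)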
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if allocated_storage is not None:
params['AllocatedStorage'] = allocated_storage
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if db_security_groups is not None:
self.build_list_params(params,
db_security_groups,
'DBSecurityGroups.member')
if vpc_security_group_ids is not None:
self.build_list_params(params,
vpc_security_group_ids,
'VpcSecurityGroupIds.member')
if apply_immediately is not None:
params['ApplyImmediately'] = str(
apply_immediately).lower()
if master_user_password is not None:
params['MasterUserPassword'] = master_user_password
if db_parameter_group_name is not None:
params['DBParameterGroupName'] = db_parameter_group_name
if backup_retention_period is not None:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window is not None:
params['PreferredBackupWindow'] = preferred_backup_window
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if engine_version is not None:
params['EngineVersion'] = engine_version
if allow_major_version_upgrade is not None:
params['AllowMajorVersionUpgrade'] = str(
allow_major_version_upgrade).lower()
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if new_db_instance_identifier is not None:
params['NewDBInstanceIdentifier'] = new_db_instance_identifier
return self._make_request(
action='ModifyDBInstance',
verb='POST',
path='/', params=params)
def modify_db_parameter_group(self, db_parameter_group_name, parameters):
"""
Modifies the parameters of a DB parameter group. To modify
more than one parameter, submit a list of the following:
`ParameterName`, `ParameterValue`, and `ApplyMethod`. A
maximum of 20 parameters can be modified in a single request.
The `apply-immediate` method can be used only for dynamic
parameters; the `pending-reboot` method can be used with MySQL
and Oracle DB instances for either dynamic or static
parameters. For Microsoft SQL Server DB instances, the
`pending-reboot` method can be used only for static
parameters.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group.
Constraints:
+ Must be the name of an existing DB parameter group
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type parameters: list
:param parameters:
An array of parameter names, values, and the apply method for the
parameter update. At least one parameter name, value, and apply
method must be supplied; subsequent arguments are optional. A
maximum of 20 parameters may be modified in a single request.
Valid Values (for the application method): `immediate | pending-reboot`
You can use the immediate value with dynamic parameters only. You can
use the pending-reboot value for both dynamic and static
parameters, and changes are applied when DB instance reboots.
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
self.build_complex_list_params(
params, parameters,
'Parameters.member',
('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod'))
return self._make_request(
action='ModifyDBParameterGroup',
verb='POST',
path='/', params=params)
def modify_db_subnet_group(self, db_subnet_group_name, subnet_ids,
db_subnet_group_description=None):
"""
Modifies an existing DB subnet group. DB subnet groups must
contain at least one subnet in at least two AZs in the region.
:type db_subnet_group_name: string
:param db_subnet_group_name: The name for the DB subnet group. This
value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters or
hyphens. Must not be "Default".
Example: `mySubnetgroup`
:type db_subnet_group_description: string
:param db_subnet_group_description: The description for the DB subnet
group.
:type subnet_ids: list
:param subnet_ids: The EC2 subnet IDs for the DB subnet group.
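Example (illustrative sketch; the subnet IDs are hypothetical)::
    conn.modify_db_subnet_group(
        'mysubnetgroup',
        ['subnet-11111111', 'subnet-22222222'],
        db_subnet_group_description='updated subnet group')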
"""
params = {'DBSubnetGroupName': db_subnet_group_name, }
self.build_list_params(params,
subnet_ids,
'SubnetIds.member')
if db_subnet_group_description is not None:
params['DBSubnetGroupDescription'] = db_subnet_group_description
return self._make_request(
action='ModifyDBSubnetGroup',
verb='POST',
path='/', params=params)
def modify_event_subscription(self, subscription_name,
sns_topic_arn=None, source_type=None,
event_categories=None, enabled=None):
"""
Modifies an existing RDS event notification subscription. Note
that you cannot modify the source identifiers using this call;
to change source identifiers for a subscription, use the
AddSourceIdentifierToSubscription and
RemoveSourceIdentifierFromSubscription calls.
You can see a list of the event categories for a given
SourceType in the `Events`_ topic in the Amazon RDS User Guide
or by using the **DescribeEventCategories** action.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription.
:type sns_topic_arn: string
:param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic
created for event notification. The ARN is created by Amazon SNS
when you create a topic and subscribe to it.
:type source_type: string
:param source_type: The type of source that will be generating the
events. For example, if you want to be notified of events generated
by a DB instance, you would set this parameter to db-instance. If
this value is not specified, all events are returned.
Valid values: db-instance | db-parameter-group | db-security-group |
db-snapshot
:type event_categories: list
:param event_categories: A list of event categories for a SourceType
that you want to subscribe to. You can see a list of the categories
for a given SourceType in the `Events`_ topic in the Amazon RDS
User Guide or by using the **DescribeEventCategories** action.
:type enabled: boolean
:param enabled: A Boolean value; set to **true** to activate the
subscription.
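Example (illustrative sketch; the subscription name and event
categories are hypothetical)::
    conn.modify_event_subscription(
        'mysubscription',
        source_type='db-instance',
        event_categories=['backup', 'failover'],
        enabled=True)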
"""
params = {'SubscriptionName': subscription_name, }
if sns_topic_arn is not None:
params['SnsTopicArn'] = sns_topic_arn
if source_type is not None:
params['SourceType'] = source_type
if event_categories is not None:
self.build_list_params(params,
event_categories,
'EventCategories.member')
if enabled is not None:
params['Enabled'] = str(
enabled).lower()
return self._make_request(
action='ModifyEventSubscription',
verb='POST',
path='/', params=params)
def modify_option_group(self, option_group_name, options_to_include=None,
options_to_remove=None, apply_immediately=None):
"""
Modifies an existing option group.
:type option_group_name: string
:param option_group_name: The name of the option group to be modified.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance.
:type options_to_include: list
:param options_to_include: Options in this list are added to the option
group or, if already present, the specified configuration is used
to update the existing configuration.
:type options_to_remove: list
:param options_to_remove: Options in this list are removed from the
option group.
:type apply_immediately: boolean
:param apply_immediately: Indicates whether the changes should be
applied immediately, or during the next maintenance window for each
instance associated with the option group.
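Example (illustrative sketch showing removal of an option by name; the
option group name is hypothetical)::
    conn.modify_option_group(
        'myoptiongroup', options_to_remove=['OEM'],
        apply_immediately=True)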
"""
params = {'OptionGroupName': option_group_name, }
if options_to_include is not None:
self.build_complex_list_params(
params, options_to_include,
'OptionsToInclude.member',
('OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings'))
if options_to_remove is not None:
self.build_list_params(params,
options_to_remove,
'OptionsToRemove.member')
if apply_immediately is not None:
params['ApplyImmediately'] = str(
apply_immediately).lower()
return self._make_request(
action='ModifyOptionGroup',
verb='POST',
path='/', params=params)
def promote_read_replica(self, db_instance_identifier,
backup_retention_period=None,
preferred_backup_window=None):
"""
Promotes a read replica DB instance to a standalone DB
instance.
:type db_instance_identifier: string
:param db_instance_identifier: The DB instance identifier. This value
is stored as a lowercase string.
Constraints:
+ Must be the identifier for an existing read replica DB instance
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: mydbinstance
:type backup_retention_period: integer
:param backup_retention_period:
The number of days to retain automated backups. Setting this parameter
to a positive number enables backups. Setting this parameter to 0
disables automated backups.
Default: 1
Constraints:
+ Must be a value from 0 to 8
:type preferred_backup_window: string
:param preferred_backup_window: The daily time range during which
automated backups are created if automated backups are enabled,
using the `BackupRetentionPeriod` parameter.
Default: A 30-minute window selected at random from an 8-hour block of
time per region. See the Amazon RDS User Guide for the time blocks
for each region from which the default backup windows are assigned.
Constraints: Must be in the format `hh24:mi-hh24:mi`. Times should be
Universal Time Coordinated (UTC). Must not conflict with the
preferred maintenance window. Must be at least 30 minutes.
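Example (illustrative sketch; the replica identifier is hypothetical)::
    conn.promote_read_replica(
        'myreadreplica', backup_retention_period=7)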
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if backup_retention_period is not None:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window is not None:
params['PreferredBackupWindow'] = preferred_backup_window
return self._make_request(
action='PromoteReadReplica',
verb='POST',
path='/', params=params)
def purchase_reserved_db_instances_offering(self,
reserved_db_instances_offering_id,
reserved_db_instance_id=None,
db_instance_count=None,
tags=None):
"""
Purchases a reserved DB instance offering.
:type reserved_db_instances_offering_id: string
:param reserved_db_instances_offering_id: The ID of the Reserved DB
instance offering to purchase.
Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
:type reserved_db_instance_id: string
:param reserved_db_instance_id: Customer-specified identifier to track
this reservation.
Example: myreservationID
:type db_instance_count: integer
:param db_instance_count: The number of instances to reserve.
Default: `1`
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
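Example (illustrative sketch, reusing the offering ID shown above; the
tag values are hypothetical)::
    conn.purchase_reserved_db_instances_offering(
        '438012d3-4052-4cc7-b2e3-8d3372e0e706',
        db_instance_count=1,
        tags=[('env', 'prod')])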
"""
params = {
'ReservedDBInstancesOfferingId': reserved_db_instances_offering_id,
}
if reserved_db_instance_id is not None:
params['ReservedDBInstanceId'] = reserved_db_instance_id
if db_instance_count is not None:
params['DBInstanceCount'] = db_instance_count
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='PurchaseReservedDBInstancesOffering',
verb='POST',
path='/', params=params)
def reboot_db_instance(self, db_instance_identifier, force_failover=None):
"""
Rebooting a DB instance restarts the database engine service.
A reboot also applies any pending modifications to the
associated DB parameter group. Rebooting
a DB instance results in a momentary outage of the instance,
during which the DB instance status is set to rebooting. If
the RDS instance is configured for MultiAZ, it is possible
that the reboot will be conducted through a failover. An
Amazon RDS event is created when the reboot is completed.
If your DB instance is deployed in multiple Availability
Zones, you can force a failover from one AZ to the other
during the reboot. You might force a failover to test the
availability of your DB instance deployment or to restore
operations to the original AZ after a failover occurs.
The time required to reboot is a function of the specific
database engine's crash recovery process. To improve the
reboot time, we recommend that you reduce database activities
as much as possible during the reboot process to reduce
rollback activity for in-transit transactions.
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier. This parameter is stored as a lowercase
string.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type force_failover: boolean
:param force_failover: When `True`, the reboot will be conducted
through a MultiAZ failover.
Constraint: You cannot specify `True` if the instance is not configured
for MultiAZ.
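Example (illustrative sketch; assumes the instance is configured for
MultiAZ, as required for a forced failover)::
    conn.reboot_db_instance('mydbinstance', force_failover=True)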
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if force_failover is not None:
params['ForceFailover'] = str(
force_failover).lower()
return self._make_request(
action='RebootDBInstance',
verb='POST',
path='/', params=params)
def remove_source_identifier_from_subscription(self, subscription_name,
source_identifier):
"""
Removes a source identifier from an existing RDS event
notification subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to remove a source identifier from.
:type source_identifier: string
:param source_identifier: The source identifier to be removed from the
subscription, such as the **DB instance identifier** for a DB
instance or the name of a security group.
"""
params = {
'SubscriptionName': subscription_name,
'SourceIdentifier': source_identifier,
}
return self._make_request(
action='RemoveSourceIdentifierFromSubscription',
verb='POST',
path='/', params=params)
def remove_tags_from_resource(self, resource_name, tag_keys):
"""
Removes metadata tags from an Amazon RDS resource.
For an overview on tagging an Amazon RDS resource, see
`Tagging Amazon RDS Resources`_.
:type resource_name: string
:param resource_name: The Amazon RDS resource the tags will be removed
from. This value is an Amazon Resource Name (ARN). For information
about creating an ARN, see ` Constructing an RDS Amazon Resource
Name (ARN)`_.
:type tag_keys: list
:param tag_keys: A list of tag key names to be removed.
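Example (illustrative sketch; the ARN and tag key are hypothetical)::
    conn.remove_tags_from_resource(
        'arn:aws:rds:us-east-1:123456789012:db:mydbinstance',
        ['env'])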
"""
params = {'ResourceName': resource_name, }
self.build_list_params(params,
tag_keys,
'TagKeys.member')
return self._make_request(
action='RemoveTagsFromResource',
verb='POST',
path='/', params=params)
def reset_db_parameter_group(self, db_parameter_group_name,
reset_all_parameters=None, parameters=None):
"""
Modifies the parameters of a DB parameter group to the
engine/system default value. To reset specific parameters
submit a list of the following: `ParameterName` and
`ApplyMethod`. To reset the entire DB parameter group, specify
the `DBParameterGroup` name and `ResetAllParameters`
parameters. When resetting the entire group, dynamic
parameters are updated immediately and static parameters are
set to `pending-reboot` to take effect on the next DB instance
restart or `RebootDBInstance` request.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type reset_all_parameters: boolean
:param reset_all_parameters: Specifies whether (`True`) or not
(`False`) to reset all parameters in the DB parameter group to
default values.
Default: `True`
:type parameters: list
:param parameters: An array of parameter names, values, and the apply
method for the parameter update. At least one parameter name,
value, and apply method must be supplied; subsequent arguments are
optional. A maximum of 20 parameters may be modified in a single
request.
**MySQL**
Valid Values (for Apply method): `immediate` | `pending-reboot`
You can use the immediate value with dynamic parameters only. You can
use the `pending-reboot` value for both dynamic and static
parameters, and changes are applied when DB instance reboots.
**Oracle**
Valid Values (for Apply method): `pending-reboot`
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
if reset_all_parameters is not None:
params['ResetAllParameters'] = str(
reset_all_parameters).lower()
if parameters is not None:
self.build_complex_list_params(
params, parameters,
'Parameters.member',
('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod'))
return self._make_request(
action='ResetDBParameterGroup',
verb='POST',
path='/', params=params)
def restore_db_instance_from_db_snapshot(self, db_instance_identifier,
db_snapshot_identifier,
db_instance_class=None,
port=None,
availability_zone=None,
db_subnet_group_name=None,
multi_az=None,
publicly_accessible=None,
auto_minor_version_upgrade=None,
license_model=None,
db_name=None, engine=None,
iops=None,
option_group_name=None,
tags=None):
"""
Creates a new DB instance from a DB snapshot. The target
database is created from the source database restore point
with the same configuration as the original source database,
except that the new RDS instance is created with the default
security group.
:type db_instance_identifier: string
:param db_instance_identifier:
Name of the DB instance to create from the DB snapshot. This
parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type db_snapshot_identifier: string
:param db_snapshot_identifier: The identifier for the DB snapshot to
restore from.
Constraints:
+ Must contain from 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `my-snapshot-id`
:type db_instance_class: string
:param db_instance_class: The compute and memory capacity of the Amazon
RDS DB instance.
Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge`
:type port: integer
:param port: The port number on which the database accepts connections.
Default: The same port as the original DB instance
Constraints: Value must be `1150-65535`
:type availability_zone: string
:param availability_zone: The EC2 Availability Zone that the database
instance will be created in.
Default: A random, system-chosen Availability Zone.
Constraint: You cannot specify the AvailabilityZone parameter if the
MultiAZ parameter is set to `True`.
Example: `us-east-1a`
:type db_subnet_group_name: string
:param db_subnet_group_name: The DB subnet group name to use for the
new instance.
:type multi_az: boolean
:param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
Constraint: You cannot specify the AvailabilityZone parameter if the
MultiAZ parameter is set to `True`.
:type publicly_accessible: boolean
:param publicly_accessible: Specifies the accessibility options for the
DB instance. A value of true specifies an Internet-facing instance
with a publicly resolvable DNS name, which resolves to a public IP
address. A value of false specifies an internal instance with a DNS
name that resolves to a private IP address.
Default: The default behavior varies depending on whether a VPC has
been requested or not. The following list shows the default
behavior in each case.
+ **Default VPC:**true
+ **VPC:**false
If no DB subnet group has been specified as part of the request and the
PubliclyAccessible value has not been set, the DB instance will be
publicly accessible. If a specific DB subnet group has been
specified as part of the request and the PubliclyAccessible value
has not been set, the DB instance will be private.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor version
upgrades will be applied automatically to the DB instance during
the maintenance window.
:type license_model: string
:param license_model: License model information for the restored DB
instance.
Default: Same as source.
Valid values: `license-included` | `bring-your-own-license` | `general-
public-license`
:type db_name: string
:param db_name:
The database name for the restored DB instance.
This parameter doesn't apply to the MySQL engine.
:type engine: string
:param engine: The database engine to use for the new instance.
Default: The same as source
Constraint: Must be compatible with the engine of the source
Example: `oracle-ee`
:type iops: integer
:param iops: Specifies the amount of provisioned IOPS for the DB
instance, expressed in I/O operations per second. If this parameter
is not specified, the IOPS value will be taken from the backup. If
this parameter is set to 0, the new instance will be converted to a
non-PIOPS instance, which will take additional time, though your DB
instance will be available for connections before the conversion
starts.
Constraints: Must be an integer greater than 1000.
:type option_group_name: string
:param option_group_name: The name of the option group to be used for
the restored DB instance.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance.
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
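Example (illustrative sketch; the identifiers are hypothetical)::
    conn.restore_db_instance_from_db_snapshot(
        'mynewdbinstance', 'my-snapshot-id')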
"""
params = {
'DBInstanceIdentifier': db_instance_identifier,
'DBSnapshotIdentifier': db_snapshot_identifier,
}
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if port is not None:
params['Port'] = port
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if license_model is not None:
params['LicenseModel'] = license_model
if db_name is not None:
params['DBName'] = db_name
if engine is not None:
params['Engine'] = engine
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='RestoreDBInstanceFromDBSnapshot',
verb='POST',
path='/', params=params)
def restore_db_instance_to_point_in_time(self,
source_db_instance_identifier,
target_db_instance_identifier,
restore_time=None,
use_latest_restorable_time=None,
db_instance_class=None,
port=None,
availability_zone=None,
db_subnet_group_name=None,
multi_az=None,
publicly_accessible=None,
auto_minor_version_upgrade=None,
license_model=None,
db_name=None, engine=None,
iops=None,
option_group_name=None,
tags=None):
"""
Restores a DB instance to an arbitrary point-in-time. Users
can restore to any point in time before the
latestRestorableTime for up to backupRetentionPeriod days. The
target database is created from the source database with the
same configuration as the original database except that the DB
instance is created with the default DB security group.
:type source_db_instance_identifier: string
:param source_db_instance_identifier:
The identifier of the source DB instance from which to restore.
Constraints:
+ Must be the identifier of an existing database instance
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type target_db_instance_identifier: string
:param target_db_instance_identifier:
The name of the new database instance to be created.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type restore_time: timestamp
:param restore_time: The date and time to restore from.
Valid Values: Value must be a UTC time
Constraints:
+ Must be before the latest restorable time for the DB instance
+ Cannot be specified if UseLatestRestorableTime parameter is true
Example: `2009-09-07T23:45:00Z`
:type use_latest_restorable_time: boolean
:param use_latest_restorable_time: Specifies whether (`True`) or not
(`False`) the DB instance is restored from the latest backup time.
Default: `False`
Constraints: Cannot be specified if RestoreTime parameter is provided.
:type db_instance_class: string
:param db_instance_class: The compute and memory capacity of the Amazon
RDS DB instance.
Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge`
Default: The same DBInstanceClass as the original DB instance.
:type port: integer
:param port: The port number on which the database accepts connections.
Constraints: Value must be `1150-65535`
Default: The same port as the original DB instance.
:type availability_zone: string
:param availability_zone: The EC2 Availability Zone that the database
instance will be created in.
Default: A random, system-chosen Availability Zone.
Constraint: You cannot specify the AvailabilityZone parameter if the
MultiAZ parameter is set to true.
Example: `us-east-1a`
:type db_subnet_group_name: string
:param db_subnet_group_name: The DB subnet group name to use for the
new instance.
:type multi_az: boolean
:param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
Constraint: You cannot specify the AvailabilityZone parameter if the
MultiAZ parameter is set to `True`.
:type publicly_accessible: boolean
:param publicly_accessible: Specifies the accessibility options for the
DB instance. A value of true specifies an Internet-facing instance
with a publicly resolvable DNS name, which resolves to a public IP
address. A value of false specifies an internal instance with a DNS
name that resolves to a private IP address.
Default: The default behavior varies depending on whether a VPC has
been requested or not. The following list shows the default
behavior in each case.
+ **Default VPC:**true
+ **VPC:**false
If no DB subnet group has been specified as part of the request and the
PubliclyAccessible value has not been set, the DB instance will be
publicly accessible. If a specific DB subnet group has been
specified as part of the request and the PubliclyAccessible value
has not been set, the DB instance will be private.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor version
upgrades will be applied automatically to the DB instance during
the maintenance window.
:type license_model: string
:param license_model: License model information for the restored DB
instance.
Default: Same as source.
Valid values: `license-included` | `bring-your-own-license` | `general-
public-license`
:type db_name: string
:param db_name:
The database name for the restored DB instance.
This parameter is not used for the MySQL engine.
:type engine: string
:param engine: The database engine to use for the new instance.
Default: The same as source
Constraint: Must be compatible with the engine of the source
Example: `oracle-ee`
:type iops: integer
:param iops: The amount of Provisioned IOPS (input/output operations
per second) to be initially allocated for the DB instance.
Constraints: Must be an integer greater than 1000.
:type option_group_name: string
:param option_group_name: The name of the option group to be used for
the restored DB instance.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance.
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
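Example (illustrative sketch; the identifiers are hypothetical)::
    conn.restore_db_instance_to_point_in_time(
        'mydbinstance', 'mydbinstance-restored',
        use_latest_restorable_time=True)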
"""
params = {
'SourceDBInstanceIdentifier': source_db_instance_identifier,
'TargetDBInstanceIdentifier': target_db_instance_identifier,
}
if restore_time is not None:
params['RestoreTime'] = restore_time
if use_latest_restorable_time is not None:
params['UseLatestRestorableTime'] = str(
use_latest_restorable_time).lower()
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if port is not None:
params['Port'] = port
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if license_model is not None:
params['LicenseModel'] = license_model
if db_name is not None:
params['DBName'] = db_name
if engine is not None:
params['Engine'] = engine
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='RestoreDBInstanceToPointInTime',
verb='POST',
path='/', params=params)
def revoke_db_security_group_ingress(self, db_security_group_name,
cidrip=None,
ec2_security_group_name=None,
ec2_security_group_id=None,
ec2_security_group_owner_id=None):
"""
Revokes ingress from a DBSecurityGroup for previously
authorized IP ranges or EC2 or VPC Security Groups. Required
parameters for this API are one of CIDRIP, EC2SecurityGroupId
for VPC, or (EC2SecurityGroupOwnerId and either
EC2SecurityGroupName or EC2SecurityGroupId).
:type db_security_group_name: string
:param db_security_group_name: The name of the DB security group to
revoke ingress from.
:type cidrip: string
:param cidrip: The IP range to revoke access from. Must be a valid CIDR
range. If `CIDRIP` is specified, `EC2SecurityGroupName`,
`EC2SecurityGroupId` and `EC2SecurityGroupOwnerId` cannot be
provided.
:type ec2_security_group_name: string
:param ec2_security_group_name: The name of the EC2 security group to
revoke access from. For VPC DB security groups,
`EC2SecurityGroupId` must be provided. Otherwise,
EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or
`EC2SecurityGroupId` must be provided.
:type ec2_security_group_id: string
:param ec2_security_group_id: The id of the EC2 security group to
revoke access from. For VPC DB security groups,
`EC2SecurityGroupId` must be provided. Otherwise,
EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or
`EC2SecurityGroupId` must be provided.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: The AWS Account Number of the owner
of the EC2 security group specified in the `EC2SecurityGroupName`
parameter. The AWS Access Key ID is not an acceptable value. For
VPC DB security groups, `EC2SecurityGroupId` must be provided.
Otherwise, EC2SecurityGroupOwnerId and either
`EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
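Example (illustrative sketch; the group name and CIDR range are
hypothetical)::
    conn.revoke_db_security_group_ingress(
        'mysecuritygroup', cidrip='203.0.113.0/24')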
"""
params = {'DBSecurityGroupName': db_security_group_name, }
if cidrip is not None:
params['CIDRIP'] = cidrip
if ec2_security_group_name is not None:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_id is not None:
params['EC2SecurityGroupId'] = ec2_security_group_id
if ec2_security_group_owner_id is not None:
params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
return self._make_request(
action='RevokeDBSecurityGroupIngress',
verb='POST',
path='/', params=params)
def _make_request(self, action, verb, path, params):
params['ContentType'] = 'JSON'
response = self.make_request(action=action, verb=verb,
                             path=path, params=params)
body = response.read()
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
json_body = json.loads(body)
fault_name = json_body.get('Error', {}).get('Code', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
# --- end of boto/rds2/layer1.py ---
import os
import six
class ValidationException(Exception):
    """Raised when a parameter value fails conversion or validation."""
    def __init__(self, param, value):
        self.param = param
        self.value = value
        super(ValidationException, self).__init__(
            'Invalid value for parameter %s: %r'
            % (getattr(param, 'name', param), value))
class Converter(object):
@classmethod
def convert_string(cls, param, value):
# TODO: could do length validation, etc. here
if not isinstance(value, six.string_types):
raise ValueError
return value
@classmethod
def convert_integer(cls, param, value):
# TODO: could do range checking here
return int(value)
@classmethod
def convert_boolean(cls, param, value):
"""
For command line arguments, just the presence
of the option means True so just return True
"""
return True
@classmethod
def convert_file(cls, param, value):
if os.path.exists(value) and not os.path.isdir(value):
return value
raise ValueError
@classmethod
def convert_dir(cls, param, value):
if os.path.isdir(value):
return value
raise ValueError
@classmethod
def convert(cls, param, value):
try:
if hasattr(cls, 'convert_'+param.ptype):
mthd = getattr(cls, 'convert_'+param.ptype)
else:
mthd = cls.convert_string
return mthd(param, value)
except (ValueError, TypeError):
    raise ValidationException(param, value)
class Param(Converter):
def __init__(self, name=None, ptype='string', optional=True,
short_name=None, long_name=None, doc='',
metavar=None, cardinality=1, default=None,
choices=None, encoder=None, request_param=True):
self.name = name
self.ptype = ptype
self.optional = optional
self.short_name = short_name
self.long_name = long_name
self.doc = doc
self.metavar = metavar
self.cardinality = cardinality
self.default = default
self.choices = choices
self.encoder = encoder
self.request_param = request_param
@property
def optparse_long_name(self):
ln = None
if self.long_name:
ln = '--%s' % self.long_name
return ln
@property
def synopsis_long_name(self):
ln = None
if self.long_name:
ln = '--%s' % self.long_name
return ln
@property
def getopt_long_name(self):
ln = None
if self.long_name:
ln = '%s' % self.long_name
if self.ptype != 'boolean':
ln += '='
return ln
@property
def optparse_short_name(self):
sn = None
if self.short_name:
sn = '-%s' % self.short_name
return sn
@property
def synopsis_short_name(self):
sn = None
if self.short_name:
sn = '-%s' % self.short_name
return sn
@property
def getopt_short_name(self):
sn = None
if self.short_name:
sn = '%s' % self.short_name
if self.ptype != 'boolean':
sn += ':'
return sn
def convert(self, value):
"""
Convert a string value as received in the command line
tools and convert to the appropriate type of value.
Raise a ValidationException if the value can't be converted.
:type value: str
:param value: The value to convert. This should always
be a string.
"""
return super(Param, self).convert(self, value)
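# Example (illustrative sketch; the parameter definition below is
# hypothetical): a Param converts raw command line strings to typed
# values, raising ValidationException on bad input.
#
#     p = Param(name='count', ptype='integer', short_name='n')
#     p.convert('5')       # -> 5
#     p.convert('five')    # raises ValidationException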
# --- end of boto/roboto/param.py ---
from boto.s3 import user
from boto.s3 import key
from boto import handler
import xml.sax
class CompleteMultiPartUpload(object):
"""
Represents a completed MultiPart Upload. Contains the
following useful attributes:
* location - The URI of the completed upload
* bucket_name - The name of the bucket in which the upload
is contained
* key_name - The name of the new, completed key
* etag - The MD5 hash of the completed, combined upload
* version_id - The version_id of the completed upload
* encrypted - The value of the encryption header
"""
def __init__(self, bucket=None):
self.bucket = bucket
self.location = None
self.bucket_name = None
self.key_name = None
self.etag = None
self.version_id = None
self.encrypted = None
def __repr__(self):
return '<CompleteMultiPartUpload: %s.%s>' % (self.bucket_name,
self.key_name)
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Location':
self.location = value
elif name == 'Bucket':
self.bucket_name = value
elif name == 'Key':
self.key_name = value
elif name == 'ETag':
self.etag = value
else:
setattr(self, name, value)
class Part(object):
"""
Represents a single part in a MultiPart upload.
Attributes include:
* part_number - The integer part number
* last_modified - The last modified date of this part
* etag - The MD5 hash of this part
* size - The size, in bytes, of this part
"""
def __init__(self, bucket=None):
self.bucket = bucket
self.part_number = None
self.last_modified = None
self.etag = None
self.size = None
def __repr__(self):
if isinstance(self.part_number, int):
return '<Part %d>' % self.part_number
else:
return '<Part %s>' % None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'PartNumber':
self.part_number = int(value)
elif name == 'LastModified':
self.last_modified = value
elif name == 'ETag':
self.etag = value
elif name == 'Size':
self.size = int(value)
else:
setattr(self, name, value)
def part_lister(mpupload, part_number_marker=None):
"""
A generator function for listing parts of a multipart upload.
"""
more_results = True
part = None
while more_results:
parts = mpupload.get_all_parts(None, part_number_marker)
for part in parts:
yield part
part_number_marker = mpupload.next_part_number_marker
more_results = mpupload.is_truncated
class MultiPartUpload(object):
"""
Represents a MultiPart Upload operation.
"""
def __init__(self, bucket=None):
self.bucket = bucket
self.bucket_name = None
self.key_name = None
self.id = None
self.initiator = None
self.owner = None
self.storage_class = None
self.initiated = None
self.part_number_marker = None
self.next_part_number_marker = None
self.max_parts = None
self.is_truncated = False
self._parts = None
def __repr__(self):
return '<MultiPartUpload %s>' % self.key_name
def __iter__(self):
return part_lister(self)
def to_xml(self):
s = '<CompleteMultipartUpload>\n'
for part in self:
s += ' <Part>\n'
s += ' <PartNumber>%d</PartNumber>\n' % part.part_number
s += ' <ETag>%s</ETag>\n' % part.etag
s += ' </Part>\n'
s += '</CompleteMultipartUpload>'
return s
def startElement(self, name, attrs, connection):
if name == 'Initiator':
self.initiator = user.User(self)
return self.initiator
elif name == 'Owner':
self.owner = user.User(self)
return self.owner
elif name == 'Part':
part = Part(self.bucket)
self._parts.append(part)
return part
return None
def endElement(self, name, value, connection):
if name == 'Bucket':
self.bucket_name = value
elif name == 'Key':
self.key_name = value
elif name == 'UploadId':
self.id = value
elif name == 'StorageClass':
self.storage_class = value
elif name == 'PartNumberMarker':
self.part_number_marker = value
elif name == 'NextPartNumberMarker':
self.next_part_number_marker = value
elif name == 'MaxParts':
self.max_parts = int(value)
elif name == 'IsTruncated':
if value == 'true':
self.is_truncated = True
else:
self.is_truncated = False
elif name == 'Initiated':
self.initiated = value
else:
setattr(self, name, value)
def get_all_parts(self, max_parts=None, part_number_marker=None,
encoding_type=None):
"""
Return the uploaded parts of this MultiPart Upload. This is
a lower-level method that requires you to manually page through
results. To simplify this process, you can just use the
object itself as an iterator and it will automatically handle
all of the paging with S3.
"""
self._parts = []
query_args = 'uploadId=%s' % self.id
if max_parts:
query_args += '&max-parts=%d' % max_parts
if part_number_marker:
query_args += '&part-number-marker=%s' % part_number_marker
if encoding_type:
query_args += '&encoding-type=%s' % encoding_type
response = self.bucket.connection.make_request('GET', self.bucket.name,
self.key_name,
query_args=query_args)
body = response.read()
if response.status == 200:
h = handler.XmlHandler(self, self)
xml.sax.parseString(body, h)
return self._parts
def upload_part_from_file(self, fp, part_num, headers=None, replace=True,
cb=None, num_cb=10, md5=None, size=None):
"""
Upload another part of this MultiPart Upload.
.. note::
After you initiate multipart upload and upload one or more parts,
you must either complete or abort multipart upload in order to stop
getting charged for storage of the uploaded parts. Only after you
either complete or abort multipart upload, Amazon S3 frees up the
parts storage and stops charging you for the parts storage.
:type fp: file
:param fp: The file object you want to upload.
:type part_num: int
:param part_num: The number of this part.
The other parameters are exactly as defined for the
:class:`boto.s3.key.Key` set_contents_from_file method.
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: The uploaded part containing the etag.
"""
if part_num < 1:
raise ValueError('Part numbers must be greater than zero')
query_args = 'uploadId=%s&partNumber=%d' % (self.id, part_num)
key = self.bucket.new_key(self.key_name)
key.set_contents_from_file(fp, headers=headers, replace=replace,
cb=cb, num_cb=num_cb, md5=md5,
reduced_redundancy=False,
query_args=query_args, size=size)
return key
def copy_part_from_key(self, src_bucket_name, src_key_name, part_num,
start=None, end=None, src_version_id=None,
headers=None):
"""
Copy another part of this MultiPart Upload.
:type src_bucket_name: string
:param src_bucket_name: Name of the bucket containing the source key
:type src_key_name: string
:param src_key_name: Name of the source key
:type part_num: int
:param part_num: The number of this part.
:type start: int
:param start: Zero-based byte offset to start copying from
:type end: int
:param end: Zero-based byte offset to copy to
:type src_version_id: string
:param src_version_id: version_id of source object to copy from
:type headers: dict
:param headers: Any headers to pass along in the request
"""
if part_num < 1:
raise ValueError('Part numbers must be greater than zero')
query_args = 'uploadId=%s&partNumber=%d' % (self.id, part_num)
if start is not None and end is not None:
rng = 'bytes=%s-%s' % (start, end)
provider = self.bucket.connection.provider
if headers is None:
headers = {}
else:
headers = headers.copy()
headers[provider.copy_source_range_header] = rng
return self.bucket.copy_key(self.key_name, src_bucket_name,
src_key_name,
src_version_id=src_version_id,
storage_class=None,
headers=headers,
query_args=query_args)
def complete_upload(self):
"""
Complete the MultiPart Upload operation. This method should
be called when all parts of the file have been successfully
uploaded to S3.
:rtype: :class:`boto.s3.multipart.CompletedMultiPartUpload`
:returns: An object representing the completed upload.
"""
xml = self.to_xml()
return self.bucket.complete_multipart_upload(self.key_name,
self.id, xml)
def cancel_upload(self):
"""
Cancels a MultiPart Upload operation. The storage consumed by
any previously uploaded parts will be freed. However, if any
part uploads are currently in progress, those part uploads
might or might not succeed. As a result, it might be necessary
to abort a given multipart upload multiple times in order to
completely free all storage consumed by all parts.
"""
self.bucket.cancel_multipart_upload(self.key_name, self.id)
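# Illustrative usage sketch (editor's addition, not part of boto's public
# code): how the classes above fit together. Assumes valid AWS credentials;
# the bucket, key, and file names are hypothetical.
def _example_multipart_usage():
    import boto
    bucket = boto.connect_s3().get_bucket('example-bucket')
    mp = bucket.initiate_multipart_upload('big-object')
    with open('part1.dat', 'rb') as fp:
        mp.upload_part_from_file(fp, part_num=1)
    # Iterating the upload pages through get_all_parts() transparently.
    for part in mp:
        print('%s %s' % (part.part_number, part.etag))
    mp.complete_upload()  # or mp.cancel_upload() to free stored parts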
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/s3/multipart.py
| 0.68721 | 0.150247 |
multipart.py
|
|
from boto.compat import unquote_str
def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=None,
encoding_type=None):
"""
A generator function for listing keys in a bucket.
"""
more_results = True
k = None
while more_results:
rs = bucket.get_all_keys(prefix=prefix, marker=marker,
delimiter=delimiter, headers=headers,
encoding_type=encoding_type)
for k in rs:
yield k
if k:
marker = rs.next_marker or k.name
if marker and encoding_type == "url":
marker = unquote_str(marker)
        more_results = rs.is_truncated
class BucketListResultSet(object):
"""
A resultset for listing keys within a bucket. Uses the bucket_lister
generator function and implements the iterator interface. This
transparently handles the results paging from S3 so even if you have
many thousands of keys within the bucket you can iterate over all
keys in a reasonably efficient manner.
"""
def __init__(self, bucket=None, prefix='', delimiter='', marker='',
headers=None, encoding_type=None):
self.bucket = bucket
self.prefix = prefix
self.delimiter = delimiter
self.marker = marker
self.headers = headers
self.encoding_type = encoding_type
def __iter__(self):
return bucket_lister(self.bucket, prefix=self.prefix,
delimiter=self.delimiter, marker=self.marker,
headers=self.headers,
encoding_type=self.encoding_type)
def versioned_bucket_lister(bucket, prefix='', delimiter='',
key_marker='', version_id_marker='', headers=None,
encoding_type=None):
"""
A generator function for listing versions in a bucket.
"""
more_results = True
k = None
while more_results:
rs = bucket.get_all_versions(prefix=prefix, key_marker=key_marker,
version_id_marker=version_id_marker,
delimiter=delimiter, headers=headers,
max_keys=999, encoding_type=encoding_type)
for k in rs:
yield k
key_marker = rs.next_key_marker
if key_marker and encoding_type == "url":
key_marker = unquote_str(key_marker)
version_id_marker = rs.next_version_id_marker
        more_results = rs.is_truncated
class VersionedBucketListResultSet(object):
"""
A resultset for listing versions within a bucket. Uses the bucket_lister
generator function and implements the iterator interface. This
transparently handles the results paging from S3 so even if you have
many thousands of keys within the bucket you can iterate over all
keys in a reasonably efficient manner.
"""
def __init__(self, bucket=None, prefix='', delimiter='', key_marker='',
version_id_marker='', headers=None, encoding_type=None):
self.bucket = bucket
self.prefix = prefix
self.delimiter = delimiter
self.key_marker = key_marker
self.version_id_marker = version_id_marker
self.headers = headers
self.encoding_type = encoding_type
def __iter__(self):
return versioned_bucket_lister(self.bucket, prefix=self.prefix,
delimiter=self.delimiter,
key_marker=self.key_marker,
version_id_marker=self.version_id_marker,
headers=self.headers,
encoding_type=self.encoding_type)
def multipart_upload_lister(bucket, key_marker='',
upload_id_marker='',
headers=None, encoding_type=None):
"""
A generator function for listing multipart uploads in a bucket.
"""
more_results = True
k = None
while more_results:
rs = bucket.get_all_multipart_uploads(key_marker=key_marker,
upload_id_marker=upload_id_marker,
headers=headers,
encoding_type=encoding_type)
for k in rs:
yield k
key_marker = rs.next_key_marker
if key_marker and encoding_type == "url":
key_marker = unquote_str(key_marker)
upload_id_marker = rs.next_upload_id_marker
        more_results = rs.is_truncated
class MultiPartUploadListResultSet(object):
"""
A resultset for listing multipart uploads within a bucket.
Uses the multipart_upload_lister generator function and
implements the iterator interface. This
transparently handles the results paging from S3 so even if you have
many thousands of uploads within the bucket you can iterate over all
    of them in a reasonably efficient manner.
"""
def __init__(self, bucket=None, key_marker='',
upload_id_marker='', headers=None, encoding_type=None):
self.bucket = bucket
self.key_marker = key_marker
self.upload_id_marker = upload_id_marker
self.headers = headers
self.encoding_type = encoding_type
def __iter__(self):
return multipart_upload_lister(self.bucket,
key_marker=self.key_marker,
upload_id_marker=self.upload_id_marker,
headers=self.headers,
encoding_type=self.encoding_type)
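# Illustrative sketch (editor's addition): these result sets are what
# Bucket.list()/list_versions()/list_multipart_uploads() return, so callers
# just iterate and paging is handled transparently. The bucket name and
# prefix are hypothetical; assumes valid AWS credentials.
def _example_listing_usage():
    import boto
    bucket = boto.connect_s3().get_bucket('example-bucket')
    for key in BucketListResultSet(bucket, prefix='logs/', delimiter='/'):
        print(key.name)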
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/s3/bucketlistresultset.py
| 0.721449 | 0.167253 |
bucketlistresultset.py
|
|
class CORSRule(object):
"""
CORS rule for a bucket.
:ivar id: A unique identifier for the rule. The ID value can be
up to 255 characters long. The IDs help you find a rule in
the configuration.
:ivar allowed_methods: An HTTP method that you want to allow the
origin to execute. Each CORSRule must identify at least one
origin and one method. Valid values are:
GET|PUT|HEAD|POST|DELETE
:ivar allowed_origin: An origin that you want to allow cross-domain
requests from. This can contain at most one * wild character.
Each CORSRule must identify at least one origin and one method.
The origin value can include at most one '*' wild character.
For example, "http://*.example.com". You can also specify
only * as the origin value allowing all origins cross-domain access.
:ivar allowed_header: Specifies which headers are allowed in a
pre-flight OPTIONS request via the
Access-Control-Request-Headers header. Each header name
specified in the Access-Control-Request-Headers header must
have a corresponding entry in the rule. Amazon S3 will send
        only the requested headers that are allowed.
This can contain at most one * wild character.
:ivar max_age_seconds: The time in seconds that your browser is to
cache the preflight response for the specified resource.
:ivar expose_header: One or more headers in the response that you
want customers to be able to access from their applications
(for example, from a JavaScript XMLHttpRequest object). You
add one ExposeHeader element in the rule for each header.
"""
def __init__(self, allowed_method=None, allowed_origin=None,
id=None, allowed_header=None, max_age_seconds=None,
expose_header=None):
if allowed_method is None:
allowed_method = []
self.allowed_method = allowed_method
if allowed_origin is None:
allowed_origin = []
self.allowed_origin = allowed_origin
self.id = id
if allowed_header is None:
allowed_header = []
self.allowed_header = allowed_header
self.max_age_seconds = max_age_seconds
if expose_header is None:
expose_header = []
self.expose_header = expose_header
def __repr__(self):
return '<Rule: %s>' % self.id
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'ID':
self.id = value
elif name == 'AllowedMethod':
self.allowed_method.append(value)
elif name == 'AllowedOrigin':
self.allowed_origin.append(value)
elif name == 'AllowedHeader':
self.allowed_header.append(value)
elif name == 'MaxAgeSeconds':
self.max_age_seconds = int(value)
elif name == 'ExposeHeader':
self.expose_header.append(value)
else:
setattr(self, name, value)
def to_xml(self):
s = '<CORSRule>'
for allowed_method in self.allowed_method:
s += '<AllowedMethod>%s</AllowedMethod>' % allowed_method
for allowed_origin in self.allowed_origin:
s += '<AllowedOrigin>%s</AllowedOrigin>' % allowed_origin
for allowed_header in self.allowed_header:
s += '<AllowedHeader>%s</AllowedHeader>' % allowed_header
for expose_header in self.expose_header:
s += '<ExposeHeader>%s</ExposeHeader>' % expose_header
if self.max_age_seconds:
s += '<MaxAgeSeconds>%d</MaxAgeSeconds>' % self.max_age_seconds
if self.id:
s += '<ID>%s</ID>' % self.id
s += '</CORSRule>'
return s
class CORSConfiguration(list):
"""
A container for the rules associated with a CORS configuration.
"""
def startElement(self, name, attrs, connection):
if name == 'CORSRule':
rule = CORSRule()
self.append(rule)
return rule
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
def to_xml(self):
"""
Returns a string containing the XML version of the Lifecycle
configuration as defined by S3.
"""
s = '<CORSConfiguration>'
for rule in self:
s += rule.to_xml()
s += '</CORSConfiguration>'
return s
def add_rule(self, allowed_method, allowed_origin,
id=None, allowed_header=None, max_age_seconds=None,
expose_header=None):
"""
Add a rule to this CORS configuration. This only adds
the rule to the local copy. To install the new rule(s) on
the bucket, you need to pass this CORS config object
to the set_cors method of the Bucket object.
        :type allowed_method: list of str
        :param allowed_method: An HTTP method that you want to allow the
origin to execute. Each CORSRule must identify at least one
origin and one method. Valid values are:
GET|PUT|HEAD|POST|DELETE
:type allowed_origin: list of str
:param allowed_origin: An origin that you want to allow cross-domain
requests from. This can contain at most one * wild character.
Each CORSRule must identify at least one origin and one method.
The origin value can include at most one '*' wild character.
For example, "http://*.example.com". You can also specify
only * as the origin value allowing all origins
cross-domain access.
:type id: str
:param id: A unique identifier for the rule. The ID value can be
up to 255 characters long. The IDs help you find a rule in
the configuration.
:type allowed_header: list of str
:param allowed_header: Specifies which headers are allowed in a
pre-flight OPTIONS request via the
Access-Control-Request-Headers header. Each header name
specified in the Access-Control-Request-Headers header must
have a corresponding entry in the rule. Amazon S3 will send
            only the requested headers that are allowed.
This can contain at most one * wild character.
:type max_age_seconds: int
:param max_age_seconds: The time in seconds that your browser is to
cache the preflight response for the specified resource.
:type expose_header: list of str
:param expose_header: One or more headers in the response that you
want customers to be able to access from their applications
(for example, from a JavaScript XMLHttpRequest object). You
add one ExposeHeader element in the rule for each header.
"""
if not isinstance(allowed_method, (list, tuple)):
allowed_method = [allowed_method]
if not isinstance(allowed_origin, (list, tuple)):
allowed_origin = [allowed_origin]
        if not isinstance(allowed_header, (list, tuple)):
            if allowed_header is None:
                allowed_header = []
            else:
                allowed_header = [allowed_header]
if not isinstance(expose_header, (list, tuple)):
if expose_header is None:
expose_header = []
else:
expose_header = [expose_header]
rule = CORSRule(allowed_method, allowed_origin, id, allowed_header,
max_age_seconds, expose_header)
self.append(rule)
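# Illustrative sketch (editor's addition): build a CORS configuration
# locally, then install it with Bucket.set_cors(). The origin and bucket
# names are hypothetical; assumes valid AWS credentials.
def _example_cors_usage():
    import boto
    bucket = boto.connect_s3().get_bucket('example-bucket')
    cors_cfg = CORSConfiguration()
    cors_cfg.add_rule(['GET', 'HEAD'], 'http://*.example.com',
                      allowed_header='*', max_age_seconds=3000)
    bucket.set_cors(cors_cfg)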
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/s3/cors.py
| 0.826257 | 0.322859 |
cors.py
|
|
from boto.compat import six
class Rule(object):
"""
A Lifecycle rule for an S3 bucket.
:ivar id: Unique identifier for the rule. The value cannot be longer
than 255 characters. This value is optional. The server will
generate a unique value for the rule if no value is provided.
:ivar prefix: Prefix identifying one or more objects to which the
rule applies. If prefix is not provided, Boto generates a default
prefix which will match all objects.
:ivar status: If 'Enabled', the rule is currently being applied.
If 'Disabled', the rule is not currently being applied.
:ivar expiration: An instance of `Expiration`. This indicates
the lifetime of the objects that are subject to the rule.
:ivar transition: An instance of `Transition`. This indicates
when to transition to a different storage class.
"""
def __init__(self, id=None, prefix=None, status=None, expiration=None,
transition=None):
self.id = id
self.prefix = '' if prefix is None else prefix
self.status = status
if isinstance(expiration, six.integer_types):
            # retain backwards compatibility: a bare int means days until expiration
self.expiration = Expiration(days=expiration)
else:
# None or object
self.expiration = expiration
# retain backwards compatibility
if isinstance(transition, Transition):
self.transition = Transitions()
self.transition.append(transition)
elif transition:
self.transition = transition
else:
self.transition = Transitions()
def __repr__(self):
return '<Rule: %s>' % self.id
def startElement(self, name, attrs, connection):
if name == 'Transition':
return self.transition
elif name == 'Expiration':
self.expiration = Expiration()
return self.expiration
return None
def endElement(self, name, value, connection):
if name == 'ID':
self.id = value
elif name == 'Prefix':
self.prefix = value
elif name == 'Status':
self.status = value
else:
setattr(self, name, value)
def to_xml(self):
s = '<Rule>'
if self.id is not None:
s += '<ID>%s</ID>' % self.id
s += '<Prefix>%s</Prefix>' % self.prefix
s += '<Status>%s</Status>' % self.status
if self.expiration is not None:
s += self.expiration.to_xml()
if self.transition is not None:
s += self.transition.to_xml()
s += '</Rule>'
return s
class Expiration(object):
"""
When an object will expire.
:ivar days: The number of days until the object expires
:ivar date: The date when the object will expire. Must be
in ISO 8601 format.
"""
def __init__(self, days=None, date=None):
self.days = days
self.date = date
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Days':
self.days = int(value)
elif name == 'Date':
self.date = value
def __repr__(self):
if self.days is None:
how_long = "on: %s" % self.date
else:
how_long = "in: %s days" % self.days
return '<Expiration: %s>' % how_long
def to_xml(self):
s = '<Expiration>'
if self.days is not None:
s += '<Days>%s</Days>' % self.days
elif self.date is not None:
s += '<Date>%s</Date>' % self.date
s += '</Expiration>'
return s
class Transition(object):
"""
A transition to a different storage class.
:ivar days: The number of days until the object should be moved.
:ivar date: The date when the object should be moved. Should be
in ISO 8601 format.
:ivar storage_class: The storage class to transition to. Valid
values are GLACIER, STANDARD_IA.
"""
def __init__(self, days=None, date=None, storage_class=None):
self.days = days
self.date = date
self.storage_class = storage_class
def __repr__(self):
if self.days is None:
how_long = "on: %s" % self.date
else:
how_long = "in: %s days" % self.days
return '<Transition: %s, %s>' % (how_long, self.storage_class)
def to_xml(self):
s = '<Transition>'
s += '<StorageClass>%s</StorageClass>' % self.storage_class
if self.days is not None:
s += '<Days>%s</Days>' % self.days
elif self.date is not None:
s += '<Date>%s</Date>' % self.date
s += '</Transition>'
return s
class Transitions(list):
"""
A container for the transitions associated with a Lifecycle's Rule configuration.
"""
def __init__(self):
self.transition_properties = 3
self.current_transition_property = 1
self.temp_days = None
self.temp_date = None
self.temp_storage_class = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Days':
self.temp_days = int(value)
elif name == 'Date':
self.temp_date = value
elif name == 'StorageClass':
self.temp_storage_class = value
# the XML does not contain a <Transitions> tag
# but rather N number of <Transition> tags not
# structured in any sort of hierarchy.
if self.current_transition_property == self.transition_properties:
self.append(Transition(self.temp_days, self.temp_date, self.temp_storage_class))
self.temp_days = self.temp_date = self.temp_storage_class = None
self.current_transition_property = 1
else:
self.current_transition_property += 1
def to_xml(self):
"""
Returns a string containing the XML version of the Lifecycle
configuration as defined by S3.
"""
s = ''
for transition in self:
s += transition.to_xml()
return s
def add_transition(self, days=None, date=None, storage_class=None):
"""
Add a transition to this Lifecycle configuration. This only adds
the rule to the local copy. To install the new rule(s) on
the bucket, you need to pass this Lifecycle config object
to the configure_lifecycle method of the Bucket object.
:ivar days: The number of days until the object should be moved.
:ivar date: The date when the object should be moved. Should be
in ISO 8601 format.
:ivar storage_class: The storage class to transition to. Valid
values are GLACIER, STANDARD_IA.
"""
transition = Transition(days, date, storage_class)
self.append(transition)
def __first_or_default(self, prop):
for transition in self:
return getattr(transition, prop)
return None
# maintain backwards compatibility so that we can continue utilizing
# 'rule.transition.days' syntax
@property
def days(self):
return self.__first_or_default('days')
@property
def date(self):
return self.__first_or_default('date')
@property
def storage_class(self):
return self.__first_or_default('storage_class')
class Lifecycle(list):
"""
A container for the rules associated with a Lifecycle configuration.
"""
def startElement(self, name, attrs, connection):
if name == 'Rule':
rule = Rule()
self.append(rule)
return rule
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
def to_xml(self):
"""
Returns a string containing the XML version of the Lifecycle
configuration as defined by S3.
"""
s = '<?xml version="1.0" encoding="UTF-8"?>'
s += '<LifecycleConfiguration>'
for rule in self:
s += rule.to_xml()
s += '</LifecycleConfiguration>'
return s
def add_rule(self, id=None, prefix='', status='Enabled',
expiration=None, transition=None):
"""
Add a rule to this Lifecycle configuration. This only adds
the rule to the local copy. To install the new rule(s) on
the bucket, you need to pass this Lifecycle config object
to the configure_lifecycle method of the Bucket object.
:type id: str
:param id: Unique identifier for the rule. The value cannot be longer
than 255 characters. This value is optional. The server will
generate a unique value for the rule if no value is provided.
:type prefix: str
        :param prefix: Prefix identifying one or more objects to which the
rule applies.
:type status: str
:param status: If 'Enabled', the rule is currently being applied.
If 'Disabled', the rule is not currently being applied.
:type expiration: int
:param expiration: Indicates the lifetime, in days, of the objects
that are subject to the rule. The value must be a non-zero
            positive integer. An ``Expiration`` instance may also be provided.
:type transition: Transitions
:param transition: Indicates when an object transitions to a
different storage class.
"""
rule = Rule(id, prefix, status, expiration, transition)
self.append(rule)
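# Illustrative sketch (editor's addition): a rule that moves objects under
# 'logs/' to GLACIER after 30 days and expires them after 365. The bucket
# name is hypothetical; assumes valid AWS credentials.
def _example_lifecycle_usage():
    import boto
    bucket = boto.connect_s3().get_bucket('example-bucket')
    transitions = Transitions()
    transitions.add_transition(days=30, storage_class='GLACIER')
    lifecycle = Lifecycle()
    lifecycle.add_rule(id='log-rotation', prefix='logs/', status='Enabled',
                       expiration=Expiration(days=365), transition=transitions)
    bucket.configure_lifecycle(lifecycle)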
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/s3/lifecycle.py
| 0.835114 | 0.275916 |
lifecycle.py
|
|
def tag(key, value):
start = '<%s>' % key
end = '</%s>' % key
return '%s%s%s' % (start, value, end)
class WebsiteConfiguration(object):
"""
Website configuration for a bucket.
:ivar suffix: Suffix that is appended to a request that is for a
"directory" on the website endpoint (e.g. if the suffix is
index.html and you make a request to samplebucket/images/
the data that is returned will be for the object with the
key name images/index.html). The suffix must not be empty
and must not include a slash character.
:ivar error_key: The object key name to use when a 4xx class error
occurs. This key identifies the page that is returned when
such an error occurs.
:ivar redirect_all_requests_to: Describes the redirect behavior for every
        request to this bucket's website endpoint. If this value is not None,
no other values are considered when configuring the website
configuration for the bucket. This is an instance of
``RedirectLocation``.
:ivar routing_rules: ``RoutingRules`` object which specifies conditions
and redirects that apply when the conditions are met.
"""
def __init__(self, suffix=None, error_key=None,
redirect_all_requests_to=None, routing_rules=None):
self.suffix = suffix
self.error_key = error_key
self.redirect_all_requests_to = redirect_all_requests_to
if routing_rules is not None:
self.routing_rules = routing_rules
else:
self.routing_rules = RoutingRules()
def startElement(self, name, attrs, connection):
if name == 'RoutingRules':
self.routing_rules = RoutingRules()
return self.routing_rules
elif name == 'IndexDocument':
return _XMLKeyValue([('Suffix', 'suffix')], container=self)
elif name == 'ErrorDocument':
return _XMLKeyValue([('Key', 'error_key')], container=self)
def endElement(self, name, value, connection):
pass
def to_xml(self):
parts = ['<?xml version="1.0" encoding="UTF-8"?>',
'<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">']
if self.suffix is not None:
parts.append(tag('IndexDocument', tag('Suffix', self.suffix)))
if self.error_key is not None:
parts.append(tag('ErrorDocument', tag('Key', self.error_key)))
if self.redirect_all_requests_to is not None:
parts.append(self.redirect_all_requests_to.to_xml())
if self.routing_rules:
parts.append(self.routing_rules.to_xml())
parts.append('</WebsiteConfiguration>')
return ''.join(parts)
class _XMLKeyValue(object):
def __init__(self, translator, container=None):
self.translator = translator
if container:
self.container = container
else:
self.container = self
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
for xml_key, attr_name in self.translator:
if name == xml_key:
setattr(self.container, attr_name, value)
def to_xml(self):
parts = []
for xml_key, attr_name in self.translator:
content = getattr(self.container, attr_name)
if content is not None:
parts.append(tag(xml_key, content))
return ''.join(parts)
class RedirectLocation(_XMLKeyValue):
"""Specify redirect behavior for every request to a bucket's endpoint.
:ivar hostname: Name of the host where requests will be redirected.
:ivar protocol: Protocol to use (http, https) when redirecting requests.
The default is the protocol that is used in the original request.
"""
TRANSLATOR = [('HostName', 'hostname'),
('Protocol', 'protocol'),
]
def __init__(self, hostname=None, protocol=None):
self.hostname = hostname
self.protocol = protocol
super(RedirectLocation, self).__init__(self.TRANSLATOR)
def to_xml(self):
return tag('RedirectAllRequestsTo',
super(RedirectLocation, self).to_xml())
class RoutingRules(list):
def add_rule(self, rule):
"""
:type rule: :class:`boto.s3.website.RoutingRule`
:param rule: A routing rule.
:return: This ``RoutingRules`` object is returned,
so that it can chain subsequent calls.
"""
self.append(rule)
return self
def startElement(self, name, attrs, connection):
if name == 'RoutingRule':
rule = RoutingRule(Condition(), Redirect())
self.add_rule(rule)
return rule
def endElement(self, name, value, connection):
pass
def __repr__(self):
return "RoutingRules(%s)" % super(RoutingRules, self).__repr__()
def to_xml(self):
inner_text = []
for rule in self:
inner_text.append(rule.to_xml())
return tag('RoutingRules', '\n'.join(inner_text))
class RoutingRule(object):
"""Represents a single routing rule.
    There are convenience methods that make creating rules
more concise::
rule = RoutingRule.when(key_prefix='foo/').then_redirect('example.com')
:ivar condition: Describes condition that must be met for the
specified redirect to apply.
:ivar redirect: Specifies redirect behavior. You can redirect requests to
another host, to another page, or with another protocol. In the event
        of an error, you can specify a different error code to return.
"""
def __init__(self, condition=None, redirect=None):
self.condition = condition
self.redirect = redirect
def startElement(self, name, attrs, connection):
if name == 'Condition':
return self.condition
elif name == 'Redirect':
return self.redirect
def endElement(self, name, value, connection):
pass
def to_xml(self):
parts = []
if self.condition:
parts.append(self.condition.to_xml())
if self.redirect:
parts.append(self.redirect.to_xml())
return tag('RoutingRule', '\n'.join(parts))
@classmethod
def when(cls, key_prefix=None, http_error_code=None):
return cls(Condition(key_prefix=key_prefix,
http_error_code=http_error_code), None)
def then_redirect(self, hostname=None, protocol=None, replace_key=None,
replace_key_prefix=None, http_redirect_code=None):
self.redirect = Redirect(
hostname=hostname, protocol=protocol,
replace_key=replace_key,
replace_key_prefix=replace_key_prefix,
http_redirect_code=http_redirect_code)
return self
class Condition(_XMLKeyValue):
"""
:ivar key_prefix: The object key name prefix when the redirect is applied.
For example, to redirect requests for ExamplePage.html, the key prefix
will be ExamplePage.html. To redirect request for all pages with the
        prefix docs/, the key prefix will be docs/, which identifies all
objects in the docs/ folder.
:ivar http_error_code: The HTTP error code when the redirect is applied. In
the event of an error, if the error code equals this value, then the
specified redirect is applied.
"""
TRANSLATOR = [
('KeyPrefixEquals', 'key_prefix'),
('HttpErrorCodeReturnedEquals', 'http_error_code'),
]
def __init__(self, key_prefix=None, http_error_code=None):
self.key_prefix = key_prefix
self.http_error_code = http_error_code
super(Condition, self).__init__(self.TRANSLATOR)
def to_xml(self):
return tag('Condition', super(Condition, self).to_xml())
class Redirect(_XMLKeyValue):
"""
:ivar hostname: The host name to use in the redirect request.
:ivar protocol: The protocol to use in the redirect request. Can be either
'http' or 'https'.
:ivar replace_key: The specific object key to use in the redirect request.
For example, redirect request to error.html.
:ivar replace_key_prefix: The object key prefix to use in the redirect
request. For example, to redirect requests for all pages with prefix
docs/ (objects in the docs/ folder) to documents/, you can set a
condition block with KeyPrefixEquals set to docs/ and in the Redirect
set ReplaceKeyPrefixWith to /documents.
:ivar http_redirect_code: The HTTP redirect code to use on the response.
"""
TRANSLATOR = [
('Protocol', 'protocol'),
('HostName', 'hostname'),
('ReplaceKeyWith', 'replace_key'),
('ReplaceKeyPrefixWith', 'replace_key_prefix'),
('HttpRedirectCode', 'http_redirect_code'),
]
def __init__(self, hostname=None, protocol=None, replace_key=None,
replace_key_prefix=None, http_redirect_code=None):
self.hostname = hostname
self.protocol = protocol
self.replace_key = replace_key
self.replace_key_prefix = replace_key_prefix
self.http_redirect_code = http_redirect_code
super(Redirect, self).__init__(self.TRANSLATOR)
def to_xml(self):
return tag('Redirect', super(Redirect, self).to_xml())
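# Illustrative sketch (editor's addition): the fluent RoutingRule helpers
# make a docs/ -> documents/ redirect read naturally. Names are
# hypothetical; assumes valid AWS credentials.
def _example_website_usage():
    import boto
    bucket = boto.connect_s3().get_bucket('example-bucket')
    rules = RoutingRules().add_rule(
        RoutingRule.when(key_prefix='docs/').then_redirect(
            replace_key_prefix='documents/'))
    cfg = WebsiteConfiguration(suffix='index.html', error_key='error.html',
                               routing_rules=rules)
    bucket.set_website_configuration(cfg)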
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/s3/website.py
| 0.793146 | 0.213746 |
website.py
|
|
# File representation of key, for use with "file://" URIs.
import os, shutil
import sys
from boto.compat import StringIO
from boto.exception import BotoClientError
class Key(object):
KEY_STREAM_READABLE = 0x01
KEY_STREAM_WRITABLE = 0x02
KEY_STREAM = (KEY_STREAM_READABLE | KEY_STREAM_WRITABLE)
KEY_REGULAR_FILE = 0x00
def __init__(self, bucket, name, fp=None, key_type=KEY_REGULAR_FILE):
self.bucket = bucket
self.full_path = name
if name == '-':
self.name = None
self.size = None
else:
self.name = name
self.size = os.stat(name).st_size
self.key_type = key_type
if key_type == self.KEY_STREAM_READABLE:
self.fp = sys.stdin
self.full_path = '<STDIN>'
elif key_type == self.KEY_STREAM_WRITABLE:
self.fp = sys.stdout
self.full_path = '<STDOUT>'
else:
self.fp = fp
def __str__(self):
return 'file://' + self.full_path
def get_file(self, fp, headers=None, cb=None, num_cb=10, torrent=False):
"""
Retrieves a file from a Key
:type fp: file
:param fp: File pointer to put the data into
        :type headers: dict
        :param headers: ignored in this subclass.
        :type cb: function
        :param cb: ignored in this subclass.
        :type num_cb: int
        :param num_cb: ignored in this subclass.
"""
if self.key_type & self.KEY_STREAM_WRITABLE:
raise BotoClientError('Stream is not readable')
elif self.key_type & self.KEY_STREAM_READABLE:
key_file = self.fp
else:
key_file = open(self.full_path, 'rb')
try:
shutil.copyfileobj(key_file, fp)
finally:
key_file.close()
def set_contents_from_file(self, fp, headers=None, replace=True, cb=None,
num_cb=10, policy=None, md5=None):
"""
Store an object in a file using the name of the Key object as the
key in file URI and the contents of the file pointed to by 'fp' as the
contents.
:type fp: file
:param fp: the file whose contents to upload
:type headers: dict
:param headers: ignored in this subclass.
:type replace: bool
:param replace: If this parameter is False, the method
will first check to see if an object exists in the
bucket with the same key. If it does, it won't
overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: ignored in this subclass.
:type cb: int
:param num_cb: ignored in this subclass.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: ignored in this subclass.
:type md5: A tuple containing the hexdigest version of the MD5 checksum
of the file as the first element and the Base64-encoded
version of the plain checksum as the second element.
This is the same format returned by the compute_md5 method.
:param md5: ignored in this subclass.
"""
if self.key_type & self.KEY_STREAM_READABLE:
raise BotoClientError('Stream is not writable')
elif self.key_type & self.KEY_STREAM_WRITABLE:
key_file = self.fp
else:
if not replace and os.path.exists(self.full_path):
return
key_file = open(self.full_path, 'wb')
try:
shutil.copyfileobj(fp, key_file)
finally:
key_file.close()
def get_contents_to_file(self, fp, headers=None, cb=None, num_cb=None,
torrent=False, version_id=None,
res_download_handler=None, response_headers=None):
"""
Copy contents from the current file to the file pointed to by 'fp'.
:type fp: File-like object
:param fp:
:type headers: dict
:param headers: Unused in this subclass.
:type cb: function
:param cb: Unused in this subclass.
:type cb: int
:param num_cb: Unused in this subclass.
:type torrent: bool
:param torrent: Unused in this subclass.
        :type res_download_handler: ResumableDownloadHandler
        :param res_download_handler: Unused in this subclass.
:type response_headers: dict
:param response_headers: Unused in this subclass.
"""
shutil.copyfileobj(self.fp, fp)
def get_contents_as_string(self, headers=None, cb=None, num_cb=10,
torrent=False):
"""
Retrieve file data from the Key, and return contents as a string.
:type headers: dict
:param headers: ignored in this subclass.
:type cb: function
:param cb: ignored in this subclass.
        :type num_cb: int
        :param num_cb: ignored in this subclass.
:type torrent: bool
:param torrent: ignored in this subclass.
:rtype: string
:returns: The contents of the file as a string
"""
fp = StringIO()
self.get_contents_to_file(fp)
return fp.getvalue()
def is_stream(self):
return (self.key_type & self.KEY_STREAM)
def close(self):
"""
Closes fp associated with underlying file.
Caller should call this method when done with this class, to avoid
using up OS resources (e.g., when iterating over a large number
of files).
"""
self.fp.close()
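# Illustrative sketch (editor's addition): a file:// Key moves bytes with
# shutil rather than talking to S3, so reads and writes are plain local
# file I/O. The paths are hypothetical and must exist.
def _example_file_key_usage():
    from boto.file.bucket import Bucket  # the sibling module below
    bucket = Bucket('localbucket', '/tmp/example.txt')
    key = bucket.new_key('/tmp/copy.txt')
    with open('/tmp/example.txt', 'rb') as fp:
        key.set_contents_from_file(fp)
    key.close()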
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/file/key.py
| 0.525125 | 0.159971 |
key.py
|
|
# File representation of bucket, for use with "file://" URIs.
import os
from boto.file.key import Key
from boto.file.simpleresultset import SimpleResultSet
from boto.s3.bucketlistresultset import BucketListResultSet
class Bucket(object):
def __init__(self, name, contained_key):
"""Instantiate an anonymous file-based Bucket around a single key.
"""
self.name = name
self.contained_key = contained_key
def __iter__(self):
return iter(BucketListResultSet(self))
def __str__(self):
return 'anonymous bucket for file://' + self.contained_key
def delete_key(self, key_name, headers=None,
version_id=None, mfa_token=None):
"""
Deletes a key from the bucket.
:type key_name: string
:param key_name: The key name to delete
:type version_id: string
:param version_id: Unused in this subclass.
:type mfa_token: tuple or list of strings
:param mfa_token: Unused in this subclass.
"""
os.remove(key_name)
def get_all_keys(self, headers=None, **params):
"""
This method returns the single key around which this anonymous Bucket
was instantiated.
:rtype: SimpleResultSet
:return: The result from file system listing the keys requested
"""
key = Key(self.name, self.contained_key)
return SimpleResultSet([key])
def get_key(self, key_name, headers=None, version_id=None,
key_type=Key.KEY_REGULAR_FILE):
"""
Check to see if a particular key exists within the bucket.
Returns: An instance of a Key object or None
:type key_name: string
:param key_name: The name of the key to retrieve
:type version_id: string
:param version_id: Unused in this subclass.
        :type key_type: int
        :param key_type: Type of the Key - Regular File or input/output Stream
:rtype: :class:`boto.file.key.Key`
:returns: A Key object from this bucket.
"""
if key_name == '-':
return Key(self.name, '-', key_type=Key.KEY_STREAM_READABLE)
else:
fp = open(key_name, 'rb')
return Key(self.name, key_name, fp)
def new_key(self, key_name=None, key_type=Key.KEY_REGULAR_FILE):
"""
Creates a new key
:type key_name: string
:param key_name: The name of the key to create
:rtype: :class:`boto.file.key.Key`
:returns: An instance of the newly created key object
"""
if key_name == '-':
return Key(self.name, '-', key_type=Key.KEY_STREAM_WRITABLE)
else:
dir_name = os.path.dirname(key_name)
if dir_name and not os.path.exists(dir_name):
os.makedirs(dir_name)
fp = open(key_name, 'wb')
return Key(self.name, key_name, fp)
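# Illustrative sketch (editor's addition): '-' maps a key onto
# stdin/stdout, which is how "file://-" streaming works. The path is
# hypothetical and must exist.
def _example_file_bucket_usage():
    bucket = Bucket('localbucket', '/tmp/example.txt')
    for key in bucket.get_all_keys():  # SimpleResultSet with the one key
        print(str(key))                # -> file:///tmp/example.txt
    stream_key = bucket.get_key('-')   # wraps sys.stdin for streaming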
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/file/bucket.py
| 0.73848 | 0.176849 |
bucket.py
|
|
from boto.dynamodb.types import dynamize_value
class Condition(object):
"""
Base class for conditions. Doesn't do a darn thing but allows
    us to test if something is a Condition instance or not.
"""
    def __eq__(self, other):
        if isinstance(other, Condition):
            return self.to_dict() == other.to_dict()
        return NotImplemented
class ConditionNoArgs(Condition):
"""
Abstract class for Conditions that require no arguments, such
as NULL or NOT_NULL.
"""
def __repr__(self):
return '%s' % self.__class__.__name__
def to_dict(self):
return {'ComparisonOperator': self.__class__.__name__}
class ConditionOneArg(Condition):
"""
Abstract class for Conditions that require a single argument
such as EQ or NE.
"""
def __init__(self, v1):
self.v1 = v1
def __repr__(self):
return '%s:%s' % (self.__class__.__name__, self.v1)
def to_dict(self):
return {'AttributeValueList': [dynamize_value(self.v1)],
'ComparisonOperator': self.__class__.__name__}
class ConditionTwoArgs(Condition):
"""
Abstract class for Conditions that require two arguments.
The only example of this currently is BETWEEN.
"""
def __init__(self, v1, v2):
self.v1 = v1
self.v2 = v2
def __repr__(self):
return '%s(%s, %s)' % (self.__class__.__name__, self.v1, self.v2)
def to_dict(self):
values = (self.v1, self.v2)
return {'AttributeValueList': [dynamize_value(v) for v in values],
'ComparisonOperator': self.__class__.__name__}
class ConditionSeveralArgs(Condition):
"""
    Abstract class for conditions that require several arguments (e.g. IN).
"""
def __init__(self, values):
self.values = values
def __repr__(self):
return '{0}({1})'.format(self.__class__.__name__,
', '.join(self.values))
def to_dict(self):
return {'AttributeValueList': [dynamize_value(v) for v in self.values],
'ComparisonOperator': self.__class__.__name__}
class EQ(ConditionOneArg):
pass
class NE(ConditionOneArg):
pass
class LE(ConditionOneArg):
pass
class LT(ConditionOneArg):
pass
class GE(ConditionOneArg):
pass
class GT(ConditionOneArg):
pass
class NULL(ConditionNoArgs):
pass
class NOT_NULL(ConditionNoArgs):
pass
class CONTAINS(ConditionOneArg):
pass
class NOT_CONTAINS(ConditionOneArg):
pass
class BEGINS_WITH(ConditionOneArg):
pass
class IN(ConditionSeveralArgs):
pass
class BETWEEN(ConditionTwoArgs):
pass
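# Illustrative sketch (editor's addition): condition objects are handed to
# layer2 query/scan calls, which serialize them via to_dict(). For example,
# a scan filter keeping items with more than 100 views:
def _example_condition_usage():
    scan_filter = {'views': GT(100), 'status': EQ('published')}
    for attr_name, condition in scan_filter.items():
        print(attr_name, condition.to_dict())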
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/dynamodb/condition.py
| 0.766031 | 0.22482 |
condition.py
|
|
from boto.compat import six
class Batch(object):
"""
Used to construct a BatchGet request.
:ivar table: The Table object from which the item is retrieved.
:ivar keys: A list of scalar or tuple values. Each element in the
list represents one Item to retrieve. If the schema for the
table has both a HashKey and a RangeKey, each element in the
list should be a tuple consisting of (hash_key, range_key). If
the schema for the table contains only a HashKey, each element
in the list should be a scalar value of the appropriate type
for the table schema. NOTE: The maximum number of items that
can be retrieved for a single operation is 100. Also, the
number of items retrieved is constrained by a 1 MB size limit.
:ivar attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:ivar consistent_read: Specify whether or not to use a
consistent read. Defaults to False.
"""
def __init__(self, table, keys, attributes_to_get=None,
consistent_read=False):
self.table = table
self.keys = keys
self.attributes_to_get = attributes_to_get
self.consistent_read = consistent_read
def to_dict(self):
"""
Convert the Batch object into the format required for Layer1.
"""
batch_dict = {}
key_list = []
for key in self.keys:
if isinstance(key, tuple):
hash_key, range_key = key
else:
hash_key = key
range_key = None
k = self.table.layer2.build_key_from_values(self.table.schema,
hash_key, range_key)
key_list.append(k)
batch_dict['Keys'] = key_list
if self.attributes_to_get:
batch_dict['AttributesToGet'] = self.attributes_to_get
if self.consistent_read:
batch_dict['ConsistentRead'] = True
else:
batch_dict['ConsistentRead'] = False
return batch_dict
class BatchWrite(object):
"""
Used to construct a BatchWrite request. Each BatchWrite object
represents a collection of PutItem and DeleteItem requests for
a single Table.
:ivar table: The Table object from which the item is retrieved.
:ivar puts: A list of :class:`boto.dynamodb.item.Item` objects
that you want to write to DynamoDB.
:ivar deletes: A list of scalar or tuple values. Each element in the
list represents one Item to delete. If the schema for the
table has both a HashKey and a RangeKey, each element in the
list should be a tuple consisting of (hash_key, range_key). If
the schema for the table contains only a HashKey, each element
in the list should be a scalar value of the appropriate type
for the table schema.
"""
def __init__(self, table, puts=None, deletes=None):
self.table = table
self.puts = puts or []
self.deletes = deletes or []
def to_dict(self):
"""
Convert the Batch object into the format required for Layer1.
"""
op_list = []
for item in self.puts:
d = {'Item': self.table.layer2.dynamize_item(item)}
d = {'PutRequest': d}
op_list.append(d)
for key in self.deletes:
if isinstance(key, tuple):
hash_key, range_key = key
else:
hash_key = key
range_key = None
k = self.table.layer2.build_key_from_values(self.table.schema,
hash_key, range_key)
d = {'Key': k}
op_list.append({'DeleteRequest': d})
return (self.table.name, op_list)
class BatchList(list):
"""
A subclass of a list object that contains a collection of
:class:`boto.dynamodb.batch.Batch` objects.
"""
def __init__(self, layer2):
list.__init__(self)
self.unprocessed = None
self.layer2 = layer2
def add_batch(self, table, keys, attributes_to_get=None,
consistent_read=False):
"""
Add a Batch to this BatchList.
:type table: :class:`boto.dynamodb.table.Table`
:param table: The Table object in which the items are contained.
:type keys: list
:param keys: A list of scalar or tuple values. Each element in the
list represents one Item to retrieve. If the schema for the
table has both a HashKey and a RangeKey, each element in the
list should be a tuple consisting of (hash_key, range_key). If
the schema for the table contains only a HashKey, each element
in the list should be a scalar value of the appropriate type
for the table schema. NOTE: The maximum number of items that
can be retrieved for a single operation is 100. Also, the
number of items retrieved is constrained by a 1 MB size limit.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
            be returned. Otherwise, all attributes will be returned.
        :type consistent_read: bool
        :param consistent_read: Specify whether or not to use a
            consistent read. Defaults to False.
"""
self.append(Batch(table, keys, attributes_to_get, consistent_read))
def resubmit(self):
"""
        Resubmit the batch to get the next result set. The request object is
        rebuilt from scratch, meaning that any batches added between ``submit``
        and ``resubmit`` will be lost.
Note: This method is experimental and subject to changes in future releases
"""
del self[:]
if not self.unprocessed:
return None
for table_name, table_req in six.iteritems(self.unprocessed):
table_keys = table_req['Keys']
table = self.layer2.get_table(table_name)
keys = []
for key in table_keys:
h = key['HashKeyElement']
r = None
if 'RangeKeyElement' in key:
r = key['RangeKeyElement']
keys.append((h, r))
attributes_to_get = None
if 'AttributesToGet' in table_req:
attributes_to_get = table_req['AttributesToGet']
self.add_batch(table, keys, attributes_to_get=attributes_to_get)
return self.submit()
def submit(self):
res = self.layer2.batch_get_item(self)
if 'UnprocessedKeys' in res:
self.unprocessed = res['UnprocessedKeys']
return res
def to_dict(self):
"""
Convert a BatchList object into format required for Layer1.
"""
d = {}
for batch in self:
b = batch.to_dict()
if b['Keys']:
d[batch.table.name] = b
return d
class BatchWriteList(list):
"""
A subclass of a list object that contains a collection of
:class:`boto.dynamodb.batch.BatchWrite` objects.
"""
def __init__(self, layer2):
list.__init__(self)
self.layer2 = layer2
def add_batch(self, table, puts=None, deletes=None):
"""
Add a BatchWrite to this BatchWriteList.
:type table: :class:`boto.dynamodb.table.Table`
:param table: The Table object in which the items are contained.
:type puts: list of :class:`boto.dynamodb.item.Item` objects
:param puts: A list of items that you want to write to DynamoDB.
:type deletes: A list
:param deletes: A list of scalar or tuple values. Each element
in the list represents one Item to delete. If the schema
for the table has both a HashKey and a RangeKey, each
element in the list should be a tuple consisting of
(hash_key, range_key). If the schema for the table
contains only a HashKey, each element in the list should
be a scalar value of the appropriate type for the table
schema.
"""
self.append(BatchWrite(table, puts, deletes))
def submit(self):
return self.layer2.batch_write_item(self)
def to_dict(self):
"""
Convert a BatchWriteList object into format required for Layer1.
"""
d = {}
for batch in self:
table_name, batch_dict = batch.to_dict()
d[table_name] = batch_dict
return d
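# Illustrative sketch (editor's addition): a BatchList is normally created
# via Layer2.new_batch_list(); the keys below are hypothetical
# (hash_key, range_key) tuples.
def _example_batch_usage(layer2, table):
    batch_list = BatchList(layer2)
    batch_list.add_batch(table, keys=[('hash-1', 1), ('hash-2', 2)],
                         attributes_to_get=['title'])
    result = batch_list.submit()
    if 'UnprocessedKeys' in result:
        result = batch_list.resubmit()  # retry keys DynamoDB skipped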
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/dynamodb/batch.py
| 0.898335 | 0.481941 |
batch.py
|
|
class Schema(object):
"""
Represents a DynamoDB schema.
:ivar hash_key_name: The name of the hash key of the schema.
:ivar hash_key_type: The DynamoDB type specification for the
hash key of the schema.
:ivar range_key_name: The name of the range key of the schema
or None if no range key is defined.
:ivar range_key_type: The DynamoDB type specification for the
range key of the schema or None if no range key is defined.
:ivar dict: The underlying Python dictionary that needs to be
passed to Layer1 methods.
"""
def __init__(self, schema_dict):
self._dict = schema_dict
def __repr__(self):
if self.range_key_name:
s = 'Schema(%s:%s)' % (self.hash_key_name, self.range_key_name)
else:
s = 'Schema(%s)' % self.hash_key_name
return s
@classmethod
def create(cls, hash_key, range_key=None):
"""Convenience method to create a schema object.
Example usage::
schema = Schema.create(hash_key=('foo', 'N'))
schema2 = Schema.create(hash_key=('foo', 'N'),
range_key=('bar', 'S'))
:type hash_key: tuple
:param hash_key: A tuple of (hash_key_name, hash_key_type)
:type range_key: tuple
        :param range_key: A tuple of (range_key_name, range_key_type)
"""
reconstructed = {
'HashKeyElement': {
'AttributeName': hash_key[0],
'AttributeType': hash_key[1],
}
}
if range_key is not None:
reconstructed['RangeKeyElement'] = {
'AttributeName': range_key[0],
'AttributeType': range_key[1],
}
instance = cls(None)
instance._dict = reconstructed
return instance
@property
def dict(self):
return self._dict
@property
def hash_key_name(self):
return self._dict['HashKeyElement']['AttributeName']
@property
def hash_key_type(self):
return self._dict['HashKeyElement']['AttributeType']
@property
def range_key_name(self):
name = None
if 'RangeKeyElement' in self._dict:
name = self._dict['RangeKeyElement']['AttributeName']
return name
@property
def range_key_type(self):
type = None
if 'RangeKeyElement' in self._dict:
type = self._dict['RangeKeyElement']['AttributeType']
return type
def __eq__(self, other):
return (self.hash_key_name == other.hash_key_name and
self.hash_key_type == other.hash_key_type and
self.range_key_name == other.range_key_name and
self.range_key_type == other.range_key_type)
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/dynamodb/schema.py
| 0.925936 | 0.321606 |
schema.py
|
|
from boto.dynamodb.exceptions import DynamoDBItemError
class Item(dict):
"""
An item in Amazon DynamoDB.
:ivar hash_key: The HashKey of this item.
:ivar range_key: The RangeKey of this item or None if no RangeKey
is defined.
:ivar hash_key_name: The name of the HashKey associated with this item.
:ivar range_key_name: The name of the RangeKey associated with this item.
:ivar table: The Table this item belongs to.
"""
def __init__(self, table, hash_key=None, range_key=None, attrs=None):
self.table = table
self._updates = None
self._hash_key_name = self.table.schema.hash_key_name
self._range_key_name = self.table.schema.range_key_name
if attrs is None:
attrs = {}
if hash_key is None:
hash_key = attrs.get(self._hash_key_name, None)
self[self._hash_key_name] = hash_key
if self._range_key_name:
if range_key is None:
range_key = attrs.get(self._range_key_name, None)
self[self._range_key_name] = range_key
self._updates = {}
for key, value in attrs.items():
if key != self._hash_key_name and key != self._range_key_name:
self[key] = value
self.consumed_units = 0
@property
def hash_key(self):
return self[self._hash_key_name]
@property
def range_key(self):
return self.get(self._range_key_name)
@property
def hash_key_name(self):
return self._hash_key_name
@property
def range_key_name(self):
return self._range_key_name
def add_attribute(self, attr_name, attr_value):
"""
Queue the addition of an attribute to an item in DynamoDB.
This will eventually result in an UpdateItem request being issued
with an update action of ADD when the save method is called.
:type attr_name: str
:param attr_name: Name of the attribute you want to alter.
:type attr_value: int|long|float|set
:param attr_value: Value which is to be added to the attribute.
"""
self._updates[attr_name] = ("ADD", attr_value)
def delete_attribute(self, attr_name, attr_value=None):
"""
Queue the deletion of an attribute from an item in DynamoDB.
This call will result in a UpdateItem request being issued
with update action of DELETE when the save method is called.
:type attr_name: str
:param attr_name: Name of the attribute you want to alter.
:type attr_value: set
:param attr_value: A set of values to be removed from the attribute.
This parameter is optional. If None, the whole attribute is
removed from the item.
"""
self._updates[attr_name] = ("DELETE", attr_value)
def put_attribute(self, attr_name, attr_value):
"""
Queue the putting of an attribute to an item in DynamoDB.
This call will result in an UpdateItem request being issued
with the update action of PUT when the save method is called.
:type attr_name: str
:param attr_name: Name of the attribute you want to alter.
:type attr_value: int|long|float|str|set
:param attr_value: New value of the attribute.
"""
self._updates[attr_name] = ("PUT", attr_value)
def save(self, expected_value=None, return_values=None):
"""
Commits pending updates to Amazon DynamoDB.
:type expected_value: dict
:param expected_value: A dictionary of name/value pairs that
you expect. This dictionary should have name/value pairs
where the name is the name of the attribute and the value is
either the value you are expecting or False if you expect
the attribute not to exist.
:type return_values: str
:param return_values: Controls the return of attribute name/value pairs
before they were updated. Possible values are: None, 'ALL_OLD',
'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is
specified and the item is overwritten, the content of the old item
is returned. If 'ALL_NEW' is specified, then all the attributes of
the new version of the item are returned. If 'UPDATED_NEW' is
specified, the new versions of only the updated attributes are
returned.
"""
return self.table.layer2.update_item(self, expected_value,
return_values)
def delete(self, expected_value=None, return_values=None):
"""
Delete the item from DynamoDB.
:type expected_value: dict
:param expected_value: A dictionary of name/value pairs that
you expect. This dictionary should have name/value pairs
where the name is the name of the attribute and the value
is either the value you are expecting or False if you expect
the attribute not to exist.
:type return_values: str
:param return_values: Controls the return of attribute
            name-value pairs before they were changed. Possible
values are: None or 'ALL_OLD'. If 'ALL_OLD' is
specified and the item is overwritten, the content
of the old item is returned.
"""
return self.table.layer2.delete_item(self, expected_value,
return_values)
def put(self, expected_value=None, return_values=None):
"""
Store a new item or completely replace an existing item
in Amazon DynamoDB.
:type expected_value: dict
:param expected_value: A dictionary of name/value pairs that
you expect. This dictionary should have name/value pairs
where the name is the name of the attribute and the value
is either the value you are expecting or False if you expect
the attribute not to exist.
:type return_values: str
:param return_values: Controls the return of attribute
            name-value pairs before they were changed. Possible
values are: None or 'ALL_OLD'. If 'ALL_OLD' is
specified and the item is overwritten, the content
of the old item is returned.
"""
return self.table.layer2.put_item(self, expected_value, return_values)
def __setitem__(self, key, value):
"""Overrwrite the setter to instead update the _updates
method so this can act like a normal dict"""
if self._updates is not None:
self.put_attribute(key, value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
"""Remove this key from the items"""
if self._updates is not None:
self.delete_attribute(key)
dict.__delitem__(self, key)
# Allow this item to still be pickled
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__.update(d)
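# Illustrative sketch (editor's addition): queued attribute updates are
# flushed by save() as a single UpdateItem call. The table and item are
# hypothetical.
def _example_item_usage(table):
    item = table.get_item(hash_key='example-id')
    item['title'] = 'New title'     # queued as a PUT update
    item.add_attribute('views', 1)  # queued as an ADD update
    item.delete_attribute('draft')  # queued as a DELETE update
    item.save(return_values='UPDATED_NEW')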
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/dynamodb/item.py
| 0.8812 | 0.294262 |
item.py
|
|
from boto.dynamodb.layer1 import Layer1
from boto.dynamodb.table import Table
from boto.dynamodb.schema import Schema
from boto.dynamodb.item import Item
from boto.dynamodb.batch import BatchList, BatchWriteList
from boto.dynamodb.types import get_dynamodb_type, Dynamizer, \
LossyFloatDynamizer, NonBooleanDynamizer
class TableGenerator(object):
"""
This is an object that wraps up the table_generator function.
The only real reason to have this is that we want to be able
to accumulate and return the ConsumedCapacityUnits element that
is part of each response.
:ivar last_evaluated_key: A sequence representing the key(s)
of the item last evaluated, or None if no additional
results are available.
:ivar remaining: The remaining quantity of results requested.
:ivar table: The table to which the call was made.
"""
def __init__(self, table, callable, remaining, item_class, kwargs):
self.table = table
self.callable = callable
self.remaining = -1 if remaining is None else remaining
self.item_class = item_class
self.kwargs = kwargs
self._consumed_units = 0.0
self.last_evaluated_key = None
self._count = 0
self._scanned_count = 0
self._response = None
@property
def count(self):
"""
The total number of items retrieved thus far. This value changes with
iteration and even when issuing a call with count=True, it is necessary
to complete the iteration to assert an accurate count value.
"""
self.response
return self._count
@property
def scanned_count(self):
"""
As above, but representing the total number of items scanned by
DynamoDB, without regard to any filters.
"""
self.response
return self._scanned_count
@property
def consumed_units(self):
"""
Returns a float representing the ConsumedCapacityUnits accumulated.
"""
self.response
return self._consumed_units
@property
def response(self):
"""
The current response to the call from DynamoDB.
"""
return self.next_response() if self._response is None else self._response
def next_response(self):
"""
Issue a call and return the result. You can invoke this method
while iterating over the TableGenerator in order to skip to the
next "page" of results.
"""
# preserve any existing limit in case the user alters self.remaining
limit = self.kwargs.get('limit')
if (self.remaining > 0 and (limit is None or limit > self.remaining)):
self.kwargs['limit'] = self.remaining
self._response = self.callable(**self.kwargs)
self.kwargs['limit'] = limit
self._consumed_units += self._response.get('ConsumedCapacityUnits', 0.0)
self._count += self._response.get('Count', 0)
self._scanned_count += self._response.get('ScannedCount', 0)
# at the expense of a possibly gratuitous dynamize, ensure that
# early generator termination won't result in bad LEK values
if 'LastEvaluatedKey' in self._response:
lek = self._response['LastEvaluatedKey']
esk = self.table.layer2.dynamize_last_evaluated_key(lek)
self.kwargs['exclusive_start_key'] = esk
lektuple = (lek['HashKeyElement'],)
if 'RangeKeyElement' in lek:
lektuple += (lek['RangeKeyElement'],)
self.last_evaluated_key = lektuple
else:
self.last_evaluated_key = None
return self._response
def __iter__(self):
while self.remaining != 0:
response = self.response
for item in response.get('Items', []):
self.remaining -= 1
yield self.item_class(self.table, attrs=item)
if self.remaining == 0:
break
if response is not self._response:
break
else:
if self.last_evaluated_key is not None:
self.next_response()
continue
break
if response is not self._response:
continue
break
class Layer2(object):
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
debug=0, security_token=None, region=None,
validate_certs=True, dynamizer=LossyFloatDynamizer,
profile_name=None):
self.layer1 = Layer1(aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port,
debug, security_token, region,
validate_certs=validate_certs,
profile_name=profile_name)
self.dynamizer = dynamizer()
def use_decimals(self, use_boolean=False):
"""
Use the ``decimal.Decimal`` type for encoding/decoding numeric types.
By default, ints/floats are used to represent numeric types
('N', 'NS') received from DynamoDB. Using the ``Decimal``
type is recommended to prevent loss of precision.
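        Example usage (a minimal sketch; the connection call is
        illustrative)::

            conn = boto.connect_dynamodb()
            conn.use_decimals()  # 'N' values now decode to decimal.Decimal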
"""
# Eventually this should be made the default dynamizer.
self.dynamizer = Dynamizer() if use_boolean else NonBooleanDynamizer()
def dynamize_attribute_updates(self, pending_updates):
"""
Convert a set of pending item updates into the structure
required by Layer1.
"""
d = {}
for attr_name in pending_updates:
action, value = pending_updates[attr_name]
if value is None:
# DELETE without an attribute value
d[attr_name] = {"Action": action}
else:
d[attr_name] = {"Action": action,
"Value": self.dynamizer.encode(value)}
return d
def dynamize_item(self, item):
d = {}
for attr_name in item:
d[attr_name] = self.dynamizer.encode(item[attr_name])
return d
def dynamize_range_key_condition(self, range_key_condition):
"""
Convert a layer2 range_key_condition parameter into the
structure required by Layer1.
"""
return range_key_condition.to_dict()
def dynamize_scan_filter(self, scan_filter):
"""
Convert a layer2 scan_filter parameter into the
structure required by Layer1.
"""
d = None
if scan_filter:
d = {}
for attr_name in scan_filter:
condition = scan_filter[attr_name]
d[attr_name] = condition.to_dict()
return d
def dynamize_expected_value(self, expected_value):
"""
Convert an expected_value parameter into the data structure
required for Layer1.
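        Example usage (attribute names and values are illustrative)::

            layer2.dynamize_expected_value({'status': 'active',
                                            'deleted': False})
            # -> {'status': {'Value': {'S': 'active'}},
            #     'deleted': {'Exists': False}}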
"""
d = None
if expected_value:
d = {}
for attr_name in expected_value:
attr_value = expected_value[attr_name]
if attr_value is True:
attr_value = {'Exists': True}
elif attr_value is False:
attr_value = {'Exists': False}
else:
val = self.dynamizer.encode(expected_value[attr_name])
attr_value = {'Value': val}
d[attr_name] = attr_value
return d
def dynamize_last_evaluated_key(self, last_evaluated_key):
"""
Convert a last_evaluated_key parameter into the data structure
required for Layer1.
"""
d = None
if last_evaluated_key:
hash_key = last_evaluated_key['HashKeyElement']
d = {'HashKeyElement': self.dynamizer.encode(hash_key)}
if 'RangeKeyElement' in last_evaluated_key:
range_key = last_evaluated_key['RangeKeyElement']
d['RangeKeyElement'] = self.dynamizer.encode(range_key)
return d
def build_key_from_values(self, schema, hash_key, range_key=None):
"""
Build a Key structure to be used for accessing items
in Amazon DynamoDB. This method takes the supplied hash_key
and optional range_key and validates them against the
schema. If there is a mismatch, a TypeError is raised.
        Otherwise, a Python dict version of an Amazon DynamoDB Key
data structure is returned.
:type hash_key: int|float|str|unicode|Binary
:param hash_key: The hash key of the item you are looking for.
The type of the hash key should match the type defined in
the schema.
:type range_key: int|float|str|unicode|Binary
        :param range_key: The range key of the item you are looking for.
This should be supplied only if the schema requires a
range key. The type of the range key should match the
type defined in the schema.
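        Example usage (a sketch; key values are illustrative)::

            key = layer2.build_key_from_values(table.schema,
                                               'my-hash-key', 42)
            # -> {'HashKeyElement': {'S': 'my-hash-key'},
            #     'RangeKeyElement': {'N': '42'}}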
"""
dynamodb_key = {}
dynamodb_value = self.dynamizer.encode(hash_key)
if list(dynamodb_value.keys())[0] != schema.hash_key_type:
msg = 'Hashkey must be of type: %s' % schema.hash_key_type
raise TypeError(msg)
dynamodb_key['HashKeyElement'] = dynamodb_value
if range_key is not None:
dynamodb_value = self.dynamizer.encode(range_key)
if list(dynamodb_value.keys())[0] != schema.range_key_type:
msg = 'RangeKey must be of type: %s' % schema.range_key_type
raise TypeError(msg)
dynamodb_key['RangeKeyElement'] = dynamodb_value
return dynamodb_key
def new_batch_list(self):
"""
Return a new, empty :class:`boto.dynamodb.batch.BatchList`
object.
"""
return BatchList(self)
def new_batch_write_list(self):
"""
Return a new, empty :class:`boto.dynamodb.batch.BatchWriteList`
object.
"""
return BatchWriteList(self)
def list_tables(self, limit=None):
"""
Return a list of the names of all tables associated with the
current account and region.
:type limit: int
:param limit: The maximum number of tables to return.
"""
tables = []
start_table = None
while not limit or len(tables) < limit:
this_round_limit = None
if limit:
this_round_limit = limit - len(tables)
this_round_limit = min(this_round_limit, 100)
result = self.layer1.list_tables(limit=this_round_limit, start_table=start_table)
tables.extend(result.get('TableNames', []))
start_table = result.get('LastEvaluatedTableName', None)
if not start_table:
break
return tables
def describe_table(self, name):
"""
Retrieve information about an existing table.
:type name: str
:param name: The name of the desired table.
"""
return self.layer1.describe_table(name)
def table_from_schema(self, name, schema):
"""
Create a Table object from a schema.
This method will create a Table object without
making any API calls. If you know the name and schema
of the table, you can use this method instead of
``get_table``.
Example usage::
table = layer2.table_from_schema(
'tablename',
Schema.create(hash_key=('foo', 'N')))
:type name: str
:param name: The name of the table.
:type schema: :class:`boto.dynamodb.schema.Schema`
:param schema: The schema associated with the table.
:rtype: :class:`boto.dynamodb.table.Table`
:return: A Table object representing the table.
"""
return Table.create_from_schema(self, name, schema)
def get_table(self, name):
"""
Retrieve the Table object for an existing table.
:type name: str
:param name: The name of the desired table.
:rtype: :class:`boto.dynamodb.table.Table`
:return: A Table object representing the table.
"""
response = self.layer1.describe_table(name)
return Table(self, response)
lookup = get_table
def create_table(self, name, schema, read_units, write_units):
"""
Create a new Amazon DynamoDB table.
:type name: str
:param name: The name of the desired table.
:type schema: :class:`boto.dynamodb.schema.Schema`
:param schema: The Schema object that defines the schema used
by this table.
:type read_units: int
:param read_units: The value for ReadCapacityUnits.
:type write_units: int
:param write_units: The value for WriteCapacityUnits.
:rtype: :class:`boto.dynamodb.table.Table`
:return: A Table object representing the new Amazon DynamoDB table.
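        Example usage (a sketch; the table name and throughput values
        are illustrative)::

            schema = layer2.create_schema('forum_name', str)
            table = layer2.create_table('messages', schema, 10, 5)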
"""
response = self.layer1.create_table(name, schema.dict,
{'ReadCapacityUnits': read_units,
'WriteCapacityUnits': write_units})
return Table(self, response)
def update_throughput(self, table, read_units, write_units):
"""
Update the ProvisionedThroughput for the Amazon DynamoDB Table.
:type table: :class:`boto.dynamodb.table.Table`
:param table: The Table object whose throughput is being updated.
:type read_units: int
:param read_units: The new value for ReadCapacityUnits.
:type write_units: int
:param write_units: The new value for WriteCapacityUnits.
"""
response = self.layer1.update_table(table.name,
{'ReadCapacityUnits': read_units,
'WriteCapacityUnits': write_units})
table.update_from_response(response)
def delete_table(self, table):
"""
Delete this table and all items in it. After calling this
        the Table object's status attribute will be set to 'DELETING'.
:type table: :class:`boto.dynamodb.table.Table`
:param table: The Table object that is being deleted.
"""
response = self.layer1.delete_table(table.name)
table.update_from_response(response)
def create_schema(self, hash_key_name, hash_key_proto_value,
range_key_name=None, range_key_proto_value=None):
"""
Create a Schema object used when creating a Table.
:type hash_key_name: str
:param hash_key_name: The name of the HashKey for the schema.
:type hash_key_proto_value: int|long|float|str|unicode|Binary
:param hash_key_proto_value: A sample or prototype of the type
of value you want to use for the HashKey. Alternatively,
you can also just pass in the Python type (e.g. int, float, etc.).
:type range_key_name: str
:param range_key_name: The name of the RangeKey for the schema.
This parameter is optional.
:type range_key_proto_value: int|long|float|str|unicode|Binary
:param range_key_proto_value: A sample or prototype of the type
of value you want to use for the RangeKey. Alternatively,
you can also pass in the Python type (e.g. int, float, etc.)
This parameter is optional.
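        Example usage (attribute names are illustrative)::

            schema = layer2.create_schema('forum_name', str,
                                          'subject', str)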
"""
hash_key = (hash_key_name, get_dynamodb_type(hash_key_proto_value))
if range_key_name and range_key_proto_value is not None:
range_key = (range_key_name,
get_dynamodb_type(range_key_proto_value))
else:
range_key = None
return Schema.create(hash_key, range_key)
def get_item(self, table, hash_key, range_key=None,
attributes_to_get=None, consistent_read=False,
item_class=Item):
"""
Retrieve an existing item from the table.
:type table: :class:`boto.dynamodb.table.Table`
:param table: The Table object from which the item is retrieved.
:type hash_key: int|long|float|str|unicode|Binary
:param hash_key: The HashKey of the requested item. The
type of the value must match the type defined in the
schema for the table.
:type range_key: int|long|float|str|unicode|Binary
:param range_key: The optional RangeKey of the requested item.
The type of the value must match the type defined in the
schema for the table.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type consistent_read: bool
:param consistent_read: If True, a consistent read
request is issued. Otherwise, an eventually consistent
request is issued.
:type item_class: Class
:param item_class: Allows you to override the class used
to generate the items. This should be a subclass of
:class:`boto.dynamodb.item.Item`
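        Example usage (key values are illustrative)::

            item = layer2.get_item(table, 'my-hash-key', range_key=1,
                                   consistent_read=True)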
"""
key = self.build_key_from_values(table.schema, hash_key, range_key)
response = self.layer1.get_item(table.name, key,
attributes_to_get, consistent_read,
object_hook=self.dynamizer.decode)
item = item_class(table, hash_key, range_key, response['Item'])
if 'ConsumedCapacityUnits' in response:
item.consumed_units = response['ConsumedCapacityUnits']
return item
def batch_get_item(self, batch_list):
"""
        Return a set of attributes for multiple items in
multiple tables using their primary keys.
:type batch_list: :class:`boto.dynamodb.batch.BatchList`
:param batch_list: A BatchList object which consists of a
            list of :class:`boto.dynamodb.batch.Batch` objects.
Each Batch object contains the information about one
batch of objects that you wish to retrieve in this
request.
"""
request_items = batch_list.to_dict()
return self.layer1.batch_get_item(request_items,
object_hook=self.dynamizer.decode)
def batch_write_item(self, batch_list):
"""
Performs multiple Puts and Deletes in one batch.
:type batch_list: :class:`boto.dynamodb.batch.BatchWriteList`
:param batch_list: A BatchWriteList object which consists of a
            list of :class:`boto.dynamodb.batch.BatchWrite` objects.
Each Batch object contains the information about one
batch of objects that you wish to put or delete.
"""
request_items = batch_list.to_dict()
return self.layer1.batch_write_item(request_items,
object_hook=self.dynamizer.decode)
def put_item(self, item, expected_value=None, return_values=None):
"""
Store a new item or completely replace an existing item
in Amazon DynamoDB.
:type item: :class:`boto.dynamodb.item.Item`
:param item: The Item to write to Amazon DynamoDB.
:type expected_value: dict
:param expected_value: A dictionary of name/value pairs that you expect.
This dictionary should have name/value pairs where the name
is the name of the attribute and the value is either the value
you are expecting or False if you expect the attribute not to
exist.
:type return_values: str
:param return_values: Controls the return of attribute
            name-value pairs before they were changed. Possible
values are: None or 'ALL_OLD'. If 'ALL_OLD' is
specified and the item is overwritten, the content
of the old item is returned.
"""
expected_value = self.dynamize_expected_value(expected_value)
response = self.layer1.put_item(item.table.name,
self.dynamize_item(item),
expected_value, return_values,
object_hook=self.dynamizer.decode)
if 'ConsumedCapacityUnits' in response:
item.consumed_units = response['ConsumedCapacityUnits']
return response
def update_item(self, item, expected_value=None, return_values=None):
"""
Commit pending item updates to Amazon DynamoDB.
:type item: :class:`boto.dynamodb.item.Item`
:param item: The Item to update in Amazon DynamoDB. It is expected
that you would have called the add_attribute, put_attribute
and/or delete_attribute methods on this Item prior to calling
this method. Those queued changes are what will be updated.
:type expected_value: dict
:param expected_value: A dictionary of name/value pairs that you
expect. This dictionary should have name/value pairs where the
name is the name of the attribute and the value is either the
value you are expecting or False if you expect the attribute
not to exist.
:type return_values: str
:param return_values: Controls the return of attribute name/value pairs
before they were updated. Possible values are: None, 'ALL_OLD',
'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is
specified and the item is overwritten, the content of the old item
is returned. If 'ALL_NEW' is specified, then all the attributes of
the new version of the item are returned. If 'UPDATED_NEW' is
specified, the new versions of only the updated attributes are
returned.
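        Example usage (a sketch; the attribute name and value are
        illustrative)::

            item.put_attribute('status', 'shipped')
            layer2.update_item(item, return_values='UPDATED_NEW')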
"""
expected_value = self.dynamize_expected_value(expected_value)
key = self.build_key_from_values(item.table.schema,
item.hash_key, item.range_key)
attr_updates = self.dynamize_attribute_updates(item._updates)
response = self.layer1.update_item(item.table.name, key,
attr_updates,
expected_value, return_values,
object_hook=self.dynamizer.decode)
item._updates.clear()
if 'ConsumedCapacityUnits' in response:
item.consumed_units = response['ConsumedCapacityUnits']
return response
def delete_item(self, item, expected_value=None, return_values=None):
"""
Delete the item from Amazon DynamoDB.
:type item: :class:`boto.dynamodb.item.Item`
:param item: The Item to delete from Amazon DynamoDB.
:type expected_value: dict
:param expected_value: A dictionary of name/value pairs that you expect.
This dictionary should have name/value pairs where the name
is the name of the attribute and the value is either the value
you are expecting or False if you expect the attribute not to
exist.
:type return_values: str
:param return_values: Controls the return of attribute
            name-value pairs before they were changed. Possible
values are: None or 'ALL_OLD'. If 'ALL_OLD' is
specified and the item is overwritten, the content
of the old item is returned.
"""
expected_value = self.dynamize_expected_value(expected_value)
key = self.build_key_from_values(item.table.schema,
item.hash_key, item.range_key)
return self.layer1.delete_item(item.table.name, key,
expected=expected_value,
return_values=return_values,
object_hook=self.dynamizer.decode)
def query(self, table, hash_key, range_key_condition=None,
attributes_to_get=None, request_limit=None,
max_results=None, consistent_read=False,
scan_index_forward=True, exclusive_start_key=None,
item_class=Item, count=False):
"""
Perform a query on the table.
:type table: :class:`boto.dynamodb.table.Table`
:param table: The Table object that is being queried.
:type hash_key: int|long|float|str|unicode|Binary
:param hash_key: The HashKey of the requested item. The
type of the value must match the type defined in the
schema for the table.
:type range_key_condition: :class:`boto.dynamodb.condition.Condition`
:param range_key_condition: A Condition object.
Condition object can be one of the following types:
EQ|LE|LT|GE|GT|BEGINS_WITH|BETWEEN
The only condition which expects or will accept two
values is 'BETWEEN', otherwise a single value should
be passed to the Condition constructor.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type request_limit: int
:param request_limit: The maximum number of items to retrieve
from Amazon DynamoDB on each request. You may want to set
a specific request_limit based on the provisioned throughput
of your table. The default behavior is to retrieve as many
results as possible per request.
:type max_results: int
:param max_results: The maximum number of results that will
be retrieved from Amazon DynamoDB in total. For example,
if you only wanted to see the first 100 results from the
query, regardless of how many were actually available, you
could set max_results to 100 and the generator returned
            from the query method will only yield 100 results max.
:type consistent_read: bool
:param consistent_read: If True, a consistent read
request is issued. Otherwise, an eventually consistent
request is issued.
:type scan_index_forward: bool
        :param scan_index_forward: Specifies forward or backward
traversal of the index. Default is forward (True).
:type count: bool
:param count: If True, Amazon DynamoDB returns a total
number of items for the Query operation, even if the
operation has no matching items for the assigned filter.
If count is True, the actual items are not returned and
the count is accessible as the ``count`` attribute of
the returned object.
:type exclusive_start_key: list or tuple
:param exclusive_start_key: Primary key of the item from
which to continue an earlier query. This would be
provided as the LastEvaluatedKey in that query.
:type item_class: Class
:param item_class: Allows you to override the class used
to generate the items. This should be a subclass of
:class:`boto.dynamodb.item.Item`
:rtype: :class:`boto.dynamodb.layer2.TableGenerator`
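        Example usage (a sketch; the hash key value and condition are
        illustrative)::

            from boto.dynamodb.condition import BEGINS_WITH
            for item in layer2.query(table, 'amazon-dynamodb',
                                     range_key_condition=BEGINS_WITH('2012')):
                print(item)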
"""
if range_key_condition:
rkc = self.dynamize_range_key_condition(range_key_condition)
else:
rkc = None
if exclusive_start_key:
esk = self.build_key_from_values(table.schema,
*exclusive_start_key)
else:
esk = None
kwargs = {'table_name': table.name,
'hash_key_value': self.dynamizer.encode(hash_key),
'range_key_conditions': rkc,
'attributes_to_get': attributes_to_get,
'limit': request_limit,
'count': count,
'consistent_read': consistent_read,
'scan_index_forward': scan_index_forward,
'exclusive_start_key': esk,
'object_hook': self.dynamizer.decode}
return TableGenerator(table, self.layer1.query,
max_results, item_class, kwargs)
def scan(self, table, scan_filter=None,
attributes_to_get=None, request_limit=None, max_results=None,
exclusive_start_key=None, item_class=Item, count=False):
"""
Perform a scan of DynamoDB.
:type table: :class:`boto.dynamodb.table.Table`
:param table: The Table object that is being scanned.
:type scan_filter: A dict
:param scan_filter: A dictionary where the key is the
attribute name and the value is a
:class:`boto.dynamodb.condition.Condition` object.
Valid Condition objects include:
* EQ - equal (1)
* NE - not equal (1)
* LE - less than or equal (1)
* LT - less than (1)
* GE - greater than or equal (1)
* GT - greater than (1)
* NOT_NULL - attribute exists (0, use None)
* NULL - attribute does not exist (0, use None)
* CONTAINS - substring or value in list (1)
* NOT_CONTAINS - absence of substring or value in list (1)
* BEGINS_WITH - substring prefix (1)
* IN - exact match in list (N)
* BETWEEN - >= first value, <= second value (2)
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type request_limit: int
:param request_limit: The maximum number of items to retrieve
from Amazon DynamoDB on each request. You may want to set
a specific request_limit based on the provisioned throughput
of your table. The default behavior is to retrieve as many
results as possible per request.
:type max_results: int
:param max_results: The maximum number of results that will
be retrieved from Amazon DynamoDB in total. For example,
if you only wanted to see the first 100 results from the
query, regardless of how many were actually available, you
could set max_results to 100 and the generator returned
            from the query method will only yield 100 results max.
:type count: bool
:param count: If True, Amazon DynamoDB returns a total
number of items for the Scan operation, even if the
operation has no matching items for the assigned filter.
If count is True, the actual items are not returned and
the count is accessible as the ``count`` attribute of
the returned object.
:type exclusive_start_key: list or tuple
:param exclusive_start_key: Primary key of the item from
which to continue an earlier query. This would be
provided as the LastEvaluatedKey in that query.
:type item_class: Class
:param item_class: Allows you to override the class used
to generate the items. This should be a subclass of
:class:`boto.dynamodb.item.Item`
:rtype: :class:`boto.dynamodb.layer2.TableGenerator`
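        Example usage (a sketch; the attribute name and threshold are
        illustrative)::

            from boto.dynamodb.condition import GT
            for item in layer2.scan(table, scan_filter={'views': GT(100)}):
                print(item)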
"""
if exclusive_start_key:
esk = self.build_key_from_values(table.schema,
*exclusive_start_key)
else:
esk = None
kwargs = {'table_name': table.name,
'scan_filter': self.dynamize_scan_filter(scan_filter),
'attributes_to_get': attributes_to_get,
'limit': request_limit,
'count': count,
'exclusive_start_key': esk,
'object_hook': self.dynamizer.decode}
return TableGenerator(table, self.layer1.scan,
max_results, item_class, kwargs)
# ==== end of boto/dynamodb/layer2.py ====
# ==== boto/dynamodb/layer1.py ====
import time
from binascii import crc32
import boto
from boto.connection import AWSAuthConnection
from boto.exception import DynamoDBResponseError
from boto.provider import Provider
from boto.dynamodb import exceptions as dynamodb_exceptions
from boto.compat import json
class Layer1(AWSAuthConnection):
"""
This is the lowest-level interface to DynamoDB. Methods at this
layer map directly to API requests and parameters to the methods
are either simple, scalar values or they are the Python equivalent
of the JSON input as defined in the DynamoDB Developer's Guide.
All responses are direct decoding of the JSON response bodies to
Python data structures via the json or simplejson modules.
:ivar throughput_exceeded_events: An integer variable that
keeps a running total of the number of ThroughputExceeded
responses this connection has received from Amazon DynamoDB.
"""
DefaultRegionName = 'us-east-1'
"""The default region name for DynamoDB API."""
ServiceName = 'DynamoDB'
"""The name of the Service"""
Version = '20111205'
"""DynamoDB API version."""
ThruputError = "ProvisionedThroughputExceededException"
"""The error response returned when provisioned throughput is exceeded"""
SessionExpiredError = 'com.amazon.coral.service#ExpiredTokenException'
"""The error response returned when session token has expired"""
ConditionalCheckFailedError = 'ConditionalCheckFailedException'
"""The error response returned when a conditional check fails"""
ValidationError = 'ValidationException'
"""The error response returned when an item is invalid in some way"""
ResponseError = DynamoDBResponseError
NumberRetries = 10
"""The number of times an error is retried."""
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
debug=0, security_token=None, region=None,
validate_certs=True, validate_checksums=True, profile_name=None):
if not region:
region_name = boto.config.get('DynamoDB', 'region',
self.DefaultRegionName)
for reg in boto.dynamodb.regions():
if reg.name == region_name:
region = reg
break
self.region = region
super(Layer1, self).__init__(self.region.endpoint,
aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
debug=debug, security_token=security_token,
validate_certs=validate_certs,
profile_name=profile_name)
self.throughput_exceeded_events = 0
self._validate_checksums = boto.config.getbool(
'DynamoDB', 'validate_checksums', validate_checksums)
def _get_session_token(self):
self.provider = Provider(self._provider_type)
self._auth_handler.update_provider(self.provider)
def _required_auth_capability(self):
return ['hmac-v4']
def make_request(self, action, body='', object_hook=None):
"""
:raises: ``DynamoDBExpiredTokenError`` if the security token expires.
"""
headers = {'X-Amz-Target': '%s_%s.%s' % (self.ServiceName,
self.Version, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.0',
'Content-Length': str(len(body))}
http_request = self.build_base_http_request('POST', '/', '/',
{}, headers, body, None)
start = time.time()
response = self._mexe(http_request, sender=None,
override_num_retries=self.NumberRetries,
retry_handler=self._retry_handler)
elapsed = (time.time() - start) * 1000
request_id = response.getheader('x-amzn-RequestId')
boto.log.debug('RequestId: %s' % request_id)
boto.perflog.debug('%s: id=%s time=%sms',
headers['X-Amz-Target'], request_id, int(elapsed))
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
return json.loads(response_body, object_hook=object_hook)
def _retry_handler(self, response, i, next_sleep):
status = None
if response.status == 400:
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
data = json.loads(response_body)
            if self.ThruputError in data.get('__type', ''):
self.throughput_exceeded_events += 1
msg = "%s, retry attempt %s" % (self.ThruputError, i)
next_sleep = self._exponential_time(i)
i += 1
status = (msg, i, next_sleep)
if i == self.NumberRetries:
# If this was our last retry attempt, raise
# a specific error saying that the throughput
# was exceeded.
raise dynamodb_exceptions.DynamoDBThroughputExceededError(
response.status, response.reason, data)
            elif self.SessionExpiredError in data.get('__type', ''):
msg = 'Renewing Session Token'
self._get_session_token()
status = (msg, i + self.num_retries - 1, 0)
            elif self.ConditionalCheckFailedError in data.get('__type', ''):
raise dynamodb_exceptions.DynamoDBConditionalCheckFailedError(
response.status, response.reason, data)
            elif self.ValidationError in data.get('__type', ''):
raise dynamodb_exceptions.DynamoDBValidationError(
response.status, response.reason, data)
else:
raise self.ResponseError(response.status, response.reason,
data)
        expected_crc32 = response.getheader('x-amz-crc32')
        if self._validate_checksums and expected_crc32 is not None:
            # Read the body exactly once; a second read() on the response
            # would return an empty string and the checksum would be
            # computed over nothing.
            body = response.read()
            boto.log.debug('Validating crc32 checksum for body: %s',
                           body.decode('utf-8'))
            actual_crc32 = crc32(body) & 0xffffffff
expected_crc32 = int(expected_crc32)
if actual_crc32 != expected_crc32:
msg = ("The calculated checksum %s did not match the expected "
"checksum %s" % (actual_crc32, expected_crc32))
status = (msg, i + 1, self._exponential_time(i))
return status
def _exponential_time(self, i):
if i == 0:
next_sleep = 0
else:
next_sleep = min(0.05 * (2 ** i),
boto.config.get('Boto', 'max_retry_delay', 60))
return next_sleep
def list_tables(self, limit=None, start_table=None):
"""
Returns a dictionary of results. The dictionary contains
a **TableNames** key whose value is a list of the table names.
The dictionary could also contain a **LastEvaluatedTableName**
key whose value would be the last table name returned if
the complete list of table names was not returned. This
value would then be passed as the ``start_table`` parameter on
a subsequent call to this method.
:type limit: int
:param limit: The maximum number of tables to return.
:type start_table: str
:param start_table: The name of the table that starts the
list. If you ran a previous list_tables and not
all results were returned, the response dict would
include a LastEvaluatedTableName attribute. Use
that value here to continue the listing.
"""
data = {}
if limit:
data['Limit'] = limit
if start_table:
data['ExclusiveStartTableName'] = start_table
json_input = json.dumps(data)
return self.make_request('ListTables', json_input)
def describe_table(self, table_name):
"""
Returns information about the table including current
state of the table, primary key schema and when the
table was created.
:type table_name: str
:param table_name: The name of the table to describe.
"""
data = {'TableName': table_name}
json_input = json.dumps(data)
return self.make_request('DescribeTable', json_input)
def create_table(self, table_name, schema, provisioned_throughput):
"""
Add a new table to your account. The table name must be unique
among those associated with the account issuing the request.
This request triggers an asynchronous workflow to begin creating
the table. When the workflow is complete, the state of the
table will be ACTIVE.
:type table_name: str
:param table_name: The name of the table to create.
:type schema: dict
:param schema: A Python version of the KeySchema data structure
as defined by DynamoDB
:type provisioned_throughput: dict
:param provisioned_throughput: A Python version of the
ProvisionedThroughput data structure defined by
DynamoDB.
"""
data = {'TableName': table_name,
'KeySchema': schema,
'ProvisionedThroughput': provisioned_throughput}
json_input = json.dumps(data)
response_dict = self.make_request('CreateTable', json_input)
return response_dict
def update_table(self, table_name, provisioned_throughput):
"""
Updates the provisioned throughput for a given table.
:type table_name: str
:param table_name: The name of the table to update.
:type provisioned_throughput: dict
:param provisioned_throughput: A Python version of the
ProvisionedThroughput data structure defined by
DynamoDB.
"""
data = {'TableName': table_name,
'ProvisionedThroughput': provisioned_throughput}
json_input = json.dumps(data)
return self.make_request('UpdateTable', json_input)
def delete_table(self, table_name):
"""
        Deletes the table and all of its data. After this request
the table will be in the DELETING state until DynamoDB
completes the delete operation.
:type table_name: str
:param table_name: The name of the table to delete.
"""
data = {'TableName': table_name}
json_input = json.dumps(data)
return self.make_request('DeleteTable', json_input)
def get_item(self, table_name, key, attributes_to_get=None,
consistent_read=False, object_hook=None):
"""
Return a set of attributes for an item that matches
the supplied key.
:type table_name: str
:param table_name: The name of the table containing the item.
:type key: dict
:param key: A Python version of the Key data structure
defined by DynamoDB.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type consistent_read: bool
:param consistent_read: If True, a consistent read
request is issued. Otherwise, an eventually consistent
request is issued.
"""
data = {'TableName': table_name,
'Key': key}
if attributes_to_get:
data['AttributesToGet'] = attributes_to_get
if consistent_read:
data['ConsistentRead'] = True
json_input = json.dumps(data)
response = self.make_request('GetItem', json_input,
object_hook=object_hook)
if 'Item' not in response:
raise dynamodb_exceptions.DynamoDBKeyNotFoundError(
"Key does not exist."
)
return response
def batch_get_item(self, request_items, object_hook=None):
"""
        Return a set of attributes for multiple items in
multiple tables using their primary keys.
:type request_items: dict
:param request_items: A Python version of the RequestItems
data structure defined by DynamoDB.
"""
# If the list is empty, return empty response
if not request_items:
return {}
data = {'RequestItems': request_items}
json_input = json.dumps(data)
return self.make_request('BatchGetItem', json_input,
object_hook=object_hook)
def batch_write_item(self, request_items, object_hook=None):
"""
This operation enables you to put or delete several items
across multiple tables in a single API call.
:type request_items: dict
:param request_items: A Python version of the RequestItems
data structure defined by DynamoDB.
"""
data = {'RequestItems': request_items}
json_input = json.dumps(data)
return self.make_request('BatchWriteItem', json_input,
object_hook=object_hook)
def put_item(self, table_name, item,
expected=None, return_values=None,
object_hook=None):
"""
Create a new item or replace an old item with a new
item (including all attributes). If an item already
exists in the specified table with the same primary
key, the new item will completely replace the old item.
You can perform a conditional put by specifying an
expected rule.
:type table_name: str
:param table_name: The name of the table in which to put the item.
:type item: dict
:param item: A Python version of the Item data structure
defined by DynamoDB.
:type expected: dict
:param expected: A Python version of the Expected
data structure defined by DynamoDB.
:type return_values: str
:param return_values: Controls the return of attribute
            name-value pairs before they were changed. Possible
values are: None or 'ALL_OLD'. If 'ALL_OLD' is
specified and the item is overwritten, the content
of the old item is returned.
"""
data = {'TableName': table_name,
'Item': item}
if expected:
data['Expected'] = expected
if return_values:
data['ReturnValues'] = return_values
json_input = json.dumps(data)
return self.make_request('PutItem', json_input,
object_hook=object_hook)
def update_item(self, table_name, key, attribute_updates,
expected=None, return_values=None,
object_hook=None):
"""
Edits an existing item's attributes. You can perform a conditional
update (insert a new attribute name-value pair if it doesn't exist,
or replace an existing name-value pair if it has certain expected
attribute values).
:type table_name: str
:param table_name: The name of the table.
:type key: dict
:param key: A Python version of the Key data structure
defined by DynamoDB which identifies the item to be updated.
:type attribute_updates: dict
:param attribute_updates: A Python version of the AttributeUpdates
data structure defined by DynamoDB.
:type expected: dict
:param expected: A Python version of the Expected
data structure defined by DynamoDB.
:type return_values: str
:param return_values: Controls the return of attribute
            name-value pairs before they were changed. Possible
values are: None or 'ALL_OLD'. If 'ALL_OLD' is
specified and the item is overwritten, the content
of the old item is returned.
"""
data = {'TableName': table_name,
'Key': key,
'AttributeUpdates': attribute_updates}
if expected:
data['Expected'] = expected
if return_values:
data['ReturnValues'] = return_values
json_input = json.dumps(data)
return self.make_request('UpdateItem', json_input,
object_hook=object_hook)
def delete_item(self, table_name, key,
expected=None, return_values=None,
object_hook=None):
"""
        Delete an item and all of its attributes by primary key.
You can perform a conditional delete by specifying an
expected rule.
:type table_name: str
:param table_name: The name of the table containing the item.
:type key: dict
:param key: A Python version of the Key data structure
defined by DynamoDB.
:type expected: dict
:param expected: A Python version of the Expected
data structure defined by DynamoDB.
:type return_values: str
:param return_values: Controls the return of attribute
            name-value pairs before they were changed. Possible
values are: None or 'ALL_OLD'. If 'ALL_OLD' is
specified and the item is overwritten, the content
of the old item is returned.
"""
data = {'TableName': table_name,
'Key': key}
if expected:
data['Expected'] = expected
if return_values:
data['ReturnValues'] = return_values
json_input = json.dumps(data)
return self.make_request('DeleteItem', json_input,
object_hook=object_hook)
def query(self, table_name, hash_key_value, range_key_conditions=None,
attributes_to_get=None, limit=None, consistent_read=False,
scan_index_forward=True, exclusive_start_key=None,
object_hook=None, count=False):
"""
        Perform a query of DynamoDB. This is a low-level method: the
        supplied parameters are assembled into the JSON request body
        and passed to DynamoDB essentially as is.
:type table_name: str
:param table_name: The name of the table to query.
        :type hash_key_value: dict
        :param hash_key_value: A DynamoDB-style HashKeyValue.
:type range_key_conditions: dict
:param range_key_conditions: A Python version of the
RangeKeyConditions data structure.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type limit: int
:param limit: The maximum number of items to return.
:type count: bool
:param count: If True, Amazon DynamoDB returns a total
number of items for the Query operation, even if the
operation has no matching items for the assigned filter.
:type consistent_read: bool
:param consistent_read: If True, a consistent read
request is issued. Otherwise, an eventually consistent
request is issued.
:type scan_index_forward: bool
        :param scan_index_forward: Specifies forward or backward
traversal of the index. Default is forward (True).
:type exclusive_start_key: list or tuple
:param exclusive_start_key: Primary key of the item from
which to continue an earlier query. This would be
provided as the LastEvaluatedKey in that query.
"""
data = {'TableName': table_name,
'HashKeyValue': hash_key_value}
if range_key_conditions:
data['RangeKeyCondition'] = range_key_conditions
if attributes_to_get:
data['AttributesToGet'] = attributes_to_get
if limit:
data['Limit'] = limit
if count:
data['Count'] = True
if consistent_read:
data['ConsistentRead'] = True
if scan_index_forward:
data['ScanIndexForward'] = True
else:
data['ScanIndexForward'] = False
if exclusive_start_key:
data['ExclusiveStartKey'] = exclusive_start_key
json_input = json.dumps(data)
return self.make_request('Query', json_input,
object_hook=object_hook)
def scan(self, table_name, scan_filter=None,
attributes_to_get=None, limit=None,
exclusive_start_key=None, object_hook=None, count=False):
"""
        Perform a scan of DynamoDB. This is a low-level method: the
        supplied parameters are assembled into the JSON request body
        and passed to DynamoDB essentially as is.
:type table_name: str
:param table_name: The name of the table to scan.
:type scan_filter: dict
:param scan_filter: A Python version of the
ScanFilter data structure.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type limit: int
:param limit: The maximum number of items to evaluate.
:type count: bool
:param count: If True, Amazon DynamoDB returns a total
number of items for the Scan operation, even if the
operation has no matching items for the assigned filter.
:type exclusive_start_key: list or tuple
:param exclusive_start_key: Primary key of the item from
which to continue an earlier query. This would be
provided as the LastEvaluatedKey in that query.
"""
data = {'TableName': table_name}
if scan_filter:
data['ScanFilter'] = scan_filter
if attributes_to_get:
data['AttributesToGet'] = attributes_to_get
if limit:
data['Limit'] = limit
if count:
data['Count'] = True
if exclusive_start_key:
data['ExclusiveStartKey'] = exclusive_start_key
json_input = json.dumps(data)
return self.make_request('Scan', json_input, object_hook=object_hook)
# ==== end of boto/dynamodb/layer1.py ====
# ==== boto/dynamodb/table.py ====
from boto.dynamodb.batch import BatchList
from boto.dynamodb.schema import Schema
from boto.dynamodb.item import Item
from boto.dynamodb import exceptions as dynamodb_exceptions
import time
class TableBatchGenerator(object):
"""
A low-level generator used to page through results from
batch_get_item operations.
:ivar consumed_units: An integer that holds the number of
ConsumedCapacityUnits accumulated thus far for this
generator.
"""
def __init__(self, table, keys, attributes_to_get=None,
consistent_read=False):
self.table = table
self.keys = keys
self.consumed_units = 0
self.attributes_to_get = attributes_to_get
self.consistent_read = consistent_read
def _queue_unprocessed(self, res):
if u'UnprocessedKeys' not in res:
return
if self.table.name not in res[u'UnprocessedKeys']:
return
keys = res[u'UnprocessedKeys'][self.table.name][u'Keys']
for key in keys:
h = key[u'HashKeyElement']
r = key[u'RangeKeyElement'] if u'RangeKeyElement' in key else None
self.keys.append((h, r))
def __iter__(self):
while self.keys:
# Build the next batch
batch = BatchList(self.table.layer2)
batch.add_batch(self.table, self.keys[:100],
self.attributes_to_get)
res = batch.submit()
# parse the results
if self.table.name not in res[u'Responses']:
continue
self.consumed_units += res[u'Responses'][self.table.name][u'ConsumedCapacityUnits']
for elem in res[u'Responses'][self.table.name][u'Items']:
yield elem
            # re-queue unprocessed keys
self.keys = self.keys[100:]
self._queue_unprocessed(res)
class Table(object):
"""
An Amazon DynamoDB table.
:ivar name: The name of the table.
:ivar create_time: The date and time that the table was created.
:ivar status: The current status of the table. One of:
'ACTIVE', 'UPDATING', 'DELETING'.
:ivar schema: A :class:`boto.dynamodb.schema.Schema` object representing
the schema defined for the table.
:ivar item_count: The number of items in the table. This value is
set only when the Table object is created or refreshed and
may not reflect the actual count.
:ivar size_bytes: Total size of the specified table, in bytes.
Amazon DynamoDB updates this value approximately every six hours.
Recent changes might not be reflected in this value.
:ivar read_units: The ReadCapacityUnits of the tables
Provisioned Throughput.
:ivar write_units: The WriteCapacityUnits of the tables
Provisioned Throughput.
:ivar schema: The Schema object associated with the table.
"""
def __init__(self, layer2, response):
"""
:type layer2: :class:`boto.dynamodb.layer2.Layer2`
:param layer2: A `Layer2` api object.
:type response: dict
:param response: The output of
`boto.dynamodb.layer1.Layer1.describe_table`.
"""
self.layer2 = layer2
self._dict = {}
self.update_from_response(response)
@classmethod
def create_from_schema(cls, layer2, name, schema):
"""Create a Table object.
If you know the name and schema of your table, you can
create a ``Table`` object without having to make any
API calls (normally an API call is made to retrieve
the schema of a table).
Example usage::
table = Table.create_from_schema(
boto.connect_dynamodb(),
'tablename',
Schema.create(hash_key=('keyname', 'N')))
:type layer2: :class:`boto.dynamodb.layer2.Layer2`
:param layer2: A ``Layer2`` api object.
:type name: str
:param name: The name of the table.
:type schema: :class:`boto.dynamodb.schema.Schema`
:param schema: The schema associated with the table.
:rtype: :class:`boto.dynamodb.table.Table`
:return: A Table object representing the table.
"""
table = cls(layer2, {'Table': {'TableName': name}})
table._schema = schema
return table
def __repr__(self):
return 'Table(%s)' % self.name
@property
def name(self):
return self._dict['TableName']
@property
def create_time(self):
return self._dict.get('CreationDateTime', None)
@property
def status(self):
return self._dict.get('TableStatus', None)
@property
def item_count(self):
return self._dict.get('ItemCount', 0)
@property
def size_bytes(self):
return self._dict.get('TableSizeBytes', 0)
@property
def schema(self):
return self._schema
@property
def read_units(self):
try:
return self._dict['ProvisionedThroughput']['ReadCapacityUnits']
except KeyError:
return None
@property
def write_units(self):
try:
return self._dict['ProvisionedThroughput']['WriteCapacityUnits']
except KeyError:
return None
def update_from_response(self, response):
"""
Update the state of the Table object based on the response
data received from Amazon DynamoDB.
"""
# 'Table' is from a describe_table call.
if 'Table' in response:
self._dict.update(response['Table'])
# 'TableDescription' is from a create_table call.
elif 'TableDescription' in response:
self._dict.update(response['TableDescription'])
if 'KeySchema' in self._dict:
self._schema = Schema(self._dict['KeySchema'])
def refresh(self, wait_for_active=False, retry_seconds=5):
"""
Refresh all of the fields of the Table object by calling
the underlying DescribeTable request.
:type wait_for_active: bool
:param wait_for_active: If True, this command will not return
until the table status, as returned from Amazon DynamoDB, is
'ACTIVE'.
:type retry_seconds: int
:param retry_seconds: If wait_for_active is True, this
parameter controls the number of seconds of delay between
calls to update_table in Amazon DynamoDB. Default is 5 seconds.
"""
done = False
while not done:
response = self.layer2.describe_table(self.name)
self.update_from_response(response)
if wait_for_active:
if self.status == 'ACTIVE':
done = True
else:
time.sleep(retry_seconds)
else:
done = True
def update_throughput(self, read_units, write_units):
"""
Update the ProvisionedThroughput for the Amazon DynamoDB Table.
:type read_units: int
:param read_units: The new value for ReadCapacityUnits.
:type write_units: int
:param write_units: The new value for WriteCapacityUnits.
"""
self.layer2.update_throughput(self, read_units, write_units)
def delete(self):
"""
Delete this table and all items in it. After calling this
        the Table object's status attribute will be set to 'DELETING'.
"""
self.layer2.delete_table(self)
def get_item(self, hash_key, range_key=None,
attributes_to_get=None, consistent_read=False,
item_class=Item):
"""
Retrieve an existing item from the table.
:type hash_key: int|long|float|str|unicode|Binary
:param hash_key: The HashKey of the requested item. The
type of the value must match the type defined in the
schema for the table.
:type range_key: int|long|float|str|unicode|Binary
:param range_key: The optional RangeKey of the requested item.
The type of the value must match the type defined in the
schema for the table.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type consistent_read: bool
:param consistent_read: If True, a consistent read
request is issued. Otherwise, an eventually consistent
request is issued.
:type item_class: Class
:param item_class: Allows you to override the class used
to generate the items. This should be a subclass of
:class:`boto.dynamodb.item.Item`
"""
return self.layer2.get_item(self, hash_key, range_key,
attributes_to_get, consistent_read,
item_class)
lookup = get_item
def has_item(self, hash_key, range_key=None, consistent_read=False):
"""
Checks the table to see if the Item with the specified ``hash_key``
exists. This may save a tiny bit of time/bandwidth over a
straight :py:meth:`get_item` if you have no intention to touch
the data that is returned, since this method specifically tells
Amazon not to return anything but the Item's key.
:type hash_key: int|long|float|str|unicode|Binary
:param hash_key: The HashKey of the requested item. The
type of the value must match the type defined in the
schema for the table.
:type range_key: int|long|float|str|unicode|Binary
:param range_key: The optional RangeKey of the requested item.
The type of the value must match the type defined in the
schema for the table.
:type consistent_read: bool
:param consistent_read: If True, a consistent read
request is issued. Otherwise, an eventually consistent
request is issued.
:rtype: bool
:returns: ``True`` if the Item exists, ``False`` if not.
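        Example usage (the key value is illustrative)::

            if table.has_item('user-123'):
                print('already stored')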
"""
try:
# Attempt to get the key. If it can't be found, it'll raise
# an exception.
self.get_item(hash_key, range_key=range_key,
# This minimizes the size of the response body.
attributes_to_get=[hash_key],
consistent_read=consistent_read)
except dynamodb_exceptions.DynamoDBKeyNotFoundError:
# Key doesn't exist.
return False
return True
def new_item(self, hash_key=None, range_key=None, attrs=None,
item_class=Item):
"""
        Return a new, unsaved Item which can later be PUT to
Amazon DynamoDB.
This method has explicit (but optional) parameters for
the hash_key and range_key values of the item. You can use
these explicit parameters when calling the method, such as::
>>> my_item = my_table.new_item(hash_key='a', range_key=1,
attrs={'key1': 'val1', 'key2': 'val2'})
>>> my_item
{u'bar': 1, u'foo': 'a', 'key1': 'val1', 'key2': 'val2'}
Or, if you prefer, you can simply put the hash_key and range_key
in the attrs dictionary itself, like this::
>>> attrs = {'foo': 'a', 'bar': 1, 'key1': 'val1', 'key2': 'val2'}
>>> my_item = my_table.new_item(attrs=attrs)
>>> my_item
{u'bar': 1, u'foo': 'a', 'key1': 'val1', 'key2': 'val2'}
The effect is the same.
.. note:
The explicit parameters take priority over the values in
the attrs dict. So, if you have a hash_key or range_key
in the attrs dict and you also supply either or both using
the explicit parameters, the values in the attrs will be
ignored.
:type hash_key: int|long|float|str|unicode|Binary
:param hash_key: The HashKey of the new item. The
type of the value must match the type defined in the
schema for the table.
:type range_key: int|long|float|str|unicode|Binary
:param range_key: The optional RangeKey of the new item.
The type of the value must match the type defined in the
schema for the table.
:type attrs: dict
:param attrs: A dictionary of key value pairs used to
populate the new item.
:type item_class: Class
:param item_class: Allows you to override the class used
to generate the items. This should be a subclass of
:class:`boto.dynamodb.item.Item`
"""
return item_class(self, hash_key, range_key, attrs)
def query(self, hash_key, *args, **kw):
"""
Perform a query on the table.
:type hash_key: int|long|float|str|unicode|Binary
:param hash_key: The HashKey of the requested item. The
type of the value must match the type defined in the
schema for the table.
:type range_key_condition: :class:`boto.dynamodb.condition.Condition`
:param range_key_condition: A Condition object.
Condition object can be one of the following types:
EQ|LE|LT|GE|GT|BEGINS_WITH|BETWEEN
The only condition which expects or will accept two
values is 'BETWEEN', otherwise a single value should
be passed to the Condition constructor.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type request_limit: int
:param request_limit: The maximum number of items to retrieve
from Amazon DynamoDB on each request. You may want to set
a specific request_limit based on the provisioned throughput
of your table. The default behavior is to retrieve as many
results as possible per request.
:type max_results: int
:param max_results: The maximum number of results that will
be retrieved from Amazon DynamoDB in total. For example,
if you only wanted to see the first 100 results from the
query, regardless of how many were actually available, you
could set max_results to 100 and the generator returned
            from the query method will only yield 100 results max.
:type consistent_read: bool
:param consistent_read: If True, a consistent read
request is issued. Otherwise, an eventually consistent
request is issued.
:type scan_index_forward: bool
        :param scan_index_forward: Specifies forward or backward
traversal of the index. Default is forward (True).
:type exclusive_start_key: list or tuple
:param exclusive_start_key: Primary key of the item from
which to continue an earlier query. This would be
provided as the LastEvaluatedKey in that query.
:type count: bool
:param count: If True, Amazon DynamoDB returns a total
number of items for the Query operation, even if the
operation has no matching items for the assigned filter.
If count is True, the actual items are not returned and
the count is accessible as the ``count`` attribute of
the returned object.
:type item_class: Class
:param item_class: Allows you to override the class used
to generate the items. This should be a subclass of
:class:`boto.dynamodb.item.Item`
"""
return self.layer2.query(self, hash_key, *args, **kw)
def scan(self, *args, **kw):
"""
        Scan through this table. This is a very long and expensive
        operation, and should be avoided if at all possible.
:type scan_filter: A dict
:param scan_filter: A dictionary where the key is the
attribute name and the value is a
:class:`boto.dynamodb.condition.Condition` object.
Valid Condition objects include:
* EQ - equal (1)
* NE - not equal (1)
* LE - less than or equal (1)
* LT - less than (1)
* GE - greater than or equal (1)
* GT - greater than (1)
* NOT_NULL - attribute exists (0, use None)
* NULL - attribute does not exist (0, use None)
* CONTAINS - substring or value in list (1)
* NOT_CONTAINS - absence of substring or value in list (1)
* BEGINS_WITH - substring prefix (1)
* IN - exact match in list (N)
* BETWEEN - >= first value, <= second value (2)
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type request_limit: int
:param request_limit: The maximum number of items to retrieve
from Amazon DynamoDB on each request. You may want to set
a specific request_limit based on the provisioned throughput
of your table. The default behavior is to retrieve as many
results as possible per request.
:type max_results: int
:param max_results: The maximum number of results that will
be retrieved from Amazon DynamoDB in total. For example,
if you only wanted to see the first 100 results from the
query, regardless of how many were actually available, you
could set max_results to 100 and the generator returned
            from the query method will only yield 100 results max.
:type count: bool
:param count: If True, Amazon DynamoDB returns a total
number of items for the Scan operation, even if the
operation has no matching items for the assigned filter.
If count is True, the actual items are not returned and
the count is accessible as the ``count`` attribute of
the returned object.
:type exclusive_start_key: list or tuple
:param exclusive_start_key: Primary key of the item from
which to continue an earlier query. This would be
provided as the LastEvaluatedKey in that query.
:type item_class: Class
:param item_class: Allows you to override the class used
to generate the items. This should be a subclass of
:class:`boto.dynamodb.item.Item`
:return: A TableGenerator (generator) object which will iterate
over all results
:rtype: :class:`boto.dynamodb.layer2.TableGenerator`
"""
return self.layer2.scan(self, *args, **kw)
def batch_get_item(self, keys, attributes_to_get=None):
"""
        Return a set of attributes for multiple items from a single table
        using their primary keys. This abstraction removes the
        100-items-per-batch limitation and handles the "UnprocessedKeys"
        logic for you.
:type keys: list
:param keys: A list of scalar or tuple values. Each element in the
list represents one Item to retrieve. If the schema for the
table has both a HashKey and a RangeKey, each element in the
list should be a tuple consisting of (hash_key, range_key). If
the schema for the table contains only a HashKey, each element
in the list should be a scalar value of the appropriate type
for the table schema. NOTE: The maximum number of items that
can be retrieved for a single operation is 100. Also, the
number of items retrieved is constrained by a 1 MB size limit.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:return: A TableBatchGenerator (generator) object which will
iterate over all results
:rtype: :class:`boto.dynamodb.table.TableBatchGenerator`
"""
return TableBatchGenerator(self, keys, attributes_to_get)
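# A minimal, hedged demonstration of the batch API above. connect_dynamodb
# is boto's standard entry point; the table name and keys are hypothetical
# and assume a hash+range key schema.
if __name__ == '__main__':
    import boto
    conn = boto.connect_dynamodb()
    table = conn.get_table('messages')
    keys = [('user-1', '2014-01-01'), ('user-2', '2014-01-02')]
    for item in table.batch_get_item(keys, attributes_to_get=['id', 'body']):
        print(item)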
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/dynamodb/table.py
class HealthCheck(object):
"""An individual health check"""
POSTXMLBody = """
<HealthCheckConfig>
%(ip_addr_part)s
<Port>%(port)s</Port>
<Type>%(type)s</Type>
<ResourcePath>%(resource_path)s</ResourcePath>
%(fqdn_part)s
%(string_match_part)s
%(request_interval)s
<FailureThreshold>%(failure_threshold)s</FailureThreshold>
</HealthCheckConfig>
"""
XMLIpAddrPart = """<IPAddress>%(ip_addr)s</IPAddress>"""
XMLFQDNPart = """<FullyQualifiedDomainName>%(fqdn)s</FullyQualifiedDomainName>"""
XMLStringMatchPart = """<SearchString>%(string_match)s</SearchString>"""
XMLRequestIntervalPart = """<RequestInterval>%(request_interval)d</RequestInterval>"""
valid_request_intervals = (10, 30)
def __init__(self, ip_addr, port, hc_type, resource_path, fqdn=None, string_match=None, request_interval=30, failure_threshold=3):
"""
HealthCheck object
:type ip_addr: str
:param ip_addr: Optional IP Address
:type port: int
:param port: Port to check
:type hc_type: str
:param hc_type: One of HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH | TCP
:type resource_path: str
:param resource_path: Path to check
:type fqdn: str
:param fqdn: domain name of the endpoint to check
:type string_match: str
:param string_match: if hc_type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string to search for in the response body from the specified resource
:type request_interval: int
:param request_interval: The number of seconds between the time that Amazon Route 53 gets a response from your endpoint and the time that it sends the next health-check request.
:type failure_threshold: int
:param failure_threshold: The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa.
"""
self.ip_addr = ip_addr
self.port = port
self.hc_type = hc_type
self.resource_path = resource_path
self.fqdn = fqdn
self.string_match = string_match
self.failure_threshold = failure_threshold
if request_interval in self.valid_request_intervals:
self.request_interval = request_interval
else:
raise AttributeError(
"Valid values for request_interval are: %s" %
",".join(str(i) for i in self.valid_request_intervals))
if failure_threshold < 1 or failure_threshold > 10:
raise AttributeError(
'Valid values for failure_threshold are 1 - 10.')
def to_xml(self):
params = {
'ip_addr_part': '',
'port': self.port,
'type': self.hc_type,
'resource_path': self.resource_path,
'fqdn_part': "",
'string_match_part': "",
'request_interval': (self.XMLRequestIntervalPart %
{'request_interval': self.request_interval}),
'failure_threshold': self.failure_threshold,
}
if self.fqdn is not None:
params['fqdn_part'] = self.XMLFQDNPart % {'fqdn': self.fqdn}
if self.ip_addr:
params['ip_addr_part'] = self.XMLIpAddrPart % {'ip_addr': self.ip_addr}
if self.string_match is not None:
params['string_match_part'] = self.XMLStringMatchPart % {'string_match': self.string_match}
return self.POSTXMLBody % params
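# A hedged, offline demonstration of the class above: building a check and
# rendering its XML requires no AWS call. The endpoint values are
# illustrative; registering the check would go through
# boto.connect_route53().create_health_check(hc).
if __name__ == '__main__':
    hc = HealthCheck(ip_addr='192.0.2.10', port=80, hc_type='HTTP',
                     resource_path='/health', fqdn='example.com',
                     request_interval=30, failure_threshold=3)
    print(hc.to_xml())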
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/route53/healthcheck.py
RECORD_TYPES = ['A', 'AAAA', 'TXT', 'CNAME', 'MX', 'PTR', 'SRV', 'SPF']
from boto.resultset import ResultSet
class ResourceRecordSets(ResultSet):
"""
A list of resource records.
:ivar hosted_zone_id: The ID of the hosted zone.
:ivar comment: A comment that will be stored with the change.
:ivar changes: A list of changes.
"""
ChangeResourceRecordSetsBody = """<?xml version="1.0" encoding="UTF-8"?>
<ChangeResourceRecordSetsRequest xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
<ChangeBatch>
<Comment>%(comment)s</Comment>
<Changes>%(changes)s</Changes>
</ChangeBatch>
</ChangeResourceRecordSetsRequest>"""
ChangeXML = """<Change>
<Action>%(action)s</Action>
%(record)s
</Change>"""
def __init__(self, connection=None, hosted_zone_id=None, comment=None):
self.connection = connection
self.hosted_zone_id = hosted_zone_id
self.comment = comment
self.changes = []
self.next_record_name = None
self.next_record_type = None
self.next_record_identifier = None
super(ResourceRecordSets, self).__init__([('ResourceRecordSet', Record)])
def __repr__(self):
if self.changes:
record_list = ','.join([c.__repr__() for c in self.changes])
else:
record_list = ','.join([record.__repr__() for record in self])
        return '<ResourceRecordSets:%s [%s]>' % (self.hosted_zone_id,
                                                 record_list)
def add_change(self, action, name, type, ttl=600,
alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
weight=None, region=None, alias_evaluate_target_health=None,
health_check=None, failover=None):
"""
Add a change request to the set.
:type action: str
:param action: The action to perform ('CREATE'|'DELETE'|'UPSERT')
:type name: str
:param name: The name of the domain you want to perform the action on.
:type type: str
:param type: The DNS record type. Valid values are:
* A
* AAAA
* CNAME
* MX
* NS
* PTR
* SOA
* SPF
* SRV
* TXT
:type ttl: int
:param ttl: The resource record cache time to live (TTL), in seconds.
:type alias_hosted_zone_id: str
        :param alias_hosted_zone_id: *Alias resource record sets only* The value
            of the hosted zone ID, CanonicalHostedZoneNameId, for
            the LoadBalancer.
        :type alias_dns_name: str
        :param alias_dns_name: *Alias resource record sets only*
            Information about the domain to which you are redirecting traffic.
:type identifier: str
:param identifier: *Weighted and latency-based resource record sets
only* An identifier that differentiates among multiple resource
record sets that have the same combination of DNS name and type.
:type weight: int
:param weight: *Weighted resource record sets only* Among resource
record sets that have the same combination of DNS name and type,
a value that determines what portion of traffic for the current
resource record set is routed to the associated location
:type region: str
:param region: *Latency-based resource record sets only* Among resource
record sets that have the same combination of DNS name and type,
a value that determines which region this should be associated with
for the latency-based routing
:type alias_evaluate_target_health: bool
:param alias_evaluate_target_health: *Required for alias resource record
sets* Indicates whether this Resource Record Set should respect the
health status of any health checks associated with the ALIAS target
            record to which it is linked.
:type health_check: str
:param health_check: Health check to associate with this record
:type failover: str
:param failover: *Failover resource record sets only* Whether this is the
primary or secondary resource record set.
"""
change = Record(name, type, ttl,
alias_hosted_zone_id=alias_hosted_zone_id,
alias_dns_name=alias_dns_name, identifier=identifier,
weight=weight, region=region,
alias_evaluate_target_health=alias_evaluate_target_health,
health_check=health_check, failover=failover)
self.changes.append([action, change])
return change
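    # A hedged usage sketch (the zone ID and values are illustrative):
    #
    #     import boto
    #     conn = boto.connect_route53()
    #     rrsets = ResourceRecordSets(conn, 'Z123EXAMPLE', comment='add host')
    #     change = rrsets.add_change('CREATE', 'www.example.com.', 'A', ttl=300)
    #     change.add_value('192.0.2.20')
    #     rrsets.commit()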
def add_change_record(self, action, change):
"""Add an existing record to a change set with the specified action"""
self.changes.append([action, change])
return
def to_xml(self):
"""Convert this ResourceRecordSet into XML
to be saved via the ChangeResourceRecordSetsRequest"""
changesXML = ""
for change in self.changes:
changeParams = {"action": change[0], "record": change[1].to_xml()}
changesXML += self.ChangeXML % changeParams
params = {"comment": self.comment, "changes": changesXML}
return self.ChangeResourceRecordSetsBody % params
def commit(self):
"""Commit this change"""
if not self.connection:
import boto
self.connection = boto.connect_route53()
return self.connection.change_rrsets(self.hosted_zone_id, self.to_xml())
def endElement(self, name, value, connection):
"""Overwritten to also add the NextRecordName,
NextRecordType and NextRecordIdentifier to the base object"""
if name == 'NextRecordName':
self.next_record_name = value
elif name == 'NextRecordType':
self.next_record_type = value
elif name == 'NextRecordIdentifier':
self.next_record_identifier = value
else:
return super(ResourceRecordSets, self).endElement(name, value, connection)
def __iter__(self):
"""Override the next function to support paging"""
results = super(ResourceRecordSets, self).__iter__()
truncated = self.is_truncated
while results:
for obj in results:
yield obj
if self.is_truncated:
self.is_truncated = False
results = self.connection.get_all_rrsets(self.hosted_zone_id, name=self.next_record_name,
type=self.next_record_type,
identifier=self.next_record_identifier)
else:
results = None
self.is_truncated = truncated
class Record(object):
"""An individual ResourceRecordSet"""
HealthCheckBody = """<HealthCheckId>%s</HealthCheckId>"""
XMLBody = """<ResourceRecordSet>
<Name>%(name)s</Name>
<Type>%(type)s</Type>
%(weight)s
%(body)s
%(health_check)s
</ResourceRecordSet>"""
WRRBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Weight>%(weight)s</Weight>
"""
RRRBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Region>%(region)s</Region>
"""
FailoverBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Failover>%(failover)s</Failover>
"""
ResourceRecordsBody = """
<TTL>%(ttl)s</TTL>
<ResourceRecords>
%(records)s
</ResourceRecords>"""
ResourceRecordBody = """<ResourceRecord>
<Value>%s</Value>
</ResourceRecord>"""
AliasBody = """<AliasTarget>
<HostedZoneId>%(hosted_zone_id)s</HostedZoneId>
<DNSName>%(dns_name)s</DNSName>
%(eval_target_health)s
</AliasTarget>"""
EvaluateTargetHealth = """<EvaluateTargetHealth>%s</EvaluateTargetHealth>"""
def __init__(self, name=None, type=None, ttl=600, resource_records=None,
alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
weight=None, region=None, alias_evaluate_target_health=None,
health_check=None, failover=None):
self.name = name
self.type = type
self.ttl = ttl
if resource_records is None:
resource_records = []
self.resource_records = resource_records
self.alias_hosted_zone_id = alias_hosted_zone_id
self.alias_dns_name = alias_dns_name
self.identifier = identifier
self.weight = weight
self.region = region
self.alias_evaluate_target_health = alias_evaluate_target_health
self.health_check = health_check
self.failover = failover
def __repr__(self):
return '<Record:%s:%s:%s>' % (self.name, self.type, self.to_print())
def add_value(self, value):
"""Add a resource record value"""
self.resource_records.append(value)
def set_alias(self, alias_hosted_zone_id, alias_dns_name,
alias_evaluate_target_health=False):
"""Make this an alias resource record set"""
self.alias_hosted_zone_id = alias_hosted_zone_id
self.alias_dns_name = alias_dns_name
self.alias_evaluate_target_health = alias_evaluate_target_health
def to_xml(self):
"""Spit this resource record set out as XML"""
if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
# Use alias
if self.alias_evaluate_target_health is not None:
eval_target_health = self.EvaluateTargetHealth % ('true' if self.alias_evaluate_target_health else 'false')
else:
eval_target_health = ""
body = self.AliasBody % {"hosted_zone_id": self.alias_hosted_zone_id,
"dns_name": self.alias_dns_name,
"eval_target_health": eval_target_health}
else:
# Use resource record(s)
records = ""
for r in self.resource_records:
records += self.ResourceRecordBody % r
body = self.ResourceRecordsBody % {
"ttl": self.ttl,
"records": records,
}
weight = ""
if self.identifier is not None and self.weight is not None:
weight = self.WRRBody % {"identifier": self.identifier,
"weight": self.weight}
elif self.identifier is not None and self.region is not None:
weight = self.RRRBody % {"identifier": self.identifier,
"region": self.region}
elif self.identifier is not None and self.failover is not None:
weight = self.FailoverBody % {"identifier": self.identifier,
"failover": self.failover}
health_check = ""
if self.health_check is not None:
health_check = self.HealthCheckBody % (self.health_check)
params = {
"name": self.name,
"type": self.type,
"weight": weight,
"body": body,
"health_check": health_check
}
return self.XMLBody % params
def to_print(self):
rr = ""
if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
# Show alias
rr = 'ALIAS ' + self.alias_hosted_zone_id + ' ' + self.alias_dns_name
if self.alias_evaluate_target_health is not None:
rr += ' (EvalTarget %s)' % self.alias_evaluate_target_health
else:
# Show resource record(s)
rr = ",".join(self.resource_records)
if self.identifier is not None and self.weight is not None:
rr += ' (WRR id=%s, w=%s)' % (self.identifier, self.weight)
elif self.identifier is not None and self.region is not None:
rr += ' (LBR id=%s, region=%s)' % (self.identifier, self.region)
elif self.identifier is not None and self.failover is not None:
rr += ' (FAILOVER id=%s, failover=%s)' % (self.identifier, self.failover)
return rr
def endElement(self, name, value, connection):
if name == 'Name':
self.name = value
elif name == 'Type':
self.type = value
elif name == 'TTL':
self.ttl = value
elif name == 'Value':
self.resource_records.append(value)
elif name == 'HostedZoneId':
self.alias_hosted_zone_id = value
elif name == 'DNSName':
self.alias_dns_name = value
elif name == 'SetIdentifier':
self.identifier = value
elif name == 'EvaluateTargetHealth':
self.alias_evaluate_target_health = value.lower() == 'true'
elif name == 'Weight':
self.weight = value
elif name == 'Region':
self.region = value
elif name == 'Failover':
self.failover = value
elif name == 'HealthCheckId':
self.health_check = value
def startElement(self, name, attrs, connection):
return None
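# A hedged, offline demonstration of the Record class above: an alias
# record can be built and rendered without any AWS call. The hosted zone
# ID and DNS name are illustrative.
if __name__ == '__main__':
    rec = Record(name='www.example.com.', type='A')
    rec.set_alias(alias_hosted_zone_id='Z3EXAMPLE',
                  alias_dns_name='my-elb.us-east-1.elb.amazonaws.com.',
                  alias_evaluate_target_health=False)
    print(rec.to_xml())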
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/route53/record.py
# this is here for backward compatibility
# originally, the Route53Connection class was defined here
from boto.route53.connection import Route53Connection
from boto.regioninfo import RegionInfo, get_regions
from boto.regioninfo import connect
class Route53RegionInfo(RegionInfo):
def connect(self, **kw_params):
"""
        Connect to this Region's endpoint. Returns a connection
object pointing to the endpoint associated with this region.
You may pass any of the arguments accepted by the connection
class's constructor as keyword arguments and they will be
passed along to the connection object.
:rtype: Connection object
        :return: The connection to this region's endpoint
"""
if self.connection_cls:
return self.connection_cls(host=self.endpoint, **kw_params)
def regions():
"""
Get all available regions for the Route53 service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo` instances
"""
regions = get_regions(
'route53',
region_cls=Route53RegionInfo,
connection_cls=Route53Connection
)
# For historical reasons, we had a "universal" endpoint as well.
regions.append(
Route53RegionInfo(
name='universal',
endpoint='route53.amazonaws.com',
connection_cls=Route53Connection
)
)
return regions
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.route53.connection.Route53Connection`.
:type: str
:param region_name: The name of the region to connect to.
:rtype: :class:`boto.route53.connection.Route53Connection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
if region_name == 'universal':
region = Route53RegionInfo(
name='universal',
endpoint='route53.amazonaws.com',
connection_cls=Route53Connection
)
return region.connect(**kw_params)
return connect('route53', region_name, region_cls=Route53RegionInfo,
connection_cls=Route53Connection, **kw_params)
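# A hedged usage sketch: Route53 is a global service, so the 'universal'
# region is the usual entry point. Credentials come from the standard boto
# configuration; get_zones lists the account's hosted zones.
if __name__ == '__main__':
    conn = connect_to_region('universal')
    for zone in conn.get_zones():
        print(zone)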
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/route53/__init__.py
default_ttl = 60
import copy
from boto.exception import TooManyRecordsException
from boto.route53.record import ResourceRecordSets
from boto.route53.status import Status
class Zone(object):
"""
A Route53 Zone.
:ivar route53connection: A :class:`boto.route53.connection.Route53Connection` connection
:ivar id: The ID of the hosted zone
"""
def __init__(self, route53connection, zone_dict):
self.route53connection = route53connection
for key in zone_dict:
if key == 'Id':
self.id = zone_dict['Id'].replace('/hostedzone/', '')
else:
self.__setattr__(key.lower(), zone_dict[key])
def __repr__(self):
return '<Zone:%s>' % self.name
def _commit(self, changes):
"""
Commit a set of changes and return the ChangeInfo portion of
the response.
:type changes: ResourceRecordSets
:param changes: changes to be committed
"""
response = changes.commit()
return response['ChangeResourceRecordSetsResponse']['ChangeInfo']
def _new_record(self, changes, resource_type, name, value, ttl, identifier,
comment=""):
"""
Add a CREATE change record to an existing ResourceRecordSets
:type changes: ResourceRecordSets
:param changes: change set to append to
:type name: str
:param name: The name of the resource record you want to
perform the action on.
:type resource_type: str
:param resource_type: The DNS record type
:param value: Appropriate value for resource_type
:type ttl: int
:param ttl: The resource record cache time to live (TTL), in seconds.
:type identifier: tuple
:param identifier: A tuple for setting WRR or LBR attributes. Valid
forms are:
* (str, int): WRR record [e.g. ('foo',10)]
            * (str, str): LBR record [e.g. ('foo','us-east-1')]
:type comment: str
:param comment: A comment that will be stored with the change.
"""
weight = None
region = None
if identifier is not None:
            try:
                # A numeric second element means a WRR (weighted) identifier.
                int(identifier[1])
                weight = identifier[1]
                identifier = identifier[0]
            except (ValueError, TypeError):
                # Otherwise it is an LBR (latency-based) identifier.
                region = identifier[1]
                identifier = identifier[0]
change = changes.add_change("CREATE", name, resource_type, ttl,
identifier=identifier, weight=weight,
region=region)
if type(value) in [list, tuple, set]:
for record in value:
change.add_value(record)
else:
change.add_value(value)
def add_record(self, resource_type, name, value, ttl=60, identifier=None,
comment=""):
"""
Add a new record to this Zone. See _new_record for parameter
documentation. Returns a Status object.
"""
changes = ResourceRecordSets(self.route53connection, self.id, comment)
self._new_record(changes, resource_type, name, value, ttl, identifier,
comment)
return Status(self.route53connection, self._commit(changes))
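    # A hedged usage sketch (zone lookup and values are illustrative; the
    # identifier tuple follows the (str, int) WRR form documented above):
    #
    #     import boto
    #     conn = boto.connect_route53()
    #     zone = conn.get_zone('example.com.')
    #     status = zone.add_record('A', 'www.example.com.', '192.0.2.30',
    #                              ttl=300, identifier=('primary', 10))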
def update_record(self, old_record, new_value, new_ttl=None,
new_identifier=None, comment=""):
"""
Update an existing record in this Zone. Returns a Status object.
:type old_record: ResourceRecord
:param old_record: A ResourceRecord (e.g. returned by find_records)
See _new_record for additional parameter documentation.
"""
new_ttl = new_ttl or default_ttl
record = copy.copy(old_record)
changes = ResourceRecordSets(self.route53connection, self.id, comment)
changes.add_change_record("DELETE", record)
self._new_record(changes, record.type, record.name,
new_value, new_ttl, new_identifier, comment)
return Status(self.route53connection, self._commit(changes))
def delete_record(self, record, comment=""):
"""
Delete one or more records from this Zone. Returns a Status object.
:param record: A ResourceRecord (e.g. returned by
find_records) or list, tuple, or set of ResourceRecords.
:type comment: str
:param comment: A comment that will be stored with the change.
"""
changes = ResourceRecordSets(self.route53connection, self.id, comment)
if type(record) in [list, tuple, set]:
for r in record:
changes.add_change_record("DELETE", r)
else:
changes.add_change_record("DELETE", record)
return Status(self.route53connection, self._commit(changes))
def add_cname(self, name, value, ttl=None, identifier=None, comment=""):
"""
Add a new CNAME record to this Zone. See _new_record for
parameter documentation. Returns a Status object.
"""
ttl = ttl or default_ttl
name = self.route53connection._make_qualified(name)
value = self.route53connection._make_qualified(value)
return self.add_record(resource_type='CNAME',
name=name,
value=value,
ttl=ttl,
identifier=identifier,
comment=comment)
def add_a(self, name, value, ttl=None, identifier=None, comment=""):
"""
Add a new A record to this Zone. See _new_record for
parameter documentation. Returns a Status object.
"""
ttl = ttl or default_ttl
name = self.route53connection._make_qualified(name)
return self.add_record(resource_type='A',
name=name,
value=value,
ttl=ttl,
identifier=identifier,
comment=comment)
def add_mx(self, name, records, ttl=None, identifier=None, comment=""):
"""
Add a new MX record to this Zone. See _new_record for
parameter documentation. Returns a Status object.
"""
ttl = ttl or default_ttl
records = self.route53connection._make_qualified(records)
return self.add_record(resource_type='MX',
name=name,
value=records,
ttl=ttl,
identifier=identifier,
comment=comment)
def find_records(self, name, type, desired=1, all=False, identifier=None):
"""
Search this Zone for records that match given parameters.
Returns None if no results, a ResourceRecord if one result, or
a ResourceRecordSets if more than one result.
:type name: str
:param name: The name of the records should match this parameter
:type type: str
:param type: The type of the records should match this parameter
:type desired: int
:param desired: The number of desired results. If the number of
matching records in the Zone exceeds the value of this parameter,
throw TooManyRecordsException
:type all: Boolean
:param all: If true return all records that match name, type, and
identifier parameters
:type identifier: Tuple
:param identifier: A tuple specifying WRR or LBR attributes. Valid
forms are:
* (str, int): WRR record [e.g. ('foo',10)]
            * (str, str): LBR record [e.g. ('foo','us-east-1')]
"""
name = self.route53connection._make_qualified(name)
returned = self.route53connection.get_all_rrsets(self.id, name=name,
type=type)
# name/type for get_all_rrsets sets the starting record; they
# are not a filter
results = []
for r in returned:
if r.name == name and r.type == type:
results.append(r)
# Is at the end of the list of matched records. No need to continue
# since the records are sorted by name and type.
else:
break
weight = None
region = None
if identifier is not None:
            try:
                int(identifier[1])
                weight = identifier[1]
            except (ValueError, TypeError):
                region = identifier[1]
if weight is not None:
results = [r for r in results if (r.weight == weight and
r.identifier == identifier[0])]
if region is not None:
results = [r for r in results if (r.region == region and
r.identifier == identifier[0])]
if ((not all) and (len(results) > desired)):
message = "Search: name %s type %s" % (name, type)
message += "\nFound: "
message += ", ".join(["%s %s %s" % (r.name, r.type, r.to_print())
for r in results])
raise TooManyRecordsException(message)
elif len(results) > 1:
return results
elif len(results) == 1:
return results[0]
else:
return None
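    # A hedged lookup sketch, continuing from a Zone obtained as in the
    # sketch above:
    #
    #     all_a = zone.find_records('www.example.com.', 'A', all=True)
    #     primary = zone.find_records('www.example.com.', 'A',
    #                                 identifier=('primary', 10))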
def get_cname(self, name, all=False):
"""
Search this Zone for CNAME records that match name.
Returns a ResourceRecord.
        If there is more than one match, returns all matches as a
        ResourceRecordSets when all is True; otherwise throws
        TooManyRecordsException.
"""
return self.find_records(name, 'CNAME', all=all)
def get_a(self, name, all=False):
"""
Search this Zone for A records that match name.
Returns a ResourceRecord.
        If there is more than one match, returns all matches as a
        ResourceRecordSets when all is True; otherwise throws
        TooManyRecordsException.
"""
return self.find_records(name, 'A', all=all)
def get_mx(self, name, all=False):
"""
Search this Zone for MX records that match name.
Returns a ResourceRecord.
        If there is more than one match, returns all matches as a
        ResourceRecordSets when all is True; otherwise throws
        TooManyRecordsException.
"""
return self.find_records(name, 'MX', all=all)
def update_cname(self, name, value, ttl=None, identifier=None, comment=""):
"""
Update the given CNAME record in this Zone to a new value, ttl,
and identifier. Returns a Status object.
        Will throw TooManyRecordsException if name and value do not match
a single record.
"""
name = self.route53connection._make_qualified(name)
value = self.route53connection._make_qualified(value)
old_record = self.get_cname(name)
ttl = ttl or old_record.ttl
return self.update_record(old_record,
new_value=value,
new_ttl=ttl,
new_identifier=identifier,
comment=comment)
def update_a(self, name, value, ttl=None, identifier=None, comment=""):
"""
Update the given A record in this Zone to a new value, ttl,
and identifier. Returns a Status object.
        Will throw TooManyRecordsException if name and value do not match
a single record.
"""
name = self.route53connection._make_qualified(name)
old_record = self.get_a(name)
ttl = ttl or old_record.ttl
return self.update_record(old_record,
new_value=value,
new_ttl=ttl,
new_identifier=identifier,
comment=comment)
def update_mx(self, name, value, ttl=None, identifier=None, comment=""):
"""
Update the given MX record in this Zone to a new value, ttl,
and identifier. Returns a Status object.
        Will throw TooManyRecordsException if name and value do not match
a single record.
"""
name = self.route53connection._make_qualified(name)
value = self.route53connection._make_qualified(value)
old_record = self.get_mx(name)
ttl = ttl or old_record.ttl
return self.update_record(old_record,
new_value=value,
new_ttl=ttl,
new_identifier=identifier,
comment=comment)
def delete_cname(self, name, identifier=None, all=False):
"""
Delete a CNAME record matching name and identifier from
this Zone. Returns a Status object.
        If there is more than one match, deletes all matching records when
        all is True; otherwise throws TooManyRecordsException.
"""
name = self.route53connection._make_qualified(name)
record = self.find_records(name, 'CNAME', identifier=identifier,
all=all)
return self.delete_record(record)
def delete_a(self, name, identifier=None, all=False):
"""
Delete an A record matching name and identifier from this
Zone. Returns a Status object.
        If there is more than one match, deletes all matching records when
        all is True; otherwise throws TooManyRecordsException.
"""
name = self.route53connection._make_qualified(name)
record = self.find_records(name, 'A', identifier=identifier,
all=all)
return self.delete_record(record)
def delete_mx(self, name, identifier=None, all=False):
"""
Delete an MX record matching name and identifier from this
Zone. Returns a Status object.
        If there is more than one match, deletes all matching records when
        all is True; otherwise throws TooManyRecordsException.
"""
name = self.route53connection._make_qualified(name)
record = self.find_records(name, 'MX', identifier=identifier,
all=all)
return self.delete_record(record)
def get_records(self):
"""
Return a ResourceRecordsSets for all of the records in this zone.
"""
return self.route53connection.get_all_rrsets(self.id)
def delete(self):
"""
Request that this zone be deleted by Amazon.
"""
self.route53connection.delete_hosted_zone(self.id)
def get_nameservers(self):
""" Get the list of nameservers for this zone."""
ns = self.find_records(self.name, 'NS')
if ns is not None:
ns = ns.resource_records
return ns
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/route53/zone.py
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.route53.domains import exceptions
class Route53DomainsConnection(AWSQueryConnection):
"""
"""
APIVersion = "2014-05-15"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "route53domains.us-east-1.amazonaws.com"
ServiceName = "Route53Domains"
TargetPrefix = "Route53Domains_v20140515"
ResponseError = JSONResponseError
_faults = {
"DuplicateRequest": exceptions.DuplicateRequest,
"DomainLimitExceeded": exceptions.DomainLimitExceeded,
"InvalidInput": exceptions.InvalidInput,
"OperationLimitExceeded": exceptions.OperationLimitExceeded,
"UnsupportedTLD": exceptions.UnsupportedTLD,
"TLDRulesViolation": exceptions.TLDRulesViolation,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(Route53DomainsConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def check_domain_availability(self, domain_name, idn_lang_code=None):
"""
This operation checks the availability of one domain name. You
can access this API without authenticating. Note that if the
availability status of a domain is pending, you must submit
another request to determine the availability of the domain
name.
:type domain_name: string
:param domain_name: The name of a domain.
Type: String
Default: None
Constraints: The domain name can contain only the letters a through z,
the numbers 0 through 9, and hyphen (-). Internationalized Domain
Names are not supported.
Required: Yes
:type idn_lang_code: string
:param idn_lang_code: Reserved for future use.
"""
params = {'DomainName': domain_name, }
if idn_lang_code is not None:
params['IdnLangCode'] = idn_lang_code
return self.make_request(action='CheckDomainAvailability',
body=json.dumps(params))
def disable_domain_transfer_lock(self, domain_name):
"""
This operation removes the transfer lock on the domain
(specifically the `clientTransferProhibited` status) to allow
domain transfers. We recommend you refrain from performing
this action unless you intend to transfer the domain to a
different registrar. Successful submission returns an
operation ID that you can use to track the progress and
completion of the action. If the request is not completed
successfully, the domain registrant will be notified by email.
:type domain_name: string
:param domain_name: The name of a domain.
Type: String
Default: None
Constraints: The domain name can contain only the letters a through z,
the numbers 0 through 9, and hyphen (-). Internationalized Domain
Names are not supported.
Required: Yes
"""
params = {'DomainName': domain_name, }
return self.make_request(action='DisableDomainTransferLock',
body=json.dumps(params))
def enable_domain_transfer_lock(self, domain_name):
"""
This operation sets the transfer lock on the domain
(specifically the `clientTransferProhibited` status) to
prevent domain transfers. Successful submission returns an
operation ID that you can use to track the progress and
completion of the action. If the request is not completed
successfully, the domain registrant will be notified by email.
:type domain_name: string
:param domain_name: The name of a domain.
Type: String
Default: None
Constraints: The domain name can contain only the letters a through z,
the numbers 0 through 9, and hyphen (-). Internationalized Domain
Names are not supported.
Required: Yes
"""
params = {'DomainName': domain_name, }
return self.make_request(action='EnableDomainTransferLock',
body=json.dumps(params))
def get_domain_detail(self, domain_name):
"""
This operation returns detailed information about the domain.
The domain's contact information is also returned as part of
the output.
:type domain_name: string
:param domain_name: The name of a domain.
Type: String
Default: None
Constraints: The domain name can contain only the letters a through z,
the numbers 0 through 9, and hyphen (-). Internationalized Domain
Names are not supported.
Required: Yes
"""
params = {'DomainName': domain_name, }
return self.make_request(action='GetDomainDetail',
body=json.dumps(params))
def get_operation_detail(self, operation_id):
"""
This operation returns the current status of an operation that
is not completed.
:type operation_id: string
:param operation_id: The identifier for the operation for which you
want to get the status. Amazon Route 53 returned the identifier in
the response to the original request.
Type: String
Default: None
Required: Yes
"""
params = {'OperationId': operation_id, }
return self.make_request(action='GetOperationDetail',
body=json.dumps(params))
def list_domains(self, marker=None, max_items=None):
"""
This operation returns all the domain names registered with
Amazon Route 53 for the current AWS account.
:type marker: string
:param marker: For an initial request for a list of domains, omit this
element. If the number of domains that are associated with the
current AWS account is greater than the value that you specified
for `MaxItems`, you can use `Marker` to return additional domains.
Get the value of `NextPageMarker` from the previous response, and
submit another request that includes the value of `NextPageMarker`
in the `Marker` element.
Type: String
Default: None
Constraints: The marker must match the value specified in the previous
request.
Required: No
:type max_items: integer
:param max_items: Number of domains to be returned.
Type: Integer
Default: 20
            Constraints: A value between 1 and 100.
Required: No
"""
params = {}
if marker is not None:
params['Marker'] = marker
if max_items is not None:
params['MaxItems'] = max_items
return self.make_request(action='ListDomains',
body=json.dumps(params))
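    # A hedged pagination sketch using the Marker/NextPageMarker contract
    # described above ("conn" is a Route53DomainsConnection; the response
    # keys follow the service's JSON shape):
    #
    #     marker = None
    #     while True:
    #         resp = conn.list_domains(marker=marker, max_items=20)
    #         for d in resp.get('Domains', []):
    #             print(d['DomainName'])
    #         marker = resp.get('NextPageMarker')
    #         if not marker:
    #             break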
def list_operations(self, marker=None, max_items=None):
"""
This operation returns the operation IDs of operations that
are not yet complete.
:type marker: string
:param marker: For an initial request for a list of operations, omit
this element. If the number of operations that are not yet complete
is greater than the value that you specified for `MaxItems`, you
can use `Marker` to return additional operations. Get the value of
`NextPageMarker` from the previous response, and submit another
request that includes the value of `NextPageMarker` in the `Marker`
element.
Type: String
Default: None
Required: No
:type max_items: integer
        :param max_items: Number of operations to be returned.
Type: Integer
Default: 20
Constraints: A value between 1 and 100.
Required: No
"""
params = {}
if marker is not None:
params['Marker'] = marker
if max_items is not None:
params['MaxItems'] = max_items
return self.make_request(action='ListOperations',
body=json.dumps(params))
def register_domain(self, domain_name, duration_in_years, admin_contact,
registrant_contact, tech_contact, idn_lang_code=None,
auto_renew=None, privacy_protect_admin_contact=None,
privacy_protect_registrant_contact=None,
privacy_protect_tech_contact=None):
"""
This operation registers a domain. Domains are registered by
the AWS registrar partner, Gandi. For some top-level domains
(TLDs), this operation requires extra parameters.
When you register a domain, Amazon Route 53 does the
following:
+ Creates a Amazon Route 53 hosted zone that has the same name
as the domain. Amazon Route 53 assigns four name servers to
your hosted zone and automatically updates your domain
registration with the names of these name servers.
+ Enables autorenew, so your domain registration will renew
automatically each year. We'll notify you in advance of the
renewal date so you can choose whether to renew the
registration.
+ Optionally enables privacy protection, so WHOIS queries
return contact information for our registrar partner, Gandi,
instead of the information you entered for registrant, admin,
and tech contacts.
+ If registration is successful, returns an operation ID that
you can use to track the progress and completion of the
action. If the request is not completed successfully, the
domain registrant is notified by email.
+ Charges your AWS account an amount based on the top-level
domain. For more information, see `Amazon Route 53 Pricing`_.
:type domain_name: string
:param domain_name: The name of a domain.
Type: String
Default: None
Constraints: The domain name can contain only the letters a through z,
the numbers 0 through 9, and hyphen (-). Internationalized Domain
Names are not supported.
Required: Yes
:type idn_lang_code: string
:param idn_lang_code: Reserved for future use.
:type duration_in_years: integer
:param duration_in_years: The number of years the domain will be
registered. Domains are registered for a minimum of one year. The
maximum period depends on the top-level domain.
Type: Integer
Default: 1
Valid values: Integer from 1 to 10
Required: Yes
:type auto_renew: boolean
:param auto_renew: Indicates whether the domain will be automatically
renewed ( `True`) or not ( `False`). Autorenewal only takes effect
after the account is charged.
Type: Boolean
Valid values: `True` | `False`
Default: `True`
Required: No
:type admin_contact: dict
:param admin_contact: Provides detailed contact information.
Type: Complex
Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`,
`OrganizationName`, `AddressLine1`, `AddressLine2`, `City`,
`State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`,
`ExtraParams`
Required: Yes
:type registrant_contact: dict
:param registrant_contact: Provides detailed contact information.
Type: Complex
Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`,
`OrganizationName`, `AddressLine1`, `AddressLine2`, `City`,
`State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`,
`ExtraParams`
Required: Yes
:type tech_contact: dict
:param tech_contact: Provides detailed contact information.
Type: Complex
Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`,
`OrganizationName`, `AddressLine1`, `AddressLine2`, `City`,
`State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`,
`ExtraParams`
Required: Yes
:type privacy_protect_admin_contact: boolean
:param privacy_protect_admin_contact: Whether you want to conceal
contact information from WHOIS queries. If you specify true, WHOIS
("who is") queries will return contact information for our
registrar partner, Gandi, instead of the contact information that
you enter.
Type: Boolean
Default: `True`
Valid values: `True` | `False`
Required: No
:type privacy_protect_registrant_contact: boolean
:param privacy_protect_registrant_contact: Whether you want to conceal
contact information from WHOIS queries. If you specify true, WHOIS
("who is") queries will return contact information for our
registrar partner, Gandi, instead of the contact information that
you enter.
Type: Boolean
Default: `True`
Valid values: `True` | `False`
Required: No
:type privacy_protect_tech_contact: boolean
:param privacy_protect_tech_contact: Whether you want to conceal
contact information from WHOIS queries. If you specify true, WHOIS
("who is") queries will return contact information for our
registrar partner, Gandi, instead of the contact information that
you enter.
Type: Boolean
Default: `True`
Valid values: `True` | `False`
Required: No
"""
params = {
'DomainName': domain_name,
'DurationInYears': duration_in_years,
'AdminContact': admin_contact,
'RegistrantContact': registrant_contact,
'TechContact': tech_contact,
}
if idn_lang_code is not None:
params['IdnLangCode'] = idn_lang_code
if auto_renew is not None:
params['AutoRenew'] = auto_renew
if privacy_protect_admin_contact is not None:
params['PrivacyProtectAdminContact'] = privacy_protect_admin_contact
if privacy_protect_registrant_contact is not None:
params['PrivacyProtectRegistrantContact'] = privacy_protect_registrant_contact
if privacy_protect_tech_contact is not None:
params['PrivacyProtectTechContact'] = privacy_protect_tech_contact
return self.make_request(action='RegisterDomain',
body=json.dumps(params))
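    # A hedged sketch of the contact structure this call expects; every
    # field value is illustrative, and one dict may be reused for all
    # three contact roles:
    #
    #     contact = {
    #         'FirstName': 'Jane', 'LastName': 'Doe',
    #         'ContactType': 'PERSON',
    #         'AddressLine1': '123 Any Street', 'City': 'Seattle',
    #         'State': 'WA', 'CountryCode': 'US', 'ZipCode': '98101',
    #         'PhoneNumber': '+1.2065550100', 'Email': 'jane@example.com',
    #     }
    #     resp = conn.register_domain('example-to-register.com', 1,
    #                                 admin_contact=contact,
    #                                 registrant_contact=contact,
    #                                 tech_contact=contact)
    #     operation_id = resp['OperationId']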
def retrieve_domain_auth_code(self, domain_name):
"""
This operation returns the AuthCode for the domain. To
transfer a domain to another registrar, you provide this value
to the new registrar.
:type domain_name: string
:param domain_name: The name of a domain.
Type: String
Default: None
Constraints: The domain name can contain only the letters a through z,
the numbers 0 through 9, and hyphen (-). Internationalized Domain
Names are not supported.
Required: Yes
"""
params = {'DomainName': domain_name, }
return self.make_request(action='RetrieveDomainAuthCode',
body=json.dumps(params))
def transfer_domain(self, domain_name, duration_in_years, nameservers,
admin_contact, registrant_contact, tech_contact,
idn_lang_code=None, auth_code=None, auto_renew=None,
privacy_protect_admin_contact=None,
privacy_protect_registrant_contact=None,
privacy_protect_tech_contact=None):
"""
This operation transfers a domain from another registrar to
        Amazon Route 53. Domains are registered by the AWS registrar,
        Gandi, upon transfer.
To transfer a domain, you need to meet all the domain transfer
criteria, including the following:
+ You must supply nameservers to transfer a domain.
+ You must disable the domain transfer lock (if any) before
transferring the domain.
+ A minimum of 60 days must have elapsed since the domain's
registration or last transfer.
We recommend you use the Amazon Route 53 as the DNS service
for your domain. You can create a hosted zone in Amazon Route
53 for your current domain before transferring your domain.
Note that upon transfer, the domain duration is extended for a
year if not otherwise specified. Autorenew is enabled by
default.
If the transfer is successful, this method returns an
operation ID that you can use to track the progress and
completion of the action. If the request is not completed
successfully, the domain registrant will be notified by email.
Transferring domains charges your AWS account an amount based
on the top-level domain. For more information, see `Amazon
Route 53 Pricing`_.
:type domain_name: string
:param domain_name: The name of a domain.
Type: String
Default: None
Constraints: The domain name can contain only the letters a through z,
the numbers 0 through 9, and hyphen (-). Internationalized Domain
Names are not supported.
Required: Yes
:type idn_lang_code: string
:param idn_lang_code: Reserved for future use.
:type duration_in_years: integer
:param duration_in_years: The number of years the domain will be
registered. Domains are registered for a minimum of one year. The
maximum period depends on the top-level domain.
Type: Integer
Default: 1
Valid values: Integer from 1 to 10
Required: Yes
:type nameservers: list
:param nameservers: Contains details for the host and glue IP
addresses.
Type: Complex
Children: `GlueIps`, `Name`
:type auth_code: string
:param auth_code: The authorization code for the domain. You get this
value from the current registrar.
Type: String
Required: Yes
:type auto_renew: boolean
:param auto_renew: Indicates whether the domain will be automatically
renewed (true) or not (false). Autorenewal only takes effect after
the account is charged.
Type: Boolean
Valid values: `True` | `False`
Default: true
Required: No
:type admin_contact: dict
:param admin_contact: Provides detailed contact information.
Type: Complex
Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`,
`OrganizationName`, `AddressLine1`, `AddressLine2`, `City`,
`State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`,
`ExtraParams`
Required: Yes
:type registrant_contact: dict
:param registrant_contact: Provides detailed contact information.
Type: Complex
Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`,
`OrganizationName`, `AddressLine1`, `AddressLine2`, `City`,
`State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`,
`ExtraParams`
Required: Yes
:type tech_contact: dict
:param tech_contact: Provides detailed contact information.
Type: Complex
Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`,
`OrganizationName`, `AddressLine1`, `AddressLine2`, `City`,
`State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`,
`ExtraParams`
Required: Yes
:type privacy_protect_admin_contact: boolean
:param privacy_protect_admin_contact: Whether you want to conceal
contact information from WHOIS queries. If you specify true, WHOIS
("who is") queries will return contact information for our
registrar partner, Gandi, instead of the contact information that
you enter.
Type: Boolean
Default: `True`
Valid values: `True` | `False`
Required: No
:type privacy_protect_registrant_contact: boolean
:param privacy_protect_registrant_contact: Whether you want to conceal
contact information from WHOIS queries. If you specify true, WHOIS
("who is") queries will return contact information for our
registrar partner, Gandi, instead of the contact information that
you enter.
Type: Boolean
Default: `True`
Valid values: `True` | `False`
Required: No
:type privacy_protect_tech_contact: boolean
:param privacy_protect_tech_contact: Whether you want to conceal
contact information from WHOIS queries. If you specify true, WHOIS
("who is") queries will return contact information for our
registrar partner, Gandi, instead of the contact information that
you enter.
Type: Boolean
Default: `True`
Valid values: `True` | `False`
Required: No
"""
params = {
'DomainName': domain_name,
'DurationInYears': duration_in_years,
'Nameservers': nameservers,
'AdminContact': admin_contact,
'RegistrantContact': registrant_contact,
'TechContact': tech_contact,
}
if idn_lang_code is not None:
params['IdnLangCode'] = idn_lang_code
if auth_code is not None:
params['AuthCode'] = auth_code
if auto_renew is not None:
params['AutoRenew'] = auto_renew
if privacy_protect_admin_contact is not None:
params['PrivacyProtectAdminContact'] = privacy_protect_admin_contact
if privacy_protect_registrant_contact is not None:
params['PrivacyProtectRegistrantContact'] = privacy_protect_registrant_contact
if privacy_protect_tech_contact is not None:
params['PrivacyProtectTechContact'] = privacy_protect_tech_contact
return self.make_request(action='TransferDomain',
body=json.dumps(params))
def update_domain_contact(self, domain_name, admin_contact=None,
registrant_contact=None, tech_contact=None):
"""
This operation updates the contact information for a
particular domain. Information for at least one contact
(registrant, administrator, or technical) must be supplied for
update.
If the update is successful, this method returns an operation
ID that you can use to track the progress and completion of
the action. If the request is not completed successfully, the
domain registrant will be notified by email.
:type domain_name: string
:param domain_name: The name of a domain.
Type: String
Default: None
Constraints: The domain name can contain only the letters a through z,
the numbers 0 through 9, and hyphen (-). Internationalized Domain
Names are not supported.
Required: Yes
:type admin_contact: dict
:param admin_contact: Provides detailed contact information.
Type: Complex
Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`,
`OrganizationName`, `AddressLine1`, `AddressLine2`, `City`,
`State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`,
`ExtraParams`
Required: Yes
:type registrant_contact: dict
:param registrant_contact: Provides detailed contact information.
Type: Complex
Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`,
`OrganizationName`, `AddressLine1`, `AddressLine2`, `City`,
`State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`,
`ExtraParams`
Required: Yes
:type tech_contact: dict
:param tech_contact: Provides detailed contact information.
Type: Complex
Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`,
`OrganizationName`, `AddressLine1`, `AddressLine2`, `City`,
`State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`,
`ExtraParams`
Required: Yes
"""
params = {'DomainName': domain_name, }
if admin_contact is not None:
params['AdminContact'] = admin_contact
if registrant_contact is not None:
params['RegistrantContact'] = registrant_contact
if tech_contact is not None:
params['TechContact'] = tech_contact
return self.make_request(action='UpdateDomainContact',
body=json.dumps(params))
def update_domain_contact_privacy(self, domain_name, admin_privacy=None,
registrant_privacy=None,
tech_privacy=None):
"""
This operation updates the specified domain contact's privacy
setting. When the privacy option is enabled, personal
information such as postal or email address is hidden from the
results of a public WHOIS query. The privacy services are
provided by the AWS registrar, Gandi. For more information,
see the `Gandi privacy features`_.
This operation only affects the privacy of the specified
contact type (registrant, administrator, or tech). Successful
acceptance returns an operation ID that you can use with
GetOperationDetail to track the progress and completion of the
action. If the request is not completed successfully, the
domain registrant will be notified by email.
:type domain_name: string
:param domain_name: The name of a domain.
Type: String
Default: None
Constraints: The domain name can contain only the letters a through z,
the numbers 0 through 9, and hyphen (-). Internationalized Domain
Names are not supported.
Required: Yes
:type admin_privacy: boolean
:param admin_privacy: Whether you want to conceal contact information
from WHOIS queries. If you specify true, WHOIS ("who is") queries
will return contact information for our registrar partner, Gandi,
instead of the contact information that you enter.
Type: Boolean
Default: None
Valid values: `True` | `False`
Required: No
:type registrant_privacy: boolean
:param registrant_privacy: Whether you want to conceal contact
information from WHOIS queries. If you specify true, WHOIS ("who
is") queries will return contact information for our registrar
partner, Gandi, instead of the contact information that you enter.
Type: Boolean
Default: None
Valid values: `True` | `False`
Required: No
:type tech_privacy: boolean
:param tech_privacy: Whether you want to conceal contact information
from WHOIS queries. If you specify true, WHOIS ("who is") queries
will return contact information for our registrar partner, Gandi,
instead of the contact information that you enter.
Type: Boolean
Default: None
Valid values: `True` | `False`
Required: No
"""
params = {'DomainName': domain_name, }
if admin_privacy is not None:
params['AdminPrivacy'] = admin_privacy
if registrant_privacy is not None:
params['RegistrantPrivacy'] = registrant_privacy
if tech_privacy is not None:
params['TechPrivacy'] = tech_privacy
return self.make_request(action='UpdateDomainContactPrivacy',
body=json.dumps(params))
def update_domain_nameservers(self, domain_name, nameservers):
"""
This operation replaces the current set of name servers for
the domain with the specified set of name servers. If you use
Amazon Route 53 as your DNS service, specify the four name
servers in the delegation set for the hosted zone for the
domain.
If successful, this operation returns an operation ID that you
can use to track the progress and completion of the action. If
the request is not completed successfully, the domain
registrant will be notified by email.
:type domain_name: string
:param domain_name: The name of a domain.
Type: String
Default: None
Constraints: The domain name can contain only the letters a through z,
the numbers 0 through 9, and hyphen (-). Internationalized Domain
Names are not supported.
Required: Yes
:type nameservers: list
:param nameservers: A list of new name servers for the domain.
Type: Complex
Children: `Name`, `GlueIps`
Required: Yes
"""
params = {
'DomainName': domain_name,
'Nameservers': nameservers,
}
return self.make_request(action='UpdateDomainNameservers',
body=json.dumps(params))
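    # A hedged sketch of the nameserver list shape (each entry carries a
    # Name and, for glue records, GlueIps, per the Children note above;
    # all values are illustrative):
    #
    #     nameservers = [
    #         {'Name': 'ns-2048.awsdns-64.com'},
    #         {'Name': 'ns1.example.com', 'GlueIps': ['192.0.2.1']},
    #     ]
    #     conn.update_domain_nameservers('example-to-register.com',
    #                                    nameservers)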
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
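# A minimal, hedged demonstration of the connection above. The default
# region matches the single documented endpoint; the domain name is
# illustrative and credentials come from the standard boto configuration.
if __name__ == '__main__':
    conn = Route53DomainsConnection()
    resp = conn.check_domain_availability('example-to-register.com')
    print(resp.get('Availability'))  # e.g. 'AVAILABLE' or 'UNAVAILABLE'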
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/route53/domains/layer1.py
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.opsworks import exceptions
class OpsWorksConnection(AWSQueryConnection):
"""
AWS OpsWorks
    Welcome to the AWS OpsWorks API Reference. This guide provides
descriptions, syntax, and usage examples about AWS OpsWorks
actions and data types, including common parameters and error
codes.
AWS OpsWorks is an application management service that provides an
integrated experience for overseeing the complete application
lifecycle. For information about this product, go to the `AWS
OpsWorks`_ details page.
**SDKs and CLI**
The most common way to use the AWS OpsWorks API is by using the
AWS Command Line Interface (CLI) or by using one of the AWS SDKs
to implement applications in your preferred language. For more
information, see:
+ `AWS CLI`_
+ `AWS SDK for Java`_
+ `AWS SDK for .NET`_
+ `AWS SDK for PHP 2`_
+ `AWS SDK for Ruby`_
+ `AWS SDK for Node.js`_
+ `AWS SDK for Python(Boto)`_
**Endpoints**
AWS OpsWorks supports only one endpoint, opsworks.us-
east-1.amazonaws.com (HTTPS), so you must connect to that
endpoint. You can then use the API to direct AWS OpsWorks to
create stacks in any AWS Region.
**Chef Versions**
When you call CreateStack, CloneStack, or UpdateStack we recommend
you use the `ConfigurationManager` parameter to specify the Chef
version, 0.9, 11.4, or 11.10. The default value is currently
11.10. For more information, see `Chef Versions`_.
You can still specify Chef 0.9 for your stack, but new features
are not available for Chef 0.9 stacks, and support is scheduled to
end on July 24, 2014. We do not recommend using Chef 0.9 for new
stacks, and we recommend migrating your existing Chef 0.9 stacks
to Chef 11.10 as soon as possible.
"""
APIVersion = "2013-02-18"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "opsworks.us-east-1.amazonaws.com"
ServiceName = "OpsWorks"
TargetPrefix = "OpsWorks_20130218"
ResponseError = JSONResponseError
_faults = {
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"ValidationException": exceptions.ValidationException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(OpsWorksConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def assign_instance(self, instance_id, layer_ids):
"""
Assign a registered instance to a custom layer. You cannot use
this action with instances that were created with AWS
OpsWorks.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
:type layer_ids: list
:param layer_ids: The layer ID, which must correspond to a custom
layer. You cannot assign a registered instance to a built-in layer.
"""
params = {
'InstanceId': instance_id,
'LayerIds': layer_ids,
}
return self.make_request(action='AssignInstance',
body=json.dumps(params))
def assign_volume(self, volume_id, instance_id=None):
"""
Assigns one of the stack's registered Amazon EBS volumes to a
specified instance. The volume must first be registered with
the stack by calling RegisterVolume. For more information, see
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'VolumeId': volume_id, }
if instance_id is not None:
params['InstanceId'] = instance_id
return self.make_request(action='AssignVolume',
body=json.dumps(params))
def associate_elastic_ip(self, elastic_ip, instance_id=None):
"""
Associates one of the stack's registered Elastic IP addresses
with a specified instance. The address must first be
registered with the stack by calling RegisterElasticIp. For
more information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'ElasticIp': elastic_ip, }
if instance_id is not None:
params['InstanceId'] = instance_id
return self.make_request(action='AssociateElasticIp',
body=json.dumps(params))
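# Example usage (a minimal sketch; the address and instance ID are
# hypothetical placeholders, and the address must already be registered
# with the stack by calling RegisterElasticIp):
#
#     conn.associate_elastic_ip('192.0.2.10',
#                               instance_id='my-instance-id')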
def attach_elastic_load_balancer(self, elastic_load_balancer_name,
layer_id):
"""
Attaches an Elastic Load Balancing load balancer to a
specified layer. For more information, see `Elastic Load
Balancing`_.
You must create the Elastic Load Balancing instance
separately, by using the Elastic Load Balancing console, API,
or CLI. For more information, see `Elastic Load Balancing
Developer Guide`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_load_balancer_name: string
:param elastic_load_balancer_name: The Elastic Load Balancing
instance's name.
:type layer_id: string
:param layer_id: The ID of the layer that the Elastic Load Balancing
instance is to be attached to.
"""
params = {
'ElasticLoadBalancerName': elastic_load_balancer_name,
'LayerId': layer_id,
}
return self.make_request(action='AttachElasticLoadBalancer',
body=json.dumps(params))
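# Example usage (a minimal sketch; the load balancer name and layer ID
# are hypothetical placeholders):
#
#     conn.attach_elastic_load_balancer('my-elb-name', 'my-layer-id')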
def clone_stack(self, source_stack_id, service_role_arn, name=None,
region=None, vpc_id=None, attributes=None,
default_instance_profile_arn=None, default_os=None,
hostname_theme=None, default_availability_zone=None,
default_subnet_id=None, custom_json=None,
configuration_manager=None, chef_configuration=None,
use_custom_cookbooks=None,
use_opsworks_security_groups=None,
custom_cookbooks_source=None, default_ssh_key_name=None,
clone_permissions=None, clone_app_ids=None,
default_root_device_type=None):
"""
Creates a clone of a specified stack. For more information,
see `Clone a Stack`_.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type source_stack_id: string
:param source_stack_id: The source stack ID.
:type name: string
:param name: The cloned stack name.
:type region: string
:param region: The cloned stack AWS region, such as "us-east-1". For
more information about AWS regions, see `Regions and Endpoints`_.
:type vpc_id: string
:param vpc_id: The ID of the VPC that the cloned stack is to be
launched into. It must be in the specified region. All instances
are launched into this VPC, and you cannot change the ID later.
+ If your account supports EC2 Classic, the default value is no VPC.
+ If your account does not support EC2 Classic, the default value is
the default VPC for the specified region.
If the VPC ID corresponds to a default VPC and you have specified
either the `DefaultAvailabilityZone` or the `DefaultSubnetId`
parameter only, AWS OpsWorks infers the value of the other
parameter. If you specify neither parameter, AWS OpsWorks sets
these parameters to the first valid Availability Zone for the
specified region and the corresponding default VPC subnet ID,
respectively.
If you specify a nondefault VPC ID, note the following:
+ It must belong to a VPC in your account that is in the specified
region.
+ You must specify a value for `DefaultSubnetId`.
For more information on how to use AWS OpsWorks with a VPC, see
`Running a Stack in a VPC`_. For more information on default VPC
and EC2 Classic, see `Supported Platforms`_.
:type attributes: map
:param attributes: A list of stack attributes and values as key/value
pairs to be added to the cloned stack.
:type service_role_arn: string
:param service_role_arn:
The stack AWS Identity and Access Management (IAM) role, which allows
AWS OpsWorks to work with AWS resources on your behalf. You must
set this parameter to the Amazon Resource Name (ARN) for an
existing IAM role. If you create a stack by using the AWS OpsWorks
console, it creates the role for you. You can obtain an existing
stack's IAM ARN programmatically by calling DescribePermissions.
For more information about IAM ARNs, see `Using Identifiers`_.
You must set this parameter to a valid service role ARN or the action
will fail; there is no default value. You can specify the source
stack's service role ARN, if you prefer, but you must do so
explicitly.
:type default_instance_profile_arn: string
:param default_instance_profile_arn: The ARN of an IAM profile that is
the default profile for all of the stack's EC2 instances. For more
information about IAM ARNs, see `Using Identifiers`_.
:type default_os: string
:param default_os: The stack's operating system, which must be set to
one of the following.
+ Standard operating systems: an Amazon Linux version such as `Amazon
Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`. You specify the custom AMI you want to use
when you create instances.
The default option is the current Amazon Linux version.
:type hostname_theme: string
:param hostname_theme: The stack's host name theme, with spaces
replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
`Layer_Dependent`, which creates host names by appending integers
to the layer's short name. The other themes are:
+ `Baked_Goods`
+ `Clouds`
+ `European_Cities`
+ `Fruits`
+ `Greek_Deities`
+ `Legendary_Creatures_from_Japan`
+ `Planets_and_Moons`
+ `Roman_Deities`
+ `Scottish_Islands`
+ `US_Cities`
+ `Wild_Cats`
To obtain a generated host name, call `GetHostNameSuggestion`, which
returns a host name based on the current theme.
:type default_availability_zone: string
:param default_availability_zone: The cloned stack's default
Availability Zone, which must be in the specified region. For more
information, see `Regions and Endpoints`_. If you also specify a
value for `DefaultSubnetId`, the subnet must be in the same zone.
For more information, see the `VpcId` parameter description.
:type default_subnet_id: string
:param default_subnet_id: The stack's default VPC subnet ID. This
parameter is required if you specify a value for the `VpcId`
parameter. All instances are launched into this subnet unless you
specify otherwise when you create the instance. If you also specify
a value for `DefaultAvailabilityZone`, the subnet must be in that
zone. For information on default values and when this parameter is
required, see the `VpcId` parameter description.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
It is used to override the corresponding default stack
configuration JSON values. The string should be in the following
format and must escape characters such as '"':
`"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
For more information on custom JSON, see `Use Custom JSON to Modify the
Stack Configuration JSON`_.
:type configuration_manager: dict
:param configuration_manager: The configuration manager. When you clone
a stack we recommend that you use the configuration manager to
specify the Chef version, 0.9, 11.4, or 11.10. The default value is
currently 11.10.
:type chef_configuration: dict
:param chef_configuration: A `ChefConfiguration` object that specifies
whether to enable Berkshelf and the Berkshelf version on Chef 11.10
stacks. For more information, see `Create a New Stack`_.
:type use_custom_cookbooks: boolean
:param use_custom_cookbooks: Whether to use custom cookbooks.
:type use_opsworks_security_groups: boolean
:param use_opsworks_security_groups: Whether to associate the AWS
OpsWorks built-in security groups with the stack's layers.
AWS OpsWorks provides a standard set of built-in security groups, one
for each layer, which are associated with layers by default. With
`UseOpsworksSecurityGroups` you can instead provide your own custom
security groups. `UseOpsworksSecurityGroups` has the following
settings:
+ True - AWS OpsWorks automatically associates the appropriate built-in
security group with each layer (default setting). You can associate
additional security groups with a layer after you create it but you
cannot delete the built-in security group.
+ False - AWS OpsWorks does not associate built-in security groups with
layers. You must create appropriate EC2 security groups and
associate a security group with each layer that you create.
However, you can still manually associate a built-in security group
with a layer on creation; custom security groups are required only
for those layers that need custom settings.
For more information, see `Create a New Stack`_.
:type custom_cookbooks_source: dict
:param custom_cookbooks_source: Contains the information required to
retrieve an app or cookbook from a repository. For more
information, see `Creating Apps`_ or `Custom Recipes and
Cookbooks`_.
:type default_ssh_key_name: string
:param default_ssh_key_name: A default SSH key for the stack instances.
You can override this value when you create or update an instance.
:type clone_permissions: boolean
:param clone_permissions: Whether to clone the source stack's
permissions.
:type clone_app_ids: list
:param clone_app_ids: A list of source stack app IDs to be included in
the cloned stack.
:type default_root_device_type: string
:param default_root_device_type: The default root device type. This
value is used by default for all instances in the cloned stack, but
you can override it when you create an instance. For more
information, see `Storage for the Root Device`_.
"""
params = {
'SourceStackId': source_stack_id,
'ServiceRoleArn': service_role_arn,
}
if name is not None:
params['Name'] = name
if region is not None:
params['Region'] = region
if vpc_id is not None:
params['VpcId'] = vpc_id
if attributes is not None:
params['Attributes'] = attributes
if default_instance_profile_arn is not None:
params['DefaultInstanceProfileArn'] = default_instance_profile_arn
if default_os is not None:
params['DefaultOs'] = default_os
if hostname_theme is not None:
params['HostnameTheme'] = hostname_theme
if default_availability_zone is not None:
params['DefaultAvailabilityZone'] = default_availability_zone
if default_subnet_id is not None:
params['DefaultSubnetId'] = default_subnet_id
if custom_json is not None:
params['CustomJson'] = custom_json
if configuration_manager is not None:
params['ConfigurationManager'] = configuration_manager
if chef_configuration is not None:
params['ChefConfiguration'] = chef_configuration
if use_custom_cookbooks is not None:
params['UseCustomCookbooks'] = use_custom_cookbooks
if use_opsworks_security_groups is not None:
params['UseOpsworksSecurityGroups'] = use_opsworks_security_groups
if custom_cookbooks_source is not None:
params['CustomCookbooksSource'] = custom_cookbooks_source
if default_ssh_key_name is not None:
params['DefaultSshKeyName'] = default_ssh_key_name
if clone_permissions is not None:
params['ClonePermissions'] = clone_permissions
if clone_app_ids is not None:
params['CloneAppIds'] = clone_app_ids
if default_root_device_type is not None:
params['DefaultRootDeviceType'] = default_root_device_type
return self.make_request(action='CloneStack',
body=json.dumps(params))
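# Example usage (a minimal sketch; the stack ID and service role ARN are
# hypothetical placeholders):
#
#     conn.clone_stack('my-source-stack-id',
#                      'arn:aws:iam::123456789012:role/my-service-role',
#                      name='MyClonedStack',
#                      clone_permissions=True)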
def create_app(self, stack_id, name, type, shortname=None,
description=None, data_sources=None, app_source=None,
domains=None, enable_ssl=None, ssl_configuration=None,
attributes=None, environment=None):
"""
Creates an app for a specified stack. For more information,
see `Creating Apps`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type shortname: string
:param shortname: The app's short name.
:type name: string
:param name: The app name.
:type description: string
:param description: A description of the app.
:type data_sources: list
:param data_sources: The app's data source.
:type type: string
:param type: The app type. Each supported type is associated with a
particular layer. For example, PHP applications are associated with
a PHP layer. AWS OpsWorks deploys an application to those instances
that are members of the corresponding layer.
:type app_source: dict
:param app_source: A `Source` object that specifies the app repository.
:type domains: list
:param domains: The app virtual host settings, with multiple domains
separated by commas. For example: `'www.example.com, example.com'`
:type enable_ssl: boolean
:param enable_ssl: Whether to enable SSL for the app.
:type ssl_configuration: dict
:param ssl_configuration: An `SslConfiguration` object with the SSL
configuration.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type environment: list
:param environment:
An array of `EnvironmentVariable` objects that specify environment
variables to be associated with the app. You can specify up to ten
environment variables. After you deploy the app, these variables
are defined on the associated app server instance.
This parameter is supported only by Chef 11.10 stacks. If you have
specified one or more environment variables, you cannot modify the
stack's Chef version.
"""
params = {'StackId': stack_id, 'Name': name, 'Type': type, }
if shortname is not None:
params['Shortname'] = shortname
if description is not None:
params['Description'] = description
if data_sources is not None:
params['DataSources'] = data_sources
if app_source is not None:
params['AppSource'] = app_source
if domains is not None:
params['Domains'] = domains
if enable_ssl is not None:
params['EnableSsl'] = enable_ssl
if ssl_configuration is not None:
params['SslConfiguration'] = ssl_configuration
if attributes is not None:
params['Attributes'] = attributes
if environment is not None:
params['Environment'] = environment
return self.make_request(action='CreateApp',
body=json.dumps(params))
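# Example usage (a minimal sketch; the IDs, app name, and repository URL
# are hypothetical placeholders; 'php' stands in for any supported app
# type):
#
#     conn.create_app('my-stack-id', 'MyApp', 'php',
#                     app_source={'Type': 'git',
#                                 'Url': 'git://github.com/example/app.git'})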
def create_deployment(self, stack_id, command, app_id=None,
instance_ids=None, comment=None, custom_json=None):
"""
Runs deployment or stack commands. For more information, see
`Deploying Apps`_ and `Run Stack Commands`_.
**Required Permissions**: To use this action, an IAM user must
have a Deploy or Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type app_id: string
:param app_id: The app ID. This parameter is required for app
deployments, but not for other deployment commands.
:type instance_ids: list
:param instance_ids: The instance IDs for the deployment targets.
:type command: dict
:param command: A `DeploymentCommand` object that specifies the
deployment command and any associated arguments.
:type comment: string
:param comment: A user-defined comment.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
It is used to override the corresponding default stack
configuration JSON values. The string should be in the following
format and must escape characters such as '"':
`"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
For more information on custom JSON, see `Use Custom JSON to Modify the
Stack Configuration JSON`_.
"""
params = {'StackId': stack_id, 'Command': command, }
if app_id is not None:
params['AppId'] = app_id
if instance_ids is not None:
params['InstanceIds'] = instance_ids
if comment is not None:
params['Comment'] = comment
if custom_json is not None:
params['CustomJson'] = custom_json
return self.make_request(action='CreateDeployment',
body=json.dumps(params))
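# Example usage (a minimal sketch; the IDs are hypothetical placeholders
# and the command dict follows the `DeploymentCommand` shape, here the
# 'deploy' command):
#
#     conn.create_deployment('my-stack-id', {'Name': 'deploy'},
#                            app_id='my-app-id')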
def create_instance(self, stack_id, layer_ids, instance_type,
auto_scaling_type=None, hostname=None, os=None,
ami_id=None, ssh_key_name=None,
availability_zone=None, virtualization_type=None,
subnet_id=None, architecture=None,
root_device_type=None, install_updates_on_boot=None,
ebs_optimized=None):
"""
Creates an instance in a specified stack. For more
information, see `Adding an Instance to a Layer`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type layer_ids: list
:param layer_ids: An array that contains the instance layer IDs.
:type instance_type: string
:param instance_type: The instance type. AWS OpsWorks supports all
instance types except Cluster Compute, Cluster GPU, and High Memory
Cluster. For more information, see `Instance Families and Types`_.
The parameter values that you use to specify the various types are
in the API Name column of the Available Instance Types table.
:type auto_scaling_type: string
:param auto_scaling_type: For load-based or time-based instances, the
type.
:type hostname: string
:param hostname: The instance host name.
:type os: string
:param os: The instance's operating system, which must be set to one of
the following.
+ Standard operating systems: an Amazon Linux version such as `Amazon
Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`
The default option is the current Amazon Linux version. If you set this
parameter to `Custom`, you must use the CreateInstance action's
AmiId parameter to specify the custom AMI that you want to use. For
more information on the standard operating systems, see `Operating
Systems`_. For more information on how to use custom AMIs with
OpsWorks, see `Using Custom AMIs`_.
:type ami_id: string
:param ami_id:
A custom AMI ID to be used to create the instance. The AMI should be
based on one of the standard AWS OpsWorks AMIs: Amazon Linux,
Ubuntu 12.04 LTS, or Ubuntu 14.04 LTS. For more information, see
`Instances`_.
If you specify a custom AMI, you must set `Os` to `Custom`.
:type ssh_key_name: string
:param ssh_key_name: The instance SSH key name.
:type availability_zone: string
:param availability_zone: The instance Availability Zone. For more
information, see `Regions and Endpoints`_.
:type virtualization_type: string
:param virtualization_type: The instance's virtualization type,
`paravirtual` or `hvm`.
:type subnet_id: string
:param subnet_id: The ID of the instance's subnet. If the stack is
running in a VPC, you can use this parameter to override the
stack's default subnet ID value and direct AWS OpsWorks to launch
the instance in a different subnet.
:type architecture: string
:param architecture: The instance architecture. The default option is
`x86_64`. Instance types do not necessarily support both
architectures. For a list of the architectures that are supported
by the different instance types, see `Instance Families and
Types`_.
:type root_device_type: string
:param root_device_type: The instance root device type. For more
information, see `Storage for the Root Device`_.
:type install_updates_on_boot: boolean
:param install_updates_on_boot:
Whether to install operating system and package updates when the
instance boots. The default value is `True`. To control when
updates are installed, set this value to `False`. You must then
update your instances manually by using CreateDeployment to run the
`update_dependencies` stack command or manually running `yum`
(Amazon Linux) or `apt-get` (Ubuntu) on the instances.
We strongly recommend using the default value of `True` to ensure that
your instances have the latest security updates.
:type ebs_optimized: boolean
:param ebs_optimized: Whether to create an Amazon EBS-optimized
instance.
"""
params = {
'StackId': stack_id,
'LayerIds': layer_ids,
'InstanceType': instance_type,
}
if auto_scaling_type is not None:
params['AutoScalingType'] = auto_scaling_type
if hostname is not None:
params['Hostname'] = hostname
if os is not None:
params['Os'] = os
if ami_id is not None:
params['AmiId'] = ami_id
if ssh_key_name is not None:
params['SshKeyName'] = ssh_key_name
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if virtualization_type is not None:
params['VirtualizationType'] = virtualization_type
if subnet_id is not None:
params['SubnetId'] = subnet_id
if architecture is not None:
params['Architecture'] = architecture
if root_device_type is not None:
params['RootDeviceType'] = root_device_type
if install_updates_on_boot is not None:
params['InstallUpdatesOnBoot'] = install_updates_on_boot
if ebs_optimized is not None:
params['EbsOptimized'] = ebs_optimized
return self.make_request(action='CreateInstance',
body=json.dumps(params))
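# Example usage (a minimal sketch; the IDs are hypothetical placeholders
# and 'm3.medium' stands in for any supported instance type):
#
#     conn.create_instance('my-stack-id', ['my-layer-id'], 'm3.medium',
#                          hostname='web1')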
def create_layer(self, stack_id, type, name, shortname, attributes=None,
custom_instance_profile_arn=None,
custom_security_group_ids=None, packages=None,
volume_configurations=None, enable_auto_healing=None,
auto_assign_elastic_ips=None,
auto_assign_public_ips=None, custom_recipes=None,
install_updates_on_boot=None,
use_ebs_optimized_instances=None,
lifecycle_event_configuration=None):
"""
Creates a layer. For more information, see `How to Create a
Layer`_.
You should use **CreateLayer** for noncustom layer types such
as PHP App Server only if the stack does not have an existing
layer of that type. A stack can have at most one instance of
each noncustom layer; if you attempt to create a second
instance, **CreateLayer** fails. A stack can have an arbitrary
number of custom layers, so you can call **CreateLayer** as
many times as you like for that layer type.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The layer stack ID.
:type type: string
:param type: The layer type. A stack cannot have more than one built-in
layer of the same type. It can have any number of custom layers.
:type name: string
:param name: The layer name, which is used by the console.
:type shortname: string
:param shortname: The layer short name, which is used internally by AWS
OpsWorks and by Chef recipes. The short name is also used as the
name for the directory where your app files are installed. It can
have a maximum of 200 characters, which are limited to the
alphanumeric characters, '-', '_', and '.'.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type custom_instance_profile_arn: string
:param custom_instance_profile_arn: The ARN of an IAM profile to be
used for the layer's EC2 instances. For more information about
IAM ARNs, see `Using Identifiers`_.
:type custom_security_group_ids: list
:param custom_security_group_ids: An array containing the layer custom
security group IDs.
:type packages: list
:param packages: An array of `Package` objects that describe the layer
packages.
:type volume_configurations: list
:param volume_configurations: A `VolumeConfigurations` object that
describes the layer's Amazon EBS volumes.
:type enable_auto_healing: boolean
:param enable_auto_healing: Whether to enable auto healing for the
layer.
:type auto_assign_elastic_ips: boolean
:param auto_assign_elastic_ips: Whether to automatically assign an
`Elastic IP address`_ to the layer's instances. For more
information, see `How to Edit a Layer`_.
:type auto_assign_public_ips: boolean
:param auto_assign_public_ips: For stacks that are running in a VPC,
whether to automatically assign a public IP address to the layer's
instances. For more information, see `How to Edit a Layer`_.
:type custom_recipes: dict
:param custom_recipes: A `LayerCustomRecipes` object that specifies the
layer custom recipes.
:type install_updates_on_boot: boolean
:param install_updates_on_boot:
Whether to install operating system and package updates when the
instance boots. The default value is `True`. To control when
updates are installed, set this value to `False`. You must then
update your instances manually by using CreateDeployment to run the
`update_dependencies` stack command or manually running `yum`
(Amazon Linux) or `apt-get` (Ubuntu) on the instances.
We strongly recommend using the default value of `True` to ensure that
your instances have the latest security updates.
:type use_ebs_optimized_instances: boolean
:param use_ebs_optimized_instances: Whether to use Amazon EBS-optimized
instances.
:type lifecycle_event_configuration: dict
:param lifecycle_event_configuration: A LifeCycleEventConfiguration
object that you can use to configure the Shutdown event to specify
an execution timeout and enable or disable Elastic Load Balancer
connection draining.
"""
params = {
'StackId': stack_id,
'Type': type,
'Name': name,
'Shortname': shortname,
}
if attributes is not None:
params['Attributes'] = attributes
if custom_instance_profile_arn is not None:
params['CustomInstanceProfileArn'] = custom_instance_profile_arn
if custom_security_group_ids is not None:
params['CustomSecurityGroupIds'] = custom_security_group_ids
if packages is not None:
params['Packages'] = packages
if volume_configurations is not None:
params['VolumeConfigurations'] = volume_configurations
if enable_auto_healing is not None:
params['EnableAutoHealing'] = enable_auto_healing
if auto_assign_elastic_ips is not None:
params['AutoAssignElasticIps'] = auto_assign_elastic_ips
if auto_assign_public_ips is not None:
params['AutoAssignPublicIps'] = auto_assign_public_ips
if custom_recipes is not None:
params['CustomRecipes'] = custom_recipes
if install_updates_on_boot is not None:
params['InstallUpdatesOnBoot'] = install_updates_on_boot
if use_ebs_optimized_instances is not None:
params['UseEbsOptimizedInstances'] = use_ebs_optimized_instances
if lifecycle_event_configuration is not None:
params['LifecycleEventConfiguration'] = lifecycle_event_configuration
return self.make_request(action='CreateLayer',
body=json.dumps(params))
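# Example usage (a minimal sketch; the stack ID is a hypothetical
# placeholder; 'custom' creates a custom layer, of which a stack can
# have any number):
#
#     conn.create_layer('my-stack-id', 'custom', 'My Layer', 'mylayer')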
def create_stack(self, name, region, service_role_arn,
default_instance_profile_arn, vpc_id=None,
attributes=None, default_os=None, hostname_theme=None,
default_availability_zone=None, default_subnet_id=None,
custom_json=None, configuration_manager=None,
chef_configuration=None, use_custom_cookbooks=None,
use_opsworks_security_groups=None,
custom_cookbooks_source=None, default_ssh_key_name=None,
default_root_device_type=None):
"""
Creates a new stack. For more information, see `Create a New
Stack`_.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type name: string
:param name: The stack name.
:type region: string
:param region: The stack AWS region, such as "us-east-1". For more
information about AWS regions, see `Regions and Endpoints`_.
:type vpc_id: string
:param vpc_id: The ID of the VPC that the stack is to be launched into.
It must be in the specified region. All instances are launched into
this VPC, and you cannot change the ID later.
+ If your account supports EC2 Classic, the default value is no VPC.
+ If your account does not support EC2 Classic, the default value is
the default VPC for the specified region.
If the VPC ID corresponds to a default VPC and you have specified
either the `DefaultAvailabilityZone` or the `DefaultSubnetId`
parameter only, AWS OpsWorks infers the value of the other
parameter. If you specify neither parameter, AWS OpsWorks sets
these parameters to the first valid Availability Zone for the
specified region and the corresponding default VPC subnet ID,
respectively.
If you specify a nondefault VPC ID, note the following:
+ It must belong to a VPC in your account that is in the specified
region.
+ You must specify a value for `DefaultSubnetId`.
For more information on how to use AWS OpsWorks with a VPC, see
`Running a Stack in a VPC`_. For more information on default VPC
and EC2 Classic, see `Supported Platforms`_.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type service_role_arn: string
:param service_role_arn: The stack AWS Identity and Access Management
(IAM) role, which allows AWS OpsWorks to work with AWS resources on
your behalf. You must set this parameter to the Amazon Resource
Name (ARN) for an existing IAM role. For more information about IAM
ARNs, see `Using Identifiers`_.
:type default_instance_profile_arn: string
:param default_instance_profile_arn: The ARN of an IAM profile that is
the default profile for all of the stack's EC2 instances. For more
information about IAM ARNs, see `Using Identifiers`_.
:type default_os: string
:param default_os: The stack's operating system, which must be set to
one of the following.
+ Standard operating systems: an Amazon Linux version such as `Amazon
Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`. You specify the custom AMI you want to use
when you create instances.
The default option is the current Amazon Linux version.
:type hostname_theme: string
:param hostname_theme: The stack's host name theme, with spaces
replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
`Layer_Dependent`, which creates host names by appending integers
to the layer's short name. The other themes are:
+ `Baked_Goods`
+ `Clouds`
+ `European_Cities`
+ `Fruits`
+ `Greek_Deities`
+ `Legendary_Creatures_from_Japan`
+ `Planets_and_Moons`
+ `Roman_Deities`
+ `Scottish_Islands`
+ `US_Cities`
+ `Wild_Cats`
To obtain a generated host name, call `GetHostNameSuggestion`, which
returns a host name based on the current theme.
:type default_availability_zone: string
:param default_availability_zone: The stack's default Availability
Zone, which must be in the specified region. For more information,
see `Regions and Endpoints`_. If you also specify a value for
`DefaultSubnetId`, the subnet must be in the same zone. For more
information, see the `VpcId` parameter description.
:type default_subnet_id: string
:param default_subnet_id: The stack's default VPC subnet ID. This
parameter is required if you specify a value for the `VpcId`
parameter. All instances are launched into this subnet unless you
specify otherwise when you create the instance. If you also specify
a value for `DefaultAvailabilityZone`, the subnet must be in that
zone. For information on default values and when this parameter is
required, see the `VpcId` parameter description.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
It is used to override the corresponding default stack
configuration JSON values. The string should be in the following
format and must escape characters such as '"':
`"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
For more information on custom JSON, see `Use Custom JSON to Modify the
Stack Configuration JSON`_.
:type configuration_manager: dict
:param configuration_manager: The configuration manager. When you
create a stack, we recommend that you use the configuration manager to
specify the Chef version, 0.9, 11.4, or 11.10. The default value is
currently 11.10.
:type chef_configuration: dict
:param chef_configuration: A `ChefConfiguration` object that specifies
whether to enable Berkshelf and the Berkshelf version on Chef 11.10
stacks. For more information, see `Create a New Stack`_.
:type use_custom_cookbooks: boolean
:param use_custom_cookbooks: Whether the stack uses custom cookbooks.
:type use_opsworks_security_groups: boolean
:param use_opsworks_security_groups: Whether to associate the AWS
OpsWorks built-in security groups with the stack's layers.
AWS OpsWorks provides a standard set of built-in security groups, one
for each layer, which are associated with layers by default. With
`UseOpsworksSecurityGroups` you can instead provide your own custom
security groups. `UseOpsworksSecurityGroups` has the following
settings:
+ True - AWS OpsWorks automatically associates the appropriate built-in
security group with each layer (default setting). You can associate
additional security groups with a layer after you create it but you
cannot delete the built-in security group.
+ False - AWS OpsWorks does not associate built-in security groups with
layers. You must create appropriate EC2 security groups and
associate a security group with each layer that you create.
However, you can still manually associate a built-in security group
with a layer on creation; custom security groups are required only
for those layers that need custom settings.
For more information, see `Create a New Stack`_.
:type custom_cookbooks_source: dict
:param custom_cookbooks_source: Contains the information required to
retrieve an app or cookbook from a repository. For more
information, see `Creating Apps`_ or `Custom Recipes and
Cookbooks`_.
:type default_ssh_key_name: string
:param default_ssh_key_name: A default SSH key for the stack instances.
You can override this value when you create or update an instance.
:type default_root_device_type: string
:param default_root_device_type: The default root device type. This
value is used by default for all instances in the stack, but you
can override it when you create an instance. The default option is
`instance-store`. For more information, see `Storage for the Root
Device`_.
"""
params = {
'Name': name,
'Region': region,
'ServiceRoleArn': service_role_arn,
'DefaultInstanceProfileArn': default_instance_profile_arn,
}
if vpc_id is not None:
params['VpcId'] = vpc_id
if attributes is not None:
params['Attributes'] = attributes
if default_os is not None:
params['DefaultOs'] = default_os
if hostname_theme is not None:
params['HostnameTheme'] = hostname_theme
if default_availability_zone is not None:
params['DefaultAvailabilityZone'] = default_availability_zone
if default_subnet_id is not None:
params['DefaultSubnetId'] = default_subnet_id
if custom_json is not None:
params['CustomJson'] = custom_json
if configuration_manager is not None:
params['ConfigurationManager'] = configuration_manager
if chef_configuration is not None:
params['ChefConfiguration'] = chef_configuration
if use_custom_cookbooks is not None:
params['UseCustomCookbooks'] = use_custom_cookbooks
if use_opsworks_security_groups is not None:
params['UseOpsworksSecurityGroups'] = use_opsworks_security_groups
if custom_cookbooks_source is not None:
params['CustomCookbooksSource'] = custom_cookbooks_source
if default_ssh_key_name is not None:
params['DefaultSshKeyName'] = default_ssh_key_name
if default_root_device_type is not None:
params['DefaultRootDeviceType'] = default_root_device_type
return self.make_request(action='CreateStack',
body=json.dumps(params))
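# Example usage (a minimal sketch; the ARNs are hypothetical
# placeholders for an existing service role and instance profile):
#
#     conn.create_stack(
#         'MyStack', 'us-east-1',
#         'arn:aws:iam::123456789012:role/my-service-role',
#         'arn:aws:iam::123456789012:instance-profile/my-instance-profile')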
def create_user_profile(self, iam_user_arn, ssh_username=None,
ssh_public_key=None, allow_self_management=None):
"""
Creates a new user profile.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN.
:type ssh_username: string
:param ssh_username: The user's SSH user name. The allowable characters
are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name
includes other punctuation marks, AWS OpsWorks removes them. For
example, `my.name` will be changed to `myname`. If you do not
specify an SSH user name, AWS OpsWorks generates one from the IAM
user name.
:type ssh_public_key: string
:param ssh_public_key: The user's public SSH key.
:type allow_self_management: boolean
:param allow_self_management: Whether users can specify their own SSH
public key through the My Settings page. For more information, see
`Setting an IAM User's Public SSH Key`_.
"""
params = {'IamUserArn': iam_user_arn, }
if ssh_username is not None:
params['SshUsername'] = ssh_username
if ssh_public_key is not None:
params['SshPublicKey'] = ssh_public_key
if allow_self_management is not None:
params['AllowSelfManagement'] = allow_self_management
return self.make_request(action='CreateUserProfile',
body=json.dumps(params))
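# Example usage (a minimal sketch; the IAM user ARN is a hypothetical
# placeholder):
#
#     conn.create_user_profile(
#         'arn:aws:iam::123456789012:user/example-user',
#         allow_self_management=True)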
def delete_app(self, app_id):
"""
Deletes a specified app.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type app_id: string
:param app_id: The app ID.
"""
params = {'AppId': app_id, }
return self.make_request(action='DeleteApp',
body=json.dumps(params))
def delete_instance(self, instance_id, delete_elastic_ip=None,
delete_volumes=None):
"""
Deletes a specified instance, which terminates the associated
Amazon EC2 instance. You must stop an instance before you can
delete it.
For more information, see `Deleting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
:type delete_elastic_ip: boolean
:param delete_elastic_ip: Whether to delete the instance Elastic IP
address.
:type delete_volumes: boolean
:param delete_volumes: Whether to delete the instance's Amazon EBS
volumes.
"""
params = {'InstanceId': instance_id, }
if delete_elastic_ip is not None:
params['DeleteElasticIp'] = delete_elastic_ip
if delete_volumes is not None:
params['DeleteVolumes'] = delete_volumes
return self.make_request(action='DeleteInstance',
body=json.dumps(params))
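# Example usage (a minimal sketch; the instance ID is a hypothetical
# placeholder, and the instance must already be stopped):
#
#     conn.delete_instance('my-instance-id', delete_volumes=True)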
def delete_layer(self, layer_id):
"""
Deletes a specified layer. You must first stop and then delete
all associated instances or unassign registered instances. For
more information, see `How to Delete a Layer`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
"""
params = {'LayerId': layer_id, }
return self.make_request(action='DeleteLayer',
body=json.dumps(params))
def delete_stack(self, stack_id):
"""
Deletes a specified stack. You must first delete all
instances, layers, and apps or deregister registered
instances. For more information, see `Shut Down a Stack`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='DeleteStack',
body=json.dumps(params))
def delete_user_profile(self, iam_user_arn):
"""
Deletes a user profile.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN.
"""
params = {'IamUserArn': iam_user_arn, }
return self.make_request(action='DeleteUserProfile',
body=json.dumps(params))
def deregister_elastic_ip(self, elastic_ip):
"""
Deregisters a specified Elastic IP address. The address can
then be registered by another stack. For more information, see
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
"""
params = {'ElasticIp': elastic_ip, }
return self.make_request(action='DeregisterElasticIp',
body=json.dumps(params))
def deregister_instance(self, instance_id):
"""
Deregister a registered Amazon EC2 or on-premises instance.
This action removes the instance from the stack and returns it
to your control. This action cannot be used with instances
that were created with AWS OpsWorks.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='DeregisterInstance',
body=json.dumps(params))
def deregister_rds_db_instance(self, rds_db_instance_arn):
"""
Deregisters an Amazon RDS instance.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type rds_db_instance_arn: string
:param rds_db_instance_arn: The Amazon RDS instance's ARN.
"""
params = {'RdsDbInstanceArn': rds_db_instance_arn, }
return self.make_request(action='DeregisterRdsDbInstance',
body=json.dumps(params))
def deregister_volume(self, volume_id):
"""
Deregisters an Amazon EBS volume. The volume can then be
registered by another stack. For more information, see
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
"""
params = {'VolumeId': volume_id, }
return self.make_request(action='DeregisterVolume',
body=json.dumps(params))
def describe_apps(self, stack_id=None, app_ids=None):
"""
Requests a description of a specified set of apps.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The app stack ID. If you use this parameter,
`DescribeApps` returns a description of the apps in the specified
stack.
:type app_ids: list
:param app_ids: An array of app IDs for the apps to be described. If
you use this parameter, `DescribeApps` returns a description of the
specified apps. Otherwise, it returns a description of every app.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if app_ids is not None:
params['AppIds'] = app_ids
return self.make_request(action='DescribeApps',
body=json.dumps(params))
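# Example usage (a minimal sketch; the stack ID is a hypothetical
# placeholder, and the response is assumed to be a dict keyed by
# 'Apps' as in the AWS response format):
#
#     result = conn.describe_apps(stack_id='my-stack-id')
#     for app in result['Apps']:
#         print(app['Name'])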
def describe_commands(self, deployment_id=None, instance_id=None,
command_ids=None):
"""
Describes the results of specified commands.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type deployment_id: string
:param deployment_id: The deployment ID. If you include this parameter,
`DescribeCommands` returns a description of the commands associated
with the specified deployment.
:type instance_id: string
:param instance_id: The instance ID. If you include this parameter,
`DescribeCommands` returns a description of the commands associated
with the specified instance.
:type command_ids: list
:param command_ids: An array of command IDs. If you include this
parameter, `DescribeCommands` returns a description of the
specified commands. Otherwise, it returns a description of every
command.
"""
params = {}
if deployment_id is not None:
params['DeploymentId'] = deployment_id
if instance_id is not None:
params['InstanceId'] = instance_id
if command_ids is not None:
params['CommandIds'] = command_ids
return self.make_request(action='DescribeCommands',
body=json.dumps(params))
def describe_deployments(self, stack_id=None, app_id=None,
deployment_ids=None):
"""
Requests a description of a specified set of deployments.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID. If you include this parameter,
`DescribeDeployments` returns a description of the deployments
associated with the specified stack.
:type app_id: string
:param app_id: The app ID. If you include this parameter,
`DescribeDeployments` returns a description of the deployments
associated with the specified app.
:type deployment_ids: list
:param deployment_ids: An array of deployment IDs to be described. If
you include this parameter, `DescribeDeployments` returns a
description of the specified deployments. Otherwise, it returns a
description of every deployment.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if app_id is not None:
params['AppId'] = app_id
if deployment_ids is not None:
params['DeploymentIds'] = deployment_ids
return self.make_request(action='DescribeDeployments',
body=json.dumps(params))
def describe_elastic_ips(self, instance_id=None, stack_id=None, ips=None):
"""
Describes `Elastic IP addresses`_.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_id: string
:param instance_id: The instance ID. If you include this parameter,
`DescribeElasticIps` returns a description of the Elastic IP
addresses associated with the specified instance.
:type stack_id: string
:param stack_id: A stack ID. If you include this parameter,
`DescribeElasticIps` returns a description of the Elastic IP
addresses that are registered with the specified stack.
:type ips: list
:param ips: An array of Elastic IP addresses to be described. If you
include this parameter, `DescribeElasticIps` returns a description
of the specified Elastic IP addresses. Otherwise, it returns a
description of every Elastic IP address.
"""
params = {}
if instance_id is not None:
params['InstanceId'] = instance_id
if stack_id is not None:
params['StackId'] = stack_id
if ips is not None:
params['Ips'] = ips
return self.make_request(action='DescribeElasticIps',
body=json.dumps(params))
def describe_elastic_load_balancers(self, stack_id=None, layer_ids=None):
"""
Describes a stack's Elastic Load Balancing instances.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: A stack ID. The action describes the stack's Elastic
Load Balancing instances.
:type layer_ids: list
:param layer_ids: A list of layer IDs. The action describes the Elastic
Load Balancing instances for the specified layers.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if layer_ids is not None:
params['LayerIds'] = layer_ids
return self.make_request(action='DescribeElasticLoadBalancers',
body=json.dumps(params))
def describe_instances(self, stack_id=None, layer_id=None,
instance_ids=None):
"""
Requests a description of a set of instances.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: A stack ID. If you use this parameter,
`DescribeInstances` returns descriptions of the instances
associated with the specified stack.
:type layer_id: string
:param layer_id: A layer ID. If you use this parameter,
`DescribeInstances` returns descriptions of the instances
associated with the specified layer.
:type instance_ids: list
:param instance_ids: An array of instance IDs to be described. If you
use this parameter, `DescribeInstances` returns a description of
the specified instances. Otherwise, it returns a description of
every instance.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if layer_id is not None:
params['LayerId'] = layer_id
if instance_ids is not None:
params['InstanceIds'] = instance_ids
return self.make_request(action='DescribeInstances',
body=json.dumps(params))
def describe_layers(self, stack_id=None, layer_ids=None):
"""
Requests a description of one or more layers in a specified
stack.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type layer_ids: list
:param layer_ids: An array of layer IDs that specify the layers to be
described. If you omit this parameter, `DescribeLayers` returns a
description of every layer in the specified stack.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if layer_ids is not None:
params['LayerIds'] = layer_ids
return self.make_request(action='DescribeLayers',
body=json.dumps(params))
def describe_load_based_auto_scaling(self, layer_ids):
"""
Describes load-based auto scaling configurations for specified
layers.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type layer_ids: list
:param layer_ids: An array of layer IDs.
"""
params = {'LayerIds': layer_ids, }
return self.make_request(action='DescribeLoadBasedAutoScaling',
body=json.dumps(params))
def describe_my_user_profile(self):
"""
Describes a user's SSH information.
**Required Permissions**: To use this action, an IAM user must
have self-management enabled or an attached policy that
explicitly grants permissions. For more information on user
permissions, see `Managing User Permissions`_.
"""
params = {}
return self.make_request(action='DescribeMyUserProfile',
body=json.dumps(params))
def describe_permissions(self, iam_user_arn=None, stack_id=None):
"""
Describes the permissions for a specified stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN. For more information about IAM
ARNs, see `Using Identifiers`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {}
if iam_user_arn is not None:
params['IamUserArn'] = iam_user_arn
if stack_id is not None:
params['StackId'] = stack_id
return self.make_request(action='DescribePermissions',
body=json.dumps(params))
def describe_raid_arrays(self, instance_id=None, stack_id=None,
raid_array_ids=None):
"""
Describe an instance's RAID arrays.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_id: string
:param instance_id: The instance ID. If you use this parameter,
`DescribeRaidArrays` returns descriptions of the RAID arrays
associated with the specified instance.
:type stack_id: string
:param stack_id: The stack ID.
:type raid_array_ids: list
:param raid_array_ids: An array of RAID array IDs. If you use this
parameter, `DescribeRaidArrays` returns descriptions of the
specified arrays. Otherwise, it returns a description of every
array.
"""
params = {}
if instance_id is not None:
params['InstanceId'] = instance_id
if stack_id is not None:
params['StackId'] = stack_id
if raid_array_ids is not None:
params['RaidArrayIds'] = raid_array_ids
return self.make_request(action='DescribeRaidArrays',
body=json.dumps(params))
def describe_rds_db_instances(self, stack_id, rds_db_instance_arns=None):
"""
Describes Amazon RDS instances.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID that the instances are registered with.
The operation returns descriptions of all registered Amazon RDS
instances.
:type rds_db_instance_arns: list
:param rds_db_instance_arns: An array containing the ARNs of the
instances to be described.
"""
params = {'StackId': stack_id, }
if rds_db_instance_arns is not None:
params['RdsDbInstanceArns'] = rds_db_instance_arns
return self.make_request(action='DescribeRdsDbInstances',
body=json.dumps(params))
def describe_service_errors(self, stack_id=None, instance_id=None,
service_error_ids=None):
"""
Describes AWS OpsWorks service errors.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID. If you use this parameter,
`DescribeServiceErrors` returns descriptions of the errors
associated with the specified stack.
:type instance_id: string
:param instance_id: The instance ID. If you use this parameter,
`DescribeServiceErrors` returns descriptions of the errors
associated with the specified instance.
:type service_error_ids: list
:param service_error_ids: An array of service error IDs. If you use
this parameter, `DescribeServiceErrors` returns descriptions of the
specified errors. Otherwise, it returns a description of every
error.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if instance_id is not None:
params['InstanceId'] = instance_id
if service_error_ids is not None:
params['ServiceErrorIds'] = service_error_ids
return self.make_request(action='DescribeServiceErrors',
body=json.dumps(params))
def describe_stack_provisioning_parameters(self, stack_id):
"""
Requests a description of a stack's provisioning parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the stack
or an attached policy that explicitly grants permissions. For
more information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID
"""
params = {'StackId': stack_id, }
return self.make_request(action='DescribeStackProvisioningParameters',
body=json.dumps(params))
def describe_stack_summary(self, stack_id):
"""
Describes the number of layers and apps in a specified stack,
and the number of instances in each state, such as
`running_setup` or `online`.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='DescribeStackSummary',
body=json.dumps(params))
def describe_stacks(self, stack_ids=None):
"""
Requests a description of one or more stacks.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_ids: list
:param stack_ids: An array of stack IDs that specify the stacks to be
described. If you omit this parameter, `DescribeStacks` returns a
description of every stack.
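        A minimal usage sketch; the connection setup and the `Stacks`
        result key are assumptions based on the DescribeStacks response
        shape, not part of this module:
        >>> import boto.opsworks.layer1
        >>> conn = boto.opsworks.layer1.OpsWorksConnection()
        >>> result = conn.describe_stacks()  # doctest: +SKIP
        >>> [s['StackId'] for s in result['Stacks']]  # doctest: +SKIP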
"""
params = {}
if stack_ids is not None:
params['StackIds'] = stack_ids
return self.make_request(action='DescribeStacks',
body=json.dumps(params))
def describe_time_based_auto_scaling(self, instance_ids):
"""
Describes time-based auto scaling configurations for specified
instances.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_ids: list
:param instance_ids: An array of instance IDs.
"""
params = {'InstanceIds': instance_ids, }
return self.make_request(action='DescribeTimeBasedAutoScaling',
body=json.dumps(params))
def describe_user_profiles(self, iam_user_arns=None):
"""
Describe specified users.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arns: list
:param iam_user_arns: An array of IAM user ARNs that identify the users
to be described.
"""
params = {}
if iam_user_arns is not None:
params['IamUserArns'] = iam_user_arns
return self.make_request(action='DescribeUserProfiles',
body=json.dumps(params))
def describe_volumes(self, instance_id=None, stack_id=None,
raid_array_id=None, volume_ids=None):
"""
Describes an instance's Amazon EBS volumes.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_id: string
:param instance_id: The instance ID. If you use this parameter,
`DescribeVolumes` returns descriptions of the volumes associated
with the specified instance.
:type stack_id: string
:param stack_id: A stack ID. The action describes the stack's
registered Amazon EBS volumes.
:type raid_array_id: string
:param raid_array_id: The RAID array ID. If you use this parameter,
`DescribeVolumes` returns descriptions of the volumes associated
with the specified RAID array.
:type volume_ids: list
        :param volume_ids: An array of volume IDs. If you use this parameter,
`DescribeVolumes` returns descriptions of the specified volumes.
Otherwise, it returns a description of every volume.
"""
params = {}
if instance_id is not None:
params['InstanceId'] = instance_id
if stack_id is not None:
params['StackId'] = stack_id
if raid_array_id is not None:
params['RaidArrayId'] = raid_array_id
if volume_ids is not None:
params['VolumeIds'] = volume_ids
return self.make_request(action='DescribeVolumes',
body=json.dumps(params))
def detach_elastic_load_balancer(self, elastic_load_balancer_name,
layer_id):
"""
Detaches a specified Elastic Load Balancing instance from its
layer.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_load_balancer_name: string
:param elastic_load_balancer_name: The Elastic Load Balancing
instance's name.
:type layer_id: string
:param layer_id: The ID of the layer that the Elastic Load Balancing
instance is attached to.
"""
params = {
'ElasticLoadBalancerName': elastic_load_balancer_name,
'LayerId': layer_id,
}
return self.make_request(action='DetachElasticLoadBalancer',
body=json.dumps(params))
def disassociate_elastic_ip(self, elastic_ip):
"""
Disassociates an Elastic IP address from its instance. The
address remains registered with the stack. For more
information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
"""
params = {'ElasticIp': elastic_ip, }
return self.make_request(action='DisassociateElasticIp',
body=json.dumps(params))
def get_hostname_suggestion(self, layer_id):
"""
Gets a generated host name for the specified layer, based on
the current host name theme.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
"""
params = {'LayerId': layer_id, }
return self.make_request(action='GetHostnameSuggestion',
body=json.dumps(params))
def reboot_instance(self, instance_id):
"""
Reboots a specified instance. For more information, see
`Starting, Stopping, and Rebooting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='RebootInstance',
body=json.dumps(params))
def register_elastic_ip(self, elastic_ip, stack_id):
"""
Registers an Elastic IP address with a specified stack. An
address can be registered with only one stack at a time. If
the address is already registered, you must first deregister
it by calling DeregisterElasticIp. For more information, see
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'ElasticIp': elastic_ip, 'StackId': stack_id, }
return self.make_request(action='RegisterElasticIp',
body=json.dumps(params))
def register_instance(self, stack_id, hostname=None, public_ip=None,
private_ip=None, rsa_public_key=None,
rsa_public_key_fingerprint=None,
instance_identity=None):
"""
Registers instances with a specified stack that were created
outside of AWS OpsWorks.
We do not recommend using this action to register instances.
The complete registration operation has two primary steps,
installing the AWS OpsWorks agent on the instance and
registering the instance with the stack. `RegisterInstance`
handles only the second step. You should instead use the AWS
CLI `register` command, which performs the entire registration
operation.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The ID of the stack that the instance is to be
registered with.
:type hostname: string
:param hostname: The instance's hostname.
:type public_ip: string
:param public_ip: The instance's public IP address.
:type private_ip: string
:param private_ip: The instance's private IP address.
:type rsa_public_key: string
        :param rsa_public_key: The instance's public RSA key. This key is used
to encrypt communication between the instance and the service.
:type rsa_public_key_fingerprint: string
        :param rsa_public_key_fingerprint: The instance's public RSA key
fingerprint.
:type instance_identity: dict
:param instance_identity: An InstanceIdentity object that contains the
instance's identity.
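        A hedged sketch; all values below are placeholders:
        >>> conn.register_instance('STACK_ID',
        ...                        hostname='myhost',
        ...                        public_ip='192.0.2.1',
        ...                        private_ip='10.0.0.2',
        ...                        rsa_public_key='ssh-rsa PUBKEY')  # doctest: +SKIP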
"""
params = {'StackId': stack_id, }
if hostname is not None:
params['Hostname'] = hostname
if public_ip is not None:
params['PublicIp'] = public_ip
if private_ip is not None:
params['PrivateIp'] = private_ip
if rsa_public_key is not None:
params['RsaPublicKey'] = rsa_public_key
if rsa_public_key_fingerprint is not None:
params['RsaPublicKeyFingerprint'] = rsa_public_key_fingerprint
if instance_identity is not None:
params['InstanceIdentity'] = instance_identity
return self.make_request(action='RegisterInstance',
body=json.dumps(params))
def register_rds_db_instance(self, stack_id, rds_db_instance_arn,
db_user, db_password):
"""
Registers an Amazon RDS instance with a stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type rds_db_instance_arn: string
:param rds_db_instance_arn: The Amazon RDS instance's ARN.
:type db_user: string
:param db_user: The database's master user name.
:type db_password: string
:param db_password: The database password.
"""
params = {
'StackId': stack_id,
'RdsDbInstanceArn': rds_db_instance_arn,
'DbUser': db_user,
'DbPassword': db_password,
}
return self.make_request(action='RegisterRdsDbInstance',
body=json.dumps(params))
def register_volume(self, stack_id, ec_2_volume_id=None):
"""
Registers an Amazon EBS volume with a specified stack. A
volume can be registered with only one stack at a time. If the
volume is already registered, you must first deregister it by
calling DeregisterVolume. For more information, see `Resource
Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type ec_2_volume_id: string
:param ec_2_volume_id: The Amazon EBS volume ID.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
if ec_2_volume_id is not None:
params['Ec2VolumeId'] = ec_2_volume_id
return self.make_request(action='RegisterVolume',
body=json.dumps(params))
def set_load_based_auto_scaling(self, layer_id, enable=None,
up_scaling=None, down_scaling=None):
"""
Specify the load-based auto scaling configuration for a
specified layer. For more information, see `Managing Load with
Time-based and Load-based Instances`_.
To use load-based auto scaling, you must create a set of load-
based auto scaling instances. Load-based auto scaling operates
only on the instances from that set, so you must ensure that
you have created enough instances to handle the maximum
anticipated load.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
:type enable: boolean
:param enable: Enables load-based auto scaling for the layer.
:type up_scaling: dict
:param up_scaling: An `AutoScalingThresholds` object with the upscaling
threshold configuration. If the load exceeds these thresholds for a
specified amount of time, AWS OpsWorks starts a specified number of
instances.
:type down_scaling: dict
:param down_scaling: An `AutoScalingThresholds` object with the
downscaling threshold configuration. If the load falls below these
thresholds for a specified amount of time, AWS OpsWorks stops a
specified number of instances.
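        A hedged sketch; the threshold keys follow the
        `AutoScalingThresholds` data type and should be checked against
        the current API reference:
        >>> up = {'InstanceCount': 2, 'ThresholdsWaitTime': 5,
        ...       'CpuThreshold': 80.0}
        >>> down = {'InstanceCount': 2, 'ThresholdsWaitTime': 10,
        ...         'CpuThreshold': 30.0}
        >>> conn.set_load_based_auto_scaling('LAYER_ID', enable=True,
        ...                                  up_scaling=up,
        ...                                  down_scaling=down)  # doctest: +SKIP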
"""
params = {'LayerId': layer_id, }
if enable is not None:
params['Enable'] = enable
if up_scaling is not None:
params['UpScaling'] = up_scaling
if down_scaling is not None:
params['DownScaling'] = down_scaling
return self.make_request(action='SetLoadBasedAutoScaling',
body=json.dumps(params))
def set_permission(self, stack_id, iam_user_arn, allow_ssh=None,
allow_sudo=None, level=None):
"""
Specifies a user's permissions. For more information, see
`Security and Permissions`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN.
:type allow_ssh: boolean
:param allow_ssh: The user is allowed to use SSH to communicate with
the instance.
:type allow_sudo: boolean
:param allow_sudo: The user is allowed to use **sudo** to elevate
privileges.
:type level: string
:param level: The user's permission level, which must be set to one of
the following strings. You cannot set your own permissions level.
+ `deny`
+ `show`
+ `deploy`
+ `manage`
+ `iam_only`
For more information on the permissions associated with these levels,
see `Managing User Permissions`_
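        For example, to grant an IAM user deploy access with SSH (the
        ARN and stack ID are placeholders):
        >>> conn.set_permission('STACK_ID',
        ...                     'arn:aws:iam::123456789012:user/alice',
        ...                     allow_ssh=True, level='deploy')  # doctest: +SKIP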
"""
params = {'StackId': stack_id, 'IamUserArn': iam_user_arn, }
if allow_ssh is not None:
params['AllowSsh'] = allow_ssh
if allow_sudo is not None:
params['AllowSudo'] = allow_sudo
if level is not None:
params['Level'] = level
return self.make_request(action='SetPermission',
body=json.dumps(params))
def set_time_based_auto_scaling(self, instance_id,
auto_scaling_schedule=None):
"""
Specify the time-based auto scaling configuration for a
specified instance. For more information, see `Managing Load
with Time-based and Load-based Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
:type auto_scaling_schedule: dict
:param auto_scaling_schedule: An `AutoScalingSchedule` with the
instance schedule.
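        A hedged sketch; the schedule maps day names to hour/state maps
        per the `WeeklyAutoScalingSchedule` data type (verify key casing
        against the API reference):
        >>> schedule = {'Monday': {'9': 'on', '17': 'off'}}
        >>> conn.set_time_based_auto_scaling('INSTANCE_ID',
        ...     auto_scaling_schedule=schedule)  # doctest: +SKIP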
"""
params = {'InstanceId': instance_id, }
if auto_scaling_schedule is not None:
params['AutoScalingSchedule'] = auto_scaling_schedule
return self.make_request(action='SetTimeBasedAutoScaling',
body=json.dumps(params))
def start_instance(self, instance_id):
"""
Starts a specified instance. For more information, see
`Starting, Stopping, and Rebooting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='StartInstance',
body=json.dumps(params))
def start_stack(self, stack_id):
"""
Starts a stack's instances.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='StartStack',
body=json.dumps(params))
def stop_instance(self, instance_id):
"""
Stops a specified instance. When you stop a standard instance,
the data disappears and must be reinstalled when you restart
the instance. You can stop an Amazon EBS-backed instance
without losing data. For more information, see `Starting,
Stopping, and Rebooting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='StopInstance',
body=json.dumps(params))
def stop_stack(self, stack_id):
"""
Stops a specified stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='StopStack',
body=json.dumps(params))
def unassign_instance(self, instance_id):
"""
        Unassigns a registered instance from all of its layers. The
instance remains in the stack as an unassigned instance and
can be assigned to another layer, as needed. You cannot use
this action with instances that were created with AWS
OpsWorks.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='UnassignInstance',
body=json.dumps(params))
def unassign_volume(self, volume_id):
"""
Unassigns an assigned Amazon EBS volume. The volume remains
registered with the stack. For more information, see `Resource
Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
"""
params = {'VolumeId': volume_id, }
return self.make_request(action='UnassignVolume',
body=json.dumps(params))
def update_app(self, app_id, name=None, description=None,
data_sources=None, type=None, app_source=None,
domains=None, enable_ssl=None, ssl_configuration=None,
attributes=None, environment=None):
"""
Updates a specified app.
**Required Permissions**: To use this action, an IAM user must
have a Deploy or Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type app_id: string
:param app_id: The app ID.
:type name: string
:param name: The app name.
:type description: string
:param description: A description of the app.
:type data_sources: list
:param data_sources: The app's data sources.
:type type: string
:param type: The app type.
:type app_source: dict
:param app_source: A `Source` object that specifies the app repository.
:type domains: list
:param domains: The app's virtual host settings, with multiple domains
separated by commas. For example: `'www.example.com, example.com'`
:type enable_ssl: boolean
:param enable_ssl: Whether SSL is enabled for the app.
:type ssl_configuration: dict
:param ssl_configuration: An `SslConfiguration` object with the SSL
configuration.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type environment: list
:param environment:
An array of `EnvironmentVariable` objects that specify environment
variables to be associated with the app. You can specify up to ten
environment variables. After you deploy the app, these variables
are defined on the associated app server instances.
This parameter is supported only by Chef 11.10 stacks. If you have
specified one or more environment variables, you cannot modify the
stack's Chef version.
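        A hedged sketch of setting environment variables; the
        `EnvironmentVariable` keys (Key, Value, Secure) follow the API
        data type, and the IDs are placeholders:
        >>> env = [{'Key': 'RAILS_ENV', 'Value': 'production',
        ...         'Secure': False}]
        >>> conn.update_app('APP_ID', environment=env)  # doctest: +SKIP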
"""
params = {'AppId': app_id, }
if name is not None:
params['Name'] = name
if description is not None:
params['Description'] = description
if data_sources is not None:
params['DataSources'] = data_sources
if type is not None:
params['Type'] = type
if app_source is not None:
params['AppSource'] = app_source
if domains is not None:
params['Domains'] = domains
if enable_ssl is not None:
params['EnableSsl'] = enable_ssl
if ssl_configuration is not None:
params['SslConfiguration'] = ssl_configuration
if attributes is not None:
params['Attributes'] = attributes
if environment is not None:
params['Environment'] = environment
return self.make_request(action='UpdateApp',
body=json.dumps(params))
def update_elastic_ip(self, elastic_ip, name=None):
"""
Updates a registered Elastic IP address's name. For more
information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The address.
:type name: string
:param name: The new name.
"""
params = {'ElasticIp': elastic_ip, }
if name is not None:
params['Name'] = name
return self.make_request(action='UpdateElasticIp',
body=json.dumps(params))
def update_instance(self, instance_id, layer_ids=None,
instance_type=None, auto_scaling_type=None,
hostname=None, os=None, ami_id=None,
ssh_key_name=None, architecture=None,
install_updates_on_boot=None, ebs_optimized=None):
"""
Updates a specified instance.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
:type layer_ids: list
:param layer_ids: The instance's layer IDs.
:type instance_type: string
:param instance_type: The instance type. AWS OpsWorks supports all
instance types except Cluster Compute, Cluster GPU, and High Memory
Cluster. For more information, see `Instance Families and Types`_.
The parameter values that you use to specify the various types are
in the API Name column of the Available Instance Types table.
:type auto_scaling_type: string
:param auto_scaling_type: For load-based or time-based instances, the
type.
:type hostname: string
:param hostname: The instance host name.
:type os: string
:param os: The instance's operating system, which must be set to one of
the following.
+ Standard operating systems: An Amazon Linux version such as `Amazon
Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`
The default option is the current Amazon Linux version, such as `Amazon
Linux 2014.09`. If you set this parameter to `Custom`, you must use
the CreateInstance action's AmiId parameter to specify the custom
AMI that you want to use. For more information on the standard
            operating systems, see `Operating Systems`_. For more information on
how to use custom AMIs with OpsWorks, see `Using Custom AMIs`_.
:type ami_id: string
:param ami_id:
A custom AMI ID to be used to create the instance. The AMI should be
based on one of the standard AWS OpsWorks AMIs: Amazon Linux,
Ubuntu 12.04 LTS, or Ubuntu 14.04 LTS. For more information, see
`Instances`_
If you specify a custom AMI, you must set `Os` to `Custom`.
:type ssh_key_name: string
:param ssh_key_name: The instance SSH key name.
:type architecture: string
:param architecture: The instance architecture. Instance types do not
necessarily support both architectures. For a list of the
architectures that are supported by the different instance types,
see `Instance Families and Types`_.
:type install_updates_on_boot: boolean
:param install_updates_on_boot:
Whether to install operating system and package updates when the
instance boots. The default value is `True`. To control when
updates are installed, set this value to `False`. You must then
update your instances manually by using CreateDeployment to run the
`update_dependencies` stack command or manually running `yum`
(Amazon Linux) or `apt-get` (Ubuntu) on the instances.
We strongly recommend using the default value of `True`, to ensure that
your instances have the latest security updates.
:type ebs_optimized: boolean
:param ebs_optimized: Whether this is an Amazon EBS-optimized instance.
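        For example, to resize an instance and pin its operating system
        (the IDs and values are placeholders):
        >>> conn.update_instance('INSTANCE_ID',
        ...                      instance_type='c3.large',
        ...                      os='Amazon Linux 2014.09')  # doctest: +SKIP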
"""
params = {'InstanceId': instance_id, }
if layer_ids is not None:
params['LayerIds'] = layer_ids
if instance_type is not None:
params['InstanceType'] = instance_type
if auto_scaling_type is not None:
params['AutoScalingType'] = auto_scaling_type
if hostname is not None:
params['Hostname'] = hostname
if os is not None:
params['Os'] = os
if ami_id is not None:
params['AmiId'] = ami_id
if ssh_key_name is not None:
params['SshKeyName'] = ssh_key_name
if architecture is not None:
params['Architecture'] = architecture
if install_updates_on_boot is not None:
params['InstallUpdatesOnBoot'] = install_updates_on_boot
if ebs_optimized is not None:
params['EbsOptimized'] = ebs_optimized
return self.make_request(action='UpdateInstance',
body=json.dumps(params))
def update_layer(self, layer_id, name=None, shortname=None,
attributes=None, custom_instance_profile_arn=None,
custom_security_group_ids=None, packages=None,
volume_configurations=None, enable_auto_healing=None,
auto_assign_elastic_ips=None,
auto_assign_public_ips=None, custom_recipes=None,
install_updates_on_boot=None,
use_ebs_optimized_instances=None,
lifecycle_event_configuration=None):
"""
Updates a specified layer.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
:type name: string
:param name: The layer name, which is used by the console.
:type shortname: string
:param shortname: The layer short name, which is used internally by AWS
            OpsWorks and by Chef. The short name is also used as the name for
the directory where your app files are installed. It can have a
maximum of 200 characters and must be in the following format:
/\A[a-z0-9\-\_\.]+\Z/.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type custom_instance_profile_arn: string
:param custom_instance_profile_arn: The ARN of an IAM profile to be
used for all of the layer's EC2 instances. For more information
about IAM ARNs, see `Using Identifiers`_.
:type custom_security_group_ids: list
:param custom_security_group_ids: An array containing the layer's
custom security group IDs.
:type packages: list
:param packages: An array of `Package` objects that describe the
layer's packages.
:type volume_configurations: list
:param volume_configurations: A `VolumeConfigurations` object that
describes the layer's Amazon EBS volumes.
:type enable_auto_healing: boolean
:param enable_auto_healing: Whether to disable auto healing for the
layer.
:type auto_assign_elastic_ips: boolean
:param auto_assign_elastic_ips: Whether to automatically assign an
`Elastic IP address`_ to the layer's instances. For more
information, see `How to Edit a Layer`_.
:type auto_assign_public_ips: boolean
:param auto_assign_public_ips: For stacks that are running in a VPC,
whether to automatically assign a public IP address to the layer's
instances. For more information, see `How to Edit a Layer`_.
:type custom_recipes: dict
:param custom_recipes: A `LayerCustomRecipes` object that specifies the
layer's custom recipes.
:type install_updates_on_boot: boolean
:param install_updates_on_boot:
Whether to install operating system and package updates when the
instance boots. The default value is `True`. To control when
updates are installed, set this value to `False`. You must then
update your instances manually by using CreateDeployment to run the
`update_dependencies` stack command or manually running `yum`
(Amazon Linux) or `apt-get` (Ubuntu) on the instances.
We strongly recommend using the default value of `True`, to ensure that
your instances have the latest security updates.
:type use_ebs_optimized_instances: boolean
:param use_ebs_optimized_instances: Whether to use Amazon EBS-optimized
instances.
        :type lifecycle_event_configuration: dict
        :param lifecycle_event_configuration: A `LifecycleEventConfiguration`
            object that you can use to configure the Shutdown event to
            specify an execution timeout and enable or disable Elastic Load
            Balancer connection draining.
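        A hedged sketch of updating a layer's custom recipes; the
        `LayerCustomRecipes` keys (Setup, Configure, Deploy, Undeploy,
        Shutdown) follow the API data type, and the names are
        placeholders:
        >>> recipes = {'Setup': ['mycookbook::setup'],
        ...            'Deploy': ['mycookbook::deploy']}
        >>> conn.update_layer('LAYER_ID', custom_recipes=recipes)  # doctest: +SKIP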
"""
params = {'LayerId': layer_id, }
if name is not None:
params['Name'] = name
if shortname is not None:
params['Shortname'] = shortname
if attributes is not None:
params['Attributes'] = attributes
if custom_instance_profile_arn is not None:
params['CustomInstanceProfileArn'] = custom_instance_profile_arn
if custom_security_group_ids is not None:
params['CustomSecurityGroupIds'] = custom_security_group_ids
if packages is not None:
params['Packages'] = packages
if volume_configurations is not None:
params['VolumeConfigurations'] = volume_configurations
if enable_auto_healing is not None:
params['EnableAutoHealing'] = enable_auto_healing
if auto_assign_elastic_ips is not None:
params['AutoAssignElasticIps'] = auto_assign_elastic_ips
if auto_assign_public_ips is not None:
params['AutoAssignPublicIps'] = auto_assign_public_ips
if custom_recipes is not None:
params['CustomRecipes'] = custom_recipes
if install_updates_on_boot is not None:
params['InstallUpdatesOnBoot'] = install_updates_on_boot
if use_ebs_optimized_instances is not None:
params['UseEbsOptimizedInstances'] = use_ebs_optimized_instances
if lifecycle_event_configuration is not None:
params['LifecycleEventConfiguration'] = lifecycle_event_configuration
return self.make_request(action='UpdateLayer',
body=json.dumps(params))
def update_my_user_profile(self, ssh_public_key=None):
"""
Updates a user's SSH public key.
**Required Permissions**: To use this action, an IAM user must
have self-management enabled or an attached policy that
explicitly grants permissions. For more information on user
permissions, see `Managing User Permissions`_.
:type ssh_public_key: string
:param ssh_public_key: The user's SSH public key.
"""
params = {}
if ssh_public_key is not None:
params['SshPublicKey'] = ssh_public_key
return self.make_request(action='UpdateMyUserProfile',
body=json.dumps(params))
def update_rds_db_instance(self, rds_db_instance_arn, db_user=None,
db_password=None):
"""
Updates an Amazon RDS instance.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type rds_db_instance_arn: string
:param rds_db_instance_arn: The Amazon RDS instance's ARN.
:type db_user: string
:param db_user: The master user name.
:type db_password: string
:param db_password: The database password.
"""
params = {'RdsDbInstanceArn': rds_db_instance_arn, }
if db_user is not None:
params['DbUser'] = db_user
if db_password is not None:
params['DbPassword'] = db_password
return self.make_request(action='UpdateRdsDbInstance',
body=json.dumps(params))
def update_stack(self, stack_id, name=None, attributes=None,
service_role_arn=None,
default_instance_profile_arn=None, default_os=None,
hostname_theme=None, default_availability_zone=None,
default_subnet_id=None, custom_json=None,
configuration_manager=None, chef_configuration=None,
use_custom_cookbooks=None, custom_cookbooks_source=None,
default_ssh_key_name=None,
default_root_device_type=None,
use_opsworks_security_groups=None):
"""
Updates a specified stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type name: string
:param name: The stack's new name.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type service_role_arn: string
:param service_role_arn:
The stack AWS Identity and Access Management (IAM) role, which allows
AWS OpsWorks to work with AWS resources on your behalf. You must
set this parameter to the Amazon Resource Name (ARN) for an
existing IAM role. For more information about IAM ARNs, see `Using
Identifiers`_.
You must set this parameter to a valid service role ARN or the action
will fail; there is no default value. You can specify the stack's
current service role ARN, if you prefer, but you must do so
explicitly.
:type default_instance_profile_arn: string
:param default_instance_profile_arn: The ARN of an IAM profile that is
the default profile for all of the stack's EC2 instances. For more
information about IAM ARNs, see `Using Identifiers`_.
:type default_os: string
:param default_os: The stack's operating system, which must be set to
one of the following.
+ Standard operating systems: an Amazon Linux version such as `Amazon
Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`. You specify the custom AMI you want to use
when you create instances.
The default option is the current Amazon Linux version.
:type hostname_theme: string
        :param hostname_theme: The stack's new host name theme, with spaces
replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
`Layer_Dependent`, which creates host names by appending integers
to the layer's short name. The other themes are:
+ `Baked_Goods`
+ `Clouds`
+ `European_Cities`
+ `Fruits`
+ `Greek_Deities`
+ `Legendary_Creatures_from_Japan`
+ `Planets_and_Moons`
+ `Roman_Deities`
+ `Scottish_Islands`
+ `US_Cities`
+ `Wild_Cats`
To obtain a generated host name, call `GetHostNameSuggestion`, which
returns a host name based on the current theme.
:type default_availability_zone: string
:param default_availability_zone: The stack's default Availability
Zone, which must be in the specified region. For more information,
see `Regions and Endpoints`_. If you also specify a value for
`DefaultSubnetId`, the subnet must be in the same zone. For more
information, see CreateStack.
:type default_subnet_id: string
:param default_subnet_id: The stack's default VPC subnet ID. This
parameter is required if you specify a value for the `VpcId`
parameter. All instances are launched into this subnet unless you
specify otherwise when you create the instance. If you also specify
a value for `DefaultAvailabilityZone`, the subnet must be in that
zone. For information on default values and when this parameter is
required, see the `VpcId` parameter description.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
It is used to override the corresponding default stack
configuration JSON values. The string should be in the following
            format and must escape characters such as '"':
`"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
For more information on custom JSON, see `Use Custom JSON to Modify the
Stack Configuration JSON`_.
:type configuration_manager: dict
:param configuration_manager: The configuration manager. When you clone
a stack we recommend that you use the configuration manager to
specify the Chef version, 0.9, 11.4, or 11.10. The default value is
currently 11.4.
:type chef_configuration: dict
:param chef_configuration: A `ChefConfiguration` object that specifies
whether to enable Berkshelf and the Berkshelf version on Chef 11.10
stacks. For more information, see `Create a New Stack`_.
:type use_custom_cookbooks: boolean
:param use_custom_cookbooks: Whether the stack uses custom cookbooks.
:type custom_cookbooks_source: dict
:param custom_cookbooks_source: Contains the information required to
retrieve an app or cookbook from a repository. For more
information, see `Creating Apps`_ or `Custom Recipes and
Cookbooks`_.
:type default_ssh_key_name: string
:param default_ssh_key_name: A default SSH key for the stack instances.
You can override this value when you create or update an instance.
:type default_root_device_type: string
:param default_root_device_type: The default root device type. This
value is used by default for all instances in the stack, but you
can override it when you create an instance. For more information,
see `Storage for the Root Device`_.
:type use_opsworks_security_groups: boolean
:param use_opsworks_security_groups: Whether to associate the AWS
OpsWorks built-in security groups with the stack's layers.
AWS OpsWorks provides a standard set of built-in security groups, one
for each layer, which are associated with layers by default.
`UseOpsworksSecurityGroups` allows you to instead provide your own
custom security groups. `UseOpsworksSecurityGroups` has the
following settings:
+ True - AWS OpsWorks automatically associates the appropriate built-in
security group with each layer (default setting). You can associate
additional security groups with a layer after you create it but you
cannot delete the built-in security group.
+ False - AWS OpsWorks does not associate built-in security groups with
layers. You must create appropriate EC2 security groups and
associate a security group with each layer that you create.
However, you can still manually associate a built-in security group
with a layer on creation; custom security groups are required only
for those layers that need custom settings.
For more information, see `Create a New Stack`_.
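        A hedged sketch; the ARN and stack ID are placeholders, and the
        custom JSON is passed as an ordinary Python string:
        >>> conn.update_stack('STACK_ID',
        ...     service_role_arn='arn:aws:iam::123456789012:role/opsworks-role',
        ...     custom_json='{"key1": "value1"}')  # doctest: +SKIP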
"""
params = {'StackId': stack_id, }
if name is not None:
params['Name'] = name
if attributes is not None:
params['Attributes'] = attributes
if service_role_arn is not None:
params['ServiceRoleArn'] = service_role_arn
if default_instance_profile_arn is not None:
params['DefaultInstanceProfileArn'] = default_instance_profile_arn
if default_os is not None:
params['DefaultOs'] = default_os
if hostname_theme is not None:
params['HostnameTheme'] = hostname_theme
if default_availability_zone is not None:
params['DefaultAvailabilityZone'] = default_availability_zone
if default_subnet_id is not None:
params['DefaultSubnetId'] = default_subnet_id
if custom_json is not None:
params['CustomJson'] = custom_json
if configuration_manager is not None:
params['ConfigurationManager'] = configuration_manager
if chef_configuration is not None:
params['ChefConfiguration'] = chef_configuration
if use_custom_cookbooks is not None:
params['UseCustomCookbooks'] = use_custom_cookbooks
if custom_cookbooks_source is not None:
params['CustomCookbooksSource'] = custom_cookbooks_source
if default_ssh_key_name is not None:
params['DefaultSshKeyName'] = default_ssh_key_name
if default_root_device_type is not None:
params['DefaultRootDeviceType'] = default_root_device_type
if use_opsworks_security_groups is not None:
params['UseOpsworksSecurityGroups'] = use_opsworks_security_groups
return self.make_request(action='UpdateStack',
body=json.dumps(params))
def update_user_profile(self, iam_user_arn, ssh_username=None,
ssh_public_key=None, allow_self_management=None):
"""
Updates a specified user profile.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user IAM ARN.
:type ssh_username: string
:param ssh_username: The user's SSH user name. The allowable characters
are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name
includes other punctuation marks, AWS OpsWorks removes them. For
example, `my.name` will be changed to `myname`. If you do not
specify an SSH user name, AWS OpsWorks generates one from the IAM
user name.
:type ssh_public_key: string
:param ssh_public_key: The user's new SSH public key.
:type allow_self_management: boolean
:param allow_self_management: Whether users can specify their own SSH
public key through the My Settings page. For more information, see
`Managing User Permissions`_.
"""
params = {'IamUserArn': iam_user_arn, }
if ssh_username is not None:
params['SshUsername'] = ssh_username
if ssh_public_key is not None:
params['SshPublicKey'] = ssh_public_key
if allow_self_management is not None:
params['AllowSelfManagement'] = allow_self_management
return self.make_request(action='UpdateUserProfile',
body=json.dumps(params))
def update_volume(self, volume_id, name=None, mount_point=None):
"""
Updates an Amazon EBS volume's name or mount point. For more
information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
:type name: string
:param name: The new name.
:type mount_point: string
:param mount_point: The new mount point.
"""
params = {'VolumeId': volume_id, }
if name is not None:
params['Name'] = name
if mount_point is not None:
params['MountPoint'] = mount_point
return self.make_request(action='UpdateVolume',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
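    # A hedged usage note: every public method above funnels through
    # make_request, which speaks the AWS JSON 1.1 wire protocol. Service
    # faults are mapped to exception classes via self._faults, falling
    # back to self.ResponseError, so callers can write, for example:
    #
    #   try:
    #       conn.describe_stacks(stack_ids=['NO_SUCH_STACK'])
    #   except conn.ResponseError as e:
    #       print(e.status, e.reason)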
# End of file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/opsworks/layer1.py
import boto.exception
from boto.compat import json
import requests
import boto
class SearchServiceException(Exception):
pass
class CommitMismatchError(Exception):
pass
class EncodingError(Exception):
"""
Content sent for Cloud Search indexing was incorrectly encoded.
This usually happens when a document is marked as unicode but non-unicode
characters are present.
"""
pass
class ContentTooLongError(Exception):
"""
Content sent for Cloud Search indexing was too long
This will usually happen when documents queued for indexing add up to more
than the limit allowed per upload batch (5MB)
"""
pass
class DocumentServiceConnection(object):
"""
A CloudSearch document service.
    The DocumentServiceConnection is used to add, remove and update documents in
CloudSearch. Commands are uploaded to CloudSearch in SDF (Search Document Format).
To generate an appropriate SDF, use :func:`add` to add or update documents,
as well as :func:`delete` to remove documents.
    Once the set of documents is ready to be indexed, use :func:`commit` to send the
commands to CloudSearch.
If there are a lot of documents to index, it may be preferable to split the
generation of SDF data and the actual uploading into CloudSearch. Retrieve
    the current SDF with :func:`get_sdf`. If this file is then uploaded into S3,
it can be retrieved back afterwards for upload into CloudSearch using
:func:`add_sdf_from_s3`.
The SDF is not cleared after a :func:`commit`. If you wish to continue
using the DocumentServiceConnection for another batch upload of commands,
you will need to :func:`clear_sdf` first to stop the previous batch of
commands from being uploaded again.
"""
def __init__(self, domain=None, endpoint=None):
self.domain = domain
self.endpoint = endpoint
if not self.endpoint:
self.endpoint = domain.doc_service_endpoint
self.documents_batch = []
self._sdf = None
def add(self, _id, version, fields, lang='en'):
"""
Add a document to be processed by the DocumentService
The document will not actually be added until :func:`commit` is called
:type _id: string
:param _id: A unique ID used to refer to this document.
:type version: int
:param version: Version of the document being indexed. If a file is
being reindexed, the version should be higher than the existing one
in CloudSearch.
:type fields: dict
        :param fields: A dictionary of key-value pairs to be uploaded.
:type lang: string
:param lang: The language code the data is in. Only 'en' is currently
supported
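        A minimal sketch; this only queues the command locally (the
        endpoint, ID, and fields are placeholders):
        >>> ds = DocumentServiceConnection(endpoint='doc-demo.us-east-1.cloudsearch.amazonaws.com')
        >>> ds.add('doc-1', 1, {'title': 'Hello', 'year': 2012})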
"""
d = {'type': 'add', 'id': _id, 'version': version, 'lang': lang,
'fields': fields}
self.documents_batch.append(d)
def delete(self, _id, version):
"""
Schedule a document to be removed from the CloudSearch service
The document will not actually be scheduled for removal until :func:`commit` is called
:type _id: string
:param _id: The unique ID of this document.
:type version: int
:param version: Version of the document to remove. The delete will only
occur if this version number is higher than the version currently
in the index.
"""
d = {'type': 'delete', 'id': _id, 'version': version}
self.documents_batch.append(d)
def get_sdf(self):
"""
Generate the working set of documents in Search Data Format (SDF)
:rtype: string
:returns: JSON-formatted string of the documents in SDF
"""
return self._sdf if self._sdf else json.dumps(self.documents_batch)
def clear_sdf(self):
"""
Clear the working documents from this DocumentServiceConnection
This should be used after :func:`commit` if the connection will be reused
for another set of documents.
"""
self._sdf = None
self.documents_batch = []
def add_sdf_from_s3(self, key_obj):
"""
Load an SDF from S3
Using this method will result in documents added through
:func:`add` and :func:`delete` being ignored.
:type key_obj: :class:`boto.s3.key.Key`
:param key_obj: An S3 key which contains an SDF
"""
        # @todo (lucas): would be nice if this could just take an s3://uri...
self._sdf = key_obj.get_contents_as_string()
def commit(self):
"""
Actually send an SDF to CloudSearch for processing
If an SDF file has been explicitly loaded it will be used. Otherwise,
documents added through :func:`add` and :func:`delete` will be used.
:rtype: :class:`CommitResponse`
:returns: A summary of documents added and deleted
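        A hedged end-to-end sketch (the endpoint is a placeholder):
        >>> ds = DocumentServiceConnection(endpoint='doc-demo.us-east-1.cloudsearch.amazonaws.com')
        >>> ds.add('doc-1', 1, {'title': 'Hello'})
        >>> ds.delete('doc-2', 2)
        >>> response = ds.commit()  # doctest: +SKIP
        >>> ds.clear_sdf()  # doctest: +SKIP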
"""
sdf = self.get_sdf()
if ': null' in sdf:
            boto.log.error('null value in sdf detected. This will probably '
                           'raise a 500 error.')
index = sdf.index(': null')
boto.log.error(sdf[index - 100:index + 100])
url = "http://%s/2011-02-01/documents/batch" % (self.endpoint)
# Keep-alive is automatic in a post-1.0 requests world.
session = requests.Session()
adapter = requests.adapters.HTTPAdapter(
pool_connections=20,
pool_maxsize=50,
max_retries=5
)
session.mount('http://', adapter)
session.mount('https://', adapter)
r = session.post(url, data=sdf, headers={'Content-Type': 'application/json'})
return CommitResponse(r, self, sdf)
class CommitResponse(object):
"""Wrapper for response to Cloudsearch document batch commit.
:type response: :class:`requests.models.Response`
:param response: Response from Cloudsearch /documents/batch API
:type doc_service: :class:`boto.cloudsearch.document.DocumentServiceConnection`
:param doc_service: Object containing the documents posted and methods to
retry
:raises: :class:`boto.exception.BotoServerError`
:raises: :class:`boto.cloudsearch.document.SearchServiceException`
:raises: :class:`boto.cloudsearch.document.EncodingError`
:raises: :class:`boto.cloudsearch.document.ContentTooLongError`
"""
def __init__(self, response, doc_service, sdf):
self.response = response
self.doc_service = doc_service
self.sdf = sdf
_body = response.content.decode('utf-8')
try:
self.content = json.loads(_body)
        except ValueError:
boto.log.error('Error indexing documents.\nResponse Content:\n{0}\n\n'
'SDF:\n{1}'.format(_body, self.sdf))
raise boto.exception.BotoServerError(self.response.status_code, '',
body=_body)
self.status = self.content['status']
if self.status == 'error':
self.errors = [e.get('message') for e in self.content.get('errors',
[])]
for e in self.errors:
if "Illegal Unicode character" in e:
raise EncodingError("Illegal Unicode character in document")
elif e == "The Content-Length is too long":
raise ContentTooLongError("Content was too long")
if 'adds' not in self.content or 'deletes' not in self.content:
raise SearchServiceException("Error indexing documents"
" => %s" % self.content.get('message', ''))
else:
self.errors = []
self.adds = self.content['adds']
self.deletes = self.content['deletes']
self._check_num_ops('add', self.adds)
self._check_num_ops('delete', self.deletes)
def _check_num_ops(self, type_, response_num):
"""Raise exception if number of ops in response doesn't match commit
:type type_: str
:param type_: Type of commit operation: 'add' or 'delete'
:type response_num: int
:param response_num: Number of adds or deletes in the response.
:raises: :class:`boto.cloudsearch.document.CommitMismatchError`
"""
commit_num = len([d for d in self.doc_service.documents_batch
if d['type'] == type_])
if response_num != commit_num:
raise CommitMismatchError(
'Incorrect number of {0}s returned. Commit: {1} Response: {2}'\
.format(type_, commit_num, response_num))
# End of file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/cloudsearch/document.py
from math import ceil
from boto.compat import json, map, six
import requests
class SearchServiceException(Exception):
pass
class CommitMismatchError(Exception):
pass
class SearchResults(object):
def __init__(self, **attrs):
self.rid = attrs['info']['rid']
# self.doc_coverage_pct = attrs['info']['doc-coverage-pct']
self.cpu_time_ms = attrs['info']['cpu-time-ms']
self.time_ms = attrs['info']['time-ms']
self.hits = attrs['hits']['found']
self.docs = attrs['hits']['hit']
self.start = attrs['hits']['start']
self.rank = attrs['rank']
self.match_expression = attrs['match-expr']
self.query = attrs['query']
self.search_service = attrs['search_service']
self.facets = {}
if 'facets' in attrs:
for (facet, values) in attrs['facets'].items():
if 'constraints' in values:
                    self.facets[facet] = dict((x['value'], x['count']) for x in values['constraints'])
        self.num_pages_needed = ceil(self.hits / float(self.query.real_size))
def __len__(self):
return len(self.docs)
def __iter__(self):
return iter(self.docs)
def next_page(self):
"""Call Cloudsearch to get the next page of search results
:rtype: :class:`boto.cloudsearch.search.SearchResults`
:return: the following page of search results
"""
if self.query.page <= self.num_pages_needed:
self.query.start += self.query.real_size
self.query.page += 1
return self.search_service(self.query)
else:
raise StopIteration
class Query(object):
RESULTS_PER_PAGE = 500
def __init__(self, q=None, bq=None, rank=None,
return_fields=None, size=10,
start=0, facet=None, facet_constraints=None,
facet_sort=None, facet_top_n=None, t=None):
self.q = q
self.bq = bq
self.rank = rank or []
self.return_fields = return_fields or []
self.start = start
self.facet = facet or []
self.facet_constraints = facet_constraints or {}
self.facet_sort = facet_sort or {}
self.facet_top_n = facet_top_n or {}
self.t = t or {}
self.page = 0
self.update_size(size)
def update_size(self, new_size):
self.size = new_size
self.real_size = Query.RESULTS_PER_PAGE if (self.size >
Query.RESULTS_PER_PAGE or self.size == 0) else self.size
def to_params(self):
"""Transform search parameters from instance properties to a dictionary
:rtype: dict
:return: search parameters
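        For example, a simple query serializes as follows (dict ordering
        may vary):
        >>> Query(q='tim', return_fields=['headline']).to_params()  # doctest: +SKIP
        {'start': 0, 'size': 10, 'q': 'tim', 'return-fields': 'headline'}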
"""
params = {'start': self.start, 'size': self.real_size}
if self.q:
params['q'] = self.q
if self.bq:
params['bq'] = self.bq
if self.rank:
params['rank'] = ','.join(self.rank)
if self.return_fields:
params['return-fields'] = ','.join(self.return_fields)
if self.facet:
params['facet'] = ','.join(self.facet)
if self.facet_constraints:
for k, v in six.iteritems(self.facet_constraints):
params['facet-%s-constraints' % k] = v
if self.facet_sort:
for k, v in six.iteritems(self.facet_sort):
params['facet-%s-sort' % k] = v
if self.facet_top_n:
for k, v in six.iteritems(self.facet_top_n):
params['facet-%s-top-n' % k] = v
if self.t:
for k, v in six.iteritems(self.t):
params['t-%s' % k] = v
return params
class SearchConnection(object):
def __init__(self, domain=None, endpoint=None):
self.domain = domain
self.endpoint = endpoint
if not endpoint:
self.endpoint = domain.search_service_endpoint
def build_query(self, q=None, bq=None, rank=None, return_fields=None,
size=10, start=0, facet=None, facet_constraints=None,
facet_sort=None, facet_top_n=None, t=None):
return Query(q=q, bq=bq, rank=rank, return_fields=return_fields,
size=size, start=start, facet=facet,
facet_constraints=facet_constraints,
facet_sort=facet_sort, facet_top_n=facet_top_n, t=t)
def search(self, q=None, bq=None, rank=None, return_fields=None,
size=10, start=0, facet=None, facet_constraints=None,
facet_sort=None, facet_top_n=None, t=None):
"""
Send a query to CloudSearch
Each search query should use at least the q or bq argument to specify
the search parameter. The other options are used to specify the
criteria of the search.
:type q: string
:param q: A string to search the default search fields for.
:type bq: string
:param bq: A string to perform a Boolean search. This can be used to
create advanced searches.
:type rank: List of strings
:param rank: A list of fields or rank expressions used to order the
search results. A field can be reversed by using the - operator.
``['-year', 'author']``
:type return_fields: List of strings
:param return_fields: A list of fields which should be returned by the
search. If this field is not specified, only IDs will be returned.
``['headline']``
:type size: int
        :param size: Number of search results to return
:type start: int
:param start: Offset of the first search result to return (can be used
for paging)
:type facet: list
:param facet: List of fields for which facets should be returned
``['colour', 'size']``
:type facet_constraints: dict
:param facet_constraints: Use to limit facets to specific values
specified as comma-delimited strings in a Dictionary of facets
``{'colour': "'blue','white','red'", 'size': "big"}``
:type facet_sort: dict
:param facet_sort: Rules used to specify the order in which facet
values should be returned. Allowed values are *alpha*, *count*,
            *max*, *sum*. Use *alpha* to sort alphabetically, and *count* to
            sort the facet by the number of available results.
``{'color': 'alpha', 'size': 'count'}``
:type facet_top_n: dict
:param facet_top_n: Dictionary of facets and number of facets to
return.
``{'colour': 2}``
:type t: dict
:param t: Specify ranges for specific fields
``{'year': '2000..2005'}``
:rtype: :class:`boto.cloudsearch.search.SearchResults`
:return: Returns the results of this search
The following examples all assume we have indexed a set of documents
with fields: *author*, *date*, *headline*
A simple search will look for documents whose default text search
fields will contain the search word exactly:
>>> search(q='Tim') # Return documents with the word Tim in them (but not Timothy)
A simple search with more keywords will return documents whose default
text search fields contain the search strings together or separately.
>>> search(q='Tim apple') # Will match "tim" and "apple"
More complex searches require the boolean search operator.
Wildcard searches can be used to search for any words that start with
the search string.
        >>> search(bq="'Tim*'") # Return documents with words like Tim or Timothy
Search terms can also be combined. Allowed operators are "and", "or",
"not", "field", "optional", "token", "phrase", or "filter"
>>> search(bq="(and 'Tim' (field author 'John Smith'))")
Facets allow you to show classification information about the search
results. For example, you can retrieve the authors who have written
about Tim:
>>> search(q='Tim', facet=['Author'])
With facet_constraints, facet_top_n and facet_sort more complicated
constraints can be specified such as returning the top author out of
John Smith and Mark Smith who have a document with the word Tim in it.
>>> search(q='Tim',
        ...        facet=['author'],
        ...        facet_constraints={'author': "'John Smith','Mark Smith'"},
... facet_top_n={'author': 1},
... facet_sort={'author': 'count'})
"""
query = self.build_query(q=q, bq=bq, rank=rank,
return_fields=return_fields,
size=size, start=start, facet=facet,
facet_constraints=facet_constraints,
facet_sort=facet_sort,
facet_top_n=facet_top_n, t=t)
return self(query)
def __call__(self, query):
"""Make a call to CloudSearch
:type query: :class:`boto.cloudsearch.search.Query`
:param query: A group of search criteria
:rtype: :class:`boto.cloudsearch.search.SearchResults`
:return: search results
"""
url = "http://%s/2011-02-01/search" % (self.endpoint)
params = query.to_params()
r = requests.get(url, params=params)
body = r.content.decode('utf-8')
try:
data = json.loads(body)
except ValueError as e:
if r.status_code == 403:
msg = ''
import re
g = re.search('<html><body><h1>403 Forbidden</h1>([^<]+)<', body)
try:
msg = ': %s' % (g.groups()[0].strip())
except AttributeError:
pass
raise SearchServiceException('Authentication error from Amazon%s' % msg)
raise SearchServiceException("Got non-json response from Amazon. %s" % body, query)
if 'messages' in data and 'error' in data:
for m in data['messages']:
if m['severity'] == 'fatal':
raise SearchServiceException("Error processing search %s "
"=> %s" % (params, m['message']), query)
elif 'error' in data:
raise SearchServiceException("Unknown error processing search %s"
% json.dumps(data), query)
data['query'] = query
data['search_service'] = self
return SearchResults(**data)
def get_all_paged(self, query, per_page):
"""Get a generator to iterate over all pages of search results
:type query: :class:`boto.cloudsearch.search.Query`
:param query: A group of search criteria
:type per_page: int
:param per_page: Number of docs in each :class:`boto.cloudsearch.search.SearchResults` object.
:rtype: generator
:return: Generator containing :class:`boto.cloudsearch.search.SearchResults`
"""
query.update_size(per_page)
page = 0
num_pages_needed = 0
while page <= num_pages_needed:
results = self(query)
num_pages_needed = results.num_pages_needed
yield results
query.start += query.real_size
page += 1
def get_all_hits(self, query):
"""Get a generator to iterate over all search results
Transparently handles the results paging from Cloudsearch
search results so even if you have many thousands of results
you can iterate over all results in a reasonably efficient
manner.
:type query: :class:`boto.cloudsearch.search.Query`
:param query: A group of search criteria
:rtype: generator
:return: All docs matching query
"""
page = 0
num_pages_needed = 0
while page <= num_pages_needed:
results = self(query)
num_pages_needed = results.num_pages_needed
for doc in results:
yield doc
query.start += query.real_size
page += 1
def get_num_hits(self, query):
"""Return the total number of hits for query
:type query: :class:`boto.cloudsearch.search.Query`
:param query: a group of search criteria
:rtype: int
:return: Total number of hits for query
"""
query.update_size(1)
return self(query).hits
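# Illustrative usage sketch (not part of the vendored boto source):
# assumes a SearchConnection already bound to a domain's 2011-02-01
# search endpoint; the query terms are placeholders and each hit is
# assumed to carry an 'id' key.
def _example_search_usage(conn):
    # Page transparently through every matching document, 50 per fetch.
    query = conn.build_query(q='Tim', size=50)
    for doc in conn.get_all_hits(query):
        print(doc['id'])
    # A single bounded request goes through search() instead.
    results = conn.search(bq="(and 'Tim' (field author 'John Smith'))",
                          return_fields=['headline'], size=10)
    return results.hits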
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/cloudsearch/search.py
| 0.821903 | 0.150528 |
search.py
|
|
import boto
from boto.compat import json
from boto.cloudsearch.optionstatus import OptionStatus
from boto.cloudsearch.optionstatus import IndexFieldStatus
from boto.cloudsearch.optionstatus import ServicePoliciesStatus
from boto.cloudsearch.optionstatus import RankExpressionStatus
from boto.cloudsearch.document import DocumentServiceConnection
from boto.cloudsearch.search import SearchConnection
def handle_bool(value):
if value in [True, 'true', 'True', 'TRUE', 1]:
return True
return False
class Domain(object):
"""
A Cloudsearch domain.
:ivar name: The name of the domain.
:ivar id: The internally generated unique identifier for the domain.
:ivar created: A boolean which is True if the domain is
created. It can take several minutes to initialize a domain
when CreateDomain is called. Newly created search domains are
returned with a False value for Created until domain creation
is complete.
:ivar deleted: A boolean which is True if the search domain has
been deleted. The system must clean up resources dedicated to
the search domain when delete is called. Newly deleted
search domains are returned from list_domains with a True
value for deleted for several minutes until resource cleanup
is complete.
:ivar processing: True if processing is being done to activate the
current domain configuration.
:ivar num_searchable_docs: The number of documents that have been
submitted to the domain and indexed.
:ivar requires_index_documents: True if index_documents needs to be
called to activate the current domain configuration.
:ivar search_instance_count: The number of search instances that are
available to process search requests.
:ivar search_instance_type: The instance type that is being used to
process search requests.
:ivar search_partition_count: The number of partitions across which
the search index is spread.
"""
def __init__(self, layer1, data):
self.layer1 = layer1
self.update_from_data(data)
def update_from_data(self, data):
self.created = data['created']
self.deleted = data['deleted']
self.processing = data['processing']
self.requires_index_documents = data['requires_index_documents']
self.domain_id = data['domain_id']
self.domain_name = data['domain_name']
self.num_searchable_docs = data['num_searchable_docs']
self.search_instance_count = data['search_instance_count']
self.search_instance_type = data.get('search_instance_type', None)
self.search_partition_count = data['search_partition_count']
self._doc_service = data['doc_service']
self._search_service = data['search_service']
@property
def doc_service_arn(self):
return self._doc_service['arn']
@property
def doc_service_endpoint(self):
return self._doc_service['endpoint']
@property
def search_service_arn(self):
return self._search_service['arn']
@property
def search_service_endpoint(self):
return self._search_service['endpoint']
@property
def created(self):
return self._created
@created.setter
def created(self, value):
self._created = handle_bool(value)
@property
def deleted(self):
return self._deleted
@deleted.setter
def deleted(self, value):
self._deleted = handle_bool(value)
@property
def processing(self):
return self._processing
@processing.setter
def processing(self, value):
self._processing = handle_bool(value)
@property
def requires_index_documents(self):
return self._requires_index_documents
@requires_index_documents.setter
def requires_index_documents(self, value):
self._requires_index_documents = handle_bool(value)
@property
def search_partition_count(self):
return self._search_partition_count
@search_partition_count.setter
def search_partition_count(self, value):
self._search_partition_count = int(value)
@property
def search_instance_count(self):
return self._search_instance_count
@search_instance_count.setter
def search_instance_count(self, value):
self._search_instance_count = int(value)
@property
def num_searchable_docs(self):
return self._num_searchable_docs
@num_searchable_docs.setter
def num_searchable_docs(self, value):
self._num_searchable_docs = int(value)
@property
def name(self):
return self.domain_name
@property
def id(self):
return self.domain_id
def delete(self):
"""
Delete this domain and all index data associated with it.
"""
return self.layer1.delete_domain(self.name)
def get_stemming(self):
"""
Return a :class:`boto.cloudsearch.option.OptionStatus` object
representing the currently defined stemming options for
the domain.
"""
return OptionStatus(self, None,
self.layer1.describe_stemming_options,
self.layer1.update_stemming_options)
def get_stopwords(self):
"""
Return a :class:`boto.cloudsearch.option.OptionStatus` object
representing the currently defined stopword options for
the domain.
"""
return OptionStatus(self, None,
self.layer1.describe_stopword_options,
self.layer1.update_stopword_options)
def get_synonyms(self):
"""
Return a :class:`boto.cloudsearch.option.OptionStatus` object
representing the currently defined synonym options for
the domain.
"""
return OptionStatus(self, None,
self.layer1.describe_synonym_options,
self.layer1.update_synonym_options)
def get_access_policies(self):
"""
Return a :class:`boto.cloudsearch.option.OptionStatus` object
representing the currently defined access policies for
the domain.
"""
return ServicePoliciesStatus(self, None,
self.layer1.describe_service_access_policies,
self.layer1.update_service_access_policies)
def index_documents(self):
"""
Tells the search domain to start indexing its documents using
the latest text processing options and IndexFields. This
operation must be invoked to make options whose OptionStatus
has an OptionState of RequiresIndexDocuments visible in search
results.
"""
self.layer1.index_documents(self.name)
def get_index_fields(self, field_names=None):
"""
Return a list of index fields defined for this domain.
"""
data = self.layer1.describe_index_fields(self.name, field_names)
return [IndexFieldStatus(self, d) for d in data]
def create_index_field(self, field_name, field_type,
default='', facet=False, result=False, searchable=False,
source_attributes=None):
"""
Defines an ``IndexField``, either replacing an existing
definition or creating a new one.
:type field_name: string
:param field_name: The name of a field in the search index.
:type field_type: string
:param field_type: The type of field. Valid values are
uint | literal | text
:type default: string or int
:param default: The default value for the field. If the
field is of type ``uint`` this should be an integer value.
Otherwise, it's a string.
:type facet: bool
:param facet: A boolean to indicate whether facets
are enabled for this field or not. Does not apply to
fields of type ``uint``.
:type result: bool
:param result: A boolean to indicate whether values
of this field can be returned in search results or
used in ranking. Does not apply to fields of type ``uint``.
:type searchable: bool
:param searchable: A boolean to indicate whether search
is enabled for this field or not. Applies only to fields
of type ``literal``.
:type source_attributes: list of dicts
:param source_attributes: An optional list of dicts that
provide information about attributes for this index field.
A maximum of 20 source attributes can be configured for
each index field.
Each item in the list is a dict with the following keys:
* data_copy - The value is a dict with the following keys:
* default - Optional default value if the source attribute
is not specified in a document.
* name - The name of the document source field to add
to this ``IndexField``.
* data_function - Identifies the transformation to apply
when copying data from a source attribute.
* data_map - The value is a dict with the following keys:
* cases - A dict that translates source field values
to custom values.
* default - An optional default value to use if the
source attribute is not specified in a document.
* name - the name of the document source field to add
to this ``IndexField``
* data_trim_title - Trims common title words from a source
document attribute when populating an ``IndexField``.
This can be used to create an ``IndexField`` you can
use for sorting. The value is a dict with the following
fields:
* default - An optional default value.
* language - an IETF RFC 4646 language code.
* separator - The separator that follows the text to trim.
* name - The name of the document source field to add.
:raises: BaseException, InternalException, LimitExceededException,
InvalidTypeException, ResourceNotFoundException
"""
if source_attributes is None:
    source_attributes = []
data = self.layer1.define_index_field(self.name, field_name,
                                      field_type, default=default,
                                      facet=facet, result=result,
                                      searchable=searchable,
                                      source_attributes=source_attributes)
return IndexFieldStatus(self, data,
self.layer1.describe_index_fields)
def get_rank_expressions(self, rank_names=None):
"""
Return a list of rank expressions defined for this domain.
"""
fn = self.layer1.describe_rank_expressions
data = fn(self.name, rank_names)
return [RankExpressionStatus(self, d, fn) for d in data]
def create_rank_expression(self, name, expression):
"""
Create a new rank expression.
:type name: string
:param name: The name of an expression computed for ranking
while processing a search request.
:type expression: string
:param expression: The expression to evaluate for ranking
or thresholding while processing a search request. The
RankExpression syntax is based on JavaScript expressions
and supports:
* Integer, floating point, hex and octal literals
* Shortcut evaluation of logical operators such that an
expression a || b evaluates to the value a if a is
true without evaluating b at all
* JavaScript order of precedence for operators
* Arithmetic operators: + - * / %
* Boolean operators (including the ternary operator)
* Bitwise operators
* Comparison operators
* Common mathematic functions: abs ceil erf exp floor
lgamma ln log2 log10 max min sqrt pow
* Trigonometric library functions: acosh acos asinh asin
atanh atan cosh cos sinh sin tanh tan
* Random generation of a number between 0 and 1: rand
* Current time in epoch: time
* The min max functions that operate on a variable argument list
Intermediate results are calculated as double precision
floating point values. The final return value of a
RankExpression is automatically converted from floating
point to a 32-bit unsigned integer by rounding to the
nearest integer, with a natural floor of 0 and a ceiling
of max(uint32_t), 4294967295. Mathematical errors such as
dividing by 0 will fail during evaluation and return a
value of 0.
The source data for a RankExpression can be the name of an
IndexField of type uint, another RankExpression or the
reserved name text_relevance. The text_relevance source is
defined to return an integer from 0 to 1000 (inclusive) to
indicate how relevant a document is to the search request,
taking into account repetition of search terms in the
document and proximity of search terms to each other in
each matching IndexField in the document.
For more information about using rank expressions to
customize ranking, see the Amazon CloudSearch Developer
Guide.
:raises: BaseException, InternalException, LimitExceededException,
InvalidTypeException, ResourceNotFoundException
"""
data = self.layer1.define_rank_expression(self.name, name, expression)
return RankExpressionStatus(self, data,
self.layer1.describe_rank_expressions)
def get_document_service(self):
return DocumentServiceConnection(domain=self)
def get_search_service(self):
return SearchConnection(domain=self)
def __repr__(self):
return '<Domain: %s>' % self.domain_name
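# Illustrative usage sketch (not part of the vendored boto source):
# the field name and type are placeholders; assumes an existing Domain
# built from a Layer1 connection.
def _example_domain_usage(domain):
    # Define a searchable literal field, then re-index so the new
    # configuration becomes visible in search results.
    domain.create_index_field('author', 'literal',
                              result=True, searchable=True)
    domain.index_documents()
    # Grab service connections for uploading documents and searching.
    return domain.get_document_service(), domain.get_search_service()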
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/cloudsearch/domain.py
| 0.705379 | 0.316396 |
domain.py
|
|
import time
from boto.compat import json
class OptionStatus(dict):
"""
Presents a combination of status field (defined below) which are
accessed as attributes and option values which are stored in the
native Python dictionary. In this class, the option values are
merged from a JSON object that is stored as the Option part of
the object.
:ivar domain_name: The name of the domain this option is associated with.
:ivar creation_date: A timestamp for when this option was created.
:ivar state: The state of processing a change to an option.
Possible values:
* RequiresIndexDocuments: the option's latest value will not
be visible in searches until IndexDocuments has been called
and indexing is complete.
* Processing: the option's latest value is not yet visible in
all searches but is in the process of being activated.
* Active: the option's latest value is completely visible.
:ivar update_date: A timestamp for when this option was updated.
:ivar update_version: A unique integer that indicates when this
option was last updated.
"""
def __init__(self, domain, data=None, refresh_fn=None, save_fn=None):
self.domain = domain
self.refresh_fn = refresh_fn
self.save_fn = save_fn
self.refresh(data)
def _update_status(self, status):
self.creation_date = status['creation_date']
self.state = status['state']
self.update_date = status['update_date']
self.update_version = int(status['update_version'])
def _update_options(self, options):
if options:
self.update(json.loads(options))
def refresh(self, data=None):
"""
Refresh the local state of the object. You can either pass
new state data in as the parameter ``data`` or, if that parameter
is omitted, the state data will be retrieved from CloudSearch.
"""
if not data:
if self.refresh_fn:
data = self.refresh_fn(self.domain.name)
if data:
self._update_status(data['status'])
self._update_options(data['options'])
def to_json(self):
"""
Return the JSON representation of the options as a string.
"""
return json.dumps(self)
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'CreationDate':
self.created = value
elif name == 'State':
self.state = value
elif name == 'UpdateDate':
self.updated = value
elif name == 'UpdateVersion':
self.update_version = int(value)
elif name == 'Options':
self._update_options(value)
else:
setattr(self, name, value)
def save(self):
"""
Write the current state of the local object back to the
CloudSearch service.
"""
if self.save_fn:
data = self.save_fn(self.domain.name, self.to_json())
self.refresh(data)
def wait_for_state(self, state):
"""
Performs polling of CloudSearch to wait for the ``state``
of this object to change to the provided state.
"""
while self.state != state:
time.sleep(5)
self.refresh()
class IndexFieldStatus(OptionStatus):
def _update_options(self, options):
self.update(options)
def save(self):
pass
class RankExpressionStatus(IndexFieldStatus):
pass
class ServicePoliciesStatus(OptionStatus):
def new_statement(self, arn, ip):
"""
Returns a new policy statement that will allow
access to the service described by ``arn`` by the
ip specified in ``ip``.
:type arn: string
:param arn: The Amazon Resource Notation identifier for the
service you wish to provide access to. This would be
either the search service or the document service.
:type ip: string
:param ip: An IP address or CIDR block you wish to grant access
to.
"""
return {
"Effect":"Allow",
"Action":"*", # Docs say use GET, but denies unless *
"Resource": arn,
"Condition": {
"IpAddress": {
"aws:SourceIp": [ip]
}
}
}
def _allow_ip(self, arn, ip):
if 'Statement' not in self:
s = self.new_statement(arn, ip)
self['Statement'] = [s]
self.save()
else:
add_statement = True
for statement in self['Statement']:
if statement['Resource'] == arn:
for condition_name in statement['Condition']:
if condition_name == 'IpAddress':
add_statement = False
condition = statement['Condition'][condition_name]
if ip not in condition['aws:SourceIp']:
condition['aws:SourceIp'].append(ip)
if add_statement:
s = self.new_statement(arn, ip)
self['Statement'].append(s)
self.save()
def allow_search_ip(self, ip):
"""
Add the provided ip address or CIDR block to the list of
allowable addresses for the search service.
:type ip: string
:param ip: An IP address or CIDR block you wish to grant access
to.
"""
arn = self.domain.search_service_arn
self._allow_ip(arn, ip)
def allow_doc_ip(self, ip):
"""
Add the provided ip address or CIDR block to the list of
allowable addresses for the document service.
:type ip: string
:param ip: An IP address or CIDR block you wish to grant access
to.
"""
arn = self.domain.doc_service_arn
self._allow_ip(arn, ip)
def _disallow_ip(self, arn, ip):
if 'Statement' not in self:
return
need_update = False
for statement in self['Statement']:
if statement['Resource'] == arn:
for condition_name in statement['Condition']:
if condition_name == 'IpAddress':
condition = statement['Condition'][condition_name]
if ip in condition['aws:SourceIp']:
condition['aws:SourceIp'].remove(ip)
need_update = True
if need_update:
self.save()
def disallow_search_ip(self, ip):
"""
Remove the provided ip address or CIDR block from the list of
allowable addresses for the search service.
:type ip: string
:param ip: An IP address or CIDR block whose access you wish to
revoke.
"""
arn = self.domain.search_service_arn
self._disallow_ip(arn, ip)
def disallow_doc_ip(self, ip):
"""
Remove the provided ip address or CIDR block from the list of
allowable addresses for the document service.
:type ip: string
:param ip: An IP address or CIDR block whose access you wish to
revoke.
"""
arn = self.domain.doc_service_arn
self._disallow_ip(arn, ip)
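# Illustrative usage sketch (not part of the vendored boto source):
# grants a placeholder CIDR block access to both service endpoints of
# a domain, then revokes it again.
def _example_access_policy_usage(domain):
    policies = domain.get_access_policies()
    policies.allow_search_ip('192.0.2.0/24')
    policies.allow_doc_ip('192.0.2.0/24')
    # Each allow/disallow call persists the updated policy via save().
    policies.disallow_search_ip('192.0.2.0/24')
    policies.disallow_doc_ip('192.0.2.0/24')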
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/cloudsearch/optionstatus.py
| 0.727975 | 0.354517 |
optionstatus.py
|
|
import hashlib
from boto.glacier.utils import chunk_hashes, tree_hash, bytes_to_hex
# This import is provided for backwards compatibility. This function is
# now in boto.glacier.utils, but any existing code can still import
# this directly from this module.
from boto.glacier.utils import compute_hashes_from_fileobj
_ONE_MEGABYTE = 1024 * 1024
class _Partitioner(object):
"""Convert variable-size writes into part-sized writes
Call write(data) with variable sized data as needed to write all data. Call
flush() after all data is written.
This instance will call send_fn(part_data) as needed in part_size pieces,
except for the final part which may be shorter than part_size. Make sure to
call flush() to ensure that a short final part results in a final send_fn
call.
"""
def __init__(self, part_size, send_fn):
self.part_size = part_size
self.send_fn = send_fn
self._buffer = []
self._buffer_size = 0
def write(self, data):
if data == b'':
return
self._buffer.append(data)
self._buffer_size += len(data)
while self._buffer_size > self.part_size:
self._send_part()
def _send_part(self):
data = b''.join(self._buffer)
# Put back any data remaining over the part size into the
# buffer
if len(data) > self.part_size:
self._buffer = [data[self.part_size:]]
self._buffer_size = len(self._buffer[0])
else:
self._buffer = []
self._buffer_size = 0
# The part we will send
part = data[:self.part_size]
self.send_fn(part)
def flush(self):
if self._buffer_size > 0:
self._send_part()
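# Illustrative sketch (not part of the vendored boto source): shows how
# _Partitioner batches variable-size writes into fixed-size send_fn
# calls; the tiny 4-byte part size is purely for demonstration.
def _example_partitioner_usage():
    parts = []
    p = _Partitioner(part_size=4, send_fn=parts.append)
    p.write(b'abcdef')   # 6 bytes: sends b'abcd', buffers b'ef'
    p.write(b'gh')       # buffer now holds exactly one part
    p.flush()            # sends the final b'efgh'
    assert parts == [b'abcd', b'efgh']
    return parts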
class _Uploader(object):
"""Upload to a Glacier upload_id.
Call upload_part for each part (in any order) and then close to complete
the upload.
"""
def __init__(self, vault, upload_id, part_size, chunk_size=_ONE_MEGABYTE):
self.vault = vault
self.upload_id = upload_id
self.part_size = part_size
self.chunk_size = chunk_size
self.archive_id = None
self._uploaded_size = 0
self._tree_hashes = []
self.closed = False
def _insert_tree_hash(self, index, raw_tree_hash):
list_length = len(self._tree_hashes)
if index >= list_length:
self._tree_hashes.extend([None] * (index + 1 - list_length))
self._tree_hashes[index] = raw_tree_hash
def upload_part(self, part_index, part_data):
"""Upload a part to Glacier.
:param part_index: part number where 0 is the first part
:param part_data: data to upload corresponding to this part
"""
if self.closed:
raise ValueError("I/O operation on closed file")
# Create a request and sign it
part_tree_hash = tree_hash(chunk_hashes(part_data, self.chunk_size))
self._insert_tree_hash(part_index, part_tree_hash)
hex_tree_hash = bytes_to_hex(part_tree_hash)
linear_hash = hashlib.sha256(part_data).hexdigest()
start = self.part_size * part_index
content_range = (start,
(start + len(part_data)) - 1)
response = self.vault.layer1.upload_part(self.vault.name,
self.upload_id,
linear_hash,
hex_tree_hash,
content_range, part_data)
response.read()
self._uploaded_size += len(part_data)
def skip_part(self, part_index, part_tree_hash, part_length):
"""Skip uploading of a part.
The final close call needs to calculate the tree hash and total size
of all uploaded data, so this is the mechanism for resume
functionality to provide it without actually uploading the data again.
:param part_index: part number where 0 is the first part
:param part_tree_hash: binary tree_hash of part being skipped
:param part_length: length of part being skipped
"""
if self.closed:
raise ValueError("I/O operation on closed file")
self._insert_tree_hash(part_index, part_tree_hash)
self._uploaded_size += part_length
def close(self):
if self.closed:
return
if None in self._tree_hashes:
raise RuntimeError("Some parts were not uploaded.")
# Complete the multipart Glacier upload
hex_tree_hash = bytes_to_hex(tree_hash(self._tree_hashes))
response = self.vault.layer1.complete_multipart_upload(
self.vault.name, self.upload_id, hex_tree_hash,
self._uploaded_size)
self.archive_id = response['ArchiveId']
self.closed = True
def generate_parts_from_fobj(fobj, part_size):
data = fobj.read(part_size)
while data:
    # Text-mode file objects yield str; normalize to bytes so the
    # hashing and upload code always receives bytes.
    if not isinstance(data, bytes):
        data = data.encode('utf-8')
    yield data
    data = fobj.read(part_size)
def resume_file_upload(vault, upload_id, part_size, fobj, part_hash_map,
chunk_size=_ONE_MEGABYTE):
"""Resume upload of a file already part-uploaded to Glacier.
The resumption of an upload where the part-uploaded section is empty is a
valid degenerate case that this function can handle. In this case,
part_hash_map should be an empty dict.
:param vault: boto.glacier.vault.Vault object.
:param upload_id: existing Glacier upload id of upload being resumed.
:param part_size: part size of existing upload.
:param fobj: file object containing local data to resume. This must read
from the start of the entire upload, not just from the point being
resumed. Use fobj.seek(0) to achieve this if necessary.
:param part_hash_map: {part_index: part_tree_hash, ...} of data already
uploaded. Each supplied part_tree_hash will be verified and the part
re-uploaded if there is a mismatch.
:param chunk_size: chunk size of tree hash calculation. This must be
1 MiB for Amazon.
"""
uploader = _Uploader(vault, upload_id, part_size, chunk_size)
for part_index, part_data in enumerate(
generate_parts_from_fobj(fobj, part_size)):
part_tree_hash = tree_hash(chunk_hashes(part_data, chunk_size))
if (part_index not in part_hash_map or
part_hash_map[part_index] != part_tree_hash):
uploader.upload_part(part_index, part_data)
else:
uploader.skip_part(part_index, part_tree_hash, len(part_data))
uploader.close()
return uploader.archive_id
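# Illustrative usage sketch (not part of the vendored boto source): the
# file name is a placeholder; an empty hash map makes resume_file_upload
# upload every part.
def _example_resume_usage(vault, upload_id, part_size):
    with open('archive.dat', 'rb') as fobj:
        fobj.seek(0)  # must read from the start of the entire upload
        return resume_file_upload(vault, upload_id, part_size, fobj, {})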
class Writer(object):
"""
Presents a file-like object for writing to a Amazon Glacier
Archive. The data is written using the multi-part upload API.
"""
def __init__(self, vault, upload_id, part_size, chunk_size=_ONE_MEGABYTE):
self.uploader = _Uploader(vault, upload_id, part_size, chunk_size)
self.partitioner = _Partitioner(part_size, self._upload_part)
self.closed = False
self.next_part_index = 0
def write(self, data):
if self.closed:
raise ValueError("I/O operation on closed file")
self.partitioner.write(data)
def _upload_part(self, part_data):
self.uploader.upload_part(self.next_part_index, part_data)
self.next_part_index += 1
def close(self):
if self.closed:
return
self.partitioner.flush()
self.uploader.close()
self.closed = True
def get_archive_id(self):
self.close()
return self.uploader.archive_id
@property
def current_tree_hash(self):
"""
Returns the current tree hash for the data that's been written
**so far**.
Only once the writing is complete is the final tree hash returned.
"""
return tree_hash(self.uploader._tree_hashes)
@property
def current_uploaded_size(self):
"""
Returns the current uploaded size for the data that's been written
**so far**.
Only once the writing is complete is the final uploaded size returned.
"""
return self.uploader._uploaded_size
@property
def upload_id(self):
return self.uploader.upload_id
@property
def vault(self):
return self.uploader.vault
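# Illustrative usage sketch (not part of the vendored boto source): the
# file path is a placeholder, and part_size must match the size used
# when the multipart upload was initiated.
def _example_writer_usage(vault, upload_id, part_size):
    writer = Writer(vault, upload_id, part_size)
    with open('archive.dat', 'rb') as fobj:
        chunk = fobj.read(_ONE_MEGABYTE)
        while chunk:
            writer.write(chunk)
            chunk = fobj.read(_ONE_MEGABYTE)
    # close() flushes the final short part and completes the upload.
    return writer.get_archive_id()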
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/glacier/writer.py
| 0.632162 | 0.30004 |
writer.py
|
|
import hashlib
import math
import binascii
from boto.compat import six
_MEGABYTE = 1024 * 1024
DEFAULT_PART_SIZE = 4 * _MEGABYTE
MAXIMUM_NUMBER_OF_PARTS = 10000
def minimum_part_size(size_in_bytes, default_part_size=DEFAULT_PART_SIZE):
"""Calculate the minimum part size needed for a multipart upload.
Glacier allows a maximum of 10,000 parts per upload. It also
states that the maximum archive size is 10,000 * 4 GB, which means
the part size can range from 1MB to 4GB (provided it is 1MB
multiplied by a power of 2).
This function will compute what the minimum part size must be in
order to upload a file of size ``size_in_bytes``.
It will first check if ``default_part_size`` is sufficient for
a part size given the ``size_in_bytes``. If this is not the case,
then the smallest part size that can accommodate a file of size
``size_in_bytes`` will be returned.
If the file size is greater than the maximum allowed archive
size of 10,000 * 4GB, a ``ValueError`` will be raised.
"""
# The default part size (4 MB) will be too small for a very large
# archive, as there is a limit of 10,000 parts in a multipart upload.
# This puts the maximum allowed archive size with the default part size
# at 40,000 MB. We need to do a sanity check on the part size, and find
# one that works if the default is too small.
part_size = _MEGABYTE
if (default_part_size * MAXIMUM_NUMBER_OF_PARTS) < size_in_bytes:
if size_in_bytes > (4096 * _MEGABYTE * 10000):
raise ValueError("File size too large: %s" % size_in_bytes)
min_part_size = size_in_bytes / 10000
power = 3
while part_size < min_part_size:
part_size = math.ldexp(_MEGABYTE, power)
power += 1
part_size = int(part_size)
else:
part_size = default_part_size
return part_size
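# Worked example (illustrative, not part of the vendored boto source):
# a 100 GB archive cannot fit in 10,000 parts of the 4 MB default, so
# the next sufficient 1MB-times-power-of-two size is chosen.
def _example_minimum_part_size():
    size = 100 * 1024 * _MEGABYTE            # 100 GB
    assert minimum_part_size(size) == 16 * _MEGABYTE
    return minimum_part_size(size)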
def chunk_hashes(bytestring, chunk_size=_MEGABYTE):
chunk_count = int(math.ceil(len(bytestring) / float(chunk_size)))
hashes = []
for i in range(chunk_count):
start = i * chunk_size
end = (i + 1) * chunk_size
hashes.append(hashlib.sha256(bytestring[start:end]).digest())
if not hashes:
return [hashlib.sha256(b'').digest()]
return hashes
def tree_hash(fo):
"""
Given a hash of each 1MB chunk (from chunk_hashes) this will hash
together adjacent hashes until it ends up with one big one. So a
tree of hashes.
"""
hashes = []
hashes.extend(fo)
while len(hashes) > 1:
new_hashes = []
while True:
if len(hashes) > 1:
first = hashes.pop(0)
second = hashes.pop(0)
new_hashes.append(hashlib.sha256(first + second).digest())
elif len(hashes) == 1:
only = hashes.pop(0)
new_hashes.append(only)
else:
break
hashes.extend(new_hashes)
return hashes[0]
def compute_hashes_from_fileobj(fileobj, chunk_size=1024 * 1024):
"""Compute the linear and tree hash from a fileobj.
This function will compute the linear/tree hash of a fileobj
in a single pass through the fileobj.
:param fileobj: A file like object.
:param chunk_size: The size of the chunks to use for the tree
hash. This is also the buffer size used to read from
`fileobj`.
:rtype: tuple
:return: A tuple of (linear_hash, tree_hash). Both hashes
are returned in hex.
"""
# Python 3+, not binary
if six.PY3 and hasattr(fileobj, 'mode') and 'b' not in fileobj.mode:
raise ValueError('File-like object must be opened in binary mode!')
linear_hash = hashlib.sha256()
chunks = []
chunk = fileobj.read(chunk_size)
while chunk:
# It's possible to get a file-like object that has no mode (checked
# above) and returns something other than bytes (e.g. str). So here
# we try to catch that and encode to bytes.
if not isinstance(chunk, bytes):
chunk = chunk.encode(getattr(fileobj, 'encoding', '') or 'utf-8')
linear_hash.update(chunk)
chunks.append(hashlib.sha256(chunk).digest())
chunk = fileobj.read(chunk_size)
if not chunks:
chunks = [hashlib.sha256(b'').digest()]
return linear_hash.hexdigest(), bytes_to_hex(tree_hash(chunks))
def bytes_to_hex(str_as_bytes):
return binascii.hexlify(str_as_bytes)
def tree_hash_from_str(str_as_bytes):
"""
:type str_as_bytes: str
:param str_as_bytes: The string for which to compute the tree hash.
:rtype: str
:return: The computed tree hash, returned as hex.
"""
return bytes_to_hex(tree_hash(chunk_hashes(str_as_bytes)))
class ResettingFileSender(object):
def __init__(self, archive):
self._archive = archive
self._starting_offset = archive.tell()
def __call__(self, connection, method, path, body, headers):
try:
connection.request(method, path, self._archive, headers)
return connection.getresponse()
finally:
self._archive.seek(self._starting_offset)
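# Runnable sketch (not part of the vendored boto source): computes the
# linear and tree hashes of a small in-memory payload and confirms the
# default part size suffices for it.
def _example_hash_usage():
    payload = b'x' * (3 * _MEGABYTE)
    linear = hashlib.sha256(payload).hexdigest()
    tree = bytes_to_hex(tree_hash(chunk_hashes(payload)))
    assert minimum_part_size(len(payload)) == DEFAULT_PART_SIZE
    return linear, tree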
| true |
pypi
| null |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/glacier/utils.py
| 0.705886 | 0.481454 |
utils.py
|