index
int64 0
731k
| package
stringlengths 2
98
⌀ | name
stringlengths 1
76
| docstring
stringlengths 0
281k
⌀ | code
stringlengths 4
1.07M
⌀ | signature
stringlengths 2
42.8k
⌀ |
---|---|---|---|---|---|
721,914 |
apprise.apprise
|
instantiate
|
Returns the instance of an instantiated plugin based on the provided
Server URL. If the url fails to be parsed, then None is returned.
The specified url can be either a string (the URL itself) or a
dictionary containing all of the components needed to instantiate
the notification service. If identifying a dictionary, at the bare
minimum, one must specify the schema.
An example of a url dictionary object might look like:
{
schema: 'mailto',
host: 'google.com',
user: 'myuser',
password: 'mypassword',
}
Alternatively the string is much easier to specify:
mailto://user:mypassword@google.com
The dictionary works well for people who are calling details() to
extract the components they need to build the URL manually.
|
@staticmethod
def instantiate(url, asset=None, tag=None, suppress_exceptions=True):
    """
    Returns the instance of an instantiated plugin based on the provided
    Server URL.  If the url fails to be parsed, then None is returned.

    The specified url can be either a string (the URL itself) or a
    dictionary containing all of the components needed to instantiate
    the notification service.  If identifying a dictionary, at the bare
    minimum, one must specify the schema.

    An example of a url dictionary object might look like:
      {
        schema: 'mailto',
        host: 'google.com',
        user: 'myuser',
        password: 'mypassword',
      }

    Alternatively the string is much easier to specify:
      mailto://user:mypassword@google.com

    The dictionary works well for people who are calling details() to
    extract the components they need to build the URL manually.
    """
    # Initialize our result set
    results = None

    # Prepare our Asset Object
    asset = asset if isinstance(asset, AppriseAsset) else AppriseAsset()

    if isinstance(url, str):
        # Acquire our url tokens
        results = plugins.url_to_dict(
            url, secure_logging=asset.secure_logging)

        if results is None:
            # Failed to parse the server URL; detailed logging handled
            # inside url_to_dict - nothing to report here.
            return None

    elif isinstance(url, dict):
        # We already have our result set
        results = url

        if results.get('schema') not in N_MGR:
            # schema is a mandatory dictionary item as it is the only way
            # we can index into our loaded plugins
            logger.error('Dictionary does not include a "schema" entry.')
            logger.trace(
                'Invalid dictionary unpacked as:{}{}'.format(
                    os.linesep, os.linesep.join(
                        ['{}="{}"'.format(k, v)
                         for k, v in results.items()])))
            return None

        logger.trace(
            'Dictionary unpacked as:{}{}'.format(
                os.linesep, os.linesep.join(
                    ['{}="{}"'.format(k, v) for k, v in results.items()])))

    # Otherwise we handle the invalid input specified
    else:
        logger.error(
            'An invalid URL type (%s) was specified for instantiation',
            type(url))
        return None

    if not N_MGR[results['schema']].enabled:
        #
        # First Plugin Enable Check (Pre Initialization)
        #
        # Plugin has been disabled at a global level
        logger.error(
            '%s:// is disabled on this system.', results['schema'])
        return None

    # Build a list of tags to associate with the newly added notifications
    results['tag'] = set(parse_list(tag))

    # Set our Asset Object
    results['asset'] = asset

    if suppress_exceptions:
        try:
            # Attempt to create an instance of our plugin using the parsed
            # URL information
            plugin = N_MGR[results['schema']](**results)

            # Create log entry of loaded URL
            logger.debug(
                'Loaded {} URL: {}'.format(
                    N_MGR[results['schema']].service_name,
                    plugin.url(privacy=asset.secure_logging)))

        except Exception:
            # CWE-312 (Secure Logging) Handling
            # Scrub secrets out of the URL before it hits the log stream
            loggable_url = url if not asset.secure_logging \
                else cwe312_url(url)

            # the arguments are invalid or can not be used.
            logger.error(
                'Could not load {} URL: {}'.format(
                    N_MGR[results['schema']].service_name,
                    loggable_url))
            return None

    else:
        # Attempt to create an instance of our plugin using the parsed
        # URL information but don't wrap it in a try catch
        plugin = N_MGR[results['schema']](**results)

    if not plugin.enabled:
        #
        # Second Plugin Enable Check (Post Initialization)
        #
        # Service/Plugin is disabled (on a more local level).  This is a
        # case where the plugin was initially enabled but then after the
        # __init__() was called under the hood something pre-determined
        # that it could no longer be used.

        # The only downside to doing it this way is services are
        # initialized prior to returning the details() if 3rd party tools
        # are polling what is available.  These services that become
        # disabled thereafter are shown initially that they can be used.
        logger.error(
            '%s:// has become disabled on this system.', results['schema'])
        return None

    return plugin
|
(url, asset=None, tag=None, suppress_exceptions=True)
|
721,915 |
apprise.apprise
|
notify
|
Send a notification to all the plugins previously loaded.
If the body_format specified is NotifyFormat.MARKDOWN, it will
be converted to HTML if the Notification type expects this.
if the tag is specified (either a string or a set/list/tuple
of strings), then only the notifications flagged with that
tagged value are notified. By default, all added services
are notified (tag=MATCH_ALL_TAG)
This function returns True if all notifications were successfully
sent, False if even just one of them fails, and None if no
notifications were sent at all as a result of tag filtering and/or
simply having empty configuration files that were read.
Attach can contain a list of attachment URLs. attach can also be
represented by an AttachBase() (or list of) object(s). This
identifies the products you wish to notify
Set interpret_escapes to True if you want to pre-escape a string
such as turning a \n into an actual new line, etc.
|
def notify(self, body, title='', notify_type=common.NotifyType.INFO,
           body_format=None, tag=common.MATCH_ALL_TAG, match_always=True,
           attach=None, interpret_escapes=None):
    """
    Send a notification to all the plugins previously loaded.

    If the body_format specified is NotifyFormat.MARKDOWN, it will
    be converted to HTML if the Notification type expects this.

    if the tag is specified (either a string or a set/list/tuple
    of strings), then only the notifications flagged with that
    tagged value are notified.  By default, all added services
    are notified (tag=MATCH_ALL_TAG)

    This function returns True if all notifications were successfully
    sent, False if even just one of them fails, and None if no
    notifications were sent at all as a result of tag filtering and/or
    simply having empty configuration files that were read.

    Attach can contain a list of attachment URLs.  attach can also be
    represented by an AttachBase() (or list of) object(s).  This
    identifies the products you wish to notify

    Set interpret_escapes to True if you want to pre-escape a string
    such as turning a \\n into an actual new line, etc.
    """
    try:
        # Process arguments and build synchronous and asynchronous calls
        # (this step can throw internal errors).
        sequential_calls, parallel_calls = self._create_notify_calls(
            body, title,
            notify_type=notify_type, body_format=body_format,
            tag=tag, match_always=match_always, attach=attach,
            interpret_escapes=interpret_escapes,
        )

    except TypeError:
        # No notifications sent, and there was an internal error.
        return False

    if not sequential_calls and not parallel_calls:
        # Nothing to send (e.g. every loaded service was filtered out
        # by the requested tag)
        return None

    # Fire the two batches; overall success requires both to succeed
    sequential_result = Apprise._notify_sequential(*sequential_calls)
    parallel_result = Apprise._notify_parallel_threadpool(*parallel_calls)

    return sequential_result and parallel_result
|
(self, body, title='', notify_type='info', body_format=None, tag='all', match_always=True, attach=None, interpret_escapes=None)
|
721,916 |
apprise.apprise
|
pop
|
Removes an indexed Notification Service from the stack and returns it.
The thing is we can never pop AppriseConfig() entries, only what was
loaded within them. So pop needs to carefully iterate over our list
and only track actual entries.
|
def pop(self, index):
    """
    Removes an indexed Notification Service from the stack and returns it.

    The thing is we can never pop AppriseConfig() entries, only what was
    loaded within them.  So pop needs to carefully iterate over our list
    and only track actual entries.
    """
    # Tracking variables over the flattened view of self.servers:
    #   offset      - index of the last flattened entry contributed by
    #                 the element currently being examined
    #   prev_offset - index of the last flattened entry contributed by
    #                 all elements before it (-1 when nothing seen yet)
    prev_offset = -1
    offset = prev_offset

    for idx, s in enumerate(self.servers):
        if isinstance(s, (ConfigBase, AppriseConfig)):
            # A configuration entry expands into zero or more loaded
            # notification services
            servers = s.servers()
            if len(servers) > 0:
                # Acquire a new maximum offset to work with
                offset = prev_offset + len(servers)

                if offset >= index:
                    # we can pop an element from our config stack
                    fn = s.pop if isinstance(s, ConfigBase) \
                        else s.server_pop

                    # Translate the global index into one local to this
                    # configuration entry before delegating the pop
                    return fn(index if prev_offset == -1
                              else (index - prev_offset - 1))

        else:
            # A directly-added notification service counts as exactly
            # one flattened entry
            offset = prev_offset + 1

            if offset == index:
                return self.servers.pop(idx)

        # Update our old offset
        prev_offset = offset

    # If we reach here, then we indexed out of range
    raise IndexError('list index out of range')
|
(self, index)
|
721,917 |
apprise.apprise
|
urls
|
Returns all of the loaded URLs defined in this apprise object.
|
def urls(self, privacy=False):
    """
    Returns all of the loaded URLs defined in this apprise object.
    """
    # Collect the URL of every loaded service, honouring the privacy
    # flag (which masks credentials when set)
    loaded = []
    for server in self.servers:
        loaded.append(server.url(privacy=privacy))
    return loaded
|
(self, privacy=False)
|
721,918 |
apprise.asset
|
AppriseAsset
|
Provides a supplementary class that can be used to provide extra
information and details that can be used by Apprise such as providing
an alternate location to where images/icons can be found and the
URL masks.
Any variable that starts with an underscore (_) can only be initialized
by this class manually and will/can not be parsed from a configuration
file.
|
class AppriseAsset:
    """
    Provides a supplementary class that can be used to provide extra
    information and details that can be used by Apprise such as providing
    an alternate location to where images/icons can be found and the
    URL masks.

    Any variable that starts with an underscore (_) can only be initialized
    by this class manually and will/can not be parsed from a configuration
    file.
    """
    # Application Identifier
    app_id = 'Apprise'

    # Application Description
    app_desc = 'Apprise Notifications'

    # Provider URL
    app_url = 'https://github.com/caronc/apprise'

    # A Simple Mapping of Colors; For every NOTIFY_TYPE identified,
    # there should be a mapping to its color here:
    html_notify_map = {
        NotifyType.INFO: '#3AA3E3',
        NotifyType.SUCCESS: '#3AA337',
        NotifyType.FAILURE: '#A32037',
        NotifyType.WARNING: '#CACF29',
    }

    # Ascii Notification (badge shown per notification type)
    ascii_notify_map = {
        NotifyType.INFO: '[i]',
        NotifyType.SUCCESS: '[+]',
        NotifyType.FAILURE: '[!]',
        NotifyType.WARNING: '[~]',
    }

    # The default color to return if a mapping isn't found in our table above
    default_html_color = '#888888'

    # The default image extension to use
    default_extension = '.png'

    # The default theme
    theme = 'default'

    # Image URL Mask; {THEME}/{TYPE}/{XY}/{EXTENSION} placeholders are
    # substituted by image_url()
    image_url_mask = \
        'https://github.com/caronc/apprise/raw/master/apprise/assets/' \
        'themes/{THEME}/apprise-{TYPE}-{XY}{EXTENSION}'

    # Application Logo
    image_url_logo = \
        'https://github.com/caronc/apprise/raw/master/apprise/assets/' \
        'themes/{THEME}/apprise-logo.png'

    # Image Path Mask (local filesystem counterpart of image_url_mask)
    image_path_mask = abspath(join(
        dirname(__file__),
        'assets',
        'themes',
        '{THEME}',
        'apprise-{TYPE}-{XY}{EXTENSION}',
    ))

    # This value can also be set on calls to Apprise.notify().  This allows
    # you to let Apprise upfront the type of data being passed in.  This
    # must be of type NotifyFormat.  Possible values could be:
    # - NotifyFormat.TEXT
    # - NotifyFormat.MARKDOWN
    # - NotifyFormat.HTML
    # - None
    #
    # If no format is specified (hence None), then no special pre-formatting
    # actions will take place during a notification.  This has been and
    # always will be the default.
    body_format = None

    # Always attempt to send notifications asynchronous (at the same time
    # if possible)
    # This is a Python 3 supported option only.  If set to False, then
    # notifications are sent sequentially (one after another)
    async_mode = True

    # Support :smile:, and other alike keywords swapping them for their
    # unicode value.  A value of None leaves the interpretation up to the
    # end user to control (allowing them to specify emojis=yes on the
    # URL)
    interpret_emojis = None

    # Whether or not to interpret escapes found within the input text prior
    # to passing it upstream.  Such as converting \t to an actual tab and
    # \n to a new line.
    interpret_escapes = False

    # Defines the encoding of the content passed into Apprise
    encoding = 'utf-8'

    # For more detail see CWE-312 @
    #    https://cwe.mitre.org/data/definitions/312.html
    #
    # By enabling this, the logging output has additional overhead applied to
    # it preventing secure password and secret information from being
    # displayed in the logging.  Since there is overhead involved in
    # performing this cleanup; system owners who run in a very isolated
    # environment may choose to disable this for a slight performance bump.
    # It is recommended that you leave this option as is otherwise.
    secure_logging = True

    # Optionally specify one or more path to attempt to scan for Python
    # modules.  By default, no paths are scanned.
    __plugin_paths = []

    # All internal/system flags are prefixed with an underscore (_)
    # These can only be initialized using Python libraries and are not picked
    # up from (yaml) configuration files (if set)

    # An internal counter that is used by AppriseAPI
    # (https://github.com/caronc/apprise-api).  The idea is to allow one
    # instance of AppriseAPI to call another, but to track how many times
    # this occurs.  Its intent is to prevent a loop where an AppriseAPI
    # Server calls itself (or loops indefinitely)
    _recursion = 0

    # A unique identifier we can use to associate our calling source
    _uid = str(uuid4())

    def __init__(self, plugin_paths=None, **kwargs):
        """
        Asset Initialization
        """
        # Assign default arguments if specified; unknown keys are a hard
        # error so that typos surface immediately
        for key, value in kwargs.items():
            if not hasattr(AppriseAsset, key):
                raise AttributeError(
                    'AppriseAsset init(): '
                    'An invalid key {} was specified.'.format(key))

            setattr(self, key, value)

        if plugin_paths:
            # Load any decorated modules if defined
            N_MGR.module_detection(plugin_paths)

    def color(self, notify_type, color_type=None):
        """
        Returns an HTML mapped color based on passed in notify type

        if color_type is:
           None    then a standard hex string is returned as
                   a string format ('#000000').
           int     then the integer representation is returned
           tuple   then the red, green, blue is returned in a tuple
        """
        # Attempt to get the type, otherwise return a default grey
        # if we couldn't look up the entry
        color = self.html_notify_map.get(notify_type, self.default_html_color)
        if color_type is None:
            # This is the default return type
            return color

        elif color_type is int:
            # Convert the color to integer
            return AppriseAsset.hex_to_int(color)

        # The only other type is tuple
        elif color_type is tuple:
            return AppriseAsset.hex_to_rgb(color)

        # Unsupported type
        raise ValueError(
            'AppriseAsset html_color(): An invalid color_type was specified.')

    def ascii(self, notify_type):
        """
        Returns an ascii representation based on passed in notify type
        """
        # look our response up
        # NOTE(review): the fallback here is default_html_color ('#888888'),
        # an HTML color rather than an ascii badge — confirm this is the
        # intended behavior upstream.
        return self.ascii_notify_map.get(notify_type, self.default_html_color)

    def image_url(self, notify_type, image_size, logo=False, extension=None):
        """
        Apply our mask to our image URL

        if logo is set to True, then the logo_url is used instead
        """
        url_mask = self.image_url_logo if logo else self.image_url_mask
        if not url_mask:
            # No image to return
            return None

        if extension is None:
            extension = self.default_extension

        re_map = {
            '{THEME}': self.theme if self.theme else '',
            '{TYPE}': notify_type,
            '{XY}': image_size,
            '{EXTENSION}': extension,
        }

        # Iterate over above list and store content accordingly
        # (compiled case-insensitive, so {theme} matches too)
        re_table = re.compile(
            r'(' + '|'.join(re_map.keys()) + r')',
            re.IGNORECASE,
        )

        return re_table.sub(lambda x: re_map[x.group()], url_mask)

    def image_path(self, notify_type, image_size, must_exist=True,
                   extension=None):
        """
        Apply our mask to our image file path
        """
        if not self.image_path_mask:
            # No image to return
            return None

        if extension is None:
            extension = self.default_extension

        re_map = {
            '{THEME}': self.theme if self.theme else '',
            '{TYPE}': notify_type,
            '{XY}': image_size,
            '{EXTENSION}': extension,
        }

        # Iterate over above list and store content accordingly
        re_table = re.compile(
            r'(' + '|'.join(re_map.keys()) + r')',
            re.IGNORECASE,
        )

        # Acquire our path
        path = re_table.sub(lambda x: re_map[x.group()], self.image_path_mask)
        if must_exist and not isfile(path):
            # The file did not exist on disk
            return None

        # Return what we parsed
        return path

    def image_raw(self, notify_type, image_size, extension=None):
        """
        Returns the raw image if it can (otherwise the function returns None)
        """
        path = self.image_path(
            notify_type=notify_type,
            image_size=image_size,
            extension=extension,
        )
        if path:
            try:
                with open(path, 'rb') as fd:
                    return fd.read()

            except (OSError, IOError):
                # We can't access the file
                return None

        return None

    def details(self):
        """
        Returns the details associated with the AppriseAsset object
        """
        return {
            'app_id': self.app_id,
            'app_desc': self.app_desc,
            'default_extension': self.default_extension,
            'theme': self.theme,
            'image_path_mask': self.image_path_mask,
            'image_url_mask': self.image_url_mask,
            'image_url_logo': self.image_url_logo,
        }

    @staticmethod
    def hex_to_rgb(value):
        """
        Takes a hex string (such as #00ff00) and returns a tuple in the form
        of (red, green, blue)

        eg: #00ff00 becomes : (0, 255, 0)
        """
        value = value.lstrip('#')
        lv = len(value)
        # Split the string into three equal channels; parse each as base-16
        return tuple(int(value[i:i + lv // 3], 16)
                     for i in range(0, lv, lv // 3))

    @staticmethod
    def hex_to_int(value):
        """
        Takes a hex string (such as #00ff00) and returns its integer
        equivalent

        eg: #00000f becomes : 15
        """
        return int(value.lstrip('#'), 16)
|
(plugin_paths=None, **kwargs)
|
721,919 |
apprise.asset
|
__init__
|
Asset Initialization
|
def __init__(self, plugin_paths=None, **kwargs):
    """
    Asset Initialization
    """
    # Apply user-provided overrides of our class-level defaults; an
    # unknown key is treated as a hard error so typos surface early
    for key in kwargs:
        if not hasattr(AppriseAsset, key):
            raise AttributeError(
                'AppriseAsset init(): '
                'An invalid key {} was specified.'.format(key))

        setattr(self, key, kwargs[key])

    if plugin_paths:
        # Scan the specified path(s) for decorated plugin modules
        N_MGR.module_detection(plugin_paths)
|
(self, plugin_paths=None, **kwargs)
|
721,920 |
apprise.asset
|
ascii
|
Returns an ascii representation based on passed in notify type
|
def ascii(self, notify_type):
    """
    Returns an ascii representation based on passed in notify type
    """
    # EAFP lookup: fall back to the default color when the type is
    # not present in our mapping
    try:
        return self.ascii_notify_map[notify_type]

    except KeyError:
        return self.default_html_color
|
(self, notify_type)
|
721,921 |
apprise.asset
|
color
|
Returns an HTML mapped color based on passed in notify type
if color_type is:
None then a standard hex string is returned as
a string format ('#000000').
int then the integer representation is returned
tuple then the the red, green, blue is returned in a tuple
|
def color(self, notify_type, color_type=None):
    """
    Returns an HTML mapped color based on passed in notify type

    if color_type is:
       None    then a standard hex string is returned as
               a string format ('#000000').
       int     then the integer representation is returned
       tuple   then the red, green, blue is returned in a tuple
    """
    # Unmapped notification types fall back to our default grey
    hex_color = self.html_notify_map.get(
        notify_type, self.default_html_color)

    if color_type is None:
        # Default: plain hex string
        return hex_color

    if color_type is int:
        # Integer representation of the color
        return AppriseAsset.hex_to_int(hex_color)

    if color_type is tuple:
        # (red, green, blue) representation of the color
        return AppriseAsset.hex_to_rgb(hex_color)

    # Any other color_type is unsupported
    raise ValueError(
        'AppriseAsset html_color(): An invalid color_type was specified.')
|
(self, notify_type, color_type=None)
|
721,922 |
apprise.asset
|
details
|
Returns the details associated with the AppriseAsset object
|
def details(self):
    """
    Returns the details associated with the AppriseAsset object
    """
    # Snapshot the presentation-related settings of this asset; order
    # matters only for readability of the resulting dictionary
    keys = (
        'app_id', 'app_desc', 'default_extension', 'theme',
        'image_path_mask', 'image_url_mask', 'image_url_logo',
    )
    return {key: getattr(self, key) for key in keys}
|
(self)
|
721,923 |
apprise.asset
|
hex_to_int
|
Takes a hex string (such as #00ff00) and returns its integer
equivalent
eg: #00000f becomes : 15
|
@staticmethod
def hex_to_int(value):
"""
Takes a hex string (such as #00ff00) and returns its integer
equivalent
eg: #00000f becomes : 15
"""
return int(value.lstrip('#'), 16)
|
(value)
|
721,924 |
apprise.asset
|
hex_to_rgb
|
Takes a hex string (such as #00ff00) and returns a tuple in the form
of (red, green, blue)
eg: #00ff00 becomes : (0, 255, 0)
|
@staticmethod
def hex_to_rgb(value):
"""
Takes a hex string (such as #00ff00) and returns a tuple in the form
of (red, green, blue)
eg: #00ff00 becomes : (0, 65535, 0)
"""
value = value.lstrip('#')
lv = len(value)
return tuple(int(value[i:i + lv // 3], 16)
for i in range(0, lv, lv // 3))
|
(value)
|
721,925 |
apprise.asset
|
image_path
|
Apply our mask to our image file path
|
def image_path(self, notify_type, image_size, must_exist=True,
               extension=None):
    """
    Apply our mask to our image file path
    """
    if not self.image_path_mask:
        # No image to return
        return None

    # Resolve the extension to apply
    ext = self.default_extension if extension is None else extension

    # Placeholder -> value substitution table
    tokens = {
        '{THEME}': self.theme if self.theme else '',
        '{TYPE}': notify_type,
        '{XY}': image_size,
        '{EXTENSION}': ext,
    }

    # Substitute every placeholder in a single case-insensitive pass
    pattern = re.compile(
        r'(' + '|'.join(tokens.keys()) + r')',
        re.IGNORECASE,
    )
    path = pattern.sub(lambda m: tokens[m.group()], self.image_path_mask)

    # Optionally require the file to actually exist on disk
    return None if must_exist and not isfile(path) else path
|
(self, notify_type, image_size, must_exist=True, extension=None)
|
721,926 |
apprise.asset
|
image_raw
|
Returns the raw image if it can (otherwise the function returns None)
|
def image_raw(self, notify_type, image_size, extension=None):
    """
    Returns the raw image if it can (otherwise the function returns None)
    """
    # Resolve the on-disk location first; None means there is nothing
    # we can read
    path = self.image_path(
        notify_type=notify_type,
        image_size=image_size,
        extension=extension,
    )
    if not path:
        return None

    try:
        with open(path, 'rb') as fd:
            return fd.read()

    except (OSError, IOError):
        # The file vanished or is unreadable; treat as no image
        return None
|
(self, notify_type, image_size, extension=None)
|
721,927 |
apprise.asset
|
image_url
|
Apply our mask to our image URL
if logo is set to True, then the logo_url is used instead
|
def image_url(self, notify_type, image_size, logo=False, extension=None):
    """
    Apply our mask to our image URL

    if logo is set to True, then the logo_url is used instead
    """
    # Choose between the logo mask and the general image mask
    mask = self.image_url_logo if logo else self.image_url_mask
    if not mask:
        # No image to return
        return None

    # Resolve the extension to apply
    ext = self.default_extension if extension is None else extension

    # Placeholder -> value substitution table
    tokens = {
        '{THEME}': self.theme if self.theme else '',
        '{TYPE}': notify_type,
        '{XY}': image_size,
        '{EXTENSION}': ext,
    }

    # Substitute every placeholder in a single case-insensitive pass
    pattern = re.compile(
        r'(' + '|'.join(tokens.keys()) + r')',
        re.IGNORECASE,
    )
    return pattern.sub(lambda m: tokens[m.group()], mask)
|
(self, notify_type, image_size, logo=False, extension=None)
|
721,928 |
apprise.apprise_attachment
|
AppriseAttachment
|
Our Apprise Attachment File Manager
|
class AppriseAttachment:
    """
    Our Apprise Attachment File Manager
    """

    def __init__(self, paths=None, asset=None, cache=True, location=None,
                 **kwargs):
        """
        Loads all of the paths/urls specified (if any).

        The path can either be a single string identifying one explicit
        location, otherwise you can pass in a series of locations to scan
        via a list.

        By default we cache our responses so that subsequent calls do not
        cause the content to be retrieved again.  For local file references
        this makes no difference at all.  But for remote content, this does
        mean more than one call can be made to retrieve the (same) data.
        This method can be somewhat inefficient if disabled.  Only disable
        caching if you understand the consequences.

        You can alternatively set the cache value to an int identifying the
        number of seconds the previously retrieved can exist for before it
        should be considered expired.

        It's also worth noting that the cache value is only set to elements
        that are not already of subclass AttachBase()

        Optionally set your current ContentLocation in the location argument.
        This is used to further handle attachments.  The rules are as
        follows:
          - INACCESSIBLE: You simply have disabled use of the object; no
                          attachments will be retrieved/handled.
          - HOSTED:       You are hosting an attachment service for others.
                          In these circumstances all attachments that are
                          LOCAL based (such as file://) will not be allowed.
          - LOCAL:        The least restrictive mode as local files can be
                          referenced in addition to hosted.

        In both HOSTED and LOCAL modes, INACCESSIBLE attachment types will
        continue to be inaccessible.  However if you set this field
        (location) to None (its default value) the attachment location
        category will not be tested in any way (all attachment types will
        be allowed).

        The location field is also a global option that can be set when
        initializing the Apprise object.
        """
        # Initialize our attachment listings
        self.attachments = list()

        # Set our cache flag
        self.cache = cache

        # Prepare our Asset Object
        self.asset = \
            asset if isinstance(asset, AppriseAsset) else AppriseAsset()

        if location is not None and location not in CONTENT_LOCATIONS:
            msg = "An invalid Attachment location ({}) was specified." \
                .format(location)
            logger.warning(msg)
            raise TypeError(msg)

        # Store our location
        self.location = location

        # Now parse any paths specified
        if paths is not None:
            # Store our path(s)
            if not self.add(paths):
                # Parse Source domain based on from_addr
                raise TypeError("One or more attachments could not be added.")

    def add(self, attachments, asset=None, cache=None):
        """
        Adds one or more attachments into our list.

        By default we cache our responses so that subsequent calls do not
        cause the content to be retrieved again.  For local file references
        this makes no difference at all.  But for remote content, this does
        mean more than one call can be made to retrieve the (same) data.
        This method can be somewhat inefficient if disabled.  Only disable
        caching if you understand the consequences.

        You can alternatively set the cache value to an int identifying the
        number of seconds the previously retrieved can exist for before it
        should be considered expired.

        It's also worth noting that the cache value is only set to elements
        that are not already of subclass AttachBase()
        """
        # Initialize our return status
        return_status = True

        # Initialize our default cache value
        cache = cache if cache is not None else self.cache

        if asset is None:
            # prepare default asset
            asset = self.asset

        if isinstance(attachments, AttachBase):
            # Go ahead and just add our attachments into our list
            self.attachments.append(attachments)
            return True

        elif isinstance(attachments, str):
            # Save our path
            attachments = (attachments, )

        elif not isinstance(attachments, (tuple, set, list)):
            logger.error(
                'An invalid attachment url (type={}) was '
                'specified.'.format(type(attachments)))
            return False

        # Iterate over our attachments
        for _attachment in attachments:
            if self.location == ContentLocation.INACCESSIBLE:
                logger.warning(
                    "Attachments are disabled; ignoring {}"
                    .format(_attachment))
                return_status = False
                continue

            if isinstance(_attachment, str):
                logger.debug("Loading attachment: {}".format(_attachment))

                # Instantiate ourselves an object, this function throws or
                # returns None if it fails
                instance = AppriseAttachment.instantiate(
                    _attachment, asset=asset, cache=cache)
                if not isinstance(instance, AttachBase):
                    return_status = False
                    continue

            elif isinstance(_attachment, AppriseAttachment):
                # We were provided a list of Apprise Attachments
                # append our content together
                instance = _attachment.attachments

            elif not isinstance(_attachment, AttachBase):
                logger.warning(
                    "An invalid attachment (type={}) was specified.".format(
                        type(_attachment)))
                return_status = False
                continue

            else:
                # our entry is of type AttachBase, so just go ahead and point
                # our instance to it for some post processing below
                instance = _attachment

            # Apply some simple logic if our location flag is set
            # NOTE(review): when instance is a list (AppriseAttachment
            # input above) the instance.location access below would raise
            # AttributeError if self.location is set — confirm upstream.
            if self.location and ((
                    self.location == ContentLocation.HOSTED
                    and instance.location != ContentLocation.HOSTED)
                    or instance.location == ContentLocation.INACCESSIBLE):
                logger.warning(
                    "Attachment was disallowed due to accessibility "
                    "restrictions ({}->{}): {}".format(
                        self.location, instance.location,
                        instance.url(privacy=True)))
                return_status = False
                continue

            # Add our initialized plugin to our server listings
            if isinstance(instance, list):
                self.attachments.extend(instance)

            else:
                self.attachments.append(instance)

        # Return our status
        return return_status

    @staticmethod
    def instantiate(url, asset=None, cache=None, suppress_exceptions=True):
        """
        Returns the instance of an instantiated attachment plugin based on
        the provided Attachment URL.  If the url fails to be parsed, then
        None is returned.

        A specified cache value will over-ride anything set
        """
        # Attempt to acquire the schema at the very least to allow our
        # attachment based urls.
        schema = GET_SCHEMA_RE.match(url)
        if schema is None:
            # Plan B is to assume we're dealing with a file
            schema = 'file'
            url = '{}://{}'.format(schema, URLBase.quote(url))

        else:
            # Ensure our schema is always in lower case
            schema = schema.group('schema').lower()

            # Some basic validation
            if schema not in A_MGR:
                logger.warning('Unsupported schema {}.'.format(schema))
                return None

        # Parse our url details of the server object as dictionary
        # containing all of the information parsed from our URL
        results = A_MGR[schema].parse_url(url)
        if not results:
            # Failed to parse the server URL
            logger.warning('Unparseable URL {}.'.format(url))
            return None

        # Prepare our Asset Object
        results['asset'] = \
            asset if isinstance(asset, AppriseAsset) else AppriseAsset()

        if cache is not None:
            # Force an over-ride of the cache value to what we have
            # specified
            results['cache'] = cache

        if suppress_exceptions:
            try:
                # Attempt to create an instance of our plugin using the
                # parsed URL information
                attach_plugin = A_MGR[results['schema']](**results)

            except Exception:
                # the arguments are invalid or can not be used.
                logger.warning('Could not load URL: %s' % url)
                return None

        else:
            # Attempt to create an instance of our plugin using the parsed
            # URL information but don't wrap it in a try catch
            attach_plugin = A_MGR[results['schema']](**results)

        return attach_plugin

    def clear(self):
        """
        Empties our attachment list
        """
        self.attachments[:] = []

    def size(self):
        """
        Returns the total size of accumulated attachments
        """
        return sum([len(a) for a in self.attachments if len(a) > 0])

    def pop(self, index=-1):
        """
        Removes an indexed Apprise Attachment from the stack and returns it.

        by default the last element is popped from the list
        """
        # Remove our entry
        return self.attachments.pop(index)

    def __getitem__(self, index):
        """
        Returns the indexed entry of a loaded apprise attachments
        """
        return self.attachments[index]

    def __bool__(self):
        """
        Allows the Apprise object to be wrapped in an 'if statement'.
        True is returned if at least one service has been loaded.
        """
        return True if self.attachments else False

    def __iter__(self):
        """
        Returns an iterator to our attachment list
        """
        return iter(self.attachments)

    def __len__(self):
        """
        Returns the number of attachment entries loaded
        """
        return len(self.attachments)
|
(paths=None, asset=None, cache=True, location=None, **kwargs)
|
721,929 |
apprise.apprise_attachment
|
__bool__
|
Allows the Apprise object to be wrapped in an 'if statement'.
True is returned if at least one service has been loaded.
|
def __bool__(self):
    """
    Allows the Apprise object to be wrapped in an 'if statement'.
    True is returned if at least one service has been loaded.
    """
    # A non-empty attachment list is truthy
    return bool(self.attachments)
|
(self)
|
721,930 |
apprise.apprise_attachment
|
__getitem__
|
Returns the indexed entry of a loaded apprise attachments
|
def __getitem__(self, index):
    """Support indexed access into the loaded attachment entries."""
    entries = self.attachments
    return entries[index]
|
(self, index)
|
721,931 |
apprise.apprise_attachment
|
__init__
|
Loads all of the paths/urls specified (if any).
The path can either be a single string identifying one explicit
location, otherwise you can pass in a series of locations to scan
via a list.
By default we cache our responses so that subsequent calls do not
cause the content to be retrieved again. For local file references
this makes no difference at all. But for remote content, this does
mean more than one call can be made to retrieve the (same) data. This
method can be somewhat inefficient if disabled. Only disable caching
if you understand the consequences.
You can alternatively set the cache value to an int identifying the
number of seconds the previously retrieved can exist for before it
should be considered expired.
It's also worth noting that the cache value is only set to elements
that are not already of subclass AttachBase()
Optionally set your current ContentLocation in the location argument.
This is used to further handle attachments. The rules are as follows:
- INACCESSIBLE: You simply have disabled use of the object; no
attachments will be retrieved/handled.
- HOSTED: You are hosting an attachment service for others.
In these circumstances all attachments that are LOCAL
based (such as file://) will not be allowed.
- LOCAL: The least restrictive mode as local files can be
referenced in addition to hosted.
In all both HOSTED and LOCAL modes, INACCESSIBLE attachment types will
continue to be inaccessible. However if you set this field (location)
to None (it's default value) the attachment location category will not
be tested in any way (all attachment types will be allowed).
The location field is also a global option that can be set when
initializing the Apprise object.
|
def __init__(self, paths=None, asset=None, cache=True, location=None,
             **kwargs):
    """
    Load all of the attachment paths/URLs specified (if any).

    :param paths: A single attachment path/URL (str) or an iterable of
        them to load immediately.
    :param asset: An optional AppriseAsset object; a fresh default asset
        is created when one is not provided.
    :param cache: Default caching behaviour applied to attachments added
        later.  True/False toggles caching of remotely retrieved
        content; an int sets the number of seconds previously retrieved
        content remains valid before being considered expired.  Only
        applies to entries that are not already AttachBase instances.
    :param location: Optional ContentLocation constraint:
        - INACCESSIBLE: use of attachments is disabled entirely
        - HOSTED: only hosted attachments allowed (local file:// based
          entries are rejected)
        - LOCAL: both local and hosted attachments allowed
        When None (the default) no location filtering is performed at
        all.  This can also be set globally on the Apprise object.
    :raises TypeError: if location is not a valid ContentLocation, or
        if any of the provided paths fails to load.
    """
    # Initialize our attachment listings
    self.attachments = list()

    # Set our cache flag (the default used by add() when none is given)
    self.cache = cache

    # Prepare our Asset Object; fall back to a default when the caller
    # did not hand us a valid one
    self.asset = \
        asset if isinstance(asset, AppriseAsset) else AppriseAsset()

    # Validate the location against the known ContentLocation values
    if location is not None and location not in CONTENT_LOCATIONS:
        msg = "An invalid Attachment location ({}) was specified." \
            .format(location)
        logger.warning(msg)
        raise TypeError(msg)

    # Store our location
    self.location = location

    # Now parse any paths specified
    if paths is not None:
        # Store our path(s); add() returns False when any entry failed
        if not self.add(paths):
            raise TypeError("One or more attachments could not be added.")
|
(self, paths=None, asset=None, cache=True, location=None, **kwargs)
|
721,932 |
apprise.apprise_attachment
|
__iter__
|
Returns an iterator to our attachment list
|
def __iter__(self):
    """Yield each loaded attachment in insertion order."""
    yield from self.attachments
|
(self)
|
721,933 |
apprise.apprise_attachment
|
__len__
|
Returns the number of attachment entries loaded
|
def __len__(self):
    """Return how many attachment entries are currently loaded."""
    count = len(self.attachments)
    return count
|
(self)
|
721,934 |
apprise.apprise_attachment
|
add
|
Adds one or more attachments into our list.
By default we cache our responses so that subsequent calls do not
cause the content to be retrieved again. For local file references
this makes no difference at all. But for remote content, this does
mean more than one call can be made to retrieve the (same) data. This
method can be somewhat inefficient if disabled. Only disable caching
if you understand the consequences.
You can alternatively set the cache value to an int identifying the
number of seconds the previously retrieved can exist for before it
should be considered expired.
It's also worth noting that the cache value is only set to elements
that are not already of subclass AttachBase()
|
def add(self, attachments, asset=None, cache=None):
    """
    Add one or more attachments into our list.

    :param attachments: a single AttachBase object, a single path/URL
        string, or a tuple/set/list mixing any of: strings,
        AttachBase objects, or AppriseAttachment containers (whose
        entries are merged in).
    :param asset: optional AppriseAsset override; defaults to the
        instance-level asset.
    :param cache: caching behaviour for newly instantiated entries.
        True/False toggles caching; an int sets the number of seconds
        previously retrieved content remains valid before it is
        considered expired.  Only applies to entries that are not
        already AttachBase instances.  Defaults to the instance-level
        cache setting when None.
    :return: True when every entry was added; False when at least one
        entry was rejected (remaining entries are still processed).
    """
    # Initialize our return status
    return_status = True

    # Initialize our default cache value (fall back to instance default)
    cache = cache if cache is not None else self.cache

    if asset is None:
        # prepare default asset
        asset = self.asset

    if isinstance(attachments, AttachBase):
        # Go ahead and just add our attachment into our list; note this
        # fast path skips the location-based filtering applied below
        self.attachments.append(attachments)
        return True

    elif isinstance(attachments, str):
        # Normalize a lone path into an iterable
        attachments = (attachments, )

    elif not isinstance(attachments, (tuple, set, list)):
        logger.error(
            'An invalid attachment url (type={}) was '
            'specified.'.format(type(attachments)))
        return False

    # Iterate over our attachments
    for _attachment in attachments:
        if self.location == ContentLocation.INACCESSIBLE:
            # Attachments are globally disabled; reject every entry
            logger.warning(
                "Attachments are disabled; ignoring {}"
                .format(_attachment))
            return_status = False
            continue

        if isinstance(_attachment, str):
            logger.debug("Loading attachment: {}".format(_attachment))
            # Instantiate ourselves an object, this function throws or
            # returns None if it fails
            instance = AppriseAttachment.instantiate(
                _attachment, asset=asset, cache=cache)
            if not isinstance(instance, AttachBase):
                return_status = False
                continue

        elif isinstance(_attachment, AppriseAttachment):
            # We were provided a list of Apprise Attachments
            # append our content together
            instance = _attachment.attachments

        elif not isinstance(_attachment, AttachBase):
            logger.warning(
                "An invalid attachment (type={}) was specified.".format(
                    type(_attachment)))
            return_status = False
            continue

        else:
            # our entry is of type AttachBase, so just go ahead and point
            # our instance to it for some post processing below
            instance = _attachment

        # Apply some simple logic if our location flag is set
        # NOTE(review): when `instance` came from an AppriseAttachment
        # container it is a plain list here, which has no `.location`
        # attribute — this check looks like it would raise in that case
        # when self.location is set; confirm upstream behaviour.
        if self.location and ((
                self.location == ContentLocation.HOSTED
                and instance.location != ContentLocation.HOSTED)
                or instance.location == ContentLocation.INACCESSIBLE):
            logger.warning(
                "Attachment was disallowed due to accessibility "
                "restrictions ({}->{}): {}".format(
                    self.location, instance.location,
                    instance.url(privacy=True)))
            return_status = False
            continue

        # Add our initialized plugin to our server listings
        if isinstance(instance, list):
            self.attachments.extend(instance)

        else:
            self.attachments.append(instance)

    # Return our status
    return return_status
|
(self, attachments, asset=None, cache=None)
|
721,935 |
apprise.apprise_attachment
|
clear
|
Empties our attachment list
|
def clear(self):
    """Reset the attachment list back to an empty state."""
    del self.attachments[:]
|
(self)
|
721,936 |
apprise.apprise_attachment
|
instantiate
|
Returns the instance of a instantiated attachment plugin based on
the provided Attachment URL. If the url fails to be parsed, then None
is returned.
A specified cache value will over-ride anything set
|
@staticmethod
def instantiate(url, asset=None, cache=None, suppress_exceptions=True):
    """
    Instantiate an attachment plugin from the provided attachment URL.

    :param url: the attachment URL (or bare file path; a missing schema
        is treated as ``file://``).
    :param asset: optional AppriseAsset; a default is built otherwise.
    :param cache: when not None, over-rides any cache setting parsed
        out of the URL itself.
    :param suppress_exceptions: when True (the default) plugin load
        failures are logged and None is returned; when False, the
        plugin's exception propagates to the caller.
    :return: the instantiated attachment plugin, or None on failure
        (unparseable/unsupported URL, or a suppressed load error).
    """
    # Attempt to acquire the schema at the very least to allow our
    # attachment based urls.
    schema = GET_SCHEMA_RE.match(url)
    if schema is None:
        # Plan B is to assume we're dealing with a local file path
        schema = 'file'
        url = '{}://{}'.format(schema, URLBase.quote(url))

    else:
        # Ensure our schema is always in lower case
        schema = schema.group('schema').lower()

        # Some basic validation
        if schema not in A_MGR:
            logger.warning('Unsupported schema {}.'.format(schema))
            return None

    # Parse our url details of the server object as a dictionary
    # containing all of the information parsed from our URL
    results = A_MGR[schema].parse_url(url)

    if not results:
        # Failed to parse the server URL
        logger.warning('Unparseable URL {}.'.format(url))
        return None

    # Prepare our Asset Object
    results['asset'] = \
        asset if isinstance(asset, AppriseAsset) else AppriseAsset()

    if cache is not None:
        # Force an over-ride of the cache value to what we have specified
        results['cache'] = cache

    if suppress_exceptions:
        try:
            # Attempt to create an instance of our plugin using the parsed
            # URL information
            attach_plugin = A_MGR[results['schema']](**results)

        except Exception:
            # the arguments are invalid or can not be used.
            # Fix: use lazy %-style logging args instead of eagerly
            # interpolating with the % operator.
            logger.warning('Could not load URL: %s', url)
            return None

    else:
        # Attempt to create an instance of our plugin using the parsed
        # URL information but don't wrap it in a try catch
        attach_plugin = A_MGR[results['schema']](**results)

    return attach_plugin
|
(url, asset=None, cache=None, suppress_exceptions=True)
|
721,937 |
apprise.apprise_attachment
|
pop
|
Removes an indexed Apprise Attachment from the stack and returns it.
by default the last element is popped from the list
|
def pop(self, index=-1):
    """
    Remove and return the attachment stored at ``index``.

    When no index is supplied, the most recently added entry is removed.
    """
    entry = self.attachments.pop(index)
    return entry
|
(self, index=-1)
|
721,938 |
apprise.apprise_attachment
|
size
|
Returns the total size of accumulated attachments
|
def size(self):
    """
    Return the total size (in bytes) of all accumulated attachments.

    Zero-length entries contribute nothing to the sum, so the redundant
    ``if len(a) > 0`` filter from the original was dropped; the
    intermediate list was replaced with a generator expression.
    """
    return sum(len(a) for a in self.attachments)
|
(self)
|
721,939 |
apprise.apprise_config
|
AppriseConfig
|
Our Apprise Configuration File Manager
- Supports a list of URLs defined one after another (text format)
- Supports a distinct YAML configuration format
|
class AppriseConfig:
    """
    Our Apprise Configuration File Manager.

    - Supports a list of URLs defined one after another (text format)
    - Supports a distinct YAML configuration format
    """

    def __init__(self, paths=None, asset=None, cache=True, recursion=0,
                 insecure_includes=False, **kwargs):
        """
        Load all of the configuration paths specified (if any).

        :param paths: a single configuration path/URL (str) or an
            iterable of them.
        :param asset: optional AppriseAsset object; a default is built
            when omitted.
        :param cache: default caching behaviour.  True/False toggles
            caching of retrieved configuration; an int sets the number
            of seconds previously retrieved content remains valid
            before being considered expired.  Only applies to entries
            that are not already ConfigBase instances.
        :param recursion: how deep to recursively follow ``import``
            keyword entries found inside retrieved configuration.  Zero
            (the default) disables recursion; there is no upper bound,
            but keeping this low is recommended.
        :param insecure_includes: when True, configuration plugins
            marked STRICT are treated as ALWAYS, allowing e.g. an
            http:// configuration to import a file:// based one.
            Useful for memory-loaded configuration containing file://
            includes.
        """
        # Initialize a server list of URLs
        self.configs = list()

        # Prepare our Asset Object
        self.asset = \
            asset if isinstance(asset, AppriseAsset) else AppriseAsset()

        # Set our cache flag
        self.cache = cache

        # Initialize our recursion value
        self.recursion = recursion

        # Initialize our insecure_includes flag
        self.insecure_includes = insecure_includes

        if paths is not None:
            # Store our path(s)
            self.add(paths)

        return

    def add(self, configs, asset=None, tag=None, cache=True, recursion=None,
            insecure_includes=None):
        """
        Add one or more config URLs into our list.

        :param configs: a single ConfigBase object, a single URL/path
            string, or a tuple/set/list mixing either.
        :param asset: optional AppriseAsset override (defaults to the
            instance-level asset).
        :param tag: tag(s) to associate with the added configuration.
        :param cache: caching behaviour for newly instantiated entries;
            True/False, or an int number of seconds of validity.  Only
            applies to entries that are not already ConfigBase.
        :param recursion: optional override of the default recursion
            value.
        :param insecure_includes: optional override; when True, plugins
            set to STRICT mode are treated as ALWAYS.
        :return: True when every entry was added; False otherwise.
        """
        # Initialize our return status
        return_status = True

        # Initialize our default cache value
        # NOTE(review): the parameter default is True (not None), so
        # the instance-level self.cache is only consulted when a caller
        # explicitly passes cache=None — confirm this is intended.
        cache = cache if cache is not None else self.cache

        # Initialize our default recursion value
        recursion = recursion if recursion is not None else self.recursion

        # Initialize our default insecure_includes value
        insecure_includes = \
            insecure_includes if insecure_includes is not None \
            else self.insecure_includes

        if asset is None:
            # prepare default asset
            asset = self.asset

        if isinstance(configs, ConfigBase):
            # Go ahead and just add our configuration into our list
            self.configs.append(configs)
            return True

        elif isinstance(configs, str):
            # Normalize a lone path into an iterable
            configs = (configs, )

        elif not isinstance(configs, (tuple, set, list)):
            logger.error(
                'An invalid configuration path (type={}) was '
                'specified.'.format(type(configs)))
            return False

        # Iterate over our configuration
        for _config in configs:

            if isinstance(_config, ConfigBase):
                # Go ahead and just add our configuration into our list
                self.configs.append(_config)
                continue

            elif not isinstance(_config, str):
                logger.warning(
                    "An invalid configuration (type={}) was specified.".format(
                        type(_config)))
                return_status = False
                continue

            logger.debug("Loading configuration: {}".format(_config))

            # Instantiate ourselves an object, this function throws or
            # returns None if it fails
            instance = AppriseConfig.instantiate(
                _config, asset=asset, tag=tag, cache=cache,
                recursion=recursion, insecure_includes=insecure_includes)
            if not isinstance(instance, ConfigBase):
                return_status = False
                continue

            # Add our initialized plugin to our server listings
            self.configs.append(instance)

        # Return our status
        return return_status

    def add_config(self, content, asset=None, tag=None, format=None,
                   recursion=None, insecure_includes=None):
        """
        Add one configuration in its raw (string) format.

        The content is loaded as a memory-based object and only exists
        for the life of this AppriseConfig object.

        :param content: the raw configuration text.
        :param format: 'yaml' or 'text' when known (slightly less
            overhead); auto-detected otherwise.
        :param recursion: optional override of the default recursion
            value.
        :param insecure_includes: optional override; when True, plugins
            set to STRICT mode are treated as ALWAYS.
        :return: True on success; False when the content is not a
            string or its format could not be detected.
        """
        # Initialize our default recursion value
        recursion = recursion if recursion is not None else self.recursion

        # Initialize our default insecure_includes value
        insecure_includes = \
            insecure_includes if insecure_includes is not None \
            else self.insecure_includes

        if asset is None:
            # prepare default asset
            asset = self.asset

        if not isinstance(content, str):
            logger.warning(
                "An invalid configuration (type={}) was specified.".format(
                    type(content)))
            return False

        logger.debug("Loading raw configuration: {}".format(content))

        # Create ourselves a ConfigMemory Object to store our configuration
        instance = C_MGR['memory'](
            content=content, format=format, asset=asset, tag=tag,
            recursion=recursion, insecure_includes=insecure_includes)

        if instance.config_format not in CONFIG_FORMATS:
            logger.warning(
                "The format of the configuration could not be deteced.")
            return False

        # Add our initialized plugin to our server listings
        self.configs.append(instance)

        # Return our status
        return True

    def servers(self, tag=common.MATCH_ALL_TAG, match_always=True, *args,
                **kwargs):
        """
        Return all of our servers dynamically built from the parsed
        configuration.

        The tag applies to the configuration sources themselves (not
        the notification services inside them); it filters which
        configuration entries are polled for results.  When
        match_always is set, any configuration tagged with the 'always'
        keyword is included regardless of the tag filter.
        """
        # A match_always flag allows us to pick up on our 'any' keyword
        # and notify these services under all circumstances
        match_always = common.MATCH_ALWAYS_TAG if match_always else None

        # Build our tag setup
        #   - top level entries are treated as an 'or'
        #   - second level (or more) entries are treated as 'and'
        #
        # examples:
        #   tag="tagA, tagB"                = tagA or tagB
        #   tag=['tagA', 'tagB']            = tagA or tagB
        #   tag=[('tagA', 'tagC'), 'tagB']  = (tagA and tagC) or tagB
        #   tag=[('tagB', 'tagC')]          = tagB and tagC

        response = list()

        for entry in self.configs:

            # Apply our tag matching based on our defined logic
            if is_exclusive_match(
                    logic=tag, data=entry.tags, match_all=common.MATCH_ALL_TAG,
                    match_always=match_always):
                # Build ourselves a list of services dynamically and return the
                # as a list
                response.extend(entry.servers())

        return response

    @staticmethod
    def instantiate(url, asset=None, tag=None, cache=None,
                    recursion=0, insecure_includes=False,
                    suppress_exceptions=True):
        """
        Return an instantiated configuration plugin for the provided
        Config URL, or None when the url fails to be parsed (or, with
        suppress_exceptions set, when the plugin fails to load).
        """
        # Attempt to acquire the schema at the very least to allow our
        # configuration based urls.
        schema = GET_SCHEMA_RE.match(url)
        if schema is None:
            # Plan B is to assume we're dealing with a file
            schema = 'file'
            url = '{}://{}'.format(schema, URLBase.quote(url))

        else:
            # Ensure our schema is always in lower case
            schema = schema.group('schema').lower()

            # Some basic validation
            if schema not in C_MGR:
                logger.warning('Unsupported schema {}.'.format(schema))
                return None

        # Parse our url details of the server object as dictionary containing
        # all of the information parsed from our URL
        results = C_MGR[schema].parse_url(url)

        if not results:
            # Failed to parse the server URL
            logger.warning('Unparseable URL {}.'.format(url))
            return None

        # Build a list of tags to associate with the newly added notifications
        results['tag'] = set(parse_list(tag))

        # Prepare our Asset Object
        results['asset'] = \
            asset if isinstance(asset, AppriseAsset) else AppriseAsset()

        if cache is not None:
            # Force an over-ride of the cache value to what we have specified
            results['cache'] = cache

        # Recursion can never be parsed from the URL
        results['recursion'] = recursion

        # Insecure includes flag can never be parsed from the URL
        results['insecure_includes'] = insecure_includes

        if suppress_exceptions:
            try:
                # Attempt to create an instance of our plugin using the parsed
                # URL information
                cfg_plugin = C_MGR[results['schema']](**results)

            except Exception:
                # the arguments are invalid or can not be used.
                logger.warning('Could not load URL: %s' % url)
                return None

        else:
            # Attempt to create an instance of our plugin using the parsed
            # URL information but don't wrap it in a try catch
            cfg_plugin = C_MGR[results['schema']](**results)

        return cfg_plugin

    def clear(self):
        """
        Empty our configuration list.
        """
        self.configs[:] = []

    def server_pop(self, index):
        """
        Remove the notification at the given index from the aggregated
        server list (spanning all loaded configurations) and return it.

        :raises IndexError: when the index falls outside the combined
            range of all configurations' servers.
        """
        # Tracking variables: prev_offset is the last index covered by
        # earlier configurations; offset is the last index covered
        # including the current one
        prev_offset = -1
        offset = prev_offset

        for entry in self.configs:

            # cache=True reuses any previously parsed server results
            servers = entry.servers(cache=True)
            if len(servers) > 0:
                # Acquire a new maximum offset to work with
                offset = prev_offset + len(servers)

                if offset >= index:
                    # we can pop a notification from our config stack;
                    # translate the global index into this entry's local
                    # index
                    return entry.pop(index if prev_offset == -1
                                     else (index - prev_offset - 1))

                # Update our old offset
                prev_offset = offset

        # If we reach here, then we indexed out of range
        raise IndexError('list index out of range')

    def pop(self, index=-1):
        """
        Remove the indexed Apprise Configuration from the stack and
        return it.  By default, the last element is removed.
        """
        # Remove our entry
        return self.configs.pop(index)

    def __getitem__(self, index):
        """
        Return the indexed config entry of a loaded apprise configuration.
        """
        return self.configs[index]

    def __bool__(self):
        """
        Allow the object to be wrapped in an 'if statement'; True is
        returned when at least one configuration has been loaded.
        """
        return True if self.configs else False

    def __iter__(self):
        """
        Return an iterator over our config list.
        """
        return iter(self.configs)

    def __len__(self):
        """
        Return the number of config entries loaded.
        """
        return len(self.configs)
|
(paths=None, asset=None, cache=True, recursion=0, insecure_includes=False, **kwargs)
|
721,940 |
apprise.apprise_config
|
__bool__
|
Allows the Apprise object to be wrapped in an 'if statement'.
True is returned if at least one service has been loaded.
|
def __bool__(self):
    """
    Truthiness support so the object can drive an ``if`` statement.

    Returns True when at least one configuration has been loaded.  The
    verbose ``True if x else False`` form was replaced with the
    idiomatic ``bool(x)``.
    """
    return bool(self.configs)
|
(self)
|
721,941 |
apprise.apprise_config
|
__getitem__
|
Returns the indexed config entry of a loaded apprise configuration
|
def __getitem__(self, index):
    """Support indexed access into the loaded configuration entries."""
    entries = self.configs
    return entries[index]
(self, index)
|
721,942 |
apprise.apprise_config
|
__init__
|
Loads all of the paths specified (if any).
The path can either be a single string identifying one explicit
location, otherwise you can pass in a series of locations to scan
via a list.
If no path is specified then a default list is used.
By default we cache our responses so that subsiquent calls does not
cause the content to be retrieved again. Setting this to False does
mean more then one call can be made to retrieve the (same) data. This
method can be somewhat inefficient if disabled and you're set up to
make remote calls. Only disable caching if you understand the
consequences.
You can alternatively set the cache value to an int identifying the
number of seconds the previously retrieved can exist for before it
should be considered expired.
It's also worth nothing that the cache value is only set to elements
that are not already of subclass ConfigBase()
recursion defines how deep we recursively handle entries that use the
`import` keyword. This keyword requires us to fetch more configuration
from another source and add it to our existing compilation. If the
file we remotely retrieve also has an `import` reference, we will only
advance through it if recursion is set to 2 deep. If set to zero
it is off. There is no limit to how high you set this value. It would
be recommended to keep it low if you do intend to use it.
insecure includes by default are disabled. When set to True, all
Apprise Config files marked to be in STRICT mode are treated as being
in ALWAYS mode.
Take a file:// based configuration for example, only a file:// based
configuration can import another file:// based one. because it is set
to STRICT mode. If an http:// based configuration file attempted to
import a file:// one it would fail. However this import would be
possible if insecure_includes is set to True.
There are cases where a self hosting apprise developer may wish to load
configuration from memory (in a string format) that contains import
entries (even file:// based ones). In these circumstances if you want
these includes to be honored, this value must be set to True.
|
def __init__(self, paths=None, asset=None, cache=True, recursion=0,
             insecure_includes=False, **kwargs):
    """
    Load all of the configuration paths specified (if any).

    :param paths: a single configuration path/URL (str) or an iterable
        of them.
    :param asset: optional AppriseAsset object; a default is built when
        omitted.
    :param cache: default caching behaviour.  True/False toggles
        caching of retrieved configuration; an int sets the number of
        seconds previously retrieved content remains valid before being
        considered expired.  Only applies to entries that are not
        already ConfigBase instances.
    :param recursion: how deep to recursively follow ``import`` keyword
        entries found inside retrieved configuration.  Zero (the
        default) disables recursion; there is no upper bound, but
        keeping this low is recommended.
    :param insecure_includes: when True, configuration plugins marked
        STRICT are treated as ALWAYS, allowing e.g. an http://
        configuration to import a file:// based one.  Useful for
        memory-loaded configuration containing file:// includes.
    """
    # Initialize a server list of URLs
    self.configs = list()

    # Prepare our Asset Object
    self.asset = \
        asset if isinstance(asset, AppriseAsset) else AppriseAsset()

    # Set our cache flag
    self.cache = cache

    # Initialize our recursion value
    self.recursion = recursion

    # Initialize our insecure_includes flag
    self.insecure_includes = insecure_includes

    if paths is not None:
        # Store our path(s)
        self.add(paths)

    return
|
(self, paths=None, asset=None, cache=True, recursion=0, insecure_includes=False, **kwargs)
|
721,943 |
apprise.apprise_config
|
__iter__
|
Returns an iterator to our config list
|
def __iter__(self):
    """Yield each loaded configuration entry in insertion order."""
    yield from self.configs
|
(self)
|
721,944 |
apprise.apprise_config
|
__len__
|
Returns the number of config entries loaded
|
def __len__(self):
    """Return how many configuration entries are currently loaded."""
    count = len(self.configs)
    return count
|
(self)
|
721,945 |
apprise.apprise_config
|
add
|
Adds one or more config URLs into our list.
You can override the global asset if you wish by including it with the
config(s) that you add.
By default we cache our responses so that subsiquent calls does not
cause the content to be retrieved again. Setting this to False does
mean more then one call can be made to retrieve the (same) data. This
method can be somewhat inefficient if disabled and you're set up to
make remote calls. Only disable caching if you understand the
consequences.
You can alternatively set the cache value to an int identifying the
number of seconds the previously retrieved can exist for before it
should be considered expired.
It's also worth nothing that the cache value is only set to elements
that are not already of subclass ConfigBase()
Optionally override the default recursion value.
Optionally override the insecure_includes flag.
if insecure_includes is set to True then all plugins that are
set to a STRICT mode will be a treated as ALWAYS.
|
def add(self, configs, asset=None, tag=None, cache=True, recursion=None,
        insecure_includes=None):
    """
    Add one or more configuration URLs to this object.

    An alternate asset may be supplied to override the global one. The
    cache flag (a bool, or an integer number of seconds) controls whether
    previously retrieved content may be re-used between calls; only
    disable it if you understand the consequences. The recursion and
    insecure_includes arguments override the instance defaults when
    provided (with insecure_includes set, plugins in STRICT mode are
    treated as ALWAYS).

    Returns True only when every entry was loaded successfully.
    """
    # Fall back onto our instance defaults for any unset overrides
    if cache is None:
        cache = self.cache
    if recursion is None:
        recursion = self.recursion
    if insecure_includes is None:
        insecure_includes = self.insecure_includes
    if asset is None:
        # prepare default asset
        asset = self.asset

    # A pre-instantiated configuration can be stored directly
    if isinstance(configs, ConfigBase):
        self.configs.append(configs)
        return True

    if isinstance(configs, str):
        # Normalize a single URL into an iterable
        configs = (configs, )

    elif not isinstance(configs, (tuple, set, list)):
        logger.error(
            'An invalid configuration path (type={}) was '
            'specified.'.format(type(configs)))
        return False

    # Track whether every entry loads cleanly
    status = True
    for entry in configs:
        if isinstance(entry, ConfigBase):
            # Already instantiated; store as-is
            self.configs.append(entry)
            continue

        if not isinstance(entry, str):
            logger.warning(
                "An invalid configuration (type={}) was specified.".format(
                    type(entry)))
            status = False
            continue

        logger.debug("Loading configuration: {}".format(entry))

        # instantiate() throws or returns None if it fails
        obj = AppriseConfig.instantiate(
            entry, asset=asset, tag=tag, cache=cache,
            recursion=recursion, insecure_includes=insecure_includes)
        if not isinstance(obj, ConfigBase):
            status = False
            continue

        # Add our initialized plugin to our server listings
        self.configs.append(obj)

    return status
|
(self, configs, asset=None, tag=None, cache=True, recursion=None, insecure_includes=None)
|
721,946 |
apprise.apprise_config
|
add_config
|
Adds one configuration file in it's raw format. Content gets loaded as
a memory based object and only exists for the life of this
AppriseConfig object it was loaded into.
If you know the format ('yaml' or 'text') you can specify
it for slightly less overhead during this call. Otherwise the
configuration is auto-detected.
Optionally override the default recursion value.
Optionally override the insecure_includes flag.
if insecure_includes is set to True then all plugins that are
set to a STRICT mode will be a treated as ALWAYS.
|
def add_config(self, content, asset=None, tag=None, format=None,
               recursion=None, insecure_includes=None):
    """
    Load a configuration buffer directly from a raw string.

    The content only lives in memory for the lifetime of this
    AppriseConfig object. If the format ('yaml' or 'text') is known it
    may be specified to skip auto-detection. The recursion and
    insecure_includes arguments override the instance defaults when
    provided.

    Returns True on success, otherwise False.
    """
    # Apply instance level defaults where no override was given
    if recursion is None:
        recursion = self.recursion
    if insecure_includes is None:
        insecure_includes = self.insecure_includes
    if asset is None:
        # prepare default asset
        asset = self.asset

    if not isinstance(content, str):
        logger.warning(
            "An invalid configuration (type={}) was specified.".format(
                type(content)))
        return False

    logger.debug("Loading raw configuration: {}".format(content))

    # Wrap the raw buffer inside a memory based configuration object
    entry = C_MGR['memory'](
        content=content, format=format, asset=asset, tag=tag,
        recursion=recursion, insecure_includes=insecure_includes)

    if entry.config_format not in CONFIG_FORMATS:
        logger.warning(
            "The format of the configuration could not be deteced.")
        return False

    # Track our newly initialized configuration
    self.configs.append(entry)
    return True
|
(self, content, asset=None, tag=None, format=None, recursion=None, insecure_includes=None)
|
721,947 |
apprise.apprise_config
|
clear
|
Empties our configuration list
|
def clear(self):
    """
    Remove every loaded configuration entry.
    """
    # Clear in place so external references to the list remain valid
    del self.configs[:]
|
(self)
|
721,948 |
apprise.apprise_config
|
instantiate
|
Returns the instance of an instantiated configuration plugin based on
the provided Config URL. If the url fails to be parsed, then None
is returned.
|
@staticmethod
def instantiate(url, asset=None, tag=None, cache=None,
                recursion=0, insecure_includes=False,
                suppress_exceptions=True):
    """
    Returns an instantiated configuration plugin based on the provided
    Config URL.  If the url fails to be parsed, then None is returned
    (or, when suppress_exceptions is False, the underlying exception is
    allowed to propagate to the caller).

    The tag(s) provided are associated with the configuration source;
    cache (when not None) forcefully overrides any cache setting parsed
    from the URL; recursion and insecure_includes are never parsed from
    the URL and are always applied from the arguments.
    """
    # Attempt to acquire the schema at the very least to allow our
    # configuration based urls.
    schema = GET_SCHEMA_RE.match(url)
    if schema is None:
        # Plan B is to assume we're dealing with a file
        schema = 'file'
        url = '{}://{}'.format(schema, URLBase.quote(url))
    else:
        # Ensure our schema is always in lower case
        schema = schema.group('schema').lower()
    # Some basic validation
    if schema not in C_MGR:
        logger.warning('Unsupported schema {}.'.format(schema))
        return None
    # Parse our url details of the server object as dictionary containing
    # all of the information parsed from our URL
    results = C_MGR[schema].parse_url(url)
    if not results:
        # Failed to parse the server URL
        logger.warning('Unparseable URL {}.'.format(url))
        return None
    # Build a list of tags to associate with the newly added notifications
    results['tag'] = set(parse_list(tag))
    # Prepare our Asset Object
    results['asset'] = \
        asset if isinstance(asset, AppriseAsset) else AppriseAsset()
    if cache is not None:
        # Force an over-ride of the cache value to what we have specified
        results['cache'] = cache
    # Recursion can never be parsed from the URL
    results['recursion'] = recursion
    # Insecure includes flag can never be parsed from the URL
    results['insecure_includes'] = insecure_includes
    if suppress_exceptions:
        try:
            # Attempt to create an instance of our plugin using the parsed
            # URL information
            cfg_plugin = C_MGR[results['schema']](**results)
        except Exception:
            # the arguments are invalid or can not be used.
            logger.warning('Could not load URL: %s' % url)
            return None
    else:
        # Attempt to create an instance of our plugin using the parsed
        # URL information but don't wrap it in a try catch
        cfg_plugin = C_MGR[results['schema']](**results)
    return cfg_plugin
|
(url, asset=None, tag=None, cache=None, recursion=0, insecure_includes=False, suppress_exceptions=True)
|
721,949 |
apprise.apprise_config
|
pop
|
Removes an indexed Apprise Configuration from the stack and returns it.
By default, the last element is removed from the list
|
def pop(self, index=-1):
    """
    Remove and return the configuration at the given position.

    Without an index, the most recently added entry is removed.
    Raises IndexError when out of range (per list semantics).
    """
    # Delegate to the underlying list
    entry = self.configs.pop(index)
    return entry
|
(self, index=-1)
|
721,950 |
apprise.apprise_config
|
server_pop
|
Removes an indexed Apprise Notification from the servers
|
def server_pop(self, index):
    """
    Removes an indexed Apprise Notification from the servers.

    The index is interpreted against the flattened sequence of all
    notification services produced across every loaded configuration
    entry (in the order servers() would return them).

    Raises IndexError if the index does not fall within that range.
    """
    # Tracking variables
    prev_offset = -1
    offset = prev_offset
    for entry in self.configs:
        # cache=True keeps indexing consistent with prior servers() calls
        servers = entry.servers(cache=True)
        if len(servers) > 0:
            # Acquire a new maximum offset to work with
            offset = prev_offset + len(servers)
            if offset >= index:
                # we can pop a notification from our config stack;
                # translate the global index into an entry-local one
                return entry.pop(index if prev_offset == -1
                                 else (index - prev_offset - 1))
            # Update our old offset
            prev_offset = offset
    # If we reach here, then we indexed out of range
    raise IndexError('list index out of range')
|
(self, index)
|
721,951 |
apprise.apprise_config
|
servers
|
Returns all of our servers dynamically build based on parsed
configuration.
If a tag is specified, it applies to the configuration sources
themselves and not the notification services inside them.
This is for filtering the configuration files polled for
results.
If the anytag is set, then any notification that is found
set with that tag are included in the response.
|
def servers(self, tag=common.MATCH_ALL_TAG, match_always=True, *args,
            **kwargs):
    """
    Dynamically build and return the notification servers found inside
    our matched configuration entries.

    Tags filter the configuration sources themselves, not the
    notification services inside them. Tag logic: top level entries are
    OR'ed while nested tuples/lists are AND'ed, e.g.
    tag=[('tagA', 'tagC'), 'tagB'] means (tagA and tagC) or tagB.

    When match_always is set, configurations carrying the reserved
    'always' keyword tag are included unconditionally.
    """
    # Translate our boolean into the reserved 'always' tag (or disable it)
    always = common.MATCH_ALWAYS_TAG if match_always else None

    # Collect the services from every configuration whose tags satisfy
    # our matching logic
    found = []
    for config in self.configs:
        if not is_exclusive_match(
                logic=tag, data=config.tags,
                match_all=common.MATCH_ALL_TAG, match_always=always):
            continue
        found.extend(config.servers())
    return found
|
(self, tag='all', match_always=True, *args, **kwargs)
|
721,952 |
apprise.locale
|
AppriseLocale
|
A wrapper class to gettext so that we can manipulate multiple languages
on the fly if required.
|
class AppriseLocale:
    """
    A wrapper class to gettext so that we can manipulate multiple languages
    on the fly if required.
    """
    # Define our translation domain
    _domain = 'apprise'
    # The path to our translations
    _locale_dir = abspath(join(dirname(__file__), 'i18n'))
    # Locale regular expression; accepts the POSIX 'C' locale or entries
    # such as 'en', 'en_CA' and 'en_CA.utf-8'
    _local_re = re.compile(
        r'^((?P<ansii>C)|(?P<lang>([a-z]{2}))([_:](?P<country>[a-z]{2}))?)'
        r'(\.(?P<enc>[a-z0-9-]+))?$', re.IGNORECASE)
    # Define our default encoding
    _default_encoding = 'utf-8'
    # The name of the gettext function we map `_` to by default
    _fn = 'gettext'
    # The language we should fall back to if all else fails
    _default_language = 'en'
    def __init__(self, language=None):
        """
        Initializes our object, if a language is specified, then we
        initialize ourselves to that, otherwise we use whatever we detect
        from the local operating system. If all else fails, we resort to the
        defined default_language.
        """
        # Cache previously loaded translations
        self._gtobjs = {}
        # Get our language
        self.lang = AppriseLocale.detect_language(language)
        # Our mapping to our _fn
        self.__fn_map = None
        if GETTEXT_LOADED is False:
            # gettext is unavailable on this system; we're done
            return
        # Add language
        self.add(self.lang)
    def add(self, lang=None, set_default=True):
        """
        Add a language to our list; returns True on success, False if the
        translation content could not be located.
        """
        lang = lang if lang else self._default_language
        if lang not in self._gtobjs:
            # Load our gettext object and install our language
            try:
                self._gtobjs[lang] = gettext.translation(
                    self._domain, localedir=self._locale_dir, languages=[lang],
                    fallback=False)
                # The non-intrusive method of applying the gettext change to
                # the global namespace only
                self.__fn_map = getattr(self._gtobjs[lang], self._fn)
            except FileNotFoundError:
                # The translation directory does not exist
                logger.debug(
                    'Could not load translation path: %s',
                    join(self._locale_dir, lang))
                # Fallback (handle case where self.lang does not exist)
                if self.lang not in self._gtobjs:
                    self._gtobjs[self.lang] = gettext
                    self.__fn_map = getattr(self._gtobjs[self.lang], self._fn)
                return False
            logger.trace('Loaded language %s', lang)
        if set_default:
            logger.debug('Language set to %s', lang)
            self.lang = lang
        return True
    @contextlib.contextmanager
    def lang_at(self, lang, mapto=_fn):
        """
        Context manager that temporarily applies another language.

        The syntax works as:
            with at.lang_at('fr'):
                # apprise works as though the french language has been
                # defined. afterwards, the language falls back to whatever
                # it was.
        """
        if GETTEXT_LOADED is False:
            # Do nothing
            yield None
            # we're done
            return
        # Tidy the language
        lang = AppriseLocale.detect_language(lang, detect_fallback=False)
        if lang not in self._gtobjs and not self.add(lang, set_default=False):
            # Do Nothing; fall back onto the currently active language
            yield getattr(self._gtobjs[self.lang], mapto)
        else:
            # Yield the requested language's translation function
            yield getattr(self._gtobjs[lang], mapto)
        return
    @property
    def gettext(self):
        """
        Return the current language gettext() function.
        Useful for assigning to `_`
        """
        return self._gtobjs[self.lang].gettext
    @staticmethod
    def detect_language(lang=None, detect_fallback=True):
        """
        Returns the 2-letter language code (if it's retrievable),
        otherwise None.
        """
        # We want to only use the 2 character version of this language
        # hence en_CA becomes en, en_US becomes en.
        if not isinstance(lang, str):
            if detect_fallback is False:
                # no detection enabled; we're done
                return None
            # Posix lookup; scan conventional variables in priority order
            lookup = os.environ.get
            localename = None
            for variable in ('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE'):
                localename = lookup(variable, None)
                if localename:
                    result = AppriseLocale._local_re.match(localename)
                    if result and result.group('lang'):
                        return result.group('lang').lower()
            # Windows handling
            if hasattr(ctypes, 'windll'):
                windll = ctypes.windll.kernel32
                try:
                    lang = locale.windows_locale[
                        windll.GetUserDefaultUILanguage()]
                    # Our detected windows language
                    return lang[0:2].lower()
                except (TypeError, KeyError):
                    # Fallback to posix detection
                    pass
            # Built in locale library check
            try:
                # Acquire our locale
                lang = locale.getlocale()[0]
                # Compatibility for Python >= 3.12
                if lang == 'C':
                    lang = AppriseLocale._default_language
            except (ValueError, TypeError) as e:
                # This occurs when an invalid locale was parsed from the
                # environment variable. While we still return None in this
                # case, we want to better notify the end user of this. Users
                # receiving this error should check their environment
                # variables.
                logger.warning(
                    'Language detection failure / {}'.format(str(e)))
                return None
        return None if not lang else lang[0:2].lower()
    def __getstate__(self):
        """
        Pickle Support dumps(); returns a picklable snapshot of our state.
        """
        state = self.__dict__.copy()
        # Remove the unpicklable entries.
        del state['_gtobjs']
        del state['_AppriseLocale__fn_map']
        return state
    def __setstate__(self, state):
        """
        Pickle Support loads(); restores entries __getstate__() stripped.
        """
        self.__dict__.update(state)
        # Our mapping to our _fn
        self.__fn_map = None
        self._gtobjs = {}
        self.add(state['lang'], set_default=True)
|
(language=None)
|
721,953 |
apprise.locale
|
__getstate__
|
Pickle Support dumps()
|
def __getstate__(self):
"""
Pickle Support dumps()
"""
state = self.__dict__.copy()
# Remove the unpicklable entries.
del state['_gtobjs']
del state['_AppriseLocale__fn_map']
return state
|
(self)
|
721,954 |
apprise.locale
|
__init__
|
Initializes our object, if a language is specified, then we
initialize ourselves to that, otherwise we use whatever we detect
from the local operating system. If all else fails, we resort to the
defined default_language.
|
def __init__(self, language=None):
    """
    Prepare our locale handling.

    When a language is supplied we initialize to it; otherwise the
    language is detected from the local operating system. If all else
    fails, we resort to the defined default_language.
    """
    # Translation objects previously loaded, keyed by language
    self._gtobjs = {}
    # Resolve the language we will operate with
    self.lang = AppriseLocale.detect_language(language)
    # Placeholder for the mapped gettext function
    self.__fn_map = None
    if GETTEXT_LOADED is False:
        # gettext is unavailable; nothing more can be done
        return
    # Register (and default to) the detected language
    self.add(self.lang)
|
(self, language=None)
|
721,955 |
apprise.locale
|
__setstate__
|
Pickle Support loads()
|
def __setstate__(self, state):
"""
Pickle Support loads()
"""
self.__dict__.update(state)
# Our mapping to our _fn
self.__fn_map = None
self._gtobjs = {}
self.add(state['lang'], set_default=True)
|
(self, state)
|
721,956 |
apprise.locale
|
add
|
Add a language to our list
|
def add(self, lang=None, set_default=True):
    """
    Add a language to our cache of loaded translations.

    Returns True if the language was loaded (or was already present);
    False when the translation content could not be found, in which
    case the plain gettext module is installed as a fallback.
    """
    lang = lang if lang else self._default_language
    if lang not in self._gtobjs:
        # Load our gettext object and install our language
        try:
            self._gtobjs[lang] = gettext.translation(
                self._domain, localedir=self._locale_dir, languages=[lang],
                fallback=False)
            # The non-intrusive method of applying the gettext change to
            # the global namespace only
            self.__fn_map = getattr(self._gtobjs[lang], self._fn)
        except FileNotFoundError:
            # The translation directory does not exist
            logger.debug(
                'Could not load translation path: %s',
                join(self._locale_dir, lang))
            # Fallback (handle case where self.lang does not exist)
            if self.lang not in self._gtobjs:
                self._gtobjs[self.lang] = gettext
                self.__fn_map = getattr(self._gtobjs[self.lang], self._fn)
            return False
        logger.trace('Loaded language %s', lang)
    if set_default:
        logger.debug('Language set to %s', lang)
        self.lang = lang
    return True
|
(self, lang=None, set_default=True)
|
721,957 |
apprise.locale
|
detect_language
|
Returns the language (if it's retrievable)
|
@staticmethod
def detect_language(lang=None, detect_fallback=True):
    """
    Returns the 2-letter language code (if it's retrievable), otherwise
    None. Detection order: POSIX environment variables, the Windows
    user-interface language, then Python's locale module.
    """
    # We want to only use the 2 character version of this language
    # hence en_CA becomes en, en_US becomes en.
    if not isinstance(lang, str):
        if detect_fallback is False:
            # no detection enabled; we're done
            return None
        # Posix lookup; scan conventional variables in priority order
        lookup = os.environ.get
        localename = None
        for variable in ('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE'):
            localename = lookup(variable, None)
            if localename:
                result = AppriseLocale._local_re.match(localename)
                if result and result.group('lang'):
                    return result.group('lang').lower()
        # Windows handling
        if hasattr(ctypes, 'windll'):
            windll = ctypes.windll.kernel32
            try:
                lang = locale.windows_locale[
                    windll.GetUserDefaultUILanguage()]
                # Our detected windows language
                return lang[0:2].lower()
            except (TypeError, KeyError):
                # Fallback to posix detection
                pass
        # Built in locale library check
        try:
            # Acquire our locale
            lang = locale.getlocale()[0]
            # Compatibility for Python >= 3.12
            if lang == 'C':
                lang = AppriseLocale._default_language
        except (ValueError, TypeError) as e:
            # This occurs when an invalid locale was parsed from the
            # environment variable. While we still return None in this
            # case, we want to better notify the end user of this. Users
            # receiving this error should check their environment
            # variables.
            logger.warning(
                'Language detection failure / {}'.format(str(e)))
            return None
    return None if not lang else lang[0:2].lower()
|
(lang=None, detect_fallback=True)
|
721,958 |
apprise.locale
|
lang_at
|
The syntax works as:
with at.lang_at('fr'):
# apprise works as though the french language has been
# defined. afterwards, the language falls back to whatever
# it was.
|
def __init__(self, text, *args, **kwargs):
    """
    Store our text
    """
    # Preserve the raw text before initializing the parent class
    self.text = text
    super().__init__(*args, **kwargs)
|
(self, lang, mapto='gettext')
|
721,959 |
apprise.attachment.base
|
AttachBase
|
This is the base class for all supported attachment types
|
class AttachBase(URLBase):
    """
    This is the base class for all supported attachment types
    """
    # For attachment type detection; this amount of data is read into memory
    # 128KB (131072B)
    max_detect_buffer_size = 131072
    # Unknown mimetype
    unknown_mimetype = 'application/octet-stream'
    # Our filename when we can't otherwise determine one
    unknown_filename = 'apprise-attachment'
    # Our filename extension when we can't otherwise determine one
    unknown_filename_extension = '.obj'
    # The strict argument is a flag specifying whether the list of known MIME
    # types is limited to only the official types registered with IANA. When
    # strict is True, only the IANA types are supported; when strict is False
    # (the default), some additional non-standard but commonly used MIME types
    # are also recognized.
    strict = False
    # The maximum file-size we will accept for an attachment size. If this is
    # set to zero (0), then no check is performed
    # 1 MB = 1048576 bytes
    # 5 MB = 5242880 bytes
    # 1 GB = 1048576000 bytes
    max_file_size = 1048576000
    # By default all attachment types are inaccessible.
    # Developers of items identified in the attachment plugin directory
    # are required to set a location
    location = ContentLocation.INACCESSIBLE
    # Here is where we define all of the arguments we accept on the url
    # such as: schema://whatever/?overflow=upstream&format=text
    # These act the same way as tokens except they are optional and/or
    # have default values set if mandatory. This rule must be followed
    template_args = {
        'cache': {
            'name': _('Cache Age'),
            'type': 'int',
            # We default to (600) which means we cache for 10 minutes
            'default': 600,
        },
        'mime': {
            'name': _('Forced Mime Type'),
            'type': 'string',
        },
        'name': {
            'name': _('Forced File Name'),
            'type': 'string',
        },
        'verify': {
            'name': _('Verify SSL'),
            # SSL Certificate Authority Verification
            'type': 'bool',
            # Provide a default
            'default': True,
        },
    }
    def __init__(self, name=None, mimetype=None, cache=None, **kwargs):
        """
        Initialize some general logging and common server arguments that will
        keep things consistent when working with the attachments that
        inherit this class.
        Optionally provide a filename to over-ride name associated with the
        actual file retrieved (from where-ever).
        The mime-type is automatically detected, but you can over-ride this by
        explicitly stating what it should be.
        By default we cache our responses so that subsequent calls do not
        cause the content to be retrieved again. For local file references
        this makes no difference at all. But for remote content, this does
        mean more than one call can be made to retrieve the (same) data. This
        method can be somewhat inefficient if disabled. Only disable caching
        if you understand the consequences.
        You can alternatively set the cache value to an int identifying the
        number of seconds the previously retrieved can exist for before it
        should be considered expired.

        Raises TypeError when the cache value or mime-type is invalid.
        """
        super().__init__(**kwargs)
        if not mimetypes.inited:
            # Ensure mimetypes has been initialized
            mimetypes.init()
        # Attach Filename (does not have to be the same as path)
        self._name = name
        # The mime type of the attached content. This is detected if not
        # otherwise specified.
        self._mimetype = mimetype
        # The detected_mimetype, this is only used as a fallback if the
        # mimetype wasn't forced by the user
        self.detected_mimetype = None
        # The detected filename by calling child class. A detected filename
        # is always used if no force naming was specified.
        self.detected_name = None
        # Absolute path to attachment
        self.download_path = None
        # Set our cache flag; it can be True, False, None, or a (positive)
        # integer... nothing else
        if cache is not None:
            try:
                self.cache = cache if isinstance(cache, bool) else int(cache)
            except (TypeError, ValueError):
                err = 'An invalid cache value ({}) was specified.'.format(
                    cache)
                self.logger.warning(err)
                raise TypeError(err)
            # Some simple error checking
            if self.cache < 0:
                err = 'A negative cache value ({}) was specified.'.format(
                    cache)
                self.logger.warning(err)
                raise TypeError(err)
        else:
            self.cache = None
        # Validate mimetype if specified
        if self._mimetype:
            if next((t for t in mimetypes.types_map.values()
                     if self._mimetype == t), None) is None:
                err = 'An invalid mime-type ({}) was specified.'.format(
                    mimetype)
                self.logger.warning(err)
                raise TypeError(err)
        return
    @property
    def path(self):
        """
        Returns the absolute path to the filename. If this is not known or
        is known but has been considered expired (due to cache setting), then
        content is re-retrieved prior to returning.
        """
        if not self.exists():
            # we could not obtain our path
            return None
        return self.download_path
    @property
    def name(self):
        """
        Returns the filename
        """
        if self._name:
            # return our fixed content
            return self._name
        if not self.exists():
            # we could not obtain our name
            return None
        if not self.detected_name:
            # If we get here, our download was successful but we don't have a
            # filename based on our content.
            extension = mimetypes.guess_extension(self.mimetype)
            self.detected_name = '{}{}'.format(
                self.unknown_filename,
                extension if extension else self.unknown_filename_extension)
        return self.detected_name
    @property
    def mimetype(self):
        """
        Returns mime type (if one is present).
        Content is cached once determined to prevent overhead of future
        calls.
        """
        if self._mimetype:
            # return our pre-calculated cached content
            return self._mimetype
        if not self.exists():
            # we could not obtain our attachment
            return None
        if not self.detected_mimetype:
            # guess_type() returns: (type, encoding) and sets type to None
            # if it can't otherwise determine it.
            try:
                # Directly reference _name and detected_name to prevent
                # recursion loop (as self.name calls this function)
                self.detected_mimetype, _ = mimetypes.guess_type(
                    self._name if self._name
                    else self.detected_name, strict=self.strict)
            except TypeError:
                # Thrown if None was specified in filename section
                pass
        # Return our mime type
        return self.detected_mimetype \
            if self.detected_mimetype else self.unknown_mimetype
    def exists(self, retrieve_if_missing=True):
        """
        Simply returns true if the object has downloaded and stored the
        attachment AND the attachment has not expired.
        """
        # A cache of None means "use the template default" (600 seconds)
        cache = self.template_args['cache']['default'] \
            if self.cache is None else self.cache
        if self.download_path and os.path.isfile(self.download_path) \
                and cache:
            # We have enough reason to look further into our cached content
            # and verify it has not expired.
            if cache is True:
                # return our fixed content as is; we will always cache it
                return True
            # Verify our cache time to determine whether we will get our
            # content again.
            try:
                age_in_sec = time.time() - os.stat(self.download_path).st_mtime
                if age_in_sec <= cache:
                    return True
            except (OSError, IOError):
                # The file is not present
                pass
        return False if not retrieve_if_missing else self.download()
    def invalidate(self):
        """
        Release any temporary data that may be open by child classes.
        Externally fetched content should be automatically cleaned up when
        this function is called.
        This function should also reset the following entries to None:
            - detected_name : Should identify a human readable filename
            - download_path: Must contain an absolute path to content
            - detected_mimetype: Should identify mimetype of content
        """
        self.detected_name = None
        self.download_path = None
        self.detected_mimetype = None
        return
    def download(self):
        """
        This function must be over-ridden by inheriting classes.
        Inherited classes MUST populate:
            - detected_name: Should identify a human readable filename
            - download_path: Must contain an absolute path to content
            - detected_mimetype: Should identify mimetype of content
        If a download fails, you should ensure these values are set to None.
        """
        raise NotImplementedError(
            "download() is implimented by the child class.")
    @staticmethod
    def parse_url(url, verify_host=True, mimetype_db=None, sanitize=True):
        """Parses the URL and returns it broken apart into a dictionary.
        This is very specific and customized for Apprise.
        Args:
            url (str): The URL you want to fully parse.
            verify_host (:obj:`bool`, optional): a flag kept with the parsed
                URL which some child classes will later use to verify SSL
                keys (if SSL transactions take place). Unless under very
                specific circumstances, it is strongly recommended that
                you leave this default value set to True.
        Returns:
            A dictionary is returned containing the URL fully parsed if
            successful, otherwise None is returned.
        """
        results = URLBase.parse_url(
            url, verify_host=verify_host, sanitize=sanitize)
        if not results:
            # We're done; we failed to parse our url
            return results
        # Allow overriding the default config mime type
        if 'mime' in results['qsd']:
            results['mimetype'] = results['qsd'].get('mime', '') \
                .strip().lower()
        # Allow overriding the default file name
        if 'name' in results['qsd']:
            results['name'] = results['qsd'].get('name', '') \
                .strip().lower()
        # Our cache value
        if 'cache' in results['qsd']:
            # First try to get it's integer value
            try:
                results['cache'] = int(results['qsd']['cache'])
            except (ValueError, TypeError):
                # No problem, it just isn't an integer; now treat it as a bool
                # instead:
                results['cache'] = parse_bool(results['qsd']['cache'])
        return results
    def __len__(self):
        """
        Returns the filesize of the attachment.
        """
        return os.path.getsize(self.path) if self.path else 0
    def __bool__(self):
        """
        Allows the Apprise object to be wrapped in a Python 'if statement'.
        True is returned if our content was downloaded correctly.
        """
        return True if self.path else False
|
(name=None, mimetype=None, cache=None, **kwargs)
|
721,960 |
apprise.attachment.base
|
__bool__
|
Allows the Apprise object to be wrapped in an based 'if statement'.
True is returned if our content was downloaded correctly.
|
def __bool__(self):
"""
Allows the Apprise object to be wrapped in an based 'if statement'.
True is returned if our content was downloaded correctly.
"""
return True if self.path else False
|
(self)
|
721,961 |
apprise.url
|
__contains__
|
Returns true if the tag specified is associated with this notification.
tag can also be a tuple, set, and/or list
|
def __contains__(self, tags):
"""
Returns true if the tag specified is associated with this notification.
tag can also be a tuple, set, and/or list
"""
if isinstance(tags, (tuple, set, list)):
return bool(set(tags) & self.tags)
# return any match
return tags in self.tags
|
(self, tags)
|
721,962 |
apprise.attachment.base
|
__init__
|
Initialize some general logging and common server arguments that will
keep things consistent when working with the configurations that
inherit this class.
Optionally provide a filename to over-ride name associated with the
actual file retrieved (from where-ever).
The mime-type is automatically detected, but you can over-ride this by
explicitly stating what it should be.
By default we cache our responses so that subsiquent calls does not
cause the content to be retrieved again. For local file references
this makes no difference at all. But for remote content, this does
mean more then one call can be made to retrieve the (same) data. This
method can be somewhat inefficient if disabled. Only disable caching
if you understand the consequences.
You can alternatively set the cache value to an int identifying the
number of seconds the previously retrieved can exist for before it
should be considered expired.
|
def __init__(self, name=None, mimetype=None, cache=None, **kwargs):
    """
    Base initialization shared by every attachment type.

    Args:
        name: optional override for the filename associated with the
            retrieved content (does not have to match the actual path).
        mimetype: optional override of the content mime type; when not
            provided it is auto-detected later on.
        cache: True/False to enable/disable caching of remote content,
            or a positive integer identifying the number of seconds a
            previously retrieved copy remains valid for. Disabling the
            cache means remote content may be fetched more than once.

    Raises:
        TypeError: on an invalid cache or mime-type value.
    """
    super().__init__(**kwargs)

    # The mimetypes database must be primed before any detection occurs
    if not mimetypes.inited:
        mimetypes.init()

    # User enforced filename (may differ from the retrieved path)
    self._name = name

    # User enforced mime type (None means: detect it later)
    self._mimetype = mimetype

    # State populated by the child class after a download; the detected
    # entries are only used when no user override was specified
    self.detected_mimetype = None
    self.detected_name = None
    self.download_path = None

    # Normalize our cache flag: True, False, None or a positive integer
    if cache is None:
        self.cache = None

    else:
        try:
            self.cache = cache if isinstance(cache, bool) else int(cache)

        except (TypeError, ValueError):
            err = 'An invalid cache value ({}) was specified.'.format(
                cache)
            self.logger.warning(err)
            raise TypeError(err)

        # Some simple error checking
        if self.cache < 0:
            err = 'A negative cache value ({}) was specified.'.format(
                cache)
            self.logger.warning(err)
            raise TypeError(err)

    # Validate any user-enforced mime type against the known database
    if self._mimetype and \
            self._mimetype not in mimetypes.types_map.values():
        err = 'An invalid mime-type ({}) was specified.'.format(
            mimetype)
        self.logger.warning(err)
        raise TypeError(err)

    return
|
(self, name=None, mimetype=None, cache=None, **kwargs)
|
721,963 |
apprise.attachment.base
|
__len__
|
Returns the filesize of the attachment.
|
def __len__(self):
    """
    Returns the file size (in bytes) of our attachment; zero when no
    content has been downloaded.
    """
    if not self.path:
        return 0
    return os.path.getsize(self.path)
|
(self)
|
721,964 |
apprise.url
|
__str__
|
Returns the url path
|
def __str__(self):
    """
    Returns the privacy-masked URL associated with this object.
    """
    return self.url(privacy=True)
|
(self)
|
721,965 |
apprise.attachment.base
|
download
|
This function must be over-ridden by inheriting classes.
Inherited classes MUST populate:
- detected_name: Should identify a human readable filename
- download_path: Must contain a absolute path to content
- detected_mimetype: Should identify mimetype of content
If a download fails, you should ensure these values are set to None.
|
def download(self):
    """
    Retrieve the attachment content; must be over-ridden by inheriting
    classes.

    Inherited classes MUST populate:
       - detected_name: a human readable filename
       - download_path: an absolute path to the retrieved content
       - detected_mimetype: the mimetype of the content

    If a download fails, these values should be reset to None.

    Raises:
        NotImplementedError: always; this is an abstract hook.
    """
    # fixed spelling of 'implemented' in the raised message
    raise NotImplementedError(
        "download() is implemented by the child class.")
|
(self)
|
721,966 |
apprise.url
|
escape_html
|
Takes html text as input and escapes it so that it won't
conflict with any xml/html wrapping characters.
Args:
html (str): The HTML code to escape
convert_new_lines (:obj:`bool`, optional): escape new lines (\n)
whitespace (:obj:`bool`, optional): escape whitespace
Returns:
str: The escaped html
|
@staticmethod
def escape_html(html, convert_new_lines=False, whitespace=True):
"""
Takes html text as input and escapes it so that it won't
conflict with any xml/html wrapping characters.
Args:
html (str): The HTML code to escape
convert_new_lines (:obj:`bool`, optional): escape new lines (\n)
whitespace (:obj:`bool`, optional): escape whitespace
Returns:
str: The escaped html
"""
if not isinstance(html, str) or not html:
return ''
# Escape HTML
escaped = sax_escape(html, {"'": "'", "\"": """})
if whitespace:
# Tidy up whitespace too
escaped = escaped\
.replace(u'\t', u' ')\
.replace(u' ', u' ')
if convert_new_lines:
return escaped.replace(u'\n', u'<br/>')
return escaped
|
(html, convert_new_lines=False, whitespace=True)
|
721,967 |
apprise.attachment.base
|
exists
|
Simply returns true if the object has downloaded and stored the
attachment AND the attachment has not expired.
|
def exists(self, retrieve_if_missing=True):
    """
    Returns True when the attachment has already been downloaded, is
    still present on disk, and has not expired from the cache.
    """
    # Resolve our effective cache setting, falling back on the default
    cache = self.template_args['cache']['default'] \
        if self.cache is None else self.cache

    if cache and self.download_path \
            and os.path.isfile(self.download_path):
        if cache is True:
            # Persistent caching; the stored copy never expires
            return True

        # Otherwise cache holds a maximum age (in seconds) to honour
        try:
            age = time.time() - os.stat(self.download_path).st_mtime
            if age <= cache:
                return True

        except OSError:
            # The file vanished from under us
            pass

    # Content is missing or expired; optionally (re)fetch it
    return self.download() if retrieve_if_missing else False
|
(self, retrieve_if_missing=True)
|
721,968 |
apprise.url
|
http_response_code_lookup
|
Parses the integer response code returned by a remote call from
a web request into its human readable string version.
You can over-ride codes or add new ones by providing your own
response_mask that contains a dictionary of integer -> string mapped
variables
|
@staticmethod
def http_response_code_lookup(code, response_mask=None):
    """Convert an integer HTTP response code into its human readable
    string form.

    Codes can be over-ridden (or new ones added) by providing a
    response_mask dictionary of integer -> string mappings.

    Args:
        code (int): the response code to look up.
        response_mask (dict, optional): integer -> string over-rides.

    Returns:
        str: the human readable response; an empty string if unknown.
    """
    lookup = HTML_LOOKUP
    if isinstance(response_mask, dict):
        # Merge the over-rides into a local copy; previously the shared
        # module-level HTML_LOOKUP was mutated in place, leaking one
        # caller's over-rides into every subsequent lookup
        lookup = {**HTML_LOOKUP, **response_mask}

    # Unknown codes resolve to an empty string
    return lookup.get(code, '')
|
(code, response_mask=None)
|
721,969 |
apprise.attachment.base
|
invalidate
|
Release any temporary data that may be open by child classes.
Externally fetched content should be automatically cleaned up when
this function is called.
This function should also reset the following entries to None:
- detected_name : Should identify a human readable filename
- download_path: Must contain a absolute path to content
- detected_mimetype: Should identify mimetype of content
|
def invalidate(self):
    """
    Drops any previously detected/downloaded state so that content will
    be fetched fresh on the next request.

    Child classes extending this should also release any temporary data
    (e.g. externally fetched files) when it is called.

    Resets to None:
       - detected_name: the human readable filename
       - download_path: the absolute path to the content
       - detected_mimetype: the mimetype of the content
    """
    self.detected_name = None
    self.detected_mimetype = None
    self.download_path = None
    return
|
(self)
|
721,970 |
apprise.url
|
parse_list
|
A wrapper to utils.parse_list() with unquoting support
Parses a specified set of data and breaks it into a list.
Args:
content (str): The path to split up into a list. If a list is
provided, then it's individual entries are processed.
allow_whitespace (:obj:`bool`, optional): whitespace is to be
treated as a delimiter
unquote (:obj:`bool`, optional): call unquote on each element
added to the returned list.
Returns:
list: A unique list containing all of the elements in the path
|
@staticmethod
def parse_list(content, allow_whitespace=True, unquote=True):
    """A wrapper to utils.parse_list() with optional unquoting support.

    Args:
        content (str): The path to split up into a list. If a list is
            provided, then its individual entries are processed.
        allow_whitespace (:obj:`bool`, optional): treat whitespace as a
            delimiter.
        unquote (:obj:`bool`, optional): call unquote on each element
            added to the returned list.

    Returns:
        list: A unique list containing all of the elements in the path
    """
    entries = parse_list(content, allow_whitespace=allow_whitespace)
    if not unquote:
        return entries

    # unquote each non-empty element
    return [URLBase.unquote(entry) for entry in entries if entry]
|
(content, allow_whitespace=True, unquote=True)
|
721,971 |
apprise.url
|
parse_phone_no
|
A wrapper to utils.parse_phone_no() with unquoting support
Parses a specified set of data and breaks it into a list.
Args:
content (str): The path to split up into a list. If a list is
provided, then it's individual entries are processed.
unquote (:obj:`bool`, optional): call unquote on each element
added to the returned list.
Returns:
list: A unique list containing all of the elements in the path
|
@staticmethod
def parse_phone_no(content, unquote=True):
    """A wrapper to utils.parse_phone_no() with optional unquoting.

    Args:
        content (str): The path to split up into a list. If a list is
            provided, then its individual entries are processed.
        unquote (:obj:`bool`, optional): call unquote on the content
            before it is parsed.

    Returns:
        list: A unique list containing all of the elements in the path
    """
    if unquote:
        try:
            content = URLBase.unquote(content)

        except TypeError:
            # Unquotable input; there is nothing to parse
            return []

    return parse_phone_no(content)
|
(content, unquote=True)
|
721,972 |
apprise.attachment.base
|
parse_url
|
Parses the URL and returns it broken apart into a dictionary.
This is very specific and customized for Apprise.
Args:
url (str): The URL you want to fully parse.
verify_host (:obj:`bool`, optional): a flag kept with the parsed
URL which some child classes will later use to verify SSL
keys (if SSL transactions take place). Unless under very
specific circumstances, it is strongly recomended that
you leave this default value set to True.
Returns:
A dictionary is returned containing the URL fully parsed if
successful, otherwise None is returned.
|
@staticmethod
def parse_url(url, verify_host=True, mimetype_db=None, sanitize=True):
    """Parses the URL and returns it broken apart into a dictionary.

    This is very specific and customized for Apprise attachments.

    Args:
        url (str): The URL you want to fully parse.
        verify_host (:obj:`bool`, optional): a flag kept with the parsed
            URL which some child classes will later use to verify SSL
            keys (if SSL transactions take place). It is strongly
            recommended to leave this set to True.

    Returns:
        A dictionary containing the fully parsed URL on success,
        otherwise None.
    """
    results = URLBase.parse_url(
        url, verify_host=verify_host, sanitize=sanitize)

    if not results:
        # Parsing failed; pass the failure straight through
        return results

    qsd = results['qsd']

    # Allow overriding the default mime type via ?mime=
    if 'mime' in qsd:
        results['mimetype'] = qsd.get('mime', '').strip().lower()

    # Allow overriding the default file name via ?name=
    if 'name' in qsd:
        results['name'] = qsd.get('name', '').strip().lower()

    # Our cache value: prefer an integer, otherwise treat it as a bool
    if 'cache' in qsd:
        try:
            results['cache'] = int(qsd['cache'])

        except (ValueError, TypeError):
            results['cache'] = parse_bool(qsd['cache'])

    return results
|
(url, verify_host=True, mimetype_db=None, sanitize=True)
|
721,973 |
apprise.url
|
post_process_parse_url_results
|
After parsing the URL, this function applies a bit of extra logic to
support extra entries like `pass` becoming `password`, etc
This function assumes that parse_url() was called previously setting
up the basics to be checked
|
@staticmethod
def post_process_parse_url_results(results):
    """
    After parsing the URL, this function applies a bit of extra logic to
    support extra entries like `pass` becoming `password`, etc

    This function assumes that parse_url() was called previously setting
    up the basics to be checked

    Args:
        results (dict): the dictionary produced by parse_url().

    Returns:
        dict: the same dictionary, updated in place.
    """
    # if our URL ends with an 's', then assume our secure flag is set.
    results['secure'] = (results['schema'][-1] == 's')

    # QSD Checking (over-rides all)
    qsd_exists = True if isinstance(results.get('qsd'), dict) else False

    if qsd_exists and 'verify' in results['qsd']:
        # Pulled from URL String (?verify=...); highest precedence
        results['verify'] = parse_bool(
            results['qsd'].get('verify', True))

    elif 'verify' in results:
        # Pulled from YAML Configuration
        results['verify'] = parse_bool(results.get('verify', True))

    else:
        # Support SSL Certificate 'verify' keyword. Default to being
        # enabled
        results['verify'] = True

    # Password overrides: a top-level 'pass' entry is renamed 'password'
    if 'pass' in results:
        results['password'] = results['pass']
        del results['pass']

    if qsd_exists:
        # Query-string password over-rides; note ?pass= wins over
        # ?password= because it is applied second
        if 'password' in results['qsd']:
            results['password'] = results['qsd']['password']
        if 'pass' in results['qsd']:
            results['password'] = results['qsd']['pass']

        # User overrides
        if 'user' in results['qsd']:
            results['user'] = results['qsd']['user']

        # parse_url() always creates a 'password' and 'user' entry in the
        # results returned. Entries are set to None if they weren't
        # specified
        if results['password'] is None and 'user' in results['qsd']:
            # Handle cases where the user= provided in 2 locations, we want
            # the original to fall back as a being a password (if one
            # wasn't otherwise defined) e.g.
            # mailtos://PASSWORD@hostname?user=admin@mail-domain.com
            # - in the above, the PASSWORD gets lost in the parse url()
            # since a user= over-ride is specified.
            presults = parse_url(results['url'])
            if presults:
                # Store our Password
                results['password'] = presults['user']

        # Store our socket read timeout if specified
        if 'rto' in results['qsd']:
            results['rto'] = results['qsd']['rto']

        # Store our socket connect timeout if specified
        if 'cto' in results['qsd']:
            results['cto'] = results['qsd']['cto']

        # Port over-ride from the query string
        if 'port' in results['qsd']:
            results['port'] = results['qsd']['port']

    return results
|
(results)
|
721,974 |
apprise.url
|
pprint
|
Privacy Print is used to mainpulate the string before passing it into
part of the URL. It is used to mask/hide private details such as
tokens, passwords, apikeys, etc from on-lookers. If the privacy=False
is set, then the quote variable is the next flag checked.
Quoting is never done if the privacy flag is set to true to avoid
skewing the expected output.
|
@staticmethod
def pprint(content, privacy=True, mode=PrivacyMode.Outer,
           # privacy print; quoting is ignored when privacy is set to True
           quote=True, safe='/', encoding=None, errors=None):
    """
    Privacy Print: masks/hides private details (tokens, passwords,
    apikeys, etc) from on-lookers before the content is placed into a
    URL. When privacy=False, the quote flag decides whether the content
    is URL-quoted instead; quoting is never applied to masked output so
    the expected result is not skewed.
    """
    if not privacy:
        # No masking requested; optionally quote the content
        if not quote:
            return content
        return URLBase.quote(
            content, safe=safe, encoding=encoding, errors=errors)

    if mode is PrivacyMode.Secret:
        # Fully masked: four asterisks, regardless of content
        return '****'

    if not isinstance(content, str) or not content:
        # Nothing more to do
        return ''

    if mode is PrivacyMode.Tail:
        # Reveal only the trailing 4 characters
        return '...{}'.format(content[-4:])

    # PrivacyMode.Outer (default): first and last characters only
    return '{}...{}'.format(content[0:1], content[-1:])
|
(content, privacy=True, mode='o', quote=True, safe='/', encoding=None, errors=None)
|
721,975 |
apprise.url
|
quote
|
Replaces single character non-ascii characters and URI specific
ones by their %xx code.
Wrapper to Python's `quote` while remaining compatible with both
Python 2 & 3 since the reference to this function changed between
versions.
Args:
content (str): The URI string you wish to quote
safe (str): non-ascii characters and URI specific ones that you
do not wish to escape (if detected). Setting this
string to an empty one causes everything to be
escaped.
encoding (:obj:`str`, optional): encoding type
errors (:obj:`str`, errors): how to handle invalid character found
in encoded string (defined by encoding)
Returns:
str: The quoted URI string
|
@staticmethod
def quote(content, safe='/', encoding=None, errors=None):
""" Replaces single character non-ascii characters and URI specific
ones by their %xx code.
Wrapper to Python's `quote` while remaining compatible with both
Python 2 & 3 since the reference to this function changed between
versions.
Args:
content (str): The URI string you wish to quote
safe (str): non-ascii characters and URI specific ones that you
do not wish to escape (if detected). Setting this
string to an empty one causes everything to be
escaped.
encoding (:obj:`str`, optional): encoding type
errors (:obj:`str`, errors): how to handle invalid character found
in encoded string (defined by encoding)
Returns:
str: The quoted URI string
"""
if not content:
return ''
return _quote(content, safe=safe, encoding=encoding, errors=errors)
|
(content, safe='/', encoding=None, errors=None)
|
721,976 |
apprise.url
|
schemas
|
A simple function that returns a set of all schemas associated
with this object based on the object.protocol and
object.secure_protocol
|
def schemas(self):
    """Returns the set of every schema this object can be addressed by,
    based on its `protocol` and `secure_protocol` attributes (each of
    which may be a string or an iterable of strings).
    """
    found = set()
    for attr in ('protocol', 'secure_protocol'):
        value = getattr(self, attr, None)
        if isinstance(value, str):
            found.add(value)

        elif isinstance(value, (set, list, tuple)):
            # Keep only the string entries of the iterable
            found.update(
                entry for entry in value if isinstance(entry, str))

    return found
|
(self)
|
721,977 |
apprise.url
|
split_path
|
Splits a URL up into a list object.
Parses a specified URL and breaks it into a list.
Args:
path (str): The path to split up into a list.
unquote (:obj:`bool`, optional): call unquote on each element
added to the returned list.
Returns:
list: A list containing all of the elements in the path
|
@staticmethod
def split_path(path, unquote=True):
    """Splits a URL path up into a list of its components.

    Args:
        path (str): The path to split up into a list.
        unquote (:obj:`bool`, optional): call unquote on each element
            added to the returned list.

    Returns:
        list: the elements of the path (empty on unusable input)
    """
    try:
        tokens = PATHSPLIT_LIST_DELIM.split(path.lstrip('/'))

    except AttributeError:
        # path is not useable; gracefully return an empty list
        return []

    if not unquote:
        return tokens

    # unquote each non-empty element
    return [URLBase.unquote(token) for token in tokens if token]
|
(path, unquote=True)
|
721,978 |
apprise.url
|
throttle
|
A common throttle control
if a wait is specified, then it will force a sleep of the
specified time if it is larger then the calculated throttle
time.
|
def throttle(self, last_io=None, wait=None):
    """
    A common throttle control; sleeps as needed so successive i/o
    operations honour request_rate_per_sec. If wait is specified, a
    sleep of that duration is forced when it exceeds the calculated
    throttle time.
    """
    if last_io is not None:
        # Caller supplied the timestamp of the last i/o event
        self._last_io_datetime = last_io

    # Our reference point of 'now'
    now = datetime.now()

    if self._last_io_datetime is None:
        # First call; just record the reference time - no need to wait
        self._last_io_datetime = now
        return

    if self.request_rate_per_sec <= 0.0 and not wait:
        # Throttling is disabled; nothing to do
        return

    # Seconds elapsed since our last recorded i/o operation; if it is
    # less than request_rate_per_sec we sleep off the balance
    elapsed = (now - self._last_io_datetime).total_seconds()

    if wait is not None:
        self.logger.debug('Throttling forced for {}s...'.format(wait))
        time.sleep(wait)

    elif elapsed < self.request_rate_per_sec:
        remaining = self.request_rate_per_sec - elapsed
        self.logger.debug('Throttling for {}s...'.format(remaining))
        time.sleep(remaining)

    # Update our timestamp before we leave
    self._last_io_datetime = datetime.now()
    return
|
(self, last_io=None, wait=None)
|
721,979 |
apprise.url
|
unquote
|
Replace %xx escapes by their single-character equivalent. The optional
encoding and errors parameters specify how to decode percent-encoded
sequences.
Wrapper to Python's `unquote` while remaining compatible with both
Python 2 & 3 since the reference to this function changed between
versions.
Note: errors set to 'replace' means that invalid sequences are
replaced by a placeholder character.
Args:
content (str): The quoted URI string you wish to unquote
encoding (:obj:`str`, optional): encoding type
errors (:obj:`str`, errors): how to handle invalid character found
in encoded string (defined by encoding)
Returns:
str: The unquoted URI string
|
@staticmethod
def unquote(content, encoding='utf-8', errors='replace'):
"""
Replace %xx escapes by their single-character equivalent. The optional
encoding and errors parameters specify how to decode percent-encoded
sequences.
Wrapper to Python's `unquote` while remaining compatible with both
Python 2 & 3 since the reference to this function changed between
versions.
Note: errors set to 'replace' means that invalid sequences are
replaced by a placeholder character.
Args:
content (str): The quoted URI string you wish to unquote
encoding (:obj:`str`, optional): encoding type
errors (:obj:`str`, errors): how to handle invalid character found
in encoded string (defined by encoding)
Returns:
str: The unquoted URI string
"""
if not content:
return ''
return _unquote(content, encoding=encoding, errors=errors)
|
(content, encoding='utf-8', errors='replace')
|
721,980 |
apprise.url
|
url
|
Assembles the URL associated with the notification based on the
arguments provied.
|
def url(self, privacy=False, *args, **kwargs):
    """
    Assembles and returns the URL associated with this object; private
    details are masked when privacy=True.
    """
    # Our default URL parameters
    params = self.url_parameters(privacy=privacy, *args, **kwargs)

    # Build the 'user[:password]@' authentication prefix (if any)
    auth = ''
    if self.user:
        auth = URLBase.quote(self.user, safe='')
        if self.password:
            auth += ':' + self.pprint(
                self.password, privacy, mode=PrivacyMode.Secret, safe='')
        auth += '@'

    schema = 'https' if self.secure else 'http'

    # Suppress the port when it matches the schema's default
    default_port = 443 if self.secure else 80
    port = '' if self.port is None or self.port == default_port \
        else ':{}'.format(self.port)

    # never encode hostname since we're expecting it to be a valid one
    fullpath = URLBase.quote(self.fullpath, safe='/') \
        if self.fullpath else '/'

    return '{}://{}{}{}{}?{}'.format(
        schema, auth, self.host, port, fullpath,
        URLBase.urlencode(params))
|
(self, privacy=False, *args, **kwargs)
|
721,981 |
apprise.url
|
url_parameters
|
Provides a default set of args to work with. This can greatly
simplify URL construction in the acommpanied url() function.
The following property returns a dictionary (of strings) containing
all of the parameters that can be set on a URL and managed through
this class.
|
def url_parameters(self, *args, **kwargs):
    """
    Returns a dictionary (of strings) of the default parameters every
    managed URL carries; this greatly simplifies URL construction in
    the accompanying url() function.
    """
    # NOTE: key order is preserved deliberately; it determines the
    # order of the parameters in the generated query string
    return {
        # The socket read timeout
        'rto': str(self.socket_read_timeout),
        # The request/socket connect timeout
        'cto': str(self.socket_connect_timeout),
        # Certificate verification
        'verify': 'yes' if self.verify_certificate else 'no',
    }
|
(self, *args, **kwargs)
|
721,982 |
apprise.url
|
urlencode
|
Convert a mapping object or a sequence of two-element tuples
Wrapper to Python's `urlencode` while remaining compatible with both
Python 2 & 3 since the reference to this function changed between
versions.
The resulting string is a series of key=value pairs separated by '&'
characters, where both key and value are quoted using the quote()
function.
Note: If the dictionary entry contains an entry that is set to None
it is not included in the final result set. If you want to
pass in an empty variable, set it to an empty string.
Args:
query (str): The dictionary to encode
doseq (:obj:`bool`, optional): Handle sequences
safe (:obj:`str`): non-ascii characters and URI specific ones that
you do not wish to escape (if detected). Setting this string
to an empty one causes everything to be escaped.
encoding (:obj:`str`, optional): encoding type
errors (:obj:`str`, errors): how to handle invalid character found
in encoded string (defined by encoding)
Returns:
str: The escaped parameters returned as a string
|
@staticmethod
def urlencode(query, doseq=False, safe='', encoding=None, errors=None):
    """Convert a mapping object or a sequence of two-element tuples into
    a series of key=value pairs separated by '&' characters, with both
    key and value quoted.

    Thin wrapper around the module-level urlencode utility. Dictionary
    entries whose value is None are not included in the result; pass an
    empty string if you want an empty variable to appear.

    Args:
        query (str): The dictionary to encode
        doseq (:obj:`bool`, optional): Handle sequences
        safe (:obj:`str`): characters that should NOT be escaped (set
            this to an empty string to escape everything).
        encoding (:obj:`str`, optional): encoding type
        errors (:obj:`str`, optional): how to handle invalid characters
            (as defined by the encoding)

    Returns:
        str: The escaped parameters returned as a string
    """
    # Delegate to the (None-filtering) urlencode helper in scope
    return urlencode(
        query, doseq=doseq, safe=safe, encoding=encoding, errors=errors)
|
(query, doseq=False, safe='', encoding=None, errors=None)
|
721,983 |
apprise.config.base
|
ConfigBase
|
This is the base class for all supported configuration sources
|
class ConfigBase(URLBase):
"""
This is the base class for all supported configuration sources
"""
# The Default Encoding to use if not otherwise detected
encoding = 'utf-8'
# The default expected configuration format unless otherwise
# detected by the sub-modules
default_config_format = common.ConfigFormat.TEXT
# This is only set if the user overrides the config format on the URL
# this should always initialize itself as None
config_format = None
# Don't read any more of this amount of data into memory as there is no
# reason we should be reading in more. This is more of a safe guard then
# anything else. 128KB (131072B)
max_buffer_size = 131072
# By default all configuration is not includable using the 'include'
# line found in configuration files.
allow_cross_includes = common.ContentIncludeMode.NEVER
# the config path manages the handling of relative include
config_path = os.getcwd()
def __init__(self, cache=True, recursion=0, insecure_includes=False,
             **kwargs):
    """
    Base initialization for all configuration sources.

    cache: True/False to enable/disable caching of the fetched content,
        or a positive integer identifying the number of seconds a
        previously retrieved copy may live before being considered
        expired. Only disable caching if you understand that remote
        content may then be fetched more than once.

    recursion: how many levels deep `include` entries are honoured when
        compiling configuration from another source. Zero (the default)
        disables includes entirely; there is no upper limit, but keep
        it low if you intend to use it.

    insecure_includes: when True, configurations marked STRICT for
        cross-inclusion are treated as if they were ALWAYS. For example,
        an http:// based configuration could then include a file://
        based one - something that would otherwise fail. This is also
        required when loading in-memory (string) configuration whose
        'include' entries (even file:// based ones) should be honoured.

    kwargs may additionally carry 'encoding' and 'format' over-rides.

    Raises:
        TypeError: on an invalid config format or cache value.
    """
    super().__init__(**kwargs)

    # Tracks the time the content was last retrieved on. This plays a
    # role for cases where we are not caching our response and are
    # required to re-retrieve our settings.
    self._cached_time = None

    # Tracks previously loaded content for speed
    self._cached_servers = None

    # Initialize our recursion value
    self.recursion = recursion

    # Initialize our insecure_includes flag
    self.insecure_includes = insecure_includes

    if 'encoding' in kwargs:
        # Store the encoding
        self.encoding = kwargs.get('encoding')

    if 'format' in kwargs \
            and isinstance(kwargs['format'], str):
        # Store the enforced config format
        self.config_format = kwargs.get('format').lower()

        if self.config_format not in common.CONFIG_FORMATS:
            # Simple error checking
            err = 'An invalid config format ({}) was specified.'.format(
                self.config_format)
            self.logger.warning(err)
            raise TypeError(err)

    # Set our cache flag; it can be True or a (positive) integer
    try:
        self.cache = cache if isinstance(cache, bool) else int(cache)

    except (ValueError, TypeError):
        err = 'An invalid cache value ({}) was specified.'.format(cache)
        self.logger.warning(err)
        raise TypeError(err)

    # Check for negatives AFTER the try/except block; previously this
    # check lived inside the try, so its TypeError was caught by our own
    # handler, logged twice, and re-raised with the (wrong) generic
    # 'invalid cache value' message
    if self.cache < 0:
        err = 'A negative cache value ({}) was specified.'.format(
            cache)
        self.logger.warning(err)
        raise TypeError(err)

    return
def servers(self, asset=None, **kwargs):
    """
    Performs reads loaded configuration and returns all of the services
    that could be parsed and loaded.

    Results are cached: while expired() reports the cache as fresh, the
    previously built list is returned untouched.  Any `include` entries
    found in the parsed configuration are recursively loaded as long as
    self.recursion has not been exhausted.

    :param asset: optional AppriseAsset to associate with each loaded
        service; defaults to the asset already assigned to this object.
    :param kwargs: passed straight through to the child class read().
    :returns: list of instantiated notification plugin objects.
    """
    if not self.expired():
        # We already have cached results to return; use them
        return self._cached_servers

    # Our cached response object
    self._cached_servers = list()

    # read() causes the child class to do whatever it takes for the
    # config plugin to load the data source and return unparsed content
    # None is returned if there was an error or simply no data
    content = self.read(**kwargs)
    if not isinstance(content, str):
        # Set the time our content was cached at
        self._cached_time = time.time()

        # Nothing more to do; return our empty cache list
        return self._cached_servers

    # Our Configuration format uses a default if one wasn't one detected
    # or enforced.
    config_format = \
        self.default_config_format \
        if self.config_format is None else self.config_format

    # Dynamically load our parse_ function based on our config format
    fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format))

    # Initialize our asset object
    asset = asset if isinstance(asset, AppriseAsset) else self.asset

    # Execute our config parse function which always returns a tuple
    # of our servers and our configuration
    servers, configs = fn(content=content, asset=asset)
    self._cached_servers.extend(servers)

    # Configuration files were detected; recursively populate them
    # If we have been configured to do so
    for url in configs:

        if self.recursion > 0:
            # Attempt to acquire the schema at the very least to allow
            # our configuration based urls.
            schema = GET_SCHEMA_RE.match(url)
            if schema is None:
                # Plan B is to assume we're dealing with a file
                schema = 'file'
                if not os.path.isabs(url):
                    # We're dealing with a relative path; prepend
                    # our current config path
                    url = os.path.join(self.config_path, url)

                url = '{}://{}'.format(schema, URLBase.quote(url))

            else:
                # Ensure our schema is always in lower case
                schema = schema.group('schema').lower()

                # Some basic validation
                if schema not in C_MGR:
                    ConfigBase.logger.warning(
                        'Unsupported include schema {}.'.format(schema))
                    continue

            # CWE-312 (Secure Logging) Handling
            loggable_url = url if not asset.secure_logging \
                else cwe312_url(url)

            # Parse our url details of the server object as dictionary
            # containing all of the information parsed from our URL
            results = C_MGR[schema].parse_url(url)
            if not results:
                # Failed to parse the server URL
                self.logger.warning(
                    'Unparseable include URL {}'.format(loggable_url))
                continue

            # Handle cross inclusion based on allow_cross_includes rules
            if (C_MGR[schema].allow_cross_includes ==
                    common.ContentIncludeMode.STRICT
                    and schema not in self.schemas()
                    and not self.insecure_includes) or C_MGR[schema] \
                    .allow_cross_includes == \
                    common.ContentIncludeMode.NEVER:

                # Prevent the loading if insecure base protocols
                ConfigBase.logger.warning(
                    'Including {}:// based configuration is prohibited. '
                    'Ignoring URL {}'.format(schema, loggable_url))
                continue

            # Prepare our Asset Object
            results['asset'] = asset

            # No cache is required because we're just lumping this in
            # and associating it with the cache value we've already
            # declared (prior to our recursion)
            results['cache'] = False

            # Recursion can never be parsed from the URL; we decrement
            # it one level
            results['recursion'] = self.recursion - 1

            # Insecure Includes flag can never be parsed from the URL
            results['insecure_includes'] = self.insecure_includes

            try:
                # Attempt to create an instance of our plugin using the
                # parsed URL information
                cfg_plugin = C_MGR[results['schema']](**results)

            except Exception as e:
                # the arguments are invalid or can not be used.
                self.logger.warning(
                    'Could not load include URL: {}'.format(loggable_url))
                self.logger.debug('Loading Exception: {}'.format(str(e)))
                continue

            # if we reach here, we can now add this servers found
            # in this configuration file to our list
            self._cached_servers.extend(
                cfg_plugin.servers(asset=asset))

            # We no longer need our configuration object
            del cfg_plugin

        else:
            # CWE-312 (Secure Logging) Handling
            loggable_url = url if not asset.secure_logging \
                else cwe312_url(url)

            self.logger.debug(
                'Recursion limit reached; ignoring Include URL: %s',
                loggable_url)

    if self._cached_servers:
        self.logger.info(
            'Loaded {} entries from {}'.format(
                len(self._cached_servers),
                self.url(privacy=asset.secure_logging)))
    else:
        self.logger.warning(
            'Failed to load Apprise configuration from {}'.format(
                self.url(privacy=asset.secure_logging)))

    # Set the time our content was cached at
    self._cached_time = time.time()

    return self._cached_servers
def read(self):
    """
    Stub intended to be implemented by child classes.

    Concrete configuration sources must override this and return the
    raw (unparsed) configuration content as a string, or None when the
    content could not be retrieved.
    """
    # The base class has no data source of its own.
    return None
def expired(self):
    """
    Returns True when the cached configuration should be treated as
    stale (or was never loaded), and False when the cached content may
    still be served as-is.
    """
    # With no cached server list (or caching disabled outright) the
    # content must always be (re-)fetched.
    if not isinstance(self._cached_servers, list) or not self.cache:
        return True

    if self.cache is True:
        # Indefinite caching was requested; we never expire.
        return False

    # Otherwise self.cache holds a lifetime in seconds; compare it
    # against the age of our cached snapshot.
    return (time.time() - self._cached_time) > self.cache
@staticmethod
def __normalize_tag_groups(group_tags):
    """
    Used to normalize a tag assign map which looks like:
      {
         'group': set('{tag1}', '{group1}', '{tag2}'),
         'group1': set('{tag2}','{tag3}'),
      }

    Then normalizes it (merging groups); with respect to the above, the
    output would be:
      {
         'group': set('{tag1}', '{tag2}', '{tag3}'),
         'group1': set('{tag2}','{tag3}'),
      }

    The map is modified in-place; nothing is returned.  Groups that end
    up with no tags at all are dropped (with a warning).
    """
    # Prepare a key set list we can use
    tag_groups = set([str(x) for x in group_tags.keys()])

    def _expand(tags, ignore=None):
        """
        Expands based on tag provided and returns a set

        this also updates the group_tags while it goes
        """
        # Prepare ourselves a return set
        results = set()
        # `ignore` guards against infinite recursion when groups
        # reference each other in a cycle
        ignore = set() if ignore is None else ignore

        # track groups
        groups = set()

        for tag in tags:
            if tag in ignore:
                continue

            # Track our groups
            groups.add(tag)

            # Store what we know is worth keeping
            if tag not in group_tags:  # pragma: no cover
                # handle cases where the tag doesn't exist
                group_tags[tag] = set()

            # plain (non-group) tags are kept directly
            results |= group_tags[tag] - tag_groups

            # Get simple tag assignments
            found = group_tags[tag] & tag_groups
            if not found:
                continue

            for gtag in found:
                if gtag in ignore:
                    continue

                # Go deeper (recursion)
                ignore.add(tag)
                group_tags[gtag] = _expand(set([gtag]), ignore=ignore)
                results |= group_tags[gtag]

                # Pop ignore
                ignore.remove(tag)

        return results

    for tag in tag_groups:
        # Get our tags
        group_tags[tag] |= _expand(set([tag]))
        if not group_tags[tag]:
            ConfigBase.logger.warning(
                'The group {} has no tags assigned to it'.format(tag))
            del group_tags[tag]
@staticmethod
def parse_url(url, verify_host=True):
    """Parses the URL and returns it broken apart into a dictionary.

    This is very specific and customized for Apprise.

    Args:
        url (str): The URL you want to fully parse.
        verify_host (:obj:`bool`, optional): a flag kept with the parsed
            URL which some child classes will later use to verify SSL
            keys (if SSL transactions take place). Unless under very
            specific circumstances, it is strongly recommended that
            you leave this default value set to True.

    Returns:
        A dictionary containing the URL fully parsed on success,
        otherwise None.
    """
    parsed = URLBase.parse_url(url, verify_host=verify_host)

    if not parsed:
        # We're done; we failed to parse our url
        return parsed

    # Short-hand reference to our parsed query string arguments
    qsd = parsed['qsd']

    # Allow overriding the default config format
    if 'format' in qsd:
        parsed['format'] = qsd.get('format')
        if parsed['format'] not in common.CONFIG_FORMATS:
            URLBase.logger.warning(
                'Unsupported format specified {}'.format(
                    parsed['format']))
            # Unknown format; discard the override entirely
            del parsed['format']

    # Defines the encoding of the payload
    if 'encoding' in qsd:
        parsed['encoding'] = qsd.get('encoding')

    # Our cache value
    if 'cache' in qsd:
        try:
            # Attempt an integer interpretation first (seconds)
            parsed['cache'] = int(qsd['cache'])

        except (ValueError, TypeError):
            # Not an integer; fall back to a boolean interpretation
            parsed['cache'] = parse_bool(qsd['cache'])

    return parsed
@staticmethod
def detect_config_format(content, **kwargs):
    """
    Inspects the specified content and attempts to detect whether it is
    TEXT or YAML based Apprise configuration.

    Returns the detected format identifier on success, otherwise None
    when the content could not be classified.
    """
    # Detection logic:
    # - A pound/hashtag (#) is always a comment character so lines
    #   matched here are skipped over.
    # - Detection begins on the first non-comment and non blank line.
    # - A bare word followed by a colon means we're dealing with YAML.
    # - A URL (optionally preceded by tag definitions, accepting
    #   commas, and an equal sign) means we're dealing with TEXT.

    # Define what a valid line should look like
    valid_line_re = re.compile(
        r'^\s*(?P<line>([;#]+(?P<comment>.*))|'
        r'(?P<text>((?P<tag>[ \t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|'
        r'((?P<yaml>[a-z0-9]+):.*))?$', re.I)

    try:
        # split our content up to read line by line
        rows = re.split(r'\r*\n', content)

    except TypeError:
        # content was not expected string type
        ConfigBase.logger.error(
            'Invalid Apprise configuration specified.')
        return None

    # What we fall back to if only comments/blank lines are seen
    detected = None

    # iterate over each line of the file; stop the moment the type
    # has been determined
    for lineno, row in enumerate(rows, start=1):
        outcome = valid_line_re.match(row)
        if not outcome:
            # Invalid syntax
            ConfigBase.logger.error(
                'Undetectable Apprise configuration found '
                'based on line {}.'.format(lineno))
            # Take an early exit
            return None

        if outcome.group('yaml'):
            ConfigBase.logger.debug(
                'Detected YAML configuration '
                'based on line {}.'.format(lineno))
            return common.ConfigFormat.YAML

        if outcome.group('text'):
            ConfigBase.logger.debug(
                'Detected TEXT configuration '
                'based on line {}.'.format(lineno))
            return common.ConfigFormat.TEXT

        # If we reach here, we have a comment entry
        # Adjust default format to TEXT
        detected = common.ConfigFormat.TEXT

    return detected
@staticmethod
def config_parse(content, asset=None, config_format=None, **kwargs):
    """
    Loads the specified config content using the given config_format.

    When no format is specified, it is auto detected from the content
    itself.  Always returns a (servers, configs) tuple; both entries
    are empty lists on failure.
    """
    if config_format is None:
        # No format hint supplied; detect it from the content
        config_format = ConfigBase.detect_config_format(content)
        if not config_format:
            # We couldn't detect configuration
            ConfigBase.logger.error('Could not detect configuration')
            return (list(), list())

    if config_format not in common.CONFIG_FORMATS:
        # Invalid configuration type specified
        ConfigBase.logger.error(
            'An invalid configuration format ({}) was specified'.format(
                config_format))
        return (list(), list())

    # Hand off to the format-specific parser (config_parse_text or
    # config_parse_yaml); each always returns a (servers, configs) tuple
    parser = getattr(ConfigBase, 'config_parse_{}'.format(config_format))
    return parser(content=content, asset=asset)
@staticmethod
def config_parse_text(content, asset=None):
    """
    Parse the specified content as though it were a simple text file only
    containing a list of URLs.

    Return a tuple that looks like (servers, configs) where:
      - servers contains a list of loaded notification plugins
      - configs contains a list of additional configuration files
        referenced.

    You may also optionally associate an asset with the notification.

    The file syntax is:

        #
        # pound/hashtag allow for line comments
        #
        # One or more tags can be identified using comma's (,) to
        # separate them.
        <Tag(s)>=<URL>

        # Or you can use this format (no tags associated)
        <URL>

        # you can also use the keyword 'include' and identify a
        # configuration location (like this file) which will be included
        # as additional configuration entries when loaded.
        include <ConfigURL>

        # Assign tag contents to a group identifier
        <Group(s)>=<Tag(s)>

    """
    # A list of loaded Notification Services
    servers = list()

    # A list of additional configuration files referenced using
    # the include keyword
    configs = list()

    # Track all of the tags we want to assign later on
    group_tags = {}

    # Track our entries to preload
    preloaded = []

    # Prepare our Asset Object
    asset = asset if isinstance(asset, AppriseAsset) else AppriseAsset()

    # Define what a valid line should look like
    valid_line_re = re.compile(
        r'^\s*(?P<line>([;#]+(?P<comment>.*))|'
        r'(\s*(?P<tags>[a-z0-9, \t_-]+)\s*=|=)?\s*'
        r'((?P<url>[a-z0-9]{1,12}://.*)|(?P<assign>[a-z0-9, \t_-]+))|'
        r'include\s+(?P<config>.+))?\s*$', re.I)

    try:
        # split our content up to read line by line
        content = re.split(r'\r*\n', content)

    except TypeError:
        # content was not expected string type
        ConfigBase.logger.error(
            'Invalid Apprise TEXT based configuration specified.')
        return (list(), list())

    for line, entry in enumerate(content, start=1):
        result = valid_line_re.match(entry)
        if not result:
            # Invalid syntax
            ConfigBase.logger.error(
                'Invalid Apprise TEXT configuration format found '
                '{} on line {}.'.format(entry, line))

            # Assume this is a file we shouldn't be parsing. It's owner
            # can read the error printed to screen and take action
            # otherwise.
            return (list(), list())

        # Retrieve our line
        url, assign, config = \
            result.group('url'), \
            result.group('assign'), \
            result.group('config')

        if not (url or config or assign):
            # Comment/empty line; do nothing
            continue

        if config:
            # CWE-312 (Secure Logging) Handling
            loggable_url = config if not asset.secure_logging \
                else cwe312_url(config)

            ConfigBase.logger.debug(
                'Include URL: {}'.format(loggable_url))

            # Store our include line
            configs.append(config.strip())
            continue

        # CWE-312 (Secure Logging) Handling
        loggable_url = url if not asset.secure_logging \
            else cwe312_url(url)

        if assign:
            # Group assignment line: <Group(s)>=<Tag(s)>
            groups = set(parse_list(result.group('tags'), cast=str))
            if not groups:
                # no tags were assigned
                ConfigBase.logger.warning(
                    'Unparseable tag assignment - no group(s) '
                    'on line {}'.format(line))
                continue

            # Get our tags
            tags = set(parse_list(assign, cast=str))
            if not tags:
                # no tags were assigned
                ConfigBase.logger.warning(
                    'Unparseable tag assignment - no tag(s) to assign '
                    'on line {}'.format(line))
                continue

            # Update our tag group map
            for tag_group in groups:
                if tag_group not in group_tags:
                    group_tags[tag_group] = set()

                # ensure our tag group is never included in the
                # assignment
                group_tags[tag_group] |= tags - set([tag_group])

            continue

        # Acquire our url tokens
        results = plugins.url_to_dict(
            url, secure_logging=asset.secure_logging)
        if results is None:
            # Failed to parse the server URL
            ConfigBase.logger.warning(
                'Unparseable URL {} on line {}.'.format(
                    loggable_url, line))
            continue

        # Build a list of tags to associate with the newly added
        # notifications if any were set
        results['tag'] = set(parse_list(result.group('tags'), cast=str))

        # Set our Asset Object
        results['asset'] = asset

        # Store our preloaded entries; plugin instantiation is
        # deferred until after all group assignments are known
        preloaded.append({
            'results': results,
            'line': line,
            'loggable_url': loggable_url,
        })

    #
    # Normalize Tag Groups
    # - Expand Groups of Groups so that they don't exist
    #
    ConfigBase.__normalize_tag_groups(group_tags)

    #
    # URL Processing
    #
    for entry in preloaded:
        # Point to our results entry for easier reference below
        results = entry['results']

        #
        # Apply our tag groups if they're defined
        #
        for group, tags in group_tags.items():
            # Detect if anything assigned to this tag also maps back to a
            # group. If so we want to add the group to our list
            if next((True for tag in results['tag']
                     if tag in tags), False):
                results['tag'].add(group)

        try:
            # Attempt to create an instance of our plugin using the
            # parsed URL information
            plugin = N_MGR[results['schema']](**results)

            # Create log entry of loaded URL
            ConfigBase.logger.debug(
                'Loaded URL: %s', plugin.url(
                    privacy=results['asset'].secure_logging))

        except Exception as e:
            # the arguments are invalid or can not be used.
            ConfigBase.logger.warning(
                'Could not load URL {} on line {}.'.format(
                    entry['loggable_url'], entry['line']))
            ConfigBase.logger.debug('Loading Exception: %s' % str(e))
            continue

        # if we reach here, we successfully loaded our data
        servers.append(plugin)

    # Return what was loaded
    return (servers, configs)
@staticmethod
def config_parse_yaml(content, asset=None):
    """
    Parse the specified content as though it were a yaml file
    specifically formatted for Apprise.

    Return a tuple that looks like (servers, configs) where:
      - servers contains a list of loaded notification plugins
      - configs contains a list of additional configuration files
        referenced.

    You may optionally associate an asset with the notification.

    Recognized root directives: version, asset, tag, groups, include
    and urls.
    """

    # A list of loaded Notification Services
    servers = list()

    # A list of additional configuration files referenced using
    # the include keyword
    configs = list()

    # Group Assignments
    group_tags = {}

    # Track our entries to preload
    preloaded = []

    try:
        # Load our data (safely)
        result = yaml.load(content, Loader=yaml.SafeLoader)

    except (AttributeError,
            yaml.parser.ParserError,
            yaml.error.MarkedYAMLError) as e:
        # Invalid content
        ConfigBase.logger.error(
            'Invalid Apprise YAML data specified.')
        ConfigBase.logger.debug(
            'YAML Exception:{}{}'.format(os.linesep, e))
        return (list(), list())

    if not isinstance(result, dict):
        # Invalid content
        ConfigBase.logger.error(
            'Invalid Apprise YAML based configuration specified.')
        return (list(), list())

    # YAML Version
    version = result.get('version', 1)
    if version != 1:
        # Invalid syntax
        ConfigBase.logger.error(
            'Invalid Apprise YAML version specified {}.'.format(version))
        return (list(), list())

    #
    # global asset object
    #
    asset = asset if isinstance(asset, AppriseAsset) else AppriseAsset()
    tokens = result.get('asset', None)
    if tokens and isinstance(tokens, dict):
        for k, v in tokens.items():

            if k.startswith('_') or k.endswith('_'):
                # Entries are considered reserved if they start or end
                # with an underscore
                ConfigBase.logger.warning(
                    'Ignored asset key "{}".'.format(k))
                continue

            if not (hasattr(asset, k) and
                    isinstance(getattr(asset, k),
                               (bool, str))):

                # We can't set a function or non-string set value
                ConfigBase.logger.warning(
                    'Invalid asset key "{}".'.format(k))
                continue

            if v is None:
                # Convert to an empty string
                v = ''

            if (isinstance(v, (bool, str))
                    and isinstance(getattr(asset, k), bool)):

                # If the object in the Asset is a boolean, then
                # we want to convert the specified string to
                # match that.
                setattr(asset, k, parse_bool(v))

            elif isinstance(v, str):
                # Set our asset object with the new value
                setattr(asset, k, v.strip())

            else:
                # we must set strings with a string
                ConfigBase.logger.warning(
                    'Invalid asset value to "{}".'.format(k))
                continue

    #
    # global tag root directive
    #
    global_tags = set()

    tags = result.get('tag', None)
    if tags and isinstance(tags, (list, tuple, str)):
        # Store any preset tags
        global_tags = set(parse_list(tags, cast=str))

    #
    # groups root directive
    #
    groups = result.get('groups', None)
    if isinstance(groups, dict):
        #
        # Dictionary
        #
        for _groups, tags in groups.items():
            for group in parse_list(_groups, cast=str):
                if isinstance(tags, (list, tuple)):
                    _tags = set()
                    for e in tags:
                        if isinstance(e, dict):
                            _tags |= set(e.keys())

                        else:
                            _tags |= set(parse_list(e, cast=str))

                    # Final assignment
                    tags = _tags

                else:
                    tags = set(parse_list(tags, cast=str))

                if group not in group_tags:
                    group_tags[group] = tags

                else:
                    group_tags[group] |= tags

    elif isinstance(groups, (list, tuple)):
        #
        # List of Dictionaries
        #

        # Iterate over each group defined and store it
        for no, entry in enumerate(groups):
            if not isinstance(entry, dict):
                ConfigBase.logger.warning(
                    'No assignment for group {}, entry #{}'.format(
                        entry, no + 1))
                continue

            for _groups, tags in entry.items():
                for group in parse_list(_groups, cast=str):

                    if isinstance(tags, (list, tuple)):
                        _tags = set()
                        for e in tags:
                            if isinstance(e, dict):
                                _tags |= set(e.keys())

                            else:
                                _tags |= set(parse_list(e, cast=str))

                        # Final assignment
                        tags = _tags

                    else:
                        tags = set(parse_list(tags, cast=str))

                    if group not in group_tags:
                        group_tags[group] = tags

                    else:
                        group_tags[group] |= tags

    #
    # include root directive
    #
    includes = result.get('include', None)
    if isinstance(includes, str):
        # Support a single inline string or multiple ones separated by
        # a comma and/or space
        includes = parse_urls(includes)

    elif not isinstance(includes, (list, tuple)):
        # Not a problem; we simply have no includes
        includes = list()

    # Iterate over each config URL
    for no, url in enumerate(includes):

        if isinstance(url, str):
            # Support a single inline string or multiple ones separated
            # by a comma and/or space
            configs.extend(parse_urls(url))

        elif isinstance(url, dict):
            # Store the url and ignore arguments associated
            configs.extend(u for u in url.keys())

    #
    # urls root directive
    #
    urls = result.get('urls', None)
    if not isinstance(urls, (list, tuple)):
        # Not a problem; we simply have no urls
        urls = list()

    # Iterate over each URL
    for no, url in enumerate(urls):

        # Our results object is what we use to instantiate our object if
        # we can. Reset it to None on each iteration
        results = list()

        # CWE-312 (Secure Logging) Handling
        loggable_url = url if not asset.secure_logging \
            else cwe312_url(url)

        if isinstance(url, str):
            # We're just a simple URL string...
            schema = GET_SCHEMA_RE.match(url)
            if schema is None:
                # Log invalid entries so that maintainer of config
                # config file at least has something to take action
                # with.
                ConfigBase.logger.warning(
                    'Invalid URL {}, entry #{}'.format(
                        loggable_url, no + 1))
                continue

            # We found a valid schema worthy of tracking; store it's
            # details:
            _results = plugins.url_to_dict(
                url, secure_logging=asset.secure_logging)
            if _results is None:
                ConfigBase.logger.warning(
                    'Unparseable URL {}, entry #{}'.format(
                        loggable_url, no + 1))
                continue

            # add our results to our global set
            results.append(_results)

        elif isinstance(url, dict):
            # We are a url string with additional unescaped options. In
            # this case we want to iterate over all of our options so we
            # can at least tell the end user what entries were ignored
            # due to errors
            it = iter(url.items())

            # Track the URL to-load
            _url = None

            # Track last acquired schema
            schema = None

            for key, tokens in it:
                # Test our schema
                _schema = GET_SCHEMA_RE.match(key)
                if _schema is None:
                    # Log invalid entries so that maintainer of config
                    # config file at least has something to take action
                    # with.
                    ConfigBase.logger.warning(
                        'Ignored entry {} found under urls, entry #{}'
                        .format(key, no + 1))
                    continue

                # Store our schema
                schema = _schema.group('schema').lower()

                # Store our URL and Schema Regex
                _url = key

            if _url is None:
                # the loop above failed to match anything
                ConfigBase.logger.warning(
                    'Unsupported URL, entry #{}'.format(no + 1))
                continue

            _results = plugins.url_to_dict(
                _url, secure_logging=asset.secure_logging)
            if _results is None:
                # Setup dictionary
                _results = {
                    # Minimum requirements
                    'schema': schema,
                }

            if isinstance(tokens, (list, tuple, set)):
                # populate and/or override any results populated by
                # parse_url()
                for entries in tokens:
                    # Copy ourselves a template of our parsed URL as a
                    # base to work with
                    r = _results.copy()

                    # We are a url string with additional unescaped
                    # options
                    if isinstance(entries, dict):
                        _url, tokens = next(iter(url.items()))

                        # Tags you just can't over-ride
                        if 'schema' in entries:
                            del entries['schema']

                        # support our special tokens (if they're present)
                        if schema in N_MGR:
                            entries = ConfigBase._special_token_handler(
                                schema, entries)

                        # Extend our dictionary with our new entries
                        r.update(entries)

                        # add our results to our global set
                        results.append(r)

            elif isinstance(tokens, dict):
                # support our special tokens (if they're present)
                if schema in N_MGR:
                    tokens = ConfigBase._special_token_handler(
                        schema, tokens)

                # Copy ourselves a template of our parsed URL as a base
                # to work with
                r = _results.copy()

                # add our result set
                r.update(tokens)

                # add our results to our global set
                results.append(r)

            else:
                # add our results to our global set
                results.append(_results)

        else:
            # Unsupported
            ConfigBase.logger.warning(
                'Unsupported Apprise YAML entry #{}'.format(no + 1))
            continue

        # Track our entries
        entry = 0

        while len(results):
            # Increment our entry count
            entry += 1

            # Grab our first item
            _results = results.pop(0)

            if _results['schema'] not in N_MGR:
                # the arguments are invalid or can not be used.
                ConfigBase.logger.warning(
                    'An invalid Apprise schema ({}) in YAML configuration '
                    'entry #{}, item #{}'
                    .format(_results['schema'], no + 1, entry))
                continue

            # tag is a special keyword that is managed by Apprise object.
            # The below ensures our tags are set correctly
            if 'tag' in _results:
                # Tidy our list up
                _results['tag'] = set(
                    parse_list(_results['tag'], cast=str)) | global_tags

            else:
                # Just use the global settings
                _results['tag'] = global_tags

            for key in list(_results.keys()):
                # Strip out any tokens we know that we can't accept and
                # warn the user
                match = VALID_TOKEN.match(key)
                if not match:
                    ConfigBase.logger.warning(
                        'Ignoring invalid token ({}) found in YAML '
                        'configuration entry #{}, item #{}'
                        .format(key, no + 1, entry))
                    del _results[key]

            ConfigBase.logger.trace(
                'URL #{}: {} unpacked as:{}{}'
                .format(no + 1, url, os.linesep, os.linesep.join(
                    ['{}="{}"'.format(k, a)
                     for k, a in _results.items()])))

            # Prepare our Asset Object
            _results['asset'] = asset

            # Handle post processing of result set
            _results = URLBase.post_process_parse_url_results(_results)

            # Store our preloaded entries; plugin instantiation is
            # deferred until after all group assignments are known
            preloaded.append({
                'results': _results,
                'entry': no + 1,
                'item': entry,
            })

    #
    # Normalize Tag Groups
    # - Expand Groups of Groups so that they don't exist
    #
    ConfigBase.__normalize_tag_groups(group_tags)

    #
    # URL Processing
    #
    for entry in preloaded:
        # Point to our results entry for easier reference below
        results = entry['results']

        #
        # Apply our tag groups if they're defined
        #
        for group, tags in group_tags.items():
            # Detect if anything assigned to this tag also maps back to a
            # group. If so we want to add the group to our list
            if next((True for tag in results['tag']
                     if tag in tags), False):
                results['tag'].add(group)

        # Now we generate our plugin
        try:
            # Attempt to create an instance of our plugin using the
            # parsed URL information
            plugin = N_MGR[results['schema']](**results)

            # Create log entry of loaded URL
            ConfigBase.logger.debug(
                'Loaded URL: %s', plugin.url(
                    privacy=results['asset'].secure_logging))

        except Exception as e:
            # the arguments are invalid or can not be used.
            ConfigBase.logger.warning(
                'Could not load Apprise YAML configuration '
                'entry #{}, item #{}'
                .format(entry['entry'], entry['item']))
            ConfigBase.logger.debug('Loading Exception: %s' % str(e))
            continue

        # if we reach here, we successfully loaded our data
        servers.append(plugin)

    return (servers, configs)
def pop(self, index=-1):
    """
    Removes and returns the Notification Service located at the given
    index (the last entry by default) of our loaded server list.
    """
    if not isinstance(self._cached_servers, list):
        # Lazily load (and cache) our server list first
        self.servers()

    # Hand back the requested element, removing it from the stack
    return self._cached_servers.pop(index)
@staticmethod
def _special_token_handler(schema, tokens):
    """
    This function takes a list of tokens and updates them to no longer
    include any special tokens such as +,-, and :
      - schema must be a valid schema of a supported plugin type
      - tokens must be a dictionary containing the yaml entries parsed.

    The idea here is we can post process a set of tokens provided in
    a YAML file where the user provided some of the special keywords.

    We effectively look up what these keywords map to their appropriate
    value they're expected.  A (shallow) copy of tokens is returned;
    the input dictionary itself is not modified.
    """
    # Create a copy of our dictionary
    tokens = tokens.copy()

    for kw, meta in N_MGR[schema].template_kwargs.items():

        # Determine our prefix:
        prefix = meta.get('prefix', '+')

        # Detect any matches (keys starting with the prefix; the
        # prefix character itself is stripped from the key)
        matches = \
            {k[1:]: str(v) for k, v in tokens.items()
             if k.startswith(prefix)}

        if not matches:
            # we're done with this entry
            continue

        if not isinstance(tokens.get(kw), dict):
            # Invalid; correct it
            tokens[kw] = dict()

        # strip out processed tokens
        tokens = {k: v for k, v in tokens.items()
                  if not k.startswith(prefix)}

        # Update our entries
        tokens[kw].update(matches)

    # Now map our tokens accordingly to the class templates defined by
    # each service.
    #
    # This is specifically used for YAML file parsing. It allows a user
    # to define an entry such as:
    #
    # urls:
    #   - mailto://user:pass@domain:
    #       - to: user1@hotmail.com
    #       - to: user2@hotmail.com
    #
    # Under the hood, the NotifyEmail() class does not parse the `to`
    # argument. It's contents needs to be mapped to `targets`. This is
    # defined in the class via the `template_args` and template_tokens`
    # section.
    #
    # This function here allows these mappings to take place within the
    # YAML file as independant arguments.
    class_templates = plugins.details(N_MGR[schema])

    for key in list(tokens.keys()):

        if key not in class_templates['args']:
            # No need to handle non-arg entries
            continue

        # get our `map_to` and/or 'alias_of' value (if it exists)
        map_to = class_templates['args'][key].get(
            'alias_of', class_templates['args'][key].get('map_to', ''))

        if map_to == key:
            # We're already good as we are now
            continue

        if map_to in class_templates['tokens']:
            meta = class_templates['tokens'][map_to]

        else:
            meta = class_templates['args'].get(
                map_to, class_templates['args'][key])

        # Perform a translation/mapping if our code reaches here
        value = tokens[key]
        del tokens[key]

        # Detect if we're dealing with a list or not
        is_list = re.search(
            r'^list:.*',
            meta.get('type'),
            re.IGNORECASE)

        if map_to not in tokens:
            # Seed the destination with an empty list (or the
            # template's declared default)
            tokens[map_to] = [] if is_list \
                else meta.get('default')

        elif is_list and not isinstance(tokens.get(map_to), list):
            # Convert ourselves to a list if we aren't already
            tokens[map_to] = [tokens[map_to]]

        # Type Conversion
        if re.search(
                r'^(choice:)?string',
                meta.get('type'),
                re.IGNORECASE) \
                and not isinstance(value, str):

            # Ensure our format is as expected
            value = str(value)

        # Apply any further translations if required (absolute map)
        # This is the case when an arg maps to a token which further
        # maps to a different function arg on the class constructor
        abs_map = meta.get('map_to', map_to)

        # Set our token as how it was provided by the configuration
        if isinstance(tokens.get(map_to), list):
            tokens[abs_map].append(value)

        else:
            tokens[abs_map] = value

    # Return our tokens
    return tokens
def __getitem__(self, index):
"""
Returns the indexed server entry associated with the loaded
notification servers
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
return self._cached_servers[index]
def __iter__(self):
"""
Returns an iterator to our server list
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
return iter(self._cached_servers)
def __len__(self):
"""
Returns the total number of servers loaded
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
return len(self._cached_servers)
def __bool__(self):
"""
Allows the Apprise object to be wrapped in an 'if statement'.
True is returned if our content was downloaded correctly.
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
return True if self._cached_servers else False
|
(cache=True, recursion=0, insecure_includes=False, **kwargs)
|
721,984 |
apprise.config.base
|
__normalize_tag_groups
|
Used to normalize a tag assign map which looks like:
{
'group': set('{tag1}', '{group1}', '{tag2}'),
'group1': set('{tag2}','{tag3}'),
}
Then normalizes it (merging groups); with respect to the above, the
output would be:
{
'group': set('{tag1}', '{tag2}', '{tag3}'),
'group1': set('{tag2}','{tag3}'),
}
|
@staticmethod
def __normalize_tag_groups(group_tags):
"""
Used to normalize a tag assign map which looks like:
{
'group': set('{tag1}', '{group1}', '{tag2}'),
'group1': set('{tag2}','{tag3}'),
}
Then normalized it (merging groups); with respect to the above, the
output would be:
{
'group': set('{tag1}', '{tag2}', '{tag3}'),
'group1': set('{tag2}','{tag3}'),
}
"""
# Prepare a key set list we can use
tag_groups = set([str(x) for x in group_tags.keys()])
def _expand(tags, ignore=None):
"""
Expands based on tag provided and returns a set
this also updates the group_tags while it goes
"""
# Prepare ourselves a return set
results = set()
ignore = set() if ignore is None else ignore
# track groups
groups = set()
for tag in tags:
if tag in ignore:
continue
# Track our groups
groups.add(tag)
# Store what we know is worth keeping
if tag not in group_tags: # pragma: no cover
# handle cases where the tag doesn't exist
group_tags[tag] = set()
results |= group_tags[tag] - tag_groups
# Get simple tag assignments
found = group_tags[tag] & tag_groups
if not found:
continue
for gtag in found:
if gtag in ignore:
continue
# Go deeper (recursion)
ignore.add(tag)
group_tags[gtag] = _expand(set([gtag]), ignore=ignore)
results |= group_tags[gtag]
# Pop ignore
ignore.remove(tag)
return results
for tag in tag_groups:
# Get our tags
group_tags[tag] |= _expand(set([tag]))
if not group_tags[tag]:
ConfigBase.logger.warning(
'The group {} has no tags assigned to it'.format(tag))
del group_tags[tag]
|
(group_tags)
|
721,985 |
apprise.config.base
|
__bool__
|
Allows the Apprise object to be wrapped in an 'if statement'.
True is returned if our content was downloaded correctly.
|
def __bool__(self):
"""
Allows the Apprise object to be wrapped in an 'if statement'.
True is returned if our content was downloaded correctly.
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
return True if self._cached_servers else False
|
(self)
|
721,987 |
apprise.config.base
|
__getitem__
|
Returns the indexed server entry associated with the loaded
notification servers
|
def __getitem__(self, index):
"""
Returns the indexed server entry associated with the loaded
notification servers
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
return self._cached_servers[index]
|
(self, index)
|
721,988 |
apprise.config.base
|
__init__
|
Initialize some general logging and common server arguments that will
keep things consistent when working with the configurations that
inherit this class.
By default we cache our responses so that subsequent calls do not
cause the content to be retrieved again. For local file references
this makes no difference at all. But for remote content, this does
mean more then one call can be made to retrieve the (same) data. This
method can be somewhat inefficient if disabled. Only disable caching
if you understand the consequences.
You can alternatively set the cache value to an int identifying the
number of seconds the previously retrieved can exist for before it
should be considered expired.
recursion defines how deep we recursively handle entries that use the
`include` keyword. This keyword requires us to fetch more configuration
from another source and add it to our existing compilation. If the
file we remotely retrieve also has an `include` reference, we will only
advance through it if recursion is set to 2 deep. If set to zero
it is off. There is no limit to how high you set this value. It would
be recommended to keep it low if you do intend to use it.
insecure_include by default are disabled. When set to True, all
Apprise Config files marked to be in STRICT mode are treated as being
in ALWAYS mode.
Take a file:// based configuration for example, only a file:// based
configuration can include another file:// based one. because it is set
to STRICT mode. If an http:// based configuration file attempted to
include a file:// one it would fail. However this include would be
possible if insecure_includes is set to True.
There are cases where a self hosting apprise developer may wish to load
configuration from memory (in a string format) that contains 'include'
entries (even file:// based ones). In these circumstances if you want
these 'include' entries to be honored, this value must be set to True.
|
def __init__(self, cache=True, recursion=0, insecure_includes=False,
**kwargs):
"""
Initialize some general logging and common server arguments that will
keep things consistent when working with the configurations that
inherit this class.
By default we cache our responses so that subsiquent calls does not
cause the content to be retrieved again. For local file references
this makes no difference at all. But for remote content, this does
mean more then one call can be made to retrieve the (same) data. This
method can be somewhat inefficient if disabled. Only disable caching
if you understand the consequences.
You can alternatively set the cache value to an int identifying the
number of seconds the previously retrieved can exist for before it
should be considered expired.
recursion defines how deep we recursively handle entries that use the
`include` keyword. This keyword requires us to fetch more configuration
from another source and add it to our existing compilation. If the
file we remotely retrieve also has an `include` reference, we will only
advance through it if recursion is set to 2 deep. If set to zero
it is off. There is no limit to how high you set this value. It would
be recommended to keep it low if you do intend to use it.
insecure_include by default are disabled. When set to True, all
Apprise Config files marked to be in STRICT mode are treated as being
in ALWAYS mode.
Take a file:// based configuration for example, only a file:// based
configuration can include another file:// based one. because it is set
to STRICT mode. If an http:// based configuration file attempted to
include a file:// one it woul fail. However this include would be
possible if insecure_includes is set to True.
There are cases where a self hosting apprise developer may wish to load
configuration from memory (in a string format) that contains 'include'
entries (even file:// based ones). In these circumstances if you want
these 'include' entries to be honored, this value must be set to True.
"""
super().__init__(**kwargs)
# Tracks the time the content was last retrieved on. This place a role
# for cases where we are not caching our response and are required to
# re-retrieve our settings.
self._cached_time = None
# Tracks previously loaded content for speed
self._cached_servers = None
# Initialize our recursion value
self.recursion = recursion
# Initialize our insecure_includes flag
self.insecure_includes = insecure_includes
if 'encoding' in kwargs:
# Store the encoding
self.encoding = kwargs.get('encoding')
if 'format' in kwargs \
and isinstance(kwargs['format'], str):
# Store the enforced config format
self.config_format = kwargs.get('format').lower()
if self.config_format not in common.CONFIG_FORMATS:
# Simple error checking
err = 'An invalid config format ({}) was specified.'.format(
self.config_format)
self.logger.warning(err)
raise TypeError(err)
# Set our cache flag; it can be True or a (positive) integer
try:
self.cache = cache if isinstance(cache, bool) else int(cache)
if self.cache < 0:
err = 'A negative cache value ({}) was specified.'.format(
cache)
self.logger.warning(err)
raise TypeError(err)
except (ValueError, TypeError):
err = 'An invalid cache value ({}) was specified.'.format(cache)
self.logger.warning(err)
raise TypeError(err)
return
|
(self, cache=True, recursion=0, insecure_includes=False, **kwargs)
|
721,989 |
apprise.config.base
|
__iter__
|
Returns an iterator to our server list
|
def __iter__(self):
"""
Returns an iterator to our server list
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
return iter(self._cached_servers)
|
(self)
|
721,990 |
apprise.config.base
|
__len__
|
Returns the total number of servers loaded
|
def __len__(self):
"""
Returns the total number of servers loaded
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
return len(self._cached_servers)
|
(self)
|
721,992 |
apprise.config.base
|
_special_token_handler
|
This function takes a list of tokens and updates them to no longer
include any special tokens such as +,-, and :
- schema must be a valid schema of a supported plugin type
- tokens must be a dictionary containing the yaml entries parsed.
The idea here is we can post process a set of tokens provided in
a YAML file where the user provided some of the special keywords.
We effectivley look up what these keywords map to their appropriate
value they're expected
|
    @staticmethod
    def _special_token_handler(schema, tokens):
        """
        This function takes a list of tokens and updates them to no longer
        include any special tokens such as +,-, and :

        - schema must be a valid schema of a supported plugin type
        - tokens must be a dictionary containing the yaml entries parsed.

        The idea here is we can post process a set of tokens provided in
        a YAML file where the user provided some of the special keywords.
        We effectively look up what these keywords map to their appropriate
        value they're expected to hold.

        Returns a new dictionary; the input `tokens` is not modified.
        """
        # Create a copy of our dictionary
        tokens = tokens.copy()
        # First pass: collapse prefixed entries (e.g. '+key') into the
        # dictionary keyword argument that declared that prefix
        for kw, meta in N_MGR[schema].template_kwargs.items():
            # Determine our prefix:
            prefix = meta.get('prefix', '+')
            # Detect any matches
            matches = \
                {k[1:]: str(v) for k, v in tokens.items()
                 if k.startswith(prefix)}
            if not matches:
                # we're done with this entry
                continue
            if not isinstance(tokens.get(kw), dict):
                # Invalid; correct it
                tokens[kw] = dict()
            # strip out processed tokens
            tokens = {k: v for k, v in tokens.items()
                      if not k.startswith(prefix)}
            # Update our entries
            tokens[kw].update(matches)
        # Now map our tokens accordingly to the class templates defined by
        # each service.
        #
        # This is specifically used for YAML file parsing. It allows a user to
        # define an entry such as:
        #
        # urls:
        #   - mailto://user:pass@domain:
        #       - to: user1@hotmail.com
        #       - to: user2@hotmail.com
        #
        # Under the hood, the NotifyEmail() class does not parse the `to`
        # argument. It's contents needs to be mapped to `targets`. This is
        # defined in the class via the `template_args` and template_tokens`
        # section.
        #
        # This function here allows these mappings to take place within the
        # YAML file as independant arguments.
        class_templates = plugins.details(N_MGR[schema])
        for key in list(tokens.keys()):
            if key not in class_templates['args']:
                # No need to handle non-arg entries
                continue
            # get our `map_to` and/or 'alias_of' value (if it exists)
            map_to = class_templates['args'][key].get(
                'alias_of', class_templates['args'][key].get('map_to', ''))
            if map_to == key:
                # We're already good as we are now
                continue
            # Resolve the metadata that describes the destination entry so we
            # know its type and default
            if map_to in class_templates['tokens']:
                meta = class_templates['tokens'][map_to]
            else:
                meta = class_templates['args'].get(
                    map_to, class_templates['args'][key])
            # Perform a translation/mapping if our code reaches here
            value = tokens[key]
            del tokens[key]
            # Detect if we're dealing with a list or not
            is_list = re.search(
                r'^list:.*',
                meta.get('type'),
                re.IGNORECASE)
            if map_to not in tokens:
                tokens[map_to] = [] if is_list \
                    else meta.get('default')
            elif is_list and not isinstance(tokens.get(map_to), list):
                # Convert ourselves to a list if we aren't already
                tokens[map_to] = [tokens[map_to]]
            # Type Conversion
            if re.search(
                    r'^(choice:)?string',
                    meta.get('type'),
                    re.IGNORECASE) \
                    and not isinstance(value, str):
                # Ensure our format is as expected
                value = str(value)
            # Apply any further translations if required (absolute map)
            # This is the case when an arg maps to a token which further
            # maps to a different function arg on the class constructor
            abs_map = meta.get('map_to', map_to)
            # Set our token as how it was provided by the configuration
            if isinstance(tokens.get(map_to), list):
                tokens[abs_map].append(value)
            else:
                tokens[abs_map] = value
        # Return our tokens
        return tokens
|
(schema, tokens)
|
721,993 |
apprise.config.base
|
config_parse
|
Takes the specified config content and loads it based on the specified
config_format. If a format isn't specified, then it is auto detected.
|
@staticmethod
def config_parse(content, asset=None, config_format=None, **kwargs):
"""
Takes the specified config content and loads it based on the specified
config_format. If a format isn't specified, then it is auto detected.
"""
if config_format is None:
# Detect the format
config_format = ConfigBase.detect_config_format(content)
if not config_format:
# We couldn't detect configuration
ConfigBase.logger.error('Could not detect configuration')
return (list(), list())
if config_format not in common.CONFIG_FORMATS:
# Invalid configuration type specified
ConfigBase.logger.error(
'An invalid configuration format ({}) was specified'.format(
config_format))
return (list(), list())
# Dynamically load our parse_ function based on our config format
fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format))
# Execute our config parse function which always returns a list
return fn(content=content, asset=asset)
|
(content, asset=None, config_format=None, **kwargs)
|
721,994 |
apprise.config.base
|
config_parse_text
|
Parse the specified content as though it were a simple text file only
containing a list of URLs.
Return a tuple that looks like (servers, configs) where:
- servers contains a list of loaded notification plugins
- configs contains a list of additional configuration files
referenced.
You may also optionally associate an asset with the notification.
The file syntax is:
#
# pound/hashtag allow for line comments
#
# One or more tags can be idenified using comma's (,) to separate
# them.
<Tag(s)>=<URL>
# Or you can use this format (no tags associated)
<URL>
# you can also use the keyword 'include' and identify a
# configuration location (like this file) which will be included
# as additional configuration entries when loaded.
include <ConfigURL>
# Assign tag contents to a group identifier
<Group(s)>=<Tag(s)>
|
    @staticmethod
    def config_parse_text(content, asset=None):
        """
        Parse the specified content as though it were a simple text file only
        containing a list of URLs.

        Return a tuple that looks like (servers, configs) where:
          - servers contains a list of loaded notification plugins
          - configs contains a list of additional configuration files
            referenced.

        You may also optionally associate an asset with the notification.

        The file syntax is:

            #
            # pound/hashtag allow for line comments
            #
            # One or more tags can be identified using comma's (,) to
            # separate them.
            <Tag(s)>=<URL>

            # Or you can use this format (no tags associated)
            <URL>

            # you can also use the keyword 'include' and identify a
            # configuration location (like this file) which will be included
            # as additional configuration entries when loaded.
            include <ConfigURL>

            # Assign tag contents to a group identifier
            <Group(s)>=<Tag(s)>
        """
        # A list of loaded Notification Services
        servers = list()
        # A list of additional configuration files referenced using
        # the include keyword
        configs = list()
        # Track all of the tags we want to assign later on
        group_tags = {}
        # Track our entries to preload; plugins are only instantiated after
        # all lines are read so tag groups can be normalized first
        preloaded = []
        # Prepare our Asset Object
        asset = asset if isinstance(asset, AppriseAsset) else AppriseAsset()
        # Define what a valid line should look like
        valid_line_re = re.compile(
            r'^\s*(?P<line>([;#]+(?P<comment>.*))|'
            r'(\s*(?P<tags>[a-z0-9, \t_-]+)\s*=|=)?\s*'
            r'((?P<url>[a-z0-9]{1,12}://.*)|(?P<assign>[a-z0-9, \t_-]+))|'
            r'include\s+(?P<config>.+))?\s*$', re.I)
        try:
            # split our content up to read line by line
            content = re.split(r'\r*\n', content)
        except TypeError:
            # content was not expected string type
            ConfigBase.logger.error(
                'Invalid Apprise TEXT based configuration specified.')
            return (list(), list())
        for line, entry in enumerate(content, start=1):
            result = valid_line_re.match(entry)
            if not result:
                # Invalid syntax
                ConfigBase.logger.error(
                    'Invalid Apprise TEXT configuration format found '
                    '{} on line {}.'.format(entry, line))
                # Assume this is a file we shouldn't be parsing. Its owner
                # can read the error printed to screen and take action
                # otherwise.
                return (list(), list())
            # Retrieve our line
            url, assign, config = \
                result.group('url'), \
                result.group('assign'), \
                result.group('config')
            if not (url or config or assign):
                # Comment/empty line; do nothing
                continue
            if config:
                # `include <url>` line
                # CWE-312 (Secure Logging) Handling
                loggable_url = config if not asset.secure_logging \
                    else cwe312_url(config)
                ConfigBase.logger.debug(
                    'Include URL: {}'.format(loggable_url))
                # Store our include line
                configs.append(config.strip())
                continue
            # CWE-312 (Secure Logging) Handling
            loggable_url = url if not asset.secure_logging \
                else cwe312_url(url)
            if assign:
                # <Group(s)>=<Tag(s)> assignment line
                groups = set(parse_list(result.group('tags'), cast=str))
                if not groups:
                    # no tags were assigned
                    ConfigBase.logger.warning(
                        'Unparseable tag assignment - no group(s) '
                        'on line {}'.format(line))
                    continue
                # Get our tags
                tags = set(parse_list(assign, cast=str))
                if not tags:
                    # no tags were assigned
                    ConfigBase.logger.warning(
                        'Unparseable tag assignment - no tag(s) to assign '
                        'on line {}'.format(line))
                    continue
                # Update our tag group map
                for tag_group in groups:
                    if tag_group not in group_tags:
                        group_tags[tag_group] = set()
                    # ensure our tag group is never included in the assignment
                    group_tags[tag_group] |= tags - set([tag_group])
                continue
            # Acquire our url tokens
            results = plugins.url_to_dict(
                url, secure_logging=asset.secure_logging)
            if results is None:
                # Failed to parse the server URL
                ConfigBase.logger.warning(
                    'Unparseable URL {} on line {}.'.format(
                        loggable_url, line))
                continue
            # Build a list of tags to associate with the newly added
            # notifications if any were set
            results['tag'] = set(parse_list(result.group('tags'), cast=str))
            # Set our Asset Object
            results['asset'] = asset
            # Store our preloaded entries
            preloaded.append({
                'results': results,
                'line': line,
                'loggable_url': loggable_url,
            })
        #
        # Normalize Tag Groups
        # - Expand Groups of Groups so that they don't exist
        #
        ConfigBase.__normalize_tag_groups(group_tags)
        #
        # URL Processing
        #
        for entry in preloaded:
            # Point to our results entry for easier reference below
            results = entry['results']
            #
            # Apply our tag groups if they're defined
            #
            for group, tags in group_tags.items():
                # Detect if anything assigned to this tag also maps back to a
                # group. If so we want to add the group to our list
                if next((True for tag in results['tag']
                         if tag in tags), False):
                    results['tag'].add(group)
            try:
                # Attempt to create an instance of our plugin using the
                # parsed URL information
                plugin = N_MGR[results['schema']](**results)
                # Create log entry of loaded URL
                ConfigBase.logger.debug(
                    'Loaded URL: %s', plugin.url(
                        privacy=results['asset'].secure_logging))
            except Exception as e:
                # the arguments are invalid or can not be used.
                ConfigBase.logger.warning(
                    'Could not load URL {} on line {}.'.format(
                        entry['loggable_url'], entry['line']))
                ConfigBase.logger.debug('Loading Exception: %s' % str(e))
                continue
            # if we reach here, we successfully loaded our data
            servers.append(plugin)
        # Return what was loaded
        return (servers, configs)
|
(content, asset=None)
|
721,995 |
apprise.config.base
|
config_parse_yaml
|
Parse the specified content as though it were a yaml file
specifically formatted for Apprise.
Return a tuple that looks like (servers, configs) where:
- servers contains a list of loaded notification plugins
- configs contains a list of additional configuration files
referenced.
You may optionally associate an asset with the notification.
|
    @staticmethod
    def config_parse_yaml(content, asset=None):
        """
        Parse the specified content as though it were a yaml file
        specifically formatted for Apprise.

        Return a tuple that looks like (servers, configs) where:
          - servers contains a list of loaded notification plugins
          - configs contains a list of additional configuration files
            referenced.

        You may optionally associate an asset with the notification.
        """
        # A list of loaded Notification Services
        servers = list()
        # A list of additional configuration files referenced using
        # the include keyword
        configs = list()
        # Group Assignments
        group_tags = {}
        # Track our entries to preload; plugins are only instantiated once
        # all urls are read so tag groups can be normalized first
        preloaded = []
        try:
            # Load our data (safely)
            result = yaml.load(content, Loader=yaml.SafeLoader)
        except (AttributeError,
                yaml.parser.ParserError,
                yaml.error.MarkedYAMLError) as e:
            # Invalid content
            ConfigBase.logger.error(
                'Invalid Apprise YAML data specified.')
            ConfigBase.logger.debug(
                'YAML Exception:{}{}'.format(os.linesep, e))
            return (list(), list())
        if not isinstance(result, dict):
            # Invalid content
            ConfigBase.logger.error(
                'Invalid Apprise YAML based configuration specified.')
            return (list(), list())
        # YAML Version
        version = result.get('version', 1)
        if version != 1:
            # Invalid syntax
            ConfigBase.logger.error(
                'Invalid Apprise YAML version specified {}.'.format(version))
            return (list(), list())
        #
        # global asset object
        #
        asset = asset if isinstance(asset, AppriseAsset) else AppriseAsset()
        tokens = result.get('asset', None)
        if tokens and isinstance(tokens, dict):
            for k, v in tokens.items():
                if k.startswith('_') or k.endswith('_'):
                    # Entries are considered reserved if they start or end
                    # with an underscore
                    ConfigBase.logger.warning(
                        'Ignored asset key "{}".'.format(k))
                    continue
                if not (hasattr(asset, k) and
                        isinstance(getattr(asset, k),
                                   (bool, str))):
                    # We can't set a function or non-string set value
                    ConfigBase.logger.warning(
                        'Invalid asset key "{}".'.format(k))
                    continue
                if v is None:
                    # Convert to an empty string
                    v = ''
                if (isinstance(v, (bool, str))
                        and isinstance(getattr(asset, k), bool)):
                    # If the object in the Asset is a boolean, then
                    # we want to convert the specified string to
                    # match that.
                    setattr(asset, k, parse_bool(v))
                elif isinstance(v, str):
                    # Set our asset object with the new value
                    setattr(asset, k, v.strip())
                else:
                    # we must set strings with a string
                    ConfigBase.logger.warning(
                        'Invalid asset value to "{}".'.format(k))
                    continue
        #
        # global tag root directive
        #
        global_tags = set()
        tags = result.get('tag', None)
        if tags and isinstance(tags, (list, tuple, str)):
            # Store any preset tags
            global_tags = set(parse_list(tags, cast=str))
        #
        # groups root directive
        #
        groups = result.get('groups', None)
        if isinstance(groups, dict):
            #
            # Dictionary
            #
            for _groups, tags in groups.items():
                for group in parse_list(_groups, cast=str):
                    if isinstance(tags, (list, tuple)):
                        _tags = set()
                        for e in tags:
                            if isinstance(e, dict):
                                _tags |= set(e.keys())
                            else:
                                _tags |= set(parse_list(e, cast=str))
                        # Final assignment
                        tags = _tags
                    else:
                        tags = set(parse_list(tags, cast=str))
                    if group not in group_tags:
                        group_tags[group] = tags
                    else:
                        group_tags[group] |= tags
        elif isinstance(groups, (list, tuple)):
            #
            # List of Dictionaries
            #
            # Iterate over each group defined and store it
            for no, entry in enumerate(groups):
                if not isinstance(entry, dict):
                    ConfigBase.logger.warning(
                        'No assignment for group {}, entry #{}'.format(
                            entry, no + 1))
                    continue
                for _groups, tags in entry.items():
                    for group in parse_list(_groups, cast=str):
                        if isinstance(tags, (list, tuple)):
                            _tags = set()
                            for e in tags:
                                if isinstance(e, dict):
                                    _tags |= set(e.keys())
                                else:
                                    _tags |= set(parse_list(e, cast=str))
                            # Final assignment
                            tags = _tags
                        else:
                            tags = set(parse_list(tags, cast=str))
                        if group not in group_tags:
                            group_tags[group] = tags
                        else:
                            group_tags[group] |= tags
        #
        # include root directive
        #
        includes = result.get('include', None)
        if isinstance(includes, str):
            # Support a single inline string or multiple ones separated by a
            # comma and/or space
            includes = parse_urls(includes)
        elif not isinstance(includes, (list, tuple)):
            # Not a problem; we simply have no includes
            includes = list()
        # Iterate over each config URL
        for no, url in enumerate(includes):
            if isinstance(url, str):
                # Support a single inline string or multiple ones separated by
                # a comma and/or space
                configs.extend(parse_urls(url))
            elif isinstance(url, dict):
                # Store the url and ignore arguments associated
                configs.extend(u for u in url.keys())
        #
        # urls root directive
        #
        urls = result.get('urls', None)
        if not isinstance(urls, (list, tuple)):
            # Not a problem; we simply have no urls
            urls = list()
        # Iterate over each URL
        for no, url in enumerate(urls):
            # Our results object is what we use to instantiate our object if
            # we can. Reset it to None on each iteration
            results = list()
            # CWE-312 (Secure Logging) Handling
            loggable_url = url if not asset.secure_logging \
                else cwe312_url(url)
            if isinstance(url, str):
                # We're just a simple URL string...
                schema = GET_SCHEMA_RE.match(url)
                if schema is None:
                    # Log invalid entries so that maintainer of config
                    # config file at least has something to take action
                    # with.
                    ConfigBase.logger.warning(
                        'Invalid URL {}, entry #{}'.format(
                            loggable_url, no + 1))
                    continue
                # We found a valid schema worthy of tracking; store it's
                # details:
                _results = plugins.url_to_dict(
                    url, secure_logging=asset.secure_logging)
                if _results is None:
                    ConfigBase.logger.warning(
                        'Unparseable URL {}, entry #{}'.format(
                            loggable_url, no + 1))
                    continue
                # add our results to our global set
                results.append(_results)
            elif isinstance(url, dict):
                # We are a url string with additional unescaped options. In
                # this case we want to iterate over all of our options so we
                # can at least tell the end user what entries were ignored
                # due to errors
                it = iter(url.items())
                # Track the URL to-load
                _url = None
                # Track last acquired schema
                schema = None
                for key, tokens in it:
                    # Test our schema
                    _schema = GET_SCHEMA_RE.match(key)
                    if _schema is None:
                        # Log invalid entries so that maintainer of config
                        # config file at least has something to take action
                        # with.
                        ConfigBase.logger.warning(
                            'Ignored entry {} found under urls, entry #{}'
                            .format(key, no + 1))
                        continue
                    # Store our schema
                    schema = _schema.group('schema').lower()
                    # Store our URL and Schema Regex
                    _url = key
                if _url is None:
                    # the loop above failed to match anything
                    ConfigBase.logger.warning(
                        'Unsupported URL, entry #{}'.format(no + 1))
                    continue
                _results = plugins.url_to_dict(
                    _url, secure_logging=asset.secure_logging)
                if _results is None:
                    # Setup dictionary
                    _results = {
                        # Minimum requirements
                        'schema': schema,
                    }
                if isinstance(tokens, (list, tuple, set)):
                    # populate and/or override any results populated by
                    # parse_url()
                    for entries in tokens:
                        # Copy ourselves a template of our parsed URL as a base
                        # to work with
                        r = _results.copy()
                        # We are a url string with additional unescaped options
                        if isinstance(entries, dict):
                            _url, tokens = next(iter(url.items()))
                            # Tags you just can't over-ride
                            if 'schema' in entries:
                                del entries['schema']
                            # support our special tokens (if they're present)
                            if schema in N_MGR:
                                entries = ConfigBase._special_token_handler(
                                    schema, entries)
                            # Extend our dictionary with our new entries
                            r.update(entries)
                            # add our results to our global set
                            results.append(r)
                elif isinstance(tokens, dict):
                    # support our special tokens (if they're present)
                    if schema in N_MGR:
                        tokens = ConfigBase._special_token_handler(
                            schema, tokens)
                    # Copy ourselves a template of our parsed URL as a base to
                    # work with
                    r = _results.copy()
                    # add our result set
                    r.update(tokens)
                    # add our results to our global set
                    results.append(r)
                else:
                    # add our results to our global set
                    results.append(_results)
            else:
                # Unsupported
                ConfigBase.logger.warning(
                    'Unsupported Apprise YAML entry #{}'.format(no + 1))
                continue
            # Track our entries
            entry = 0
            while len(results):
                # Increment our entry count
                entry += 1
                # Grab our first item
                _results = results.pop(0)
                if _results['schema'] not in N_MGR:
                    # the arguments are invalid or can not be used.
                    ConfigBase.logger.warning(
                        'An invalid Apprise schema ({}) in YAML configuration '
                        'entry #{}, item #{}'
                        .format(_results['schema'], no + 1, entry))
                    continue
                # tag is a special keyword that is managed by Apprise object.
                # The below ensures our tags are set correctly
                if 'tag' in _results:
                    # Tidy our list up
                    _results['tag'] = set(
                        parse_list(_results['tag'], cast=str)) | global_tags
                else:
                    # Just use the global settings
                    _results['tag'] = global_tags
                for key in list(_results.keys()):
                    # Strip out any tokens we know that we can't accept and
                    # warn the user
                    match = VALID_TOKEN.match(key)
                    if not match:
                        ConfigBase.logger.warning(
                            'Ignoring invalid token ({}) found in YAML '
                            'configuration entry #{}, item #{}'
                            .format(key, no + 1, entry))
                        del _results[key]
                ConfigBase.logger.trace(
                    'URL #{}: {} unpacked as:{}{}'
                    .format(no + 1, url, os.linesep, os.linesep.join(
                        ['{}="{}"'.format(k, a)
                         for k, a in _results.items()])))
                # Prepare our Asset Object
                _results['asset'] = asset
                # Handle post processing of result set
                _results = URLBase.post_process_parse_url_results(_results)
                # Store our preloaded entries
                preloaded.append({
                    'results': _results,
                    'entry': no + 1,
                    'item': entry,
                })
        #
        # Normalize Tag Groups
        # - Expand Groups of Groups so that they don't exist
        #
        ConfigBase.__normalize_tag_groups(group_tags)
        #
        # URL Processing
        #
        for entry in preloaded:
            # Point to our results entry for easier reference below
            results = entry['results']
            #
            # Apply our tag groups if they're defined
            #
            for group, tags in group_tags.items():
                # Detect if anything assigned to this tag also maps back to a
                # group. If so we want to add the group to our list
                if next((True for tag in results['tag']
                         if tag in tags), False):
                    results['tag'].add(group)
            # Now we generate our plugin
            try:
                # Attempt to create an instance of our plugin using the
                # parsed URL information
                plugin = N_MGR[results['schema']](**results)
                # Create log entry of loaded URL
                ConfigBase.logger.debug(
                    'Loaded URL: %s', plugin.url(
                        privacy=results['asset'].secure_logging))
            except Exception as e:
                # the arguments are invalid or can not be used.
                ConfigBase.logger.warning(
                    'Could not load Apprise YAML configuration '
                    'entry #{}, item #{}'
                    .format(entry['entry'], entry['item']))
                ConfigBase.logger.debug('Loading Exception: %s' % str(e))
                continue
            # if we reach here, we successfully loaded our data
            servers.append(plugin)
        return (servers, configs)
|
(content, asset=None)
|
721,996 |
apprise.config.base
|
detect_config_format
|
Takes the specified content and attempts to detect the format type
The function returns the actual format type if detected, otherwise
it returns None
|
@staticmethod
def detect_config_format(content, **kwargs):
"""
Takes the specified content and attempts to detect the format type
The function returns the actual format type if detected, otherwise
it returns None
"""
# Detect Format Logic:
# - A pound/hashtag (#) is alawys a comment character so we skip over
# lines matched here.
# - Detection begins on the first non-comment and non blank line
# matched.
# - If we find a string followed by a colon, we know we're dealing
# with a YAML file.
# - If we find a string that starts with a URL, or our tag
# definitions (accepting commas) followed by an equal sign we know
# we're dealing with a TEXT format.
# Define what a valid line should look like
valid_line_re = re.compile(
r'^\s*(?P<line>([;#]+(?P<comment>.*))|'
r'(?P<text>((?P<tag>[ \t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|'
r'((?P<yaml>[a-z0-9]+):.*))?$', re.I)
try:
# split our content up to read line by line
content = re.split(r'\r*\n', content)
except TypeError:
# content was not expected string type
ConfigBase.logger.error(
'Invalid Apprise configuration specified.')
return None
# By default set our return value to None since we don't know
# what the format is yet
config_format = None
# iterate over each line of the file to attempt to detect it
# stop the moment a the type has been determined
for line, entry in enumerate(content, start=1):
result = valid_line_re.match(entry)
if not result:
# Invalid syntax
ConfigBase.logger.error(
'Undetectable Apprise configuration found '
'based on line {}.'.format(line))
# Take an early exit
return None
# Attempt to detect configuration
if result.group('yaml'):
config_format = common.ConfigFormat.YAML
ConfigBase.logger.debug(
'Detected YAML configuration '
'based on line {}.'.format(line))
break
elif result.group('text'):
config_format = common.ConfigFormat.TEXT
ConfigBase.logger.debug(
'Detected TEXT configuration '
'based on line {}.'.format(line))
break
# If we reach here, we have a comment entry
# Adjust default format to TEXT
config_format = common.ConfigFormat.TEXT
return config_format
|
(content, **kwargs)
|
721,998 |
apprise.config.base
|
expired
|
Simply returns True if the configuration should be considered
as expired or False if content should be retrieved.
|
def expired(self):
"""
Simply returns True if the configuration should be considered
as expired or False if content should be retrieved.
"""
if isinstance(self._cached_servers, list) and self.cache:
# We have enough reason to look further into our cached content
# and verify it has not expired.
if self.cache is True:
# we have not expired, return False
return False
# Verify our cache time to determine whether we will get our
# content again.
age_in_sec = time.time() - self._cached_time
if age_in_sec <= self.cache:
# We have not expired; return False
return False
# If we reach here our configuration should be considered
# missing and/or expired.
return True
|
(self)
|
722,002 |
apprise.config.base
|
parse_url
|
Parses the URL and returns it broken apart into a dictionary.
This is very specific and customized for Apprise.
Args:
url (str): The URL you want to fully parse.
verify_host (:obj:`bool`, optional): a flag kept with the parsed
URL which some child classes will later use to verify SSL
keys (if SSL transactions take place). Unless under very
specific circumstances, it is strongly recomended that
you leave this default value set to True.
Returns:
A dictionary is returned containing the URL fully parsed if
successful, otherwise None is returned.
|
@staticmethod
def parse_url(url, verify_host=True):
    """Parse an Apprise configuration URL into its components.

    This is very specific and customized for Apprise.

    Args:
        url (str): The URL you want to fully parse.
        verify_host (:obj:`bool`, optional): a flag kept with the parsed
            URL which some child classes will later use to verify SSL
            keys (if SSL transactions take place). Unless under very
            specific circumstances, it is strongly recommended that
            you leave this default value set to True.

    Returns:
        A dictionary containing the fully parsed URL on success,
        otherwise None.
    """
    results = URLBase.parse_url(url, verify_host=verify_host)
    if not results:
        # Parsing failed; propagate the empty result
        return results

    qsd = results['qsd']

    # The query string may override the default configuration format;
    # unsupported values are dropped (with a warning) so detection
    # can take over later
    if 'format' in qsd:
        results['format'] = qsd.get('format')
        if results['format'] not in common.CONFIG_FORMATS:
            URLBase.logger.warning(
                'Unsupported format specified {}'.format(
                    results['format']))
            del results['format']

    # Payload encoding override
    if 'encoding' in qsd:
        results['encoding'] = qsd.get('encoding')

    # The cache entry accepts either an integer lifetime (seconds) or,
    # failing that, a boolean flag
    if 'cache' in qsd:
        try:
            results['cache'] = int(qsd['cache'])

        except (ValueError, TypeError):
            # No problem, it just isn't an integer; treat it as a bool
            results['cache'] = parse_bool(qsd['cache'])

    return results
|
(url, verify_host=True)
|
722,003 |
apprise.config.base
|
pop
|
Removes an indexed Notification Service from the stack and returns it.
By default, the last element of the list is removed.
|
def pop(self, index=-1):
    """
    Remove and return the notification service at the given position
    (the last entry by default). The configuration is lazily parsed
    first if it has not been loaded yet.
    """
    # Populate our cached server list when nothing has been parsed yet
    if not isinstance(self._cached_servers, list):
        self.servers()

    # Hand back (and remove) the requested entry
    return self._cached_servers.pop(index)
|
(self, index=-1)
|
722,007 |
apprise.config.base
|
read
|
This object should be implimented by the child classes
|
def read(self):
    """
    Read and return the raw (unparsed) configuration content.

    This is a stub that must be implemented by child classes; the base
    implementation simply returns None (no content).
    """
    return None
|
(self)
|
722,009 |
apprise.config.base
|
servers
|
Performs reads loaded configuration and returns all of the services
that could be parsed and loaded.
|
def servers(self, asset=None, **kwargs):
    """
    Read the loaded configuration and return all of the services that
    could be parsed and loaded from it.

    Results are cached; subsequent calls return the cached list until
    expired() reports the cache is stale. Include directives found in
    the configuration are recursively loaded (subject to the recursion
    limit and cross-include rules).
    """
    if not self.expired():
        # We already have cached results to return; use them
        return self._cached_servers

    # Our cached response object
    self._cached_servers = list()

    # read() causes the child class to do whatever it takes for the
    # config plugin to load the data source and return unparsed content
    # None is returned if there was an error or simply no data
    content = self.read(**kwargs)
    if not isinstance(content, str):
        # Set the time our content was cached at
        self._cached_time = time.time()

        # Nothing more to do; return our empty cache list
        return self._cached_servers

    # Our Configuration format uses a default if one wasn't one detected
    # or enforced.
    config_format = \
        self.default_config_format \
        if self.config_format is None else self.config_format

    # Dynamically load our parse_ function based on our config format
    fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format))

    # Initialize our asset object
    asset = asset if isinstance(asset, AppriseAsset) else self.asset

    # Execute our config parse function which always returns a tuple
    # of our servers and our configuration
    servers, configs = fn(content=content, asset=asset)
    self._cached_servers.extend(servers)

    # Configuration files were detected; recursively populate them
    # If we have been configured to do so
    for url in configs:

        if self.recursion > 0:
            # Attempt to acquire the schema at the very least to allow
            # our configuration based urls.
            schema = GET_SCHEMA_RE.match(url)
            if schema is None:
                # Plan B is to assume we're dealing with a file
                schema = 'file'
                if not os.path.isabs(url):
                    # We're dealing with a relative path; prepend
                    # our current config path
                    url = os.path.join(self.config_path, url)

                url = '{}://{}'.format(schema, URLBase.quote(url))

            else:
                # Ensure our schema is always in lower case
                schema = schema.group('schema').lower()

                # Some basic validation
                if schema not in C_MGR:
                    ConfigBase.logger.warning(
                        'Unsupported include schema {}.'.format(schema))
                    continue

            # CWE-312 (Secure Logging) Handling: mask credentials
            # before the URL appears in any log message
            loggable_url = url if not asset.secure_logging \
                else cwe312_url(url)

            # Parse our url details of the server object as dictionary
            # containing all of the information parsed from our URL
            results = C_MGR[schema].parse_url(url)
            if not results:
                # Failed to parse the server URL
                self.logger.warning(
                    'Unparseable include URL {}'.format(loggable_url))
                continue

            # Handle cross inclusion based on allow_cross_includes
            # rules: STRICT only allows same-schema includes (unless
            # insecure_includes was set); NEVER blocks unconditionally
            if (C_MGR[schema].allow_cross_includes ==
                    common.ContentIncludeMode.STRICT
                    and schema not in self.schemas()
                    and not self.insecure_includes) or C_MGR[schema] \
                    .allow_cross_includes == \
                    common.ContentIncludeMode.NEVER:

                # Prevent the loading if insecure base protocols
                ConfigBase.logger.warning(
                    'Including {}:// based configuration is prohibited. '
                    'Ignoring URL {}'.format(schema, loggable_url))
                continue

            # Prepare our Asset Object
            results['asset'] = asset

            # No cache is required because we're just lumping this in
            # and associating it with the cache value we've already
            # declared (prior to our recursion)
            results['cache'] = False

            # Recursion can never be parsed from the URL; we decrement
            # it one level
            results['recursion'] = self.recursion - 1

            # Insecure Includes flag can never be parsed from the URL
            results['insecure_includes'] = self.insecure_includes

            try:
                # Attempt to create an instance of our plugin using the
                # parsed URL information
                cfg_plugin = C_MGR[results['schema']](**results)

            except Exception as e:
                # the arguments are invalid or can not be used.
                self.logger.warning(
                    'Could not load include URL: {}'.format(loggable_url))
                self.logger.debug('Loading Exception: {}'.format(str(e)))
                continue

            # if we reach here, we can now add this servers found
            # in this configuration file to our list
            self._cached_servers.extend(
                cfg_plugin.servers(asset=asset))

            # We no longer need our configuration object
            del cfg_plugin

        else:
            # CWE-312 (Secure Logging) Handling
            loggable_url = url if not asset.secure_logging \
                else cwe312_url(url)

            self.logger.debug(
                'Recursion limit reached; ignoring Include URL: %s',
                loggable_url)

    if self._cached_servers:
        self.logger.info(
            'Loaded {} entries from {}'.format(
                len(self._cached_servers),
                self.url(privacy=asset.secure_logging)))
    else:
        self.logger.warning(
            'Failed to load Apprise configuration from {}'.format(
                self.url(privacy=asset.secure_logging)))

    # Set the time our content was cached at
    self._cached_time = time.time()

    return self._cached_servers
|
(self, asset=None, **kwargs)
|
722,016 |
apprise.common
|
ConfigFormat
|
A list of pre-defined config formats that can be passed via the
apprise library.
|
class ConfigFormat:
    """
    Enumerates the pre-defined configuration formats that can be passed
    via the apprise library.
    """

    # Plain text: a list of URLs delimited by new lines; a
    # pound/hashtag (#) or semi-colon (;) introduces a comment.
    TEXT = 'text'

    # YAML: allows a richer experience when setting up your apprise
    # configuration files.
    YAML = 'yaml'
()
|
722,017 |
apprise.common
|
ContentIncludeMode
|
The different Content inclusion modes. All content based plugins will
have one of these associated with it.
|
class ContentIncludeMode:
    """
    The different content inclusion modes; every content based plugin
    carries exactly one of these.
    """

    # Same-type inclusion only (e.g. a file:// may include a file://).
    # Cross-type inclusion requires the insecure_includes flag, in
    # which case STRICT behaves like ALWAYS.
    STRICT = 'strict'

    # Inclusion of this content type is never permitted
    NEVER = 'never'

    # Inclusion of this content type is always permitted
    ALWAYS = 'always'
|
()
|
722,018 |
apprise.common
|
ContentLocation
|
This is primarily used for handling file attachments. The idea is
to track the source of the attachment itself. We don't want
remote calls to a server to access local attachments for example.
By knowing the attachment type and cross-associating it with how
we plan on accessing the content, we can make a judgement call
(for security reasons) if we will allow it.
Obviously local uses of apprise can access both local and remote
type files.
|
class ContentLocation:
    """
    Tracks where attachment content originates so that a (security)
    judgement call can be made on whether access is permitted: remote
    calls to a server must not read local attachments, while local use
    of apprise may access both local and remote type files.
    """

    # Content resides locally (on the same server as apprise)
    LOCAL = 'local'

    # Content resides at a remote location
    HOSTED = 'hosted'

    # Content can not be accessed
    INACCESSIBLE = 'n/a'
|
()
|
722,019 |
apprise.logger
|
LogCapture
|
A class used to allow one to instantiate loggers that write to
memory for temporary purposes. e.g.:
1. with LogCapture() as captured:
2.
3. # Send our notification(s)
4. aobj.notify("hello world")
5.
6. # retrieve our logs produced by the above call via our
7. # `captured` StringIO object we have access to within the `with`
8. # block here:
9. print(captured.getvalue())
|
class LogCapture:
    """
    A context manager used to temporarily capture logger output in
    memory (or to a file) for the duration of a `with` block. e.g.:

       1. with LogCapture() as captured:
       2.
       3.     # Send our notification(s)
       4.     aobj.notify("hello world")
       5.
       6.     # retrieve our logs produced by the above call via our
       7.     # `captured` StringIO object we have access to within
       8.     # the `with` block here:
       9.     print(captured.getvalue())
    """
    def __init__(self, path=None, level=None, name=LOGGER_NAME, delete=True,
                 fmt='%(asctime)s - %(levelname)s - %(message)s'):
        """
        Instantiate a temporary log capture object.

        If a path is specified, then log content is sent to that file
        instead of a StringIO object.

        You can optionally specify a logging level such as logging.INFO
        if you wish, otherwise by default the script uses whatever
        logging has been set globally. If you set delete to `False`
        then when using log files, they are not automatically cleaned
        up afterwards.

        Optionally over-ride the fmt as well if you wish.
        """
        # Our memory buffer placeholder
        self.__buffer_ptr = StringIO()

        # Store our file path as it will determine whether or not we write to
        # memory and a file
        self.__path = path
        self.__delete = delete

        # Our logging level tracking
        self.__level = level
        self.__restore_level = None

        # Acquire a pointer to our logger
        self.__logger = logging.getLogger(name)

        # Prepare our handler: a StreamHandler over the memory buffer,
        # or a FileHandler (append mode) when a path was supplied
        self.__handler = logging.StreamHandler(self.__buffer_ptr) \
            if not self.__path else logging.FileHandler(
                self.__path, mode='a', encoding='utf-8')

        # Use the specified level, otherwise take on the already
        # effective level of our logger
        self.__handler.setLevel(
            self.__level if self.__level is not None
            else self.__logger.getEffectiveLevel())

        # Prepare our formatter
        self.__handler.setFormatter(logging.Formatter(fmt))

    def __enter__(self):
        """
        Allows logger manipulation within a 'with' block.

        Returns the readable buffer (a StringIO, or a read-mode file
        handle when a path was supplied).
        """
        if self.__level is not None:
            # Temporary adjust our log level if required
            self.__restore_level = self.__logger.getEffectiveLevel()

            if self.__restore_level > self.__level:
                # Bump our log level up for the duration of our `with`
                self.__logger.setLevel(self.__level)

            else:
                # No restoration required
                self.__restore_level = None

        else:
            # Do nothing but enforce that we have nothing to restore to
            self.__restore_level = None

        if self.__path:
            # If a path has been identified, ensure we can write to the path
            # and that the file exists
            with open(self.__path, 'a'):
                os.utime(self.__path, None)

            # Update our buffer pointer to a read handle on the same
            # file the handler writes to
            self.__buffer_ptr = open(self.__path, 'r')

        # Add our handler
        self.__logger.addHandler(self.__handler)

        # return our memory pointer
        return self.__buffer_ptr

    def __exit__(self, exc_type, exc_value, tb):
        """
        Removes the handler gracefully when the with block has
        completed; file captures are closed and (optionally) deleted.
        """
        # Flush our content
        self.__handler.flush()
        self.__buffer_ptr.flush()

        # Drop our handler
        self.__logger.removeHandler(self.__handler)

        if self.__restore_level is not None:
            # Restore level
            self.__logger.setLevel(self.__restore_level)

        if self.__path:
            # Close our file pointer
            self.__buffer_ptr.close()
            self.__handler.close()

            if self.__delete:
                try:
                    # Always remove file afterwards
                    os.unlink(self.__path)

                except OSError:
                    # It's okay if the file does not exist
                    pass

        if exc_type is not None:
            # pass exception on if one was generated
            return False

        return True
|
(path=None, level=None, name='apprise', delete=True, fmt='%(asctime)s - %(levelname)s - %(message)s')
|
722,020 |
apprise.logger
|
__enter__
|
Allows logger manipulation within a 'with' block
|
def __enter__(self):
"""
Allows logger manipulation within a 'with' block
"""
if self.__level is not None:
# Temporary adjust our log level if required
self.__restore_level = self.__logger.getEffectiveLevel()
if self.__restore_level > self.__level:
# Bump our log level up for the duration of our `with`
self.__logger.setLevel(self.__level)
else:
# No restoration required
self.__restore_level = None
else:
# Do nothing but enforce that we have nothing to restore to
self.__restore_level = None
if self.__path:
# If a path has been identified, ensure we can write to the path
# and that the file exists
with open(self.__path, 'a'):
os.utime(self.__path, None)
# Update our buffer pointer
self.__buffer_ptr = open(self.__path, 'r')
# Add our handler
self.__logger.addHandler(self.__handler)
# return our memory pointer
return self.__buffer_ptr
|
(self)
|
722,021 |
apprise.logger
|
__exit__
|
removes the handler gracefully when the with block has completed
|
def __exit__(self, exc_type, exc_value, tb):
"""
removes the handler gracefully when the with block has completed
"""
# Flush our content
self.__handler.flush()
self.__buffer_ptr.flush()
# Drop our handler
self.__logger.removeHandler(self.__handler)
if self.__restore_level is not None:
# Restore level
self.__logger.setLevel(self.__restore_level)
if self.__path:
# Close our file pointer
self.__buffer_ptr.close()
self.__handler.close()
if self.__delete:
try:
# Always remove file afterwards
os.unlink(self.__path)
except OSError:
# It's okay if the file does not exist
pass
if exc_type is not None:
# pass exception on if one was generated
return False
return True
|
(self, exc_type, exc_value, tb)
|
722,022 |
apprise.logger
|
__init__
|
Instantiate a temporary log capture object
If a path is specified, then log content is sent to that file instead
of a StringIO object.
You can optionally specify a logging level such as logging.INFO if you
wish, otherwise by default the script uses whatever logging has been
set globally. If you set delete to `False` then when using log files,
they are not automatically cleaned up afterwards.
Optionally over-ride the fmt as well if you wish.
|
def __init__(self, path=None, level=None, name=LOGGER_NAME, delete=True,
fmt='%(asctime)s - %(levelname)s - %(message)s'):
"""
Instantiate a temporary log capture object
If a path is specified, then log content is sent to that file instead
of a StringIO object.
You can optionally specify a logging level such as logging.INFO if you
wish, otherwise by default the script uses whatever logging has been
set globally. If you set delete to `False` then when using log files,
they are not automatically cleaned up afterwards.
Optionally over-ride the fmt as well if you wish.
"""
# Our memory buffer placeholder
self.__buffer_ptr = StringIO()
# Store our file path as it will determine whether or not we write to
# memory and a file
self.__path = path
self.__delete = delete
# Our logging level tracking
self.__level = level
self.__restore_level = None
# Acquire a pointer to our logger
self.__logger = logging.getLogger(name)
# Prepare our handler
self.__handler = logging.StreamHandler(self.__buffer_ptr) \
if not self.__path else logging.FileHandler(
self.__path, mode='a', encoding='utf-8')
# Use the specified level, otherwise take on the already
# effective level of our logger
self.__handler.setLevel(
self.__level if self.__level is not None
else self.__logger.getEffectiveLevel())
# Prepare our formatter
self.__handler.setFormatter(logging.Formatter(fmt))
|
(self, path=None, level=None, name='apprise', delete=True, fmt='%(asctime)s - %(levelname)s - %(message)s')
|
722,023 |
apprise.plugins.base
|
NotifyBase
|
This is the base class for all notification services
|
class NotifyBase(URLBase):
"""
This is the base class for all notification services
"""
# An internal flag used to test the state of the plugin. If set to
# False, then the plugin is not used. Plugins can disable themselves
# due to environment issues (such as missing libraries, or platform
# dependencies that are not present). By default all plugins are
# enabled.
enabled = True
# The category allows for parent inheritance of this object to alter
# this when it's function/use is intended to behave differently. The
# following category types exist:
#
# native: Is a native plugin written/stored in `apprise/plugins/Notify*`
# custom: Is a custom plugin written/stored in a users plugin directory
# that they loaded at execution time.
category = 'native'
# Some plugins may require additional packages above what is provided
# already by Apprise.
#
# Use this section to relay this information to the users of the script to
# help guide them with what they need to know if they plan on using your
# plugin. The below configuration should otherwise accommodate all normal
# situations and will not require any updating:
requirements = {
# Use the description to provide a human interpretable description of
# what is required to make the plugin work. This is only necessary
# if there are package dependencies. Setting this to default will
# cause a general response to be returned. Only set this if you plan
# on over-riding the default. Always consider language support here.
# So before providing a value do the following in your code base:
#
# from apprise.AppriseLocale import gettext_lazy as _
#
# 'details': _('My detailed requirements')
'details': None,
# Define any required packages needed for the plugin to run. This is
# an array of strings that simply look like lines residing in a
# `requirements.txt` file...
#
# As an example, an entry may look like:
# 'packages_required': [
# 'cryptography < 3.4`,
# ]
'packages_required': [],
# Recommended packages identify packages that are not required to make
# your plugin work, but would improve it's use or grant it access to
# full functionality (that might otherwise be limited).
# Similar to `packages_required`, you would identify each entry in
# the array as you would in a `requirements.txt` file.
#
# - Do not re-provide entries already in the `packages_required`
'packages_recommended': [],
}
# The services URL
service_url = None
# A URL that takes you to the setup/help of the specific protocol
setup_url = None
# Most Servers do not like more then 1 request per 5 seconds, so 5.5 gives
# us a safe play range. Override the one defined already in the URLBase
request_rate_per_sec = 5.5
# Allows the user to specify the NotifyImageSize object
image_size = None
# The maximum allowable characters allowed in the body per message
body_maxlen = 32768
# Defines the maximum allowable characters in the title; set this to zero
# if a title can't be used. Titles that are not used but are defined are
# automatically placed into the body
title_maxlen = 250
# Set the maximum line count; if this is set to anything larger then zero
# the message (prior to it being sent) will be truncated to this number
# of lines. Setting this to zero disables this feature.
body_max_line_count = 0
# Default Notify Format
notify_format = NotifyFormat.TEXT
# Default Overflow Mode
overflow_mode = OverflowMode.UPSTREAM
# Default Emoji Interpretation
interpret_emojis = False
# Support Attachments; this defaults to being disabled.
# Since apprise allows you to send attachments without a body or title
# defined, by letting Apprise know the plugin won't support attachments
# up front, it can quickly pass over and ignore calls to these end points.
# You must set this to true if your application can handle attachments.
# You must also consider a flow change to your notification if this is set
# to True as well as now there will be cases where both the body and title
# may not be set. There will never be a case where a body, or attachment
# isn't set in the same call to your notify() function.
attachment_support = False
# Default Title HTML Tagging
# When a title is specified for a notification service that doesn't accept
# titles, by default apprise tries to give a pleasant view and convert the
# title so that it can be placed into the body. The default is to just
# use a <b> tag. The below causes the <b>title</b> to get generated:
default_html_tag_id = 'b'
# Here is where we define all of the arguments we accept on the url
# such as: schema://whatever/?overflow=upstream&format=text
# These act the same way as tokens except they are optional and/or
# have default values set if mandatory. This rule must be followed
template_args = dict(URLBase.template_args, **{
'overflow': {
'name': _('Overflow Mode'),
'type': 'choice:string',
'values': OVERFLOW_MODES,
# Provide a default
'default': overflow_mode,
# look up default using the following parent class value at
# runtime. The variable name identified here (in this case
# overflow_mode) is checked and it's result is placed over-top of
# the 'default'. This is done because once a parent class inherits
# this one, the overflow_mode already set as a default 'could' be
# potentially over-ridden and changed to a different value.
'_lookup_default': 'overflow_mode',
},
'format': {
'name': _('Notify Format'),
'type': 'choice:string',
'values': NOTIFY_FORMATS,
# Provide a default
'default': notify_format,
# look up default using the following parent class value at
# runtime.
'_lookup_default': 'notify_format',
},
'emojis': {
'name': _('Interpret Emojis'),
# SSL Certificate Authority Verification
'type': 'bool',
# Provide a default
'default': interpret_emojis,
# look up default using the following parent class value at
# runtime.
'_lookup_default': 'interpret_emojis',
},
})
#
# Overflow Defaults / Configuration applicable to SPLIT mode only
#
# Display Count [X/X]
# ^^^^^^
# \\\\\\
# 6 characters (space + count)
# Display Count [XX/XX]
# ^^^^^^^^
# \\\\\\\\
# 8 characters (space + count)
# Display Count [XXX/XXX]
# ^^^^^^^^^^
# \\\\\\\\\\
# 10 characters (space + count)
# Display Count [XXXX/XXXX]
# ^^^^^^^^^^^^
# \\\\\\\\\\\\
# 12 characters (space + count)
#
# Given the above + some buffer we come up with the following:
# If this value is exceeded, display counts automatically shut off
overflow_max_display_count_width = 12
# The number of characters to reserver for whitespace buffering
# This is detected automatically, but you can enforce a value if
# you desire:
overflow_buffer = 0
# the min accepted length of a title to allow for a counter display
overflow_display_count_threshold = 130
# Whether or not when over-flow occurs, if the title should be repeated
# each time the message is split up
# - None: Detect
# - True: Always display title once
# - False: Display the title for each occurance
overflow_display_title_once = None
# If this is set to to True:
# The title_maxlen should be considered as a subset of the body_maxlen
# Hence: len(title) + len(body) should never be greater then body_maxlen
#
# If set to False, then there is no corrorlation between title_maxlen
# restrictions and that of body_maxlen
overflow_amalgamate_title = False
def __init__(self, **kwargs):
    """
    Initialize some general configuration that will keep things consistent
    when working with the notifiers that will inherit this class.

    Recognized keyword arguments include 'emojis', 'format' and
    'overflow'; an invalid 'format' or 'overflow' value raises
    TypeError.
    """

    super().__init__(**kwargs)

    # Store our interpret_emoji's setting
    # If asset emoji value is set to a default of True and the user
    # specifies it to be false, this is accepted and False over-rides.
    #
    # If asset emoji value is set to a default of None, a user may
    # optionally over-ride this and set it to True from the Apprise
    # URL. ?emojis=yes
    #
    # If asset emoji value is set to a default of False, then all emoji's
    # are turned off (no user over-rides allowed)
    #

    # Take a default
    self.interpret_emojis = self.asset.interpret_emojis
    if 'emojis' in kwargs:
        # possibly over-ride default
        self.interpret_emojis = True if self.interpret_emojis \
            in (None, True) and \
            parse_bool(
                kwargs.get('emojis', False),
                default=NotifyBase.template_args['emojis']['default']) \
            else False

    if 'format' in kwargs:
        # Store the specified format if specified
        notify_format = kwargs.get('format', '')
        if notify_format.lower() not in NOTIFY_FORMATS:
            msg = 'Invalid notification format {}'.format(notify_format)
            self.logger.error(msg)
            raise TypeError(msg)

        # Provide override
        # NOTE(review): the stored value keeps its original case even
        # though validation lowercases it — confirm downstream
        # comparisons expect this
        self.notify_format = notify_format

    if 'overflow' in kwargs:
        # Store the specified overflow mode if specified
        overflow = kwargs.get('overflow', '')
        if overflow.lower() not in OVERFLOW_MODES:
            msg = 'Invalid overflow method {}'.format(overflow)
            self.logger.error(msg)
            raise TypeError(msg)

        # Provide override
        self.overflow_mode = overflow
def image_url(self, notify_type, logo=False, extension=None,
              image_size=None):
    """
    Return the image URL associated with the notification type, or
    None when this plugin carries no image support or the type is
    unrecognized.
    """
    # Plugins without an image size never carry images
    if not self.image_size:
        return None

    # Reject unknown notification types
    if notify_type not in NOTIFY_TYPES:
        return None

    # Allow a caller-supplied size to take precedence
    size = self.image_size if image_size is None else image_size
    return self.asset.image_url(
        notify_type=notify_type, image_size=size,
        logo=logo, extension=extension,
    )
def image_path(self, notify_type, extension=None):
    """
    Return the local path of the image associated with the notification
    type, or None when no image is available.
    """
    # No image support, or unrecognized notification type
    if not self.image_size or notify_type not in NOTIFY_TYPES:
        return None

    return self.asset.image_path(
        notify_type=notify_type, image_size=self.image_size,
        extension=extension,
    )
def image_raw(self, notify_type, extension=None):
    """
    Return the raw image content associated with the notification type,
    or None when no image is available.
    """
    # No image support, or unrecognized notification type
    if not self.image_size or notify_type not in NOTIFY_TYPES:
        return None

    return self.asset.image_raw(
        notify_type=notify_type, image_size=self.image_size,
        extension=extension,
    )
def color(self, notify_type, color_type=None):
    """
    Return the color (e.g. an html hex code) associated with the
    notification type, or None for unrecognized types.
    """
    if notify_type not in NOTIFY_TYPES:
        return None

    return self.asset.color(
        notify_type=notify_type, color_type=color_type)
def notify(self, *args, **kwargs):
    """
    Deliver the notification(s); returns True only if every generated
    send() call succeeded, and False when the calls could not even be
    prepared.
    """
    try:
        # Expand our arguments into the keyword sets send() requires
        send_calls = list(self._build_send_calls(*args, **kwargs))

    except TypeError:
        # Internal error
        return False

    # Issue every call even if an earlier one failed (hence the list
    # instead of a short-circuiting generator), then combine results
    results = [self.send(**params) for params in send_calls]
    return all(results)
async def async_notify(self, *args, **kwargs):
    """
    Performs notification for asynchronous callers.

    Returns True only if every send() call succeeded; False is also
    returned when the send calls could not be prepared at all.
    """
    try:
        # Build a list of dictionaries that can be used to call send().
        send_calls = list(self._build_send_calls(*args, **kwargs))

    except TypeError:
        # Internal error
        return False

    # get_running_loop() is the supported way to acquire the active
    # event loop from inside a coroutine (get_event_loop() here is
    # deprecated since Python 3.10)
    loop = asyncio.get_running_loop()

    # Wrap each call in a coroutine that uses the default executor.
    # TODO: In the future, allow plugins to supply a native
    # async_send() method.
    async def do_send(**kwargs2):
        send = partial(self.send, **kwargs2)
        return await loop.run_in_executor(None, send)

    # gather() all calls in parallel.
    the_cors = (do_send(**kwargs2) for kwargs2 in send_calls)
    return all(await asyncio.gather(*the_cors))
def _build_send_calls(self, body=None, title=None,
                      notify_type=NotifyType.INFO, overflow=None,
                      attach=None, body_format=None, **kwargs):
    """
    Get a list of dictionaries that can be used to call send() or
    (in the future) async_send().

    Raises TypeError when the plugin is disabled, when neither a body
    nor an attachment was supplied, when the plugin can't carry an
    attachment-only payload, or when attachments fail to prepare.
    """
    if not self.enabled:
        # Deny notifications issued to services that are disabled
        msg = f"{self.service_name} is currently disabled on this system."
        self.logger.warning(msg)
        raise TypeError(msg)

    # Prepare attachments if required
    if attach is not None and not isinstance(attach, AppriseAttachment):
        try:
            attach = AppriseAttachment(attach, asset=self.asset)

        except TypeError:
            # bad attachments
            raise

        # Handle situations where the body is None
        body = '' if not body else body

    elif not (body or attach):
        # If there is not an attachment at the very least, a body must be
        # present
        msg = "No message body or attachment was specified."
        self.logger.warning(msg)
        raise TypeError(msg)

    if not body and not self.attachment_support:
        # If no body was specified, then we know that an attachment
        # was. This is logic checked earlier in the code.
        #
        # Knowing this, if the plugin itself doesn't support sending
        # attachments, there is nothing further to do here, just move
        # along.
        msg = f"{self.service_name} does not support attachments; " \
            " service skipped"
        self.logger.warning(msg)
        raise TypeError(msg)

    # Handle situations where the title is None
    title = '' if not title else title

    # Truncate flag set with attachments ensures that only 1
    # attachment passes through. In the event there could be many
    # services specified, we only want to do this logic once.
    # The logic is only applicable if there was more than 1 attachment
    # specified
    overflow = self.overflow_mode if overflow is None else overflow
    if attach and len(attach) > 1 and overflow == OverflowMode.TRUNCATE:
        # Save first attachment
        _attach = AppriseAttachment(attach[0], asset=self.asset)
    else:
        # reference same attachment
        _attach = attach

    # Apply our overflow (if defined)
    for chunk in self._apply_overflow(
            body=body, title=title, overflow=overflow,
            body_format=body_format):

        # Send notification
        yield dict(
            body=chunk['body'], title=chunk['title'],
            notify_type=notify_type, attach=_attach,
            body_format=body_format
        )
def _apply_overflow(self, body, title=None, overflow=None,
                    body_format=None):
    """
    Takes the message body and title as input. This function then
    applies any defined overflow restrictions associated with the
    notification service and may alter the message if/as required.

    The function will always return a list object in the following
    structure:
        [
            {
                title: 'the title goes here',
                body: 'the message body goes here',
            },
            {
                title: 'the title goes here',
                body: 'the continued message body goes here',
            },
        ]
    """

    response = list()

    # tidy
    title = '' if not title else title.strip()
    body = '' if not body else body.rstrip()

    if overflow is None:
        # default
        overflow = self.overflow_mode

    # When the plugin can't display a title at all, fold the title
    # into the body using the active notification format
    if self.title_maxlen <= 0 and len(title) > 0:
        if self.notify_format == NotifyFormat.HTML:
            # Content is appended to body as html
            body = '<{open_tag}>{title}</{close_tag}>' \
                '<br />\r\n{body}'.format(
                    open_tag=self.default_html_tag_id,
                    title=title,
                    close_tag=self.default_html_tag_id,
                    body=body)

        elif self.notify_format == NotifyFormat.MARKDOWN and \
                body_format == NotifyFormat.TEXT:
            # Content is appended to body as markdown
            title = title.lstrip('\r\n \t\v\f#-')
            if title:
                # Content is appended to body as text
                body = '# {}\r\n{}'.format(title, body)

        else:
            # Content is appended to body as text
            body = '{}\r\n{}'.format(title, body)

        title = ''

    # Enforce the line count first always
    if self.body_max_line_count > 0:
        # Limit results to just the first 2 line otherwise
        # there is just to much content to display
        body = re.split(r'\r*\n', body)
        body = '\r\n'.join(body[0:self.body_max_line_count])

    if overflow == OverflowMode.UPSTREAM:
        # Nothing more to do
        response.append({'body': body, 'title': title})
        return response

    # a value of '2' allows for the \r\n that is applied when
    # amalgamating the title
    overflow_buffer = max(2, self.overflow_buffer) \
        if (self.title_maxlen == 0 and len(title)) \
        else self.overflow_buffer

    #
    # If we reach here in our code, then we're using TRUNCATE, or SPLIT
    # actions which require some math to handle the data
    #

    # Handle situations where our body and title are amalgamated into one
    # calculation
    title_maxlen = self.title_maxlen \
        if not self.overflow_amalgamate_title \
        else min(len(title) + self.overflow_max_display_count_width,
                 self.title_maxlen, self.body_maxlen)

    if len(title) > title_maxlen:
        # Truncate our Title
        title = title[:title_maxlen].rstrip()

    if self.overflow_amalgamate_title and (
            self.body_maxlen - overflow_buffer) >= title_maxlen:
        body_maxlen = (self.body_maxlen if not title else (
            self.body_maxlen - title_maxlen)) - overflow_buffer
    else:
        # status quo
        body_maxlen = self.body_maxlen \
            if not self.overflow_amalgamate_title else \
            (self.body_maxlen - overflow_buffer)

    if body_maxlen > 0 and len(body) <= body_maxlen:
        # Everything fits in a single message; no overflow handling
        response.append({'body': body, 'title': title})
        return response

    if overflow == OverflowMode.TRUNCATE:
        # Truncate our body and return
        response.append({
            'body': body[:body_maxlen].lstrip('\r\n\x0b\x0c').rstrip(),
            'title': title,
        })
        # For truncate mode, we're done now
        return response

    if self.overflow_display_title_once is None:
        # Detect if we only display our title once or not:
        overflow_display_title_once = \
            True if self.overflow_amalgamate_title and \
            body_maxlen < self.overflow_display_count_threshold \
            else False
    else:
        # Take on defined value
        overflow_display_title_once = self.overflow_display_title_once

    # If we reach here, then we are in SPLIT mode.
    # For here, we want to split the message as many times as we have to
    # in order to fit it within the designated limits.
    if not overflow_display_title_once and not (
            # edge case that can occur when overflow_display_title_once is
            # forced off, but no body exists
            self.overflow_amalgamate_title and body_maxlen <= 0):

        # Decide whether each chunk's title carries a [i/n] counter
        show_counter = title and len(body) > body_maxlen and \
            ((self.overflow_amalgamate_title and
              body_maxlen >= self.overflow_display_count_threshold) or
             (not self.overflow_amalgamate_title and
              title_maxlen > self.overflow_display_count_threshold)) and (
            title_maxlen > (self.overflow_max_display_count_width +
                            overflow_buffer) and
            self.title_maxlen >= self.overflow_display_count_threshold)

        count = 0
        template = ''
        if show_counter:
            # introduce padding
            body_maxlen -= overflow_buffer

            count = int(len(body) / body_maxlen) \
                + (1 if len(body) % body_maxlen else 0)

            # Detect padding and prepare template
            digits = len(str(count))
            template = ' [{:0%d}/{:0%d}]' % (digits, digits)

            # Update our counter
            overflow_display_count_width = 4 + (digits * 2)
            if overflow_display_count_width <= \
                    self.overflow_max_display_count_width:

                if len(title) > \
                        title_maxlen - overflow_display_count_width:
                    # Truncate our title further
                    title = title[:title_maxlen -
                                  overflow_display_count_width]

            else:  # Way to many messages to display
                show_counter = False

        response = [{
            'body': body[i: i + body_maxlen]
            .lstrip('\r\n\x0b\x0c').rstrip(),
            'title': title + (
                '' if not show_counter else
                template.format(idx, count))} for idx, i in
            enumerate(range(0, len(body), body_maxlen), start=1)]

    else:  # Display title once and move on
        response = []
        try:
            # The first chunk carries the title alongside the body
            i = range(0, len(body), body_maxlen)[0]
            response.append({
                'body': body[i: i + body_maxlen]
                .lstrip('\r\n\x0b\x0c').rstrip(),
                'title': title,
            })

        except (ValueError, IndexError):
            # IndexError:
            #  - This happens if there simply was no body to display

            # ValueError:
            #  - This happens when body_maxlen < 0 (due to title being
            #    so large)

            # No worries; send title along
            response.append({
                'body': '',
                'title': title,
            })

            # Ensure our start is set properly
            body_maxlen = 0

        # Now re-calculate based on the increased length
        for i in range(body_maxlen, len(body), self.body_maxlen):
            response.append({
                'body': body[i: i + self.body_maxlen]
                .lstrip('\r\n\x0b\x0c').rstrip(),
                'title': '',
            })

    return response
def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
    """
    Perform the actual notification delivery.

    This is an abstract hook: every notification plugin inheriting this
    class must override it with its service-specific delivery logic.

    Args:
        body (str): the (possibly overflow-split) message body.
        title (str): the message title; may be empty.
        notify_type: the notification type (defaults to NotifyType.INFO).

    Raises:
        NotImplementedError: always, unless overridden by the child class.
    """
    # Fixed typo in the error message ("implimented" -> "implemented")
    raise NotImplementedError(
        "send() is not implemented by the child class.")
def url_parameters(self, *args, **kwargs):
    """
    Return the default set of URL parameters common to every notification
    plugin; greatly simplifies URL construction inside each plugin's
    url() implementation.
    """
    # Local defaults first; the parent's parameters are merged on top, so
    # on any key collision the parent's value wins (matching the original
    # dict.update() ordering).
    local = {
        'format': self.notify_format,
        'overflow': self.overflow_mode,
    }
    inherited = super().url_parameters(*args, **kwargs)
    return {**local, **inherited}
@staticmethod
def parse_url(url, verify_host=True, plus_to_space=False):
    """Parse an Apprise URL into its component dictionary.

    Args:
        url (str): the URL to fully parse.
        verify_host (:obj:`bool`, optional): flag kept with the parsed
            URL which some child classes later use to verify SSL keys
            (if SSL transactions take place). Leave this True unless
            you have a very specific reason not to.
        plus_to_space (bool): whether '+' characters decode to spaces.

    Returns:
        A dictionary containing the fully parsed URL on success,
        otherwise None.
    """
    results = URLBase.parse_url(
        url, verify_host=verify_host, plus_to_space=plus_to_space)

    if not results:
        # Parsing failed; nothing more we can do
        return results

    qsd = results['qsd']

    # Optional ?format= override; only kept when it names a known format
    if 'format' in qsd:
        fmt = qsd.get('format')
        if fmt in NOTIFY_FORMATS:
            results['format'] = fmt
        else:
            URLBase.logger.warning(
                'Unsupported format specified {}'.format(fmt))
            results.pop('format', None)

    # Optional ?overflow= override; only kept when it names a known mode
    if 'overflow' in qsd:
        mode = qsd.get('overflow')
        if mode in OVERFLOW_MODES:
            results['overflow'] = mode
        else:
            URLBase.logger.warning(
                'Unsupported overflow specified {}'.format(mode))
            results.pop('overflow', None)

    # Optional ?emojis= boolean override
    if 'emojis' in qsd:
        results['emojis'] = parse_bool(qsd.get('emojis'))

    return results
@staticmethod
def parse_native_url(url):
    """
    This is a base class that can be optionally over-ridden by child
    classes who can build their Apprise URL based on the one provided
    by the notification service they choose to use.

    The intent of this is to make Apprise a little more user-friendly
    to people who aren't familiar with constructing URLs and wish to
    use the ones that were just provided by the notification service
    that they're using.

    This function will return None if the passed in URL can't be matched
    as belonging to the notification service. Otherwise this function
    should return the same set of results that parse_url() does.
    """
    # Base implementation: no native URL format is recognized here.
    return None
|
(**kwargs)
|
722,025 |
apprise.plugins.base
|
__init__
|
Initialize some general configuration that will keep things consistent
when working with the notifiers that will inherit this class.
|
def __init__(self, **kwargs):
    """
    Initialize some general configuration that will keep things consistent
    when working with the notifiers that will inherit this class.

    Recognized keyword arguments:
        emojis: optional user over-ride of the asset's emoji setting.
        format (str): must be one of NOTIFY_FORMATS (case-insensitive
            check); raises TypeError otherwise.
        overflow (str): must be one of OVERFLOW_MODES (case-insensitive
            check); raises TypeError otherwise.
    """
    super().__init__(**kwargs)

    # Store our interpret_emoji's setting
    # If asset emoji value is set to a default of True and the user
    # specifies it to be false, this is accepted and False over-rides.
    #
    # If asset emoji value is set to a default of None, a user may
    # optionally over-ride this and set it to True from the Apprise
    # URL. ?emojis=yes
    #
    # If asset emoji value is set to a default of False, then all emoji's
    # are turned off (no user over-rides allowed)
    #

    # Take a default
    self.interpret_emojis = self.asset.interpret_emojis
    if 'emojis' in kwargs:
        # possibly over-ride default: the over-ride can only enable
        # emojis when the asset default is None or True AND the parsed
        # boolean value is truthy; otherwise emojis are forced off
        self.interpret_emojis = True if self.interpret_emojis \
            in (None, True) and \
            parse_bool(
                kwargs.get('emojis', False),
                default=NotifyBase.template_args['emojis']['default']) \
            else False

    if 'format' in kwargs:
        # Store the specified format if specified
        notify_format = kwargs.get('format', '')
        if notify_format.lower() not in NOTIFY_FORMATS:
            msg = 'Invalid notification format {}'.format(notify_format)
            self.logger.error(msg)
            raise TypeError(msg)

        # Provide override
        # NOTE(review): the stored value keeps the caller's original
        # casing even though validation lowercases it -- confirm
        # downstream comparisons expect this
        self.notify_format = notify_format

    if 'overflow' in kwargs:
        # Store the specified overflow mode if specified
        overflow = kwargs.get('overflow', '')
        if overflow.lower() not in OVERFLOW_MODES:
            msg = 'Invalid overflow method {}'.format(overflow)
            self.logger.error(msg)
            raise TypeError(msg)

        # Provide override
        # NOTE(review): casing preserved here as well; see note above
        # on 'format'
        self.overflow_mode = overflow
|
(self, **kwargs)
|
722,026 |
apprise.url
|
__len__
|
Should be over-ridden and allows the tracking of how many targets
are associated with each URLBase object.
Default is always 1
|
def __len__(self):
    """
    Report how many notification targets this URLBase object addresses.

    Child classes that fan out to multiple targets are expected to
    over-ride this; the base implementation always counts itself as a
    single target.
    """
    return 1
|
(self)
|
722,028 |
apprise.plugins.base
|
_apply_overflow
|
Takes the message body and title as input. This function then
applies any defined overflow restrictions associated with the
notification service and may alter the message if/as required.
The function will always return a list object in the following
structure:
[
{
title: 'the title goes here',
body: 'the message body goes here',
},
{
title: 'the title goes here',
body: 'the continued message body goes here',
},
]
|
def _apply_overflow(self, body, title=None, overflow=None,
                    body_format=None):
    """
    Takes the message body and title as input. This function then
    applies any defined overflow restrictions associated with the
    notification service and may alter the message if/as required.

    The function will always return a list object in the following
    structure:
        [
            {
                title: 'the title goes here',
                body: 'the message body goes here',
            },
            {
                title: 'the title goes here',
                body: 'the continued message body goes here',
            },

        ]

    When ``overflow`` is None the plugin's configured overflow_mode is
    used; otherwise it over-rides the mode for this call only.
    """
    response = list()

    # tidy
    title = '' if not title else title.strip()
    body = '' if not body else body.rstrip()

    if overflow is None:
        # default
        overflow = self.overflow_mode

    # Services with title_maxlen <= 0 cannot display a title at all, so
    # the title is folded into the body in a format-appropriate way.
    if self.title_maxlen <= 0 and len(title) > 0:
        if self.notify_format == NotifyFormat.HTML:
            # Content is appended to body as html
            body = '<{open_tag}>{title}</{close_tag}>' \
                '<br />\r\n{body}'.format(
                    open_tag=self.default_html_tag_id,
                    title=title,
                    close_tag=self.default_html_tag_id,
                    body=body)

        elif self.notify_format == NotifyFormat.MARKDOWN and \
                body_format == NotifyFormat.TEXT:
            # Content is appended to body as markdown; strip any leading
            # markdown heading/list markers first
            title = title.lstrip('\r\n \t\v\f#-')
            if title:
                # Content is appended to body as text
                body = '# {}\r\n{}'.format(title, body)

        else:
            # Content is appended to body as text
            body = '{}\r\n{}'.format(title, body)

        title = ''

    # Enforce the line count first always
    if self.body_max_line_count > 0:
        # Limit results to just the first 2 line otherwise
        # there is just to much content to display
        body = re.split(r'\r*\n', body)
        body = '\r\n'.join(body[0:self.body_max_line_count])

    if overflow == OverflowMode.UPSTREAM:
        # Nothing more to do
        response.append({'body': body, 'title': title})
        return response

    # a value of '2' allows for the \r\n that is applied when
    # amalgamating the title
    overflow_buffer = max(2, self.overflow_buffer) \
        if (self.title_maxlen == 0 and len(title)) \
        else self.overflow_buffer

    #
    # If we reach here in our code, then we're using TRUNCATE, or SPLIT
    # actions which require some math to handle the data
    #

    # Handle situations where our body and title are amalamated into one
    # calculation
    title_maxlen = self.title_maxlen \
        if not self.overflow_amalgamate_title \
        else min(len(title) + self.overflow_max_display_count_width,
                 self.title_maxlen, self.body_maxlen)

    if len(title) > title_maxlen:
        # Truncate our Title
        title = title[:title_maxlen].rstrip()

    if self.overflow_amalgamate_title and (
            self.body_maxlen - overflow_buffer) >= title_maxlen:
        # Amalgamated mode with room to spare: the body budget shrinks by
        # the title length plus the buffer
        body_maxlen = (self.body_maxlen if not title else (
            self.body_maxlen - title_maxlen)) - overflow_buffer
    else:
        # status quo
        body_maxlen = self.body_maxlen \
            if not self.overflow_amalgamate_title else \
            (self.body_maxlen - overflow_buffer)

    if body_maxlen > 0 and len(body) <= body_maxlen:
        # Everything fits; no truncation or splitting needed
        response.append({'body': body, 'title': title})
        return response

    if overflow == OverflowMode.TRUNCATE:
        # Truncate our body and return
        response.append({
            'body': body[:body_maxlen].lstrip('\r\n\x0b\x0c').rstrip(),
            'title': title,
        })
        # For truncate mode, we're done now
        return response

    if self.overflow_display_title_once is None:
        # Detect if we only display our title once or not:
        overflow_display_title_once = \
            True if self.overflow_amalgamate_title and \
            body_maxlen < self.overflow_display_count_threshold \
            else False
    else:
        # Take on defined value
        overflow_display_title_once = self.overflow_display_title_once

    # If we reach here, then we are in SPLIT mode.
    # For here, we want to split the message as many times as we have to
    # in order to fit it within the designated limits.
    if not overflow_display_title_once and not (
            # edge case that can occur when overflow_display_title_once is
            # forced off, but no body exists
            self.overflow_amalgamate_title and body_maxlen <= 0):

        # Decide whether each chunk's title gets a ' [i/N]' counter
        # suffix; requires both room in the title and enough body budget
        show_counter = title and len(body) > body_maxlen and \
            ((self.overflow_amalgamate_title and
              body_maxlen >= self.overflow_display_count_threshold) or
             (not self.overflow_amalgamate_title and
              title_maxlen > self.overflow_display_count_threshold)) and (
            title_maxlen > (self.overflow_max_display_count_width +
                            overflow_buffer) and
            self.title_maxlen >= self.overflow_display_count_threshold)

        count = 0
        template = ''
        if show_counter:
            # introduce padding
            body_maxlen -= overflow_buffer

            # Ceiling division: total number of chunks to emit
            count = int(len(body) / body_maxlen) \
                + (1 if len(body) % body_maxlen else 0)

            # Detect padding and prepare template
            digits = len(str(count))
            template = ' [{:0%d}/{:0%d}]' % (digits, digits)

            # Update our counter
            # width = ' [' + digits + '/' + digits + ']' characters
            overflow_display_count_width = 4 + (digits * 2)
            if overflow_display_count_width <= \
                    self.overflow_max_display_count_width:

                if len(title) > \
                        title_maxlen - overflow_display_count_width:
                    # Truncate our title further
                    title = title[:title_maxlen -
                                  overflow_display_count_width]

            else:  # Way to many messages to display
                show_counter = False

        response = [{
            'body': body[i: i + body_maxlen]
            .lstrip('\r\n\x0b\x0c').rstrip(),
            'title': title + (
                '' if not show_counter else
                template.format(idx, count))} for idx, i in
            enumerate(range(0, len(body), body_maxlen), start=1)]

    else:  # Display title once and move on
        response = []
        try:
            i = range(0, len(body), body_maxlen)[0]
            response.append({
                'body': body[i: i + body_maxlen]
                .lstrip('\r\n\x0b\x0c').rstrip(),
                'title': title,
            })

        except (ValueError, IndexError):
            # IndexError:
            #  - This happens if there simply was no body to display
            # ValueError:
            #  - This happens when body_maxlen < 0 (due to title being
            #    so large)

            # No worries; send title along
            response.append({
                'body': '',
                'title': title,
            })

            # Ensure our start is set properly
            body_maxlen = 0

        # Now re-calculate based on the increased length
        for i in range(body_maxlen, len(body), self.body_maxlen):
            response.append({
                'body': body[i: i + self.body_maxlen]
                .lstrip('\r\n\x0b\x0c').rstrip(),
                'title': '',
            })

    return response
|
(self, body, title=None, overflow=None, body_format=None)
|
722,029 |
apprise.plugins.base
|
_build_send_calls
|
Get a list of dictionaries that can be used to call send() or
(in the future) async_send().
|
def _build_send_calls(self, body=None, title=None,
                      notify_type=NotifyType.INFO, overflow=None,
                      attach=None, body_format=None, **kwargs):
    """
    Get a list of dictionaries that can be used to call send() or
    (in the future) async_send().

    Yields one kwargs dictionary per overflow chunk. Raises TypeError
    when the plugin is disabled, when neither a body nor an attachment
    was supplied, when attachments cannot be prepared, or when only an
    attachment was supplied but the plugin does not support attachments.
    """
    if not self.enabled:
        # Deny notifications issued to services that are disabled
        msg = f"{self.service_name} is currently disabled on this system."
        self.logger.warning(msg)
        raise TypeError(msg)

    # Prepare attachments if required
    if attach is not None and not isinstance(attach, AppriseAttachment):
        try:
            attach = AppriseAttachment(attach, asset=self.asset)

        except TypeError:
            # bad attachments
            raise

        # Handle situations where the body is None
        body = '' if not body else body

    elif not (body or attach):
        # If there is not an attachment at the very least, a body must be
        # present
        msg = "No message body or attachment was specified."
        self.logger.warning(msg)
        raise TypeError(msg)

    if not body and not self.attachment_support:
        # If no body was specified, then we know that an attachment
        # was. This is logic checked earlier in the code.
        #
        # Knowing this, if the plugin itself doesn't support sending
        # attachments, there is nothing further to do here, just move
        # along.
        # NOTE(review): the wrapped literal below yields two spaces
        # before 'service skipped' -- confirm that is intended
        msg = f"{self.service_name} does not support attachments; " \
            " service skipped"
        self.logger.warning(msg)
        raise TypeError(msg)

    # Handle situations where the title is None
    title = '' if not title else title

    # Truncate flag set with attachments ensures that only 1
    # attachment passes through. In the event there could be many
    # services specified, we only want to do this logic once.
    # The logic is only applicable if there was more than 1 attachment
    # specified
    overflow = self.overflow_mode if overflow is None else overflow
    if attach and len(attach) > 1 and overflow == OverflowMode.TRUNCATE:
        # Save first attachment
        _attach = AppriseAttachment(attach[0], asset=self.asset)

    else:
        # reference same attachment
        _attach = attach

    # Apply our overflow (if defined)
    for chunk in self._apply_overflow(
            body=body, title=title, overflow=overflow,
            body_format=body_format):

        # Send notification
        yield dict(
            body=chunk['body'], title=chunk['title'],
            notify_type=notify_type, attach=_attach,
            body_format=body_format
        )
|
(self, body=None, title=None, notify_type='info', overflow=None, attach=None, body_format=None, **kwargs)
|
722,030 |
apprise.plugins.base
|
async_notify
|
Performs notification for asynchronous callers
|
def notify(self, *args, **kwargs):
    """
    Dispatch a notification, splitting oversized content as required.

    Returns True when every generated send() call succeeds and False
    otherwise (including when the payload could not be prepared at all).
    """
    try:
        # Materialize every send() call up-front; a TypeError here means
        # the payload itself was invalid (disabled plugin, no body, ...)
        send_calls = list(self._build_send_calls(*args, **kwargs))

    except TypeError:
        # Internal error
        return False

    # Invoke every call even if an earlier one fails (a list, not a
    # short-circuiting generator), then report the combined outcome.
    results = [self.send(**params) for params in send_calls]
    return all(results)
|
(self, *args, **kwargs)
|
722,031 |
apprise.plugins.base
|
color
|
Returns the html color (hex code) associated with the notify_type
|
def color(self, notify_type, color_type=None):
    """
    Look up the HTML color (hex code) mapped to *notify_type*.

    Unknown notification types yield None; otherwise the lookup is
    delegated to the asset associated with this plugin.
    """
    # Reject unrecognized notification types outright
    if notify_type not in NOTIFY_TYPES:
        return None

    return self.asset.color(notify_type=notify_type, color_type=color_type)
|
(self, notify_type, color_type=None)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.