Columns: index (int64, values 0 to 731k); package (string, 2 to 98 chars); name (string, 1 to 76 chars); docstring (string, 0 to 281k chars); code (string, 4 to 1.07M chars); signature (string, 2 to 42.8k chars)
721,740
aiopg.connection
__aenter__
null
def __await__(self) -> Generator[Any, None, "Connection"]: return self._connect().__await__()
(self) -> aiopg.connection.Connection
721,743
aiopg.connection
__del__
null
def __del__(self) -> None: try: _conn = self._conn except AttributeError: return if _conn is not None and not _conn.closed: self.close() warnings.warn(f"Unclosed connection {self!r}", ResourceWarning) context = {"connection": self, "message": "Unclosed connection"} if self._source_traceback is not None: context["source_traceback"] = self._source_traceback self._loop.call_exception_handler(context)
(self) -> NoneType
721,744
aiopg.connection
__init__
null
def __init__( self, dsn: Optional[str], timeout: float, echo: bool = False, enable_json: bool = True, enable_hstore: bool = True, enable_uuid: bool = True, **kwargs: Any, ): self._enable_json = enable_json self._enable_hstore = enable_hstore self._enable_uuid = enable_uuid self._loop = get_running_loop() self._waiter: Optional[ "asyncio.Future[None]" ] = self._loop.create_future() kwargs["async_"] = kwargs.pop("async", True) kwargs.pop("loop", None) # backward compatibility self._conn = psycopg2.connect(dsn, **kwargs) self._dsn = self._conn.dsn assert self._conn.isexecuting(), "Is conn an async at all???" self._fileno: Optional[int] = self._conn.fileno() self._timeout = timeout self._last_usage = self._loop.time() self._writing = False self._echo = echo self._notifies = asyncio.Queue() # type: ignore self._notifies_proxy = ClosableQueue(self._notifies, self._loop) self._weakref = weakref.ref(self) self._loop.add_reader( self._fileno, self._ready, self._weakref # type: ignore ) if self._loop.get_debug(): self._source_traceback = traceback.extract_stack(sys._getframe(1))
(self, dsn: Optional[str], timeout: float, echo: bool = False, enable_json: bool = True, enable_hstore: bool = True, enable_uuid: bool = True, **kwargs: Any)
721,745
aiopg.connection
__repr__
null
def __repr__(self) -> str: return ( f"<" f"{type(self).__module__}::{type(self).__name__} " f"isexecuting={self.isexecuting()}, " f"closed={self.closed}, " f"echo={self.echo}, " f">" )
(self) -> str
721,746
aiopg.connection
_close
Remove the connection from the event_loop and close it.
def _close(self) -> None: """Remove the connection from the event_loop and close it.""" # N.B. If connection contains uncommitted transaction the # transaction will be discarded if self._fileno is not None: self._loop.remove_reader(self._fileno) if self._writing: self._writing = False self._loop.remove_writer(self._fileno) self._conn.close() if not self._loop.is_closed(): if self._waiter is not None and not self._waiter.done(): self._waiter.set_exception( psycopg2.OperationalError("Connection closed") ) self._notifies_proxy.close( psycopg2.OperationalError("Connection closed") )
(self) -> NoneType
721,747
aiopg.connection
_connect
null
@property def notifies(self) -> ClosableQueue: """Return the notification queue (an asyncio.Queue-like object).""" return self._notifies_proxy
(self) -> aiopg.connection.Connection
721,748
aiopg.connection
_create_waiter
null
def _create_waiter(self, func_name: str) -> "asyncio.Future[None]": if self._waiter is not None: raise RuntimeError( f"{func_name}() called while another coroutine " f"is already waiting for incoming data" ) self._waiter = self._loop.create_future() return self._waiter
(self, func_name: str) -> _asyncio.Future[None]
721,749
aiopg.connection
_cursor
null
def cursor( self, name: Optional[str] = None, cursor_factory: Any = None, scrollable: Optional[bool] = None, withhold: bool = False, timeout: Optional[float] = None, isolation_level: Optional[IsolationLevel] = None, ) -> _ContextManager[Cursor]: """A coroutine that returns a new cursor object using the connection. *cursor_factory* argument can be used to create non-standard cursors. The argument must be subclass of `psycopg2.extensions.cursor`. *name*, *scrollable* and *withhold* parameters are not supported by psycopg in asynchronous mode. """ self._last_usage = self._loop.time() coro = self._cursor( name=name, cursor_factory=cursor_factory, scrollable=scrollable, withhold=withhold, timeout=timeout, isolation_level=isolation_level, ) return _ContextManager[Cursor](coro, _close_cursor)
(self, name: Optional[str] = None, cursor_factory: Optional[Any] = None, scrollable: Optional[bool] = None, withhold: bool = False, timeout: Optional[float] = None, isolation_level: Optional[aiopg.connection.IsolationLevel] = None) -> aiopg.connection.Cursor
721,751
aiopg.connection
_fatal_error
null
def _fatal_error(self, message: str) -> None: # Should be called from exception handler only. self._loop.call_exception_handler( { "message": message, "connection": self, } ) self.close() if self._waiter and not self._waiter.done(): self._waiter.set_exception(psycopg2.OperationalError(message))
(self, message: str) -> NoneType
721,754
aiopg.connection
_ready
null
@staticmethod def _ready(weak_self: "weakref.ref[Any]") -> None: self = cast(Connection, weak_self()) if self is None: return waiter = self._waiter try: state = self._conn.poll() while self._conn.notifies: notify = self._conn.notifies.pop(0) self._notifies.put_nowait(notify) except (psycopg2.Warning, psycopg2.Error) as exc: if self._fileno is not None: try: select.select([self._fileno], [], [], 0) except OSError as os_exc: if _is_bad_descriptor_error(os_exc): with contextlib.suppress(OSError): self._loop.remove_reader(self._fileno) # forget a bad file descriptor, don't try to # touch it self._fileno = None try: if self._writing: self._writing = False if self._fileno is not None: self._loop.remove_writer(self._fileno) except OSError as exc2: if exc2.errno != errno.EBADF: # EBADF is ok for closed file descriptor # chain exception otherwise exc2.__cause__ = exc exc = exc2 self._notifies_proxy.close(exc) if waiter is not None and not waiter.done(): waiter.set_exception(exc) else: if self._fileno is None: # connection closed if waiter is not None and not waiter.done(): waiter.set_exception( psycopg2.OperationalError("Connection closed") ) if state == psycopg2.extensions.POLL_OK: if self._writing: self._loop.remove_writer(self._fileno) # type: ignore self._writing = False if waiter is not None and not waiter.done(): waiter.set_result(None) elif state == psycopg2.extensions.POLL_READ: if self._writing: self._loop.remove_writer(self._fileno) # type: ignore self._writing = False elif state == psycopg2.extensions.POLL_WRITE: if not self._writing: self._loop.add_writer( self._fileno, self._ready, weak_self # type: ignore ) self._writing = True elif state == psycopg2.extensions.POLL_ERROR: self._fatal_error( "Fatal error on aiopg connection: " "POLL_ERROR from underlying .poll() call" ) else: self._fatal_error( f"Fatal error on aiopg connection: " f"unknown answer {state} from underlying " f".poll() call" )
(weak_self: weakref.ReferenceType[typing.Any]) -> NoneType
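The `_ready` callback above drives psycopg2's asynchronous state machine: it repeatedly calls `poll()` and arms or disarms the writer callback according to the returned `POLL_*` state. Outside asyncio, the same protocol is psycopg2's documented blocking wait loop; a minimal sketch for comparison (the `wait` helper name and the commented-out DSN are ours, not part of aiopg):

import select
import psycopg2
import psycopg2.extensions

def wait(conn):
    # Drive the POLL_* protocol to completion, blocking on select()
    # where Connection._ready instead yields back to the event loop.
    while True:
        state = conn.poll()
        if state == psycopg2.extensions.POLL_OK:
            break
        elif state == psycopg2.extensions.POLL_WRITE:
            select.select([], [conn.fileno()], [])
        elif state == psycopg2.extensions.POLL_READ:
            select.select([conn.fileno()], [], [])
        else:
            raise psycopg2.OperationalError(f"unexpected poll() state {state}")

# conn = psycopg2.connect("dbname=test", async_=True)  # hypothetical DSN
# wait(conn)  # connection is usable once wait() returns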
721,755
aiopg.connection
cancel
null
@property def raw(self) -> Any: """Underlying psycopg connection object, readonly""" return self._conn
(self) -> NoneType
721,756
aiopg.connection
close
null
def close(self) -> "asyncio.Future[None]": self._close() return create_completed_future(self._loop)
(self) -> _asyncio.Future[None]
721,758
aiopg.connection
cursor
A coroutine that returns a new cursor object using the connection. The *cursor_factory* argument can be used to create non-standard cursors; it must be a subclass of `psycopg2.extensions.cursor`. The *name*, *scrollable* and *withhold* parameters are not supported by psycopg in asynchronous mode.
def cursor( self, name: Optional[str] = None, cursor_factory: Any = None, scrollable: Optional[bool] = None, withhold: bool = False, timeout: Optional[float] = None, isolation_level: Optional[IsolationLevel] = None, ) -> _ContextManager[Cursor]: """A coroutine that returns a new cursor object using the connection. *cursor_factory* argument can be used to create non-standard cursors. The argument must be subclass of `psycopg2.extensions.cursor`. *name*, *scrollable* and *withhold* parameters are not supported by psycopg in asynchronous mode. """ self._last_usage = self._loop.time() coro = self._cursor( name=name, cursor_factory=cursor_factory, scrollable=scrollable, withhold=withhold, timeout=timeout, isolation_level=isolation_level, ) return _ContextManager[Cursor](coro, _close_cursor)
(self, name: Optional[str] = None, cursor_factory: Optional[Any] = None, scrollable: Optional[bool] = None, withhold: bool = False, timeout: Optional[float] = None, isolation_level: Optional[aiopg.connection.IsolationLevel] = None) -> aiopg.utils._ContextManager[aiopg.connection.Cursor]
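A short usage sketch for `cursor()`, assuming `conn` is an open aiopg `Connection` (see the `connect()` entry later in this section); the `_ContextManager` it returns supports `async with`, which closes the cursor on exit:

async def fetch_sum(conn):
    async with conn.cursor() as cur:
        # positional %s placeholders are bound by psycopg2
        await cur.execute("SELECT %s + %s", (1, 2))
        rows = await cur.fetchall()
        print(rows)  # [(3,)]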
721,759
aiopg.connection
get_backend_pid
Returns the PID of the backend server process.
@property def cursor_factory(self) -> Any: """The default cursor factory used by .cursor().""" return self._conn.cursor_factory
(self) -> int
721,760
aiopg.connection
get_parameter_status
Look up a current parameter setting of the server.
@property def cursor_factory(self) -> Any: """The default cursor factory used by .cursor().""" return self._conn.cursor_factory
(self, parameter: str) -> Optional[str]
721,761
aiopg.connection
get_transaction_status
Return the current session transaction status as an integer.
@property def cursor_factory(self) -> Any: """The default cursor factory used by .cursor().""" return self._conn.cursor_factory
(self) -> int
721,762
aiopg.connection
isexecuting
null
def isexecuting(self) -> bool: return self._conn.isexecuting() # type: ignore
(self) -> bool
721,763
aiopg.connection
lobject
null
@property def status(self) -> int: """A read-only integer representing the status of the connection.""" return self._conn.status # type: ignore
(self, *args: Any, **kwargs: Any) -> NoneType
721,766
aiopg.connection
set_client_encoding
null
@property def encoding(self) -> str: """Client encoding for SQL operations.""" return self._conn.encoding # type: ignore
(self, val: str) -> NoneType
721,767
aiopg.connection
set_isolation_level
Transaction isolation level. The only allowed value is ISOLATION_LEVEL_READ_COMMITTED.
@property def isolation_level(self) -> int: """Transaction isolation level. The only allowed value is ISOLATION_LEVEL_READ_COMMITTED. """ return self._conn.isolation_level # type: ignore
(self, val: int) -> NoneType
721,768
aiopg.connection
set_session
null
@property def dsn(self) -> Optional[str]: """DSN connection string. Read-only attribute representing the dsn connection string used for connecting to the PostgreSQL server. """ return self._dsn # type: ignore
(self, *args: Any, **kwargs: Any) -> NoneType
721,775
aiopg.connection
Cursor
null
class Cursor: def __init__( self, conn: "Connection", impl: Any, timeout: float, echo: bool, isolation_level: Optional[IsolationLevel] = None, ): self._conn = conn self._impl = impl self._timeout = timeout self._echo = echo self._transaction = Transaction( self, isolation_level or IsolationLevel.default ) @property def echo(self) -> bool: """Return echo mode status.""" return self._echo @property def description(self) -> Optional[Sequence[Any]]: """This read-only attribute is a sequence of 7-item sequences. Each of these sequences is a collections.namedtuple containing information describing one result column: 0. name: the name of the column returned. 1. type_code: the PostgreSQL OID of the column. 2. display_size: the actual length of the column in bytes. 3. internal_size: the size in bytes of the column associated to this column on the server. 4. precision: total number of significant digits in columns of type NUMERIC. None for other types. 5. scale: count of decimal digits in the fractional part in columns of type NUMERIC. None for other types. 6. null_ok: always None as not easy to retrieve from the libpq. This attribute will be None for operations that do not return rows or if the cursor has not had an operation invoked via the execute() method yet. """ return self._impl.description # type: ignore def close(self) -> None: """Close the cursor now.""" if not self.closed: self._impl.close() @property def closed(self) -> bool: """Read-only boolean attribute: specifies if the cursor is closed.""" return self._impl.closed # type: ignore @property def connection(self) -> "Connection": """Read-only attribute returning a reference to the `Connection`.""" return self._conn @property def raw(self) -> Any: """Underlying psycopg cursor object, readonly""" return self._impl @property def name(self) -> str: # Not supported return self._impl.name # type: ignore @property def scrollable(self) -> Optional[bool]: # Not supported return self._impl.scrollable # type: ignore @scrollable.setter def scrollable(self, val: bool) -> None: # Not supported self._impl.scrollable = val @property def withhold(self) -> bool: # Not supported return self._impl.withhold # type: ignore @withhold.setter def withhold(self, val: bool) -> None: # Not supported self._impl.withhold = val async def execute( self, operation: str, parameters: Any = None, *, timeout: Optional[float] = None, ) -> None: """Prepare and execute a database operation (query or command). Parameters may be provided as sequence or mapping and will be bound to variables in the operation. Variables are specified either with positional %s or named %({name})s placeholders. """ if timeout is None: timeout = self._timeout waiter = self._conn._create_waiter("cursor.execute") if self._echo: logger.info(operation) logger.info("%r", parameters) try: self._impl.execute(operation, parameters) except BaseException: self._conn._waiter = None raise try: await self._conn._poll(waiter, timeout) except asyncio.TimeoutError: self._impl.close() raise async def executemany(self, *args: Any, **kwargs: Any) -> None: # Not supported raise psycopg2.ProgrammingError( "executemany cannot be used in asynchronous mode" ) async def callproc( self, procname: str, parameters: Any = None, *, timeout: Optional[float] = None, ) -> None: """Call a stored database procedure with the given name. The sequence of parameters must contain one entry for each argument that the procedure expects. The result of the call is returned as modified copy of the input sequence. 
Input parameters are left untouched, output and input/output parameters replaced with possibly new values. """ if timeout is None: timeout = self._timeout waiter = self._conn._create_waiter("cursor.callproc") if self._echo: logger.info("CALL %s", procname) logger.info("%r", parameters) try: self._impl.callproc(procname, parameters) except BaseException: self._conn._waiter = None raise else: await self._conn._poll(waiter, timeout) def begin(self) -> _ContextManager[Transaction]: return _ContextManager[Transaction]( self._transaction.begin(), _commit_transaction, _rollback_transaction, ) def begin_nested(self) -> _ContextManager[Transaction]: if self._transaction.is_begin: return self._transaction.point() return _ContextManager[Transaction]( self._transaction.begin(), _commit_transaction, _rollback_transaction, ) def mogrify(self, operation: str, parameters: Any = None) -> bytes: """Return a query string after arguments binding. The byte string returned is exactly the one that would be sent to the database running the .execute() method or similar. """ ret = self._impl.mogrify(operation, parameters) assert ( not self._conn.isexecuting() ), "Don't support server side mogrify" return ret # type: ignore async def setinputsizes(self, sizes: int) -> None: """This method is exposed in compliance with the DBAPI. It currently does nothing but it is safe to call it. """ self._impl.setinputsizes(sizes) async def fetchone(self) -> Any: """Fetch the next row of a query result set. Returns a single tuple, or None when no more data is available. """ ret = self._impl.fetchone() assert ( not self._conn.isexecuting() ), "Don't support server side cursors yet" return ret async def fetchmany(self, size: Optional[int] = None) -> List[Any]: """Fetch the next set of rows of a query result. Returns a list of tuples. An empty list is returned when no more rows are available. The number of rows to fetch per call is specified by the parameter. If it is not given, the cursor's .arraysize determines the number of rows to be fetched. The method should try to fetch as many rows as indicated by the size parameter. If this is not possible due to the specified number of rows not being available, fewer rows may be returned. """ if size is None: size = self._impl.arraysize ret = self._impl.fetchmany(size) assert ( not self._conn.isexecuting() ), "Don't support server side cursors yet" return ret # type: ignore async def fetchall(self) -> List[Any]: """Fetch all (remaining) rows of a query result. Returns them as a list of tuples. An empty list is returned if there is no more record to fetch. """ ret = self._impl.fetchall() assert ( not self._conn.isexecuting() ), "Don't support server side cursors yet" return ret # type: ignore async def scroll(self, value: int, mode: str = "relative") -> None: """Scroll to a new position according to mode. If mode is relative (default), value is taken as offset to the current position in the result set, if set to absolute, value states an absolute target position. """ self._impl.scroll(value, mode) assert ( not self._conn.isexecuting() ), "Don't support server side cursors yet" @property def arraysize(self) -> int: """How many rows will be returned by fetchmany() call. This read/write attribute specifies the number of rows to fetch at a time with fetchmany(). It defaults to 1 meaning to fetch a single row at a time. """ return self._impl.arraysize # type: ignore @arraysize.setter def arraysize(self, val: int) -> None: """How many rows will be returned by fetchmany() call. 
This read/write attribute specifies the number of rows to fetch at a time with fetchmany(). It defaults to 1 meaning to fetch a single row at a time. """ self._impl.arraysize = val @property def itersize(self) -> int: # Not supported return self._impl.itersize # type: ignore @itersize.setter def itersize(self, val: int) -> None: # Not supported self._impl.itersize = val @property def rowcount(self) -> int: """Returns the number of rows that have been produced or affected. This read-only attribute specifies the number of rows that the last :meth:`execute` produced (for Data Query Language statements like SELECT) or affected (for Data Manipulation Language statements like UPDATE or INSERT). The attribute is -1 in case no .execute() has been performed on the cursor or the row count of the last operation if it can't be determined by the interface. """ return self._impl.rowcount # type: ignore @property def rownumber(self) -> int: """Row index. This read-only attribute provides the current 0-based index of the cursor in the result set or ``None`` if the index cannot be determined.""" return self._impl.rownumber # type: ignore @property def lastrowid(self) -> int: """OID of the last inserted row. This read-only attribute provides the OID of the last row inserted by the cursor. If the table wasn't created with OID support or the last operation is not a single record insert, the attribute is set to None. """ return self._impl.lastrowid # type: ignore @property def query(self) -> Optional[str]: """The last executed query string. Read-only attribute containing the body of the last query sent to the backend (including bound arguments) as a bytes string. None if no query has been executed yet. """ return self._impl.query # type: ignore @property def statusmessage(self) -> str: """The message returned by the last command.""" return self._impl.statusmessage # type: ignore @property def tzinfo_factory(self) -> datetime.tzinfo: """The time zone factory used to handle data types such as `TIMESTAMP WITH TIME ZONE`. 
""" self._impl.tzinfo_factory = val async def nextset(self) -> None: # Not supported self._impl.nextset() # raises psycopg2.NotSupportedError async def setoutputsize( self, size: int, column: Optional[int] = None ) -> None: # Does nothing self._impl.setoutputsize(size, column) async def copy_from(self, *args: Any, **kwargs: Any) -> None: raise psycopg2.ProgrammingError( "copy_from cannot be used in asynchronous mode" ) async def copy_to(self, *args: Any, **kwargs: Any) -> None: raise psycopg2.ProgrammingError( "copy_to cannot be used in asynchronous mode" ) async def copy_expert(self, *args: Any, **kwargs: Any) -> None: raise psycopg2.ProgrammingError( "copy_expert cannot be used in asynchronous mode" ) @property def timeout(self) -> float: """Return default timeout for cursor operations.""" return self._timeout def __aiter__(self) -> "Cursor": return self async def __anext__(self) -> Any: ret = await self.fetchone() if ret is not None: return ret raise StopAsyncIteration async def __aenter__(self) -> "Cursor": return self async def __aexit__( self, exc_type: Optional[Type[BaseException]], exc: Optional[BaseException], tb: Optional[TracebackType], ) -> None: self.close() def __repr__(self) -> str: return ( f"<" f"{type(self).__module__}::{type(self).__name__} " f"name={self.name}, " f"closed={self.closed}" f">" )
(conn: 'Connection', impl: Any, timeout: float, echo: bool, isolation_level: Optional[aiopg.connection.IsolationLevel] = None)
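Because `Cursor` implements `__aiter__`/`__anext__` (calling `fetchone()` until it returns `None`), a result set can also be streamed with `async for`; a sketch under the same open-connection assumption:

async def stream_series(conn):
    async with conn.cursor() as cur:
        await cur.execute("SELECT generate_series(1, 5)")
        async for row in cur:  # stops when fetchone() returns None
            print(row)         # (1,) ... (5,)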
721,776
aiopg.connection
__aenter__
null
def __aiter__(self) -> "Cursor": return self
(self) -> aiopg.connection.Cursor
721,780
aiopg.connection
__init__
null
def __init__( self, conn: "Connection", impl: Any, timeout: float, echo: bool, isolation_level: Optional[IsolationLevel] = None, ): self._conn = conn self._impl = impl self._timeout = timeout self._echo = echo self._transaction = Transaction( self, isolation_level or IsolationLevel.default )
(self, conn: aiopg.connection.Connection, impl: Any, timeout: float, echo: bool, isolation_level: Optional[aiopg.connection.IsolationLevel] = None)
721,781
aiopg.connection
__repr__
null
def __repr__(self) -> str: return ( f"<" f"{type(self).__module__}::{type(self).__name__} " f"name={self.name}, " f"closed={self.closed}" f">" )
(self) -> str
721,782
aiopg.connection
begin
null
def begin(self) -> _ContextManager[Transaction]: return _ContextManager[Transaction]( self._transaction.begin(), _commit_transaction, _rollback_transaction, )
(self) -> aiopg.utils._ContextManager[aiopg.connection.Transaction]
721,783
aiopg.connection
begin_nested
null
def begin_nested(self) -> _ContextManager[Transaction]: if self._transaction.is_begin: return self._transaction.point() return _ContextManager[Transaction]( self._transaction.begin(), _commit_transaction, _rollback_transaction, )
(self) -> aiopg.utils._ContextManager[aiopg.connection.Transaction]
721,784
aiopg.connection
callproc
Call a stored database procedure with the given name. The sequence of parameters must contain one entry for each argument that the procedure expects. The result of the call is returned as a modified copy of the input sequence. Input parameters are left untouched; output and input/output parameters are replaced with possibly new values.
@withhold.setter def withhold(self, val: bool) -> None: # Not supported self._impl.withhold = val
(self, procname: str, parameters: Optional[Any] = None, *, timeout: Optional[float] = None) -> NoneType
721,785
aiopg.connection
close
Close the cursor now.
def close(self) -> None: """Close the cursor now.""" if not self.closed: self._impl.close()
(self) -> NoneType
721,786
aiopg.connection
copy_expert
null
@tzinfo_factory.setter def tzinfo_factory(self, val: datetime.tzinfo) -> None: """The time zone factory used to handle data types such as `TIMESTAMP WITH TIME ZONE`. """ self._impl.tzinfo_factory = val
(self, *args: Any, **kwargs: Any) -> NoneType
721,789
aiopg.connection
execute
Prepare and execute a database operation (query or command). Parameters may be provided as sequence or mapping and will be bound to variables in the operation. Variables are specified either with positional %s or named %({name})s placeholders.
@withhold.setter def withhold(self, val: bool) -> None: # Not supported self._impl.withhold = val
(self, operation: str, parameters: Optional[Any] = None, *, timeout: Optional[float] = None) -> NoneType
721,790
aiopg.connection
executemany
null
@withhold.setter def withhold(self, val: bool) -> None: # Not supported self._impl.withhold = val
(self, *args: Any, **kwargs: Any) -> NoneType
721,791
aiopg.connection
fetchall
Fetch all (remaining) rows of a query result. Returns them as a list of tuples. An empty list is returned if there is no more record to fetch.
def mogrify(self, operation: str, parameters: Any = None) -> bytes: """Return a query string after arguments binding. The byte string returned is exactly the one that would be sent to the database running the .execute() method or similar. """ ret = self._impl.mogrify(operation, parameters) assert ( not self._conn.isexecuting() ), "Don't support server side mogrify" return ret # type: ignore
(self) -> List[Any]
721,792
aiopg.connection
fetchmany
Fetch the next set of rows of a query result. Returns a list of tuples. An empty list is returned when no more rows are available. The number of rows to fetch per call is specified by the parameter. If it is not given, the cursor's .arraysize determines the number of rows to be fetched. The method should try to fetch as many rows as indicated by the size parameter. If this is not possible due to the specified number of rows not being available, fewer rows may be returned.
def mogrify(self, operation: str, parameters: Any = None) -> bytes: """Return a query string after arguments binding. The byte string returned is exactly the one that would be sent to the database running the .execute() method or similar. """ ret = self._impl.mogrify(operation, parameters) assert ( not self._conn.isexecuting() ), "Don't support server side mogrify" return ret # type: ignore
(self, size: Optional[int] = None) -> List[Any]
721,793
aiopg.connection
fetchone
Fetch the next row of a query result set. Returns a single tuple, or None when no more data is available.
def mogrify(self, operation: str, parameters: Any = None) -> bytes: """Return a query string after arguments binding. The byte string returned is exactly the one that would be sent to the database running the .execute() method or similar. """ ret = self._impl.mogrify(operation, parameters) assert ( not self._conn.isexecuting() ), "Don't support server side mogrify" return ret # type: ignore
(self) -> Any
721,794
aiopg.connection
mogrify
Return a query string after arguments binding. The byte string returned is exactly the one that would be sent to the database running the .execute() method or similar.
def mogrify(self, operation: str, parameters: Any = None) -> bytes: """Return a query string after arguments binding. The byte string returned is exactly the one that would be sent to the database running the .execute() method or similar. """ ret = self._impl.mogrify(operation, parameters) assert ( not self._conn.isexecuting() ), "Don't support server side mogrify" return ret # type: ignore
(self, operation: str, parameters: Optional[Any] = None) -> bytes
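`mogrify()` is synchronous and useful for inspecting exactly what parameter binding will send to the server; a sketch (the `users` table is hypothetical):

async def show_bound_query(conn):
    async with conn.cursor() as cur:
        query = cur.mogrify(
            "INSERT INTO users (name, age) VALUES (%s, %s)",
            ("alice", 30),
        )
        print(query)  # b"INSERT INTO users (name, age) VALUES ('alice', 30)"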
721,796
aiopg.connection
scroll
Scroll to a new position according to mode. If mode is relative (default), value is taken as offset to the current position in the result set, if set to absolute, value states an absolute target position.
def mogrify(self, operation: str, parameters: Any = None) -> bytes: """Return a query string after arguments binding. The byte string returned is exactly the one that would be sent to the database running the .execute() method or similar. """ ret = self._impl.mogrify(operation, parameters) assert ( not self._conn.isexecuting() ), "Don't support server side mogrify" return ret # type: ignore
(self, value: int, mode: str = 'relative') -> NoneType
721,797
aiopg.connection
setinputsizes
This method is exposed in compliance with the DBAPI. It currently does nothing but it is safe to call it.
def mogrify(self, operation: str, parameters: Any = None) -> bytes: """Return a query string after arguments binding. The byte string returned is exactly the one that would be sent to the database running the .execute() method or similar. """ ret = self._impl.mogrify(operation, parameters) assert ( not self._conn.isexecuting() ), "Don't support server side mogrify" return ret # type: ignore
(self, sizes: int) -> NoneType
721,799
aiopg.connection
DefaultCompiler
null
class DefaultCompiler(IsolationCompiler): __slots__ = () def __init__(self, readonly: bool, deferrable: bool): super().__init__(None, readonly, deferrable) @property def name(self) -> str: return "Default"
(readonly: bool, deferrable: bool)
721,800
aiopg.connection
__init__
null
def __init__(self, readonly: bool, deferrable: bool): super().__init__(None, readonly, deferrable)
(self, readonly: bool, deferrable: bool)
721,802
aiopg.connection
begin
null
def begin(self) -> str: query = "BEGIN" if self._isolation_level is not None: query += f" ISOLATION LEVEL {self._isolation_level.upper()}" if self._readonly: query += " READ ONLY" if self._deferrable: query += " DEFERRABLE" return query
(self) -> str
721,803
aiopg.connection
commit
null
def commit(self) -> str: return "COMMIT"
(self) -> str
721,804
aiopg.connection
release_savepoint
null
def release_savepoint(self, unique_id: str) -> str: return f"RELEASE SAVEPOINT {unique_id}"
(self, unique_id: str) -> str
721,805
aiopg.connection
rollback
null
def rollback(self) -> str: return "ROLLBACK"
(self) -> str
721,806
aiopg.connection
rollback_savepoint
null
def rollback_savepoint(self, unique_id: str) -> str: return f"ROLLBACK TO SAVEPOINT {unique_id}"
(self, unique_id: str) -> str
721,807
aiopg.connection
savepoint
null
def savepoint(self, unique_id: str) -> str: return f"SAVEPOINT {unique_id}"
(self, unique_id: str) -> str
721,808
aiopg.connection
IsolationCompiler
null
class IsolationCompiler(abc.ABC): __slots__ = ("_isolation_level", "_readonly", "_deferrable") def __init__( self, isolation_level: Optional[str], readonly: bool, deferrable: bool ): self._isolation_level = isolation_level self._readonly = readonly self._deferrable = deferrable @property def name(self) -> str: return self._isolation_level or "Unknown" def savepoint(self, unique_id: str) -> str: return f"SAVEPOINT {unique_id}" def release_savepoint(self, unique_id: str) -> str: return f"RELEASE SAVEPOINT {unique_id}" def rollback_savepoint(self, unique_id: str) -> str: return f"ROLLBACK TO SAVEPOINT {unique_id}" def commit(self) -> str: return "COMMIT" def rollback(self) -> str: return "ROLLBACK" def begin(self) -> str: query = "BEGIN" if self._isolation_level is not None: query += f" ISOLATION LEVEL {self._isolation_level.upper()}" if self._readonly: query += " READ ONLY" if self._deferrable: query += " DEFERRABLE" return query def __repr__(self) -> str: return self.name
(isolation_level: Optional[str], readonly: bool, deferrable: bool)
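The compiler classes only assemble SQL strings, so their output can be checked without a database; a sketch assuming these internal classes are importable from `aiopg.connection` as the package column indicates:

from aiopg.connection import ReadCommittedCompiler

c = ReadCommittedCompiler(readonly=False, deferrable=False)
print(c.begin())                   # BEGIN ISOLATION LEVEL READ COMMITTED
print(c.savepoint("s1"))           # SAVEPOINT s1
print(c.rollback_savepoint("s1"))  # ROLLBACK TO SAVEPOINT s1
print(c.commit())                  # COMMIT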
721,809
aiopg.connection
__init__
null
def __init__( self, isolation_level: Optional[str], readonly: bool, deferrable: bool ): self._isolation_level = isolation_level self._readonly = readonly self._deferrable = deferrable
(self, isolation_level: Optional[str], readonly: bool, deferrable: bool)
721,817
aiopg.connection
IsolationLevel
An enumeration.
class IsolationLevel(enum.Enum): serializable = SerializableCompiler repeatable_read = RepeatableReadCompiler read_committed = ReadCommittedCompiler default = DefaultCompiler def __call__(self, readonly: bool, deferrable: bool) -> IsolationCompiler: return self.value(readonly, deferrable) # type: ignore
(value, names=None, *, module=None, qualname=None, type=None, start=1)
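Each enum member's value is a compiler class, so calling a member (as `Cursor.__init__` does via `isolation_level or IsolationLevel.default`) instantiates the matching compiler:

from aiopg.connection import IsolationLevel

compiler = IsolationLevel.repeatable_read(readonly=True, deferrable=False)
print(compiler.name)     # Repeatable read
print(compiler.begin())  # BEGIN ISOLATION LEVEL REPEATABLE READ READ ONLY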
721,818
aiopg.pool
Pool
Connection pool
class Pool: """Connection pool""" def __init__( self, dsn: str, minsize: int, maxsize: int, timeout: float, *, enable_json: bool, enable_hstore: bool, enable_uuid: bool, echo: bool, on_connect: Optional[Callable[[Connection], Awaitable[None]]], pool_recycle: float, **kwargs: Any, ): if minsize < 0: raise ValueError("minsize should be zero or greater") if maxsize < minsize and maxsize != 0: raise ValueError("maxsize should be not less than minsize") self._dsn = dsn self._minsize = minsize self._loop = get_running_loop() self._timeout = timeout self._recycle = pool_recycle self._enable_json = enable_json self._enable_hstore = enable_hstore self._enable_uuid = enable_uuid self._echo = echo self._on_connect = on_connect self._conn_kwargs = kwargs self._acquiring = 0 self._free: Deque[Connection] = collections.deque( maxlen=maxsize or None ) self._cond = asyncio.Condition() self._used: Set[Connection] = set() self._terminated: Set[Connection] = set() self._closing = False self._closed = False @property def echo(self) -> bool: return self._echo @property def minsize(self) -> int: return self._minsize @property def maxsize(self) -> Optional[int]: return self._free.maxlen @property def size(self) -> int: return self.freesize + len(self._used) + self._acquiring @property def freesize(self) -> int: return len(self._free) @property def timeout(self) -> float: return self._timeout async def clear(self) -> None: """Close all free connections in pool.""" async with self._cond: while self._free: conn = self._free.popleft() await conn.close() self._cond.notify() @property def closed(self) -> bool: return self._closed def close(self) -> None: """Close pool. Mark all pool connections to be closed on getting back to pool. Closed pool doesn't allow to acquire new connections. """ if self._closed: return self._closing = True def terminate(self) -> None: """Terminate pool. Close pool with instantly closing all acquired connections also. 
""" self.close() for conn in list(self._used): conn.close() self._terminated.add(conn) self._used.clear() async def wait_closed(self) -> None: """Wait for closing all pool's connections.""" if self._closed: return if not self._closing: raise RuntimeError( ".wait_closed() should be called " "after .close()" ) while self._free: conn = self._free.popleft() await conn.close() async with self._cond: while self.size > self.freesize: await self._cond.wait() self._closed = True def acquire(self) -> _ContextManager[Connection]: """Acquire free connection from the pool.""" coro = self._acquire() return _ContextManager[Connection](coro, self.release) @classmethod async def from_pool_fill(cls, *args: Any, **kwargs: Any) -> "Pool": """constructor for filling the free pool with connections, the number is controlled by the minsize parameter """ self = cls(*args, **kwargs) if self._minsize > 0: async with self._cond: await self._fill_free_pool(False) return self async def _acquire(self) -> Connection: if self._closing: raise RuntimeError("Cannot acquire connection after closing pool") async with async_timeout.timeout(self._timeout), self._cond: while True: await self._fill_free_pool(True) if self._free: conn = self._free.popleft() assert not conn.closed, conn assert conn not in self._used, (conn, self._used) self._used.add(conn) return conn else: await self._cond.wait() async def _fill_free_pool(self, override_min: bool) -> None: # iterate over free connections and remove timeouted ones n, free = 0, len(self._free) while n < free: conn = self._free[-1] if conn.closed: self._free.pop() elif -1 < self._recycle < self._loop.time() - conn.last_usage: await conn.close() self._free.pop() else: self._free.rotate() n += 1 while self.size < self.minsize: self._acquiring += 1 try: conn = await connect( self._dsn, timeout=self._timeout, enable_json=self._enable_json, enable_hstore=self._enable_hstore, enable_uuid=self._enable_uuid, echo=self._echo, **self._conn_kwargs, ) if self._on_connect is not None: await self._on_connect(conn) # raise exception if pool is closing self._free.append(conn) self._cond.notify() finally: self._acquiring -= 1 if self._free: return if override_min and (not self.maxsize or self.size < self.maxsize): self._acquiring += 1 try: conn = await connect( self._dsn, timeout=self._timeout, enable_json=self._enable_json, enable_hstore=self._enable_hstore, enable_uuid=self._enable_uuid, echo=self._echo, **self._conn_kwargs, ) if self._on_connect is not None: await self._on_connect(conn) # raise exception if pool is closing self._free.append(conn) self._cond.notify() finally: self._acquiring -= 1 async def _wakeup(self) -> None: async with self._cond: self._cond.notify() def release(self, conn: Connection) -> "asyncio.Future[None]": """Release free connection back to the connection pool.""" future = create_completed_future(self._loop) if conn in self._terminated: assert conn.closed, conn self._terminated.remove(conn) return future assert conn in self._used, (conn, self._used) self._used.remove(conn) if conn.closed: return future transaction_status = conn.raw.get_transaction_status() if transaction_status != psycopg2.extensions.TRANSACTION_STATUS_IDLE: warnings.warn( f"Invalid transaction status on " f"released connection: {transaction_status}", ResourceWarning, ) conn.close() return future if self._closing: conn.close() else: self._free.append(conn) return asyncio.ensure_future(self._wakeup(), loop=self._loop) async def cursor( self, name: Optional[str] = None, cursor_factory: Any = None, 
scrollable: Optional[bool] = None, withhold: bool = False, *, timeout: Optional[float] = None, ) -> _PoolCursorContextManager: conn = await self.acquire() cursor = await conn.cursor( name=name, cursor_factory=cursor_factory, scrollable=scrollable, withhold=withhold, timeout=timeout, ) return _PoolCursorContextManager(self, conn, cursor) def __await__(self) -> Generator[Any, Any, _PoolConnectionContextManager]: # This is not a coroutine. It is meant to enable the idiom: # # with (await pool) as conn: # <block> # # as an alternative to: # # conn = await pool.acquire() # try: # <block> # finally: # conn.release() conn = yield from self._acquire().__await__() return _PoolConnectionContextManager(self, conn) def __enter__(self) -> "Pool": raise RuntimeError( '"await" should be used as context manager expression' ) def __exit__( self, exc_type: Optional[Type[BaseException]], exc: Optional[BaseException], tb: Optional[TracebackType], ) -> None: # This must exist because __enter__ exists, even though that # always raises; that's how the with-statement works. pass # pragma: nocover async def __aenter__(self) -> "Pool": return self async def __aexit__( self, exc_type: Optional[Type[BaseException]], exc: Optional[BaseException], tb: Optional[TracebackType], ) -> None: self.close() await self.wait_closed() def __del__(self) -> None: try: self._free except AttributeError: return # frame has been cleared, __dict__ is empty if self._free: left = 0 while self._free: conn = self._free.popleft() conn.close() left += 1 warnings.warn( f"Unclosed {left} connections in {self!r}", ResourceWarning )
(dsn: str, minsize: int, maxsize: int, timeout: float, *, enable_json: bool, enable_hstore: bool, enable_uuid: bool, echo: bool, on_connect: Optional[Callable[[aiopg.connection.Connection], Awaitable[NoneType]]], pool_recycle: float, **kwargs: Any)
721,819
aiopg.pool
__aenter__
null
def __exit__( self, exc_type: Optional[Type[BaseException]], exc: Optional[BaseException], tb: Optional[TracebackType], ) -> None: # This must exist because __enter__ exists, even though that # always raises; that's how the with-statement works. pass # pragma: nocover
(self) -> aiopg.pool.Pool
721,821
aiopg.pool
__await__
null
def __await__(self) -> Generator[Any, Any, _PoolConnectionContextManager]: # This is not a coroutine. It is meant to enable the idiom: # # with (await pool) as conn: # <block> # # as an alternative to: # # conn = await pool.acquire() # try: # <block> # finally: # conn.release() conn = yield from self._acquire().__await__() return _PoolConnectionContextManager(self, conn)
(self) -> Generator[Any, Any, aiopg.pool._PoolConnectionContextManager]
721,822
aiopg.pool
__del__
null
def __del__(self) -> None: try: self._free except AttributeError: return # frame has been cleared, __dict__ is empty if self._free: left = 0 while self._free: conn = self._free.popleft() conn.close() left += 1 warnings.warn( f"Unclosed {left} connections in {self!r}", ResourceWarning )
(self) -> NoneType
721,823
aiopg.pool
__enter__
null
def __enter__(self) -> "Pool": raise RuntimeError( '"await" should be used as context manager expression' )
(self) -> aiopg.pool.Pool
721,825
aiopg.pool
__init__
null
def __init__( self, dsn: str, minsize: int, maxsize: int, timeout: float, *, enable_json: bool, enable_hstore: bool, enable_uuid: bool, echo: bool, on_connect: Optional[Callable[[Connection], Awaitable[None]]], pool_recycle: float, **kwargs: Any, ): if minsize < 0: raise ValueError("minsize should be zero or greater") if maxsize < minsize and maxsize != 0: raise ValueError("maxsize should be not less than minsize") self._dsn = dsn self._minsize = minsize self._loop = get_running_loop() self._timeout = timeout self._recycle = pool_recycle self._enable_json = enable_json self._enable_hstore = enable_hstore self._enable_uuid = enable_uuid self._echo = echo self._on_connect = on_connect self._conn_kwargs = kwargs self._acquiring = 0 self._free: Deque[Connection] = collections.deque( maxlen=maxsize or None ) self._cond = asyncio.Condition() self._used: Set[Connection] = set() self._terminated: Set[Connection] = set() self._closing = False self._closed = False
(self, dsn: str, minsize: int, maxsize: int, timeout: float, *, enable_json: bool, enable_hstore: bool, enable_uuid: bool, echo: bool, on_connect: Optional[Callable[[aiopg.connection.Connection], Awaitable[NoneType]]], pool_recycle: float, **kwargs: Any)
721,826
aiopg.pool
_acquire
null
@classmethod async def from_pool_fill(cls, *args: Any, **kwargs: Any) -> "Pool": """constructor for filling the free pool with connections, the number is controlled by the minsize parameter """ self = cls(*args, **kwargs) if self._minsize > 0: async with self._cond: await self._fill_free_pool(False) return self
(self) -> aiopg.connection.Connection
721,829
aiopg.pool
acquire
Acquire free connection from the pool.
def acquire(self) -> _ContextManager[Connection]: """Acquire free connection from the pool.""" coro = self._acquire() return _ContextManager[Connection](coro, self.release)
(self) -> aiopg.utils._ContextManager[aiopg.connection.Connection]
721,830
aiopg.pool
clear
Close all free connections in pool.
@property def timeout(self) -> float: return self._timeout
(self) -> NoneType
721,831
aiopg.pool
close
Close the pool. Mark all pool connections to be closed when they are returned to the pool. A closed pool does not allow acquiring new connections.
def close(self) -> None: """Close pool. Mark all pool connections to be closed on getting back to pool. Closed pool doesn't allow to acquire new connections. """ if self._closed: return self._closing = True
(self) -> NoneType
721,832
aiopg.pool
cursor
null
def release(self, conn: Connection) -> "asyncio.Future[None]": """Release free connection back to the connection pool.""" future = create_completed_future(self._loop) if conn in self._terminated: assert conn.closed, conn self._terminated.remove(conn) return future assert conn in self._used, (conn, self._used) self._used.remove(conn) if conn.closed: return future transaction_status = conn.raw.get_transaction_status() if transaction_status != psycopg2.extensions.TRANSACTION_STATUS_IDLE: warnings.warn( f"Invalid transaction status on " f"released connection: {transaction_status}", ResourceWarning, ) conn.close() return future if self._closing: conn.close() else: self._free.append(conn) return asyncio.ensure_future(self._wakeup(), loop=self._loop)
(self, name: Optional[str] = None, cursor_factory: Optional[Any] = None, scrollable: Optional[bool] = None, withhold: bool = False, *, timeout: Optional[float] = None) -> aiopg.pool._PoolCursorContextManager
721,833
aiopg.pool
release
Release free connection back to the connection pool.
def release(self, conn: Connection) -> "asyncio.Future[None]": """Release free connection back to the connection pool.""" future = create_completed_future(self._loop) if conn in self._terminated: assert conn.closed, conn self._terminated.remove(conn) return future assert conn in self._used, (conn, self._used) self._used.remove(conn) if conn.closed: return future transaction_status = conn.raw.get_transaction_status() if transaction_status != psycopg2.extensions.TRANSACTION_STATUS_IDLE: warnings.warn( f"Invalid transaction status on " f"released connection: {transaction_status}", ResourceWarning, ) conn.close() return future if self._closing: conn.close() else: self._free.append(conn) return asyncio.ensure_future(self._wakeup(), loop=self._loop)
(self, conn: aiopg.connection.Connection) -> _asyncio.Future[None]
721,834
aiopg.pool
terminate
Terminate the pool. Close the pool, instantly closing all acquired connections as well.
def terminate(self) -> None: """Terminate pool. Close pool with instantly closing all acquired connections also. """ self.close() for conn in list(self._used): conn.close() self._terminated.add(conn) self._used.clear()
(self) -> NoneType
721,835
aiopg.pool
wait_closed
Wait until all of the pool's connections are closed.
def terminate(self) -> None: """Terminate pool. Close pool with instantly closing all acquired connections also. """ self.close() for conn in list(self._used): conn.close() self._terminated.add(conn) self._used.clear()
(self) -> NoneType
721,836
aiopg.connection
ReadCommittedCompiler
null
class ReadCommittedCompiler(IsolationCompiler): __slots__ = () def __init__(self, readonly: bool, deferrable: bool): super().__init__("Read committed", readonly, deferrable)
(readonly: bool, deferrable: bool)
721,837
aiopg.connection
__init__
null
def __init__(self, readonly: bool, deferrable: bool): super().__init__("Read committed", readonly, deferrable)
(self, readonly: bool, deferrable: bool)
721,845
aiopg.connection
RepeatableReadCompiler
null
class RepeatableReadCompiler(IsolationCompiler): __slots__ = () def __init__(self, readonly: bool, deferrable: bool): super().__init__("Repeatable read", readonly, deferrable)
(readonly: bool, deferrable: bool)
721,846
aiopg.connection
__init__
null
def __init__(self, readonly: bool, deferrable: bool): super().__init__("Repeatable read", readonly, deferrable)
(self, readonly: bool, deferrable: bool)
721,854
aiopg.connection
SerializableCompiler
null
class SerializableCompiler(IsolationCompiler): __slots__ = () def __init__(self, readonly: bool, deferrable: bool): super().__init__("Serializable", readonly, deferrable)
(readonly: bool, deferrable: bool)
721,855
aiopg.connection
__init__
null
def __init__(self, readonly: bool, deferrable: bool): super().__init__("Serializable", readonly, deferrable)
(self, readonly: bool, deferrable: bool)
721,863
aiopg.connection
Transaction
null
class Transaction: __slots__ = ("_cursor", "_is_begin", "_isolation", "_unique_id") def __init__( self, cursor: "Cursor", isolation_level: Callable[[bool, bool], IsolationCompiler], readonly: bool = False, deferrable: bool = False, ): self._cursor = cursor self._is_begin = False self._unique_id: Optional[str] = None self._isolation = isolation_level(readonly, deferrable) @property def is_begin(self) -> bool: return self._is_begin async def begin(self) -> "Transaction": if self._is_begin: raise psycopg2.ProgrammingError( "You are trying to open a new transaction, use the save point" ) self._is_begin = True await self._cursor.execute(self._isolation.begin()) return self async def commit(self) -> None: self._check_commit_rollback() await self._cursor.execute(self._isolation.commit()) self._is_begin = False async def rollback(self) -> None: self._check_commit_rollback() if not self._cursor.closed: await self._cursor.execute(self._isolation.rollback()) self._is_begin = False async def rollback_savepoint(self) -> None: self._check_release_rollback() if not self._cursor.closed: await self._cursor.execute( self._isolation.rollback_savepoint( self._unique_id # type: ignore ) ) self._unique_id = None async def release_savepoint(self) -> None: self._check_release_rollback() await self._cursor.execute( self._isolation.release_savepoint(self._unique_id) # type: ignore ) self._unique_id = None async def savepoint(self) -> "Transaction": self._check_commit_rollback() if self._unique_id is not None: raise psycopg2.ProgrammingError("You do not shut down savepoint") self._unique_id = f"s{uuid.uuid1().hex}" await self._cursor.execute(self._isolation.savepoint(self._unique_id)) return self def point(self) -> _ContextManager["Transaction"]: return _ContextManager[Transaction]( self.savepoint(), _release_savepoint, _rollback_savepoint, ) def _check_commit_rollback(self) -> None: if not self._is_begin: raise psycopg2.ProgrammingError( "You are trying to commit " "the transaction does not open" ) def _check_release_rollback(self) -> None: self._check_commit_rollback() if self._unique_id is None: raise psycopg2.ProgrammingError("You do not start savepoint") def __repr__(self) -> str: return ( f"<{self.__class__.__name__} " f"transaction={self._isolation} id={id(self):#x}>" ) def __del__(self) -> None: if self._is_begin: warnings.warn( f"You have not closed transaction {self!r}", ResourceWarning ) if self._unique_id is not None: warnings.warn( f"You have not closed savepoint {self!r}", ResourceWarning ) async def __aenter__(self) -> "Transaction": return await self.begin() async def __aexit__( self, exc_type: Optional[Type[BaseException]], exc: Optional[BaseException], tb: Optional[TracebackType], ) -> None: if exc_type is not None: await self.rollback() else: await self.commit()
(cursor: 'Cursor', isolation_level: Callable[[bool, bool], aiopg.connection.IsolationCompiler], readonly: bool = False, deferrable: bool = False)
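Transactions are normally driven through `Cursor.begin()` and `begin_nested()` rather than by constructing `Transaction` directly; a sketch assuming an open connection and a hypothetical `tbl` table:

async def transactional_insert(conn):
    async with conn.cursor() as cur:
        async with cur.begin():             # BEGIN ... COMMIT (ROLLBACK on error)
            await cur.execute("INSERT INTO tbl (v) VALUES (%s)", (1,))
            async with cur.begin_nested():  # SAVEPOINT inside the open transaction
                await cur.execute("INSERT INTO tbl (v) VALUES (%s)", (2,))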
721,864
aiopg.connection
__aenter__
null
def __del__(self) -> None: if self._is_begin: warnings.warn( f"You have not closed transaction {self!r}", ResourceWarning ) if self._unique_id is not None: warnings.warn( f"You have not closed savepoint {self!r}", ResourceWarning )
(self) -> aiopg.connection.Transaction
721,867
aiopg.connection
__init__
null
def __init__( self, cursor: "Cursor", isolation_level: Callable[[bool, bool], IsolationCompiler], readonly: bool = False, deferrable: bool = False, ): self._cursor = cursor self._is_begin = False self._unique_id: Optional[str] = None self._isolation = isolation_level(readonly, deferrable)
(self, cursor: aiopg.connection.Cursor, isolation_level: Callable[[bool, bool], aiopg.connection.IsolationCompiler], readonly: bool = False, deferrable: bool = False)
721,868
aiopg.connection
__repr__
null
def __repr__(self) -> str: return ( f"<{self.__class__.__name__} " f"transaction={self._isolation} id={id(self):#x}>" )
(self) -> str
721,869
aiopg.connection
_check_commit_rollback
null
def _check_commit_rollback(self) -> None: if not self._is_begin: raise psycopg2.ProgrammingError( "You are trying to commit a transaction that is not open" )
(self) -> NoneType
721,870
aiopg.connection
_check_release_rollback
null
def _check_release_rollback(self) -> None: self._check_commit_rollback() if self._unique_id is None: raise psycopg2.ProgrammingError("You have not started a savepoint")
(self) -> NoneType
721,871
aiopg.connection
begin
null
@property def is_begin(self) -> bool: return self._is_begin
(self) -> aiopg.connection.Transaction
721,873
aiopg.connection
point
null
def point(self) -> _ContextManager["Transaction"]: return _ContextManager[Transaction]( self.savepoint(), _release_savepoint, _rollback_savepoint, )
(self) -> aiopg.utils._ContextManager['Transaction']
721,878
aiopg
VersionInfo
VersionInfo(major, minor, micro, releaselevel, serial)
from aiopg import VersionInfo
(major, minor, micro, releaselevel, serial)
721,884
aiopg
_parse_version
null
def _parse_version(ver: str) -> VersionInfo: RE = ( r"^" r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<micro>\d+)" r"((?P<releaselevel>[a-z]+)(?P<serial>\d+)?)?" r"$" ) match = re.match(RE, ver) if not match: raise ImportError(f"Invalid package version {ver}") try: major = int(match.group("major")) minor = int(match.group("minor")) micro = int(match.group("micro")) levels = {"rc": "candidate", "a": "alpha", "b": "beta", None: "final"} releaselevel = levels[match.group("releaselevel")] serial = int(match.group("serial")) if match.group("serial") else 0 return VersionInfo(major, minor, micro, releaselevel, serial) except Exception as e: raise ImportError(f"Invalid package version {ver}") from e
(ver: str) -> aiopg.VersionInfo
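`_parse_version` needs no database, so its behaviour is easy to check directly (it is a private helper, so importing it relies on an implementation detail):

from aiopg import _parse_version

print(_parse_version("1.4.0b2"))
# VersionInfo(major=1, minor=4, micro=0, releaselevel='beta', serial=2)
print(_parse_version("1.2.3"))
# VersionInfo(major=1, minor=2, micro=3, releaselevel='final', serial=0)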
721,885
aiopg.connection
connect
A factory for connecting to PostgreSQL. The coroutine accepts all parameters that psycopg2.connect() does, plus optional keyword-only parameters such as `timeout`. Returns an instantiated Connection object.
def connect( dsn: Optional[str] = None, *, timeout: float = TIMEOUT, enable_json: bool = True, enable_hstore: bool = True, enable_uuid: bool = True, echo: bool = False, **kwargs: Any, ) -> _ContextManager["Connection"]: """A factory for connecting to PostgreSQL. The coroutine accepts all parameters that psycopg2.connect() does plus optional keyword-only `timeout` parameters. Returns instantiated Connection object. """ connection = Connection( dsn, timeout, bool(echo), enable_hstore=enable_hstore, enable_uuid=enable_uuid, enable_json=enable_json, **kwargs, ) return _ContextManager[Connection](connection, disconnect) # type: ignore
(dsn: Optional[str] = None, *, timeout: float = 60.0, enable_json: bool = True, enable_hstore: bool = True, enable_uuid: bool = True, echo: bool = False, **kwargs: Any) -> aiopg.utils._ContextManager['Connection']
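End-to-end usage of `connect()`; the returned `_ContextManager` supports both `await` and `async with`, and the DSN below is hypothetical:

import asyncio
import aiopg

DSN = "dbname=test user=postgres host=127.0.0.1"  # hypothetical

async def main():
    async with aiopg.connect(DSN, timeout=10.0) as conn:
        async with conn.cursor() as cur:
            await cur.execute("SELECT 1")
            print(await cur.fetchone())  # (1,)

asyncio.run(main())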
721,887
aiopg.pool
create_pool
null
def create_pool( dsn: Optional[str] = None, *, minsize: int = 1, maxsize: int = 10, timeout: float = TIMEOUT, pool_recycle: float = -1.0, enable_json: bool = True, enable_hstore: bool = True, enable_uuid: bool = True, echo: bool = False, on_connect: Optional[Callable[[Connection], Awaitable[None]]] = None, **kwargs: Any, ) -> _ContextManager["Pool"]: coro = Pool.from_pool_fill( dsn, minsize, maxsize, timeout, enable_json=enable_json, enable_hstore=enable_hstore, enable_uuid=enable_uuid, echo=echo, on_connect=on_connect, pool_recycle=pool_recycle, **kwargs, ) return _ContextManager[Pool](coro, _destroy_pool)
(dsn: Optional[str] = None, *, minsize: int = 1, maxsize: int = 10, timeout: float = 60.0, pool_recycle: float = -1.0, enable_json: bool = True, enable_hstore: bool = True, enable_uuid: bool = True, echo: bool = False, on_connect: Optional[Callable[[aiopg.connection.Connection], Awaitable[NoneType]]] = None, **kwargs: Any) -> aiopg.utils._ContextManager['Pool']
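And the equivalent for `create_pool()`, which fills the pool up to `minsize` before returning; again a sketch with a hypothetical DSN:

import asyncio
import aiopg

DSN = "dbname=test user=postgres"  # hypothetical

async def main():
    async with aiopg.create_pool(DSN, minsize=1, maxsize=5) as pool:
        async with pool.acquire() as conn:  # released back to the pool on exit
            async with conn.cursor() as cur:
                await cur.execute("SELECT 42")
                print(await cur.fetchone())  # (42,)

asyncio.run(main())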
721,888
aiopg.utils
get_running_loop
null
def get_running_loop() -> asyncio.AbstractEventLoop: return __get_running_loop()
() -> asyncio.events.AbstractEventLoop
721,896
apprise.apprise
Apprise
Our Notification Manager
class Apprise: """ Our Notification Manager """ def __init__(self, servers=None, asset=None, location=None, debug=False): """ Loads a set of server urls while applying the Asset() module to each if specified. If no asset is provided, then the default asset is used. Optionally specify a global ContentLocation for a more strict means of handling Attachments. """ # Initialize a server list of URLs self.servers = list() # Assigns a central asset object that will be later passed into each # notification plugin. Assets contain information such as the local # directory images can be found in. It can also identify remote # URL paths that contain the images you want to present to the end # user. If no asset is specified, then the default one is used. self.asset = \ asset if isinstance(asset, AppriseAsset) else AppriseAsset() if servers: self.add(servers) # Initialize our locale object self.locale = AppriseLocale() # Set our debug flag self.debug = debug # Store our hosting location for optional strict rule handling # of Attachments. Setting this to None removes any attachment # restrictions. self.location = location @staticmethod def instantiate(url, asset=None, tag=None, suppress_exceptions=True): """ Returns the instance of an instantiated plugin based on the provided Server URL. If the url fails to be parsed, then None is returned. The specified url can be either a string (the URL itself) or a dictionary containing all of the components needed to instantiate the notification service. If identifying a dictionary, at the bare minimum, one must specify the schema. An example of a url dictionary object might look like: { schema: 'mailto', host: 'google.com', user: 'myuser', password: 'mypassword', } Alternatively the string is much easier to specify: mailto://user:mypassword@google.com The dictionary works well for people who are calling details() to extract the components they need to build the URL manually. """ # Initialize our result set results = None # Prepare our Asset Object asset = asset if isinstance(asset, AppriseAsset) else AppriseAsset() if isinstance(url, str): # Acquire our url tokens results = plugins.url_to_dict( url, secure_logging=asset.secure_logging) if results is None: # Failed to parse the server URL; detailed logging handled # inside url_to_dict - nothing to report here. 
return None elif isinstance(url, dict): # We already have our result set results = url if results.get('schema') not in N_MGR: # schema is a mandatory dictionary item as it is the only way # we can index into our loaded plugins logger.error('Dictionary does not include a "schema" entry.') logger.trace( 'Invalid dictionary unpacked as:{}{}'.format( os.linesep, os.linesep.join( ['{}="{}"'.format(k, v) for k, v in results.items()]))) return None logger.trace( 'Dictionary unpacked as:{}{}'.format( os.linesep, os.linesep.join( ['{}="{}"'.format(k, v) for k, v in results.items()]))) # Otherwise we handle the invalid input specified else: logger.error( 'An invalid URL type (%s) was specified for instantiation', type(url)) return None if not N_MGR[results['schema']].enabled: # # First Plugin Enable Check (Pre Initialization) # # Plugin has been disabled at a global level logger.error( '%s:// is disabled on this system.', results['schema']) return None # Build a list of tags to associate with the newly added notifications results['tag'] = set(parse_list(tag)) # Set our Asset Object results['asset'] = asset if suppress_exceptions: try: # Attempt to create an instance of our plugin using the parsed # URL information plugin = N_MGR[results['schema']](**results) # Create log entry of loaded URL logger.debug( 'Loaded {} URL: {}'.format( N_MGR[results['schema']].service_name, plugin.url(privacy=asset.secure_logging))) except Exception: # CWE-312 (Secure Logging) Handling loggable_url = url if not asset.secure_logging \ else cwe312_url(url) # the arguments are invalid or can not be used. logger.error( 'Could not load {} URL: {}'.format( N_MGR[results['schema']].service_name, loggable_url)) return None else: # Attempt to create an instance of our plugin using the parsed # URL information but don't wrap it in a try catch plugin = N_MGR[results['schema']](**results) if not plugin.enabled: # # Second Plugin Enable Check (Post Initialization) # # Service/Plugin is disabled (on a more local level). This is a # case where the plugin was initially enabled but then after the # __init__() was called under the hood something pre-determined # that it could no longer be used. # The only downside to doing it this way is services are # initialized prior to returning the details() if 3rd party tools # are polling what is available. These services that become # disabled thereafter are shown initially that they can be used. logger.error( '%s:// has become disabled on this system.', results['schema']) return None return plugin def add(self, servers, asset=None, tag=None): """ Adds one or more server URLs into our list. You can override the global asset if you wish by including it with the server(s) that you add. The tag allows you to associate 1 or more tag values to the server(s) being added. tagging a service allows you to exclusively access them when calling the notify() function. 
""" # Initialize our return status return_status = True if asset is None: # prepare default asset asset = self.asset if isinstance(servers, str): # build our server list servers = parse_urls(servers) if len(servers) == 0: return False elif isinstance(servers, dict): # no problem, we support kwargs, convert it to a list servers = [servers] elif isinstance(servers, (ConfigBase, NotifyBase, AppriseConfig)): # Go ahead and just add our plugin into our list self.servers.append(servers) return True elif not isinstance(servers, (tuple, set, list)): logger.error( "An invalid notification (type={}) was specified.".format( type(servers))) return False for _server in servers: if isinstance(_server, (ConfigBase, NotifyBase, AppriseConfig)): # Go ahead and just add our plugin into our list self.servers.append(_server) continue elif not isinstance(_server, (str, dict)): logger.error( "An invalid notification (type={}) was specified.".format( type(_server))) return_status = False continue # Instantiate ourselves an object, this function throws or # returns None if it fails instance = Apprise.instantiate(_server, asset=asset, tag=tag) if not isinstance(instance, NotifyBase): # No logging is required as instantiate() handles failure # and/or success reasons for us return_status = False continue # Add our initialized plugin to our server listings self.servers.append(instance) # Return our status return return_status def clear(self): """ Empties our server list """ self.servers[:] = [] def find(self, tag=common.MATCH_ALL_TAG, match_always=True): """ Returns a list of all servers matching against the tag specified. """ # Build our tag setup # - top level entries are treated as an 'or' # - second level (or more) entries are treated as 'and' # # examples: # tag="tagA, tagB" = tagA or tagB # tag=['tagA', 'tagB'] = tagA or tagB # tag=[('tagA', 'tagC'), 'tagB'] = (tagA and tagC) or tagB # tag=[('tagB', 'tagC')] = tagB and tagC # A match_always flag allows us to pick up on our 'any' keyword # and notify these services under all circumstances match_always = common.MATCH_ALWAYS_TAG if match_always else None # Iterate over our loaded plugins for entry in self.servers: if isinstance(entry, (ConfigBase, AppriseConfig)): # load our servers servers = entry.servers() else: servers = [entry, ] for server in servers: # Apply our tag matching based on our defined logic if is_exclusive_match( logic=tag, data=server.tags, match_all=common.MATCH_ALL_TAG, match_always=match_always): yield server return def notify(self, body, title='', notify_type=common.NotifyType.INFO, body_format=None, tag=common.MATCH_ALL_TAG, match_always=True, attach=None, interpret_escapes=None): """ Send a notification to all the plugins previously loaded. If the body_format specified is NotifyFormat.MARKDOWN, it will be converted to HTML if the Notification type expects this. if the tag is specified (either a string or a set/list/tuple of strings), then only the notifications flagged with that tagged value are notified. By default, all added services are notified (tag=MATCH_ALL_TAG) This function returns True if all notifications were successfully sent, False if even just one of them fails, and None if no notifications were sent at all as a result of tag filtering and/or simply having empty configuration files that were read. Attach can contain a list of attachment URLs. attach can also be represented by an AttachBase() (or list of) object(s). 
This identifies the products you wish to notify Set interpret_escapes to True if you want to pre-escape a string such as turning a \n into an actual new line, etc. """ try: # Process arguments and build synchronous and asynchronous calls # (this step can throw internal errors). sequential_calls, parallel_calls = self._create_notify_calls( body, title, notify_type=notify_type, body_format=body_format, tag=tag, match_always=match_always, attach=attach, interpret_escapes=interpret_escapes, ) except TypeError: # No notifications sent, and there was an internal error. return False if not sequential_calls and not parallel_calls: # Nothing to send return None sequential_result = Apprise._notify_sequential(*sequential_calls) parallel_result = Apprise._notify_parallel_threadpool(*parallel_calls) return sequential_result and parallel_result async def async_notify(self, *args, **kwargs): """ Send a notification to all the plugins previously loaded, for asynchronous callers. The arguments are identical to those of Apprise.notify(). """ try: # Process arguments and build synchronous and asynchronous calls # (this step can throw internal errors). sequential_calls, parallel_calls = self._create_notify_calls( *args, **kwargs) except TypeError: # No notifications sent, and there was an internal error. return False if not sequential_calls and not parallel_calls: # Nothing to send return None sequential_result = Apprise._notify_sequential(*sequential_calls) parallel_result = \ await Apprise._notify_parallel_asyncio(*parallel_calls) return sequential_result and parallel_result def _create_notify_calls(self, *args, **kwargs): """ Creates notifications for all the plugins loaded. Returns a list of (server, notify() kwargs) tuples for plugins with parallelism disabled and another list for plugins with parallelism enabled. """ all_calls = list(self._create_notify_gen(*args, **kwargs)) # Split into sequential and parallel notify() calls. sequential, parallel = [], [] for (server, notify_kwargs) in all_calls: if server.asset.async_mode: parallel.append((server, notify_kwargs)) else: sequential.append((server, notify_kwargs)) return sequential, parallel def _create_notify_gen(self, body, title='', notify_type=common.NotifyType.INFO, body_format=None, tag=common.MATCH_ALL_TAG, match_always=True, attach=None, interpret_escapes=None): """ Internal generator function for _create_notify_calls(). 
""" if len(self) == 0: # Nothing to notify msg = "There are no service(s) to notify" logger.error(msg) raise TypeError(msg) if not (title or body or attach): msg = "No message content specified to deliver" logger.error(msg) raise TypeError(msg) try: if title and isinstance(title, bytes): title = title.decode(self.asset.encoding) if body and isinstance(body, bytes): body = body.decode(self.asset.encoding) except UnicodeDecodeError: msg = 'The content passed into Apprise was not of encoding ' \ 'type: {}'.format(self.asset.encoding) logger.error(msg) raise TypeError(msg) # Tracks conversions conversion_body_map = dict() conversion_title_map = dict() # Prepare attachments if required if attach is not None and not isinstance(attach, AppriseAttachment): attach = AppriseAttachment( attach, asset=self.asset, location=self.location) # Allow Asset default value body_format = self.asset.body_format \ if body_format is None else body_format # Allow Asset default value interpret_escapes = self.asset.interpret_escapes \ if interpret_escapes is None else interpret_escapes # Iterate over our loaded plugins for server in self.find(tag, match_always=match_always): # If our code reaches here, we either did not define a tag (it # was set to None), or we did define a tag and the logic above # determined we need to notify the service it's associated with # First we need to generate a key we will use to determine if we # need to build our data out. Entries without are merged with # the body at this stage. key = server.notify_format if server.title_maxlen > 0\ else f'_{server.notify_format}' if server.interpret_emojis: # alter our key slightly to handle emojis since their value is # pulled out of the notification key += "-emojis" if key not in conversion_title_map: # Prepare our title conversion_title_map[key] = '' if not title else title # Conversion of title only occurs for services where the title # is blended with the body (title_maxlen <= 0) if conversion_title_map[key] and server.title_maxlen <= 0: conversion_title_map[key] = convert_between( body_format, server.notify_format, content=conversion_title_map[key]) # Our body is always converted no matter what conversion_body_map[key] = \ convert_between( body_format, server.notify_format, content=body) if interpret_escapes: # # Escape our content # try: # Added overhead required due to Python 3 Encoding Bug # identified here: https://bugs.python.org/issue21331 conversion_body_map[key] = \ conversion_body_map[key]\ .encode('ascii', 'backslashreplace')\ .decode('unicode-escape') conversion_title_map[key] = \ conversion_title_map[key]\ .encode('ascii', 'backslashreplace')\ .decode('unicode-escape') except AttributeError: # Must be of string type msg = 'Failed to escape message body' logger.error(msg) raise TypeError(msg) if server.interpret_emojis: # # Convert our :emoji: definitions # conversion_body_map[key] = \ apply_emojis(conversion_body_map[key]) conversion_title_map[key] = \ apply_emojis(conversion_title_map[key]) kwargs = dict( body=conversion_body_map[key], title=conversion_title_map[key], notify_type=notify_type, attach=attach, body_format=body_format ) yield (server, kwargs) @staticmethod def _notify_sequential(*servers_kwargs): """ Process a list of notify() calls sequentially and synchronously. """ success = True for (server, kwargs) in servers_kwargs: try: # Send notification result = server.notify(**kwargs) success = success and result except TypeError: # These are our internally thrown notifications. 
success = False except Exception: # A catch all so we don't have to abort early # just because one of our plugins has a bug in it. logger.exception("Unhandled Notification Exception") success = False return success @staticmethod def _notify_parallel_threadpool(*servers_kwargs): """ Process a list of notify() calls in parallel and synchronously. """ n_calls = len(servers_kwargs) # 0-length case if n_calls == 0: return True # There's no need to use a thread pool for just a single notification if n_calls == 1: return Apprise._notify_sequential(servers_kwargs[0]) # Create log entry logger.info( 'Notifying %d service(s) with threads.', len(servers_kwargs)) with cf.ThreadPoolExecutor() as executor: success = True futures = [executor.submit(server.notify, **kwargs) for (server, kwargs) in servers_kwargs] for future in cf.as_completed(futures): try: result = future.result() success = success and result except TypeError: # These are our internally thrown notifications. success = False except Exception: # A catch all so we don't have to abort early # just because one of our plugins has a bug in it. logger.exception("Unhandled Notification Exception") success = False return success @staticmethod async def _notify_parallel_asyncio(*servers_kwargs): """ Process a list of async_notify() calls in parallel and asynchronously. """ n_calls = len(servers_kwargs) # 0-length case if n_calls == 0: return True # (Unlike with the thread pool, we don't optimize for the single- # notification case because asyncio can do useful work while waiting # for that thread to complete) # Create log entry logger.info( 'Notifying %d service(s) asynchronously.', len(servers_kwargs)) async def do_call(server, kwargs): return await server.async_notify(**kwargs) cors = (do_call(server, kwargs) for (server, kwargs) in servers_kwargs) results = await asyncio.gather(*cors, return_exceptions=True) if any(isinstance(status, Exception) and not isinstance(status, TypeError) for status in results): # A catch all so we don't have to abort early just because # one of our plugins has a bug in it. logger.exception("Unhandled Notification Exception") return False if any(isinstance(status, TypeError) for status in results): # These are our internally thrown notifications. return False return all(results) def details(self, lang=None, show_requirements=False, show_disabled=False): """ Returns the details associated with the Apprise object """ # general object returned response = { # Defines the current version of Apprise 'version': __version__, # Lists all of the currently supported Notifications 'schemas': [], # Includes the configured asset details 'asset': self.asset.details(), } for plugin in N_MGR.plugins(): # Iterate over our hashed plugins and dynamically build details on # their status: content = { 'service_name': getattr(plugin, 'service_name', None), 'service_url': getattr(plugin, 'service_url', None), 'setup_url': getattr(plugin, 'setup_url', None), # Placeholder - populated below 'details': None, # Let upstream service know of the plugins that support # attachments 'attachment_support': getattr( plugin, 'attachment_support', False), # Differentiate between what is a custom loaded plugin and # which is native.
'category': getattr(plugin, 'category', None) } # Standard protocol(s) should be None or a tuple enabled = getattr(plugin, 'enabled', True) if not show_disabled and not enabled: # Do not show inactive plugins continue elif show_disabled: # Add current state to response content['enabled'] = enabled # Standard protocol(s) should be None or a tuple protocols = getattr(plugin, 'protocol', None) if isinstance(protocols, str): protocols = (protocols, ) # Secure protocol(s) should be None or a tuple secure_protocols = getattr(plugin, 'secure_protocol', None) if isinstance(secure_protocols, str): secure_protocols = (secure_protocols, ) # Add our protocol details to our content content.update({ 'protocols': protocols, 'secure_protocols': secure_protocols, }) if not lang: # Simply return our results content['details'] = plugins.details(plugin) if show_requirements: content['requirements'] = plugins.requirements(plugin) else: # Emulate the specified language when returning our results with self.locale.lang_at(lang): content['details'] = plugins.details(plugin) if show_requirements: content['requirements'] = plugins.requirements(plugin) # Build our response object response['schemas'].append(content) return response def urls(self, privacy=False): """ Returns all of the loaded URLs defined in this apprise object. """ return [x.url(privacy=privacy) for x in self.servers] def pop(self, index): """ Removes an indexed Notification Service from the stack and returns it. The thing is we can never pop AppriseConfig() entries, only what was loaded within them. So pop needs to carefully iterate over our list and only track actual entries. """ # Tracking variables prev_offset = -1 offset = prev_offset for idx, s in enumerate(self.servers): if isinstance(s, (ConfigBase, AppriseConfig)): servers = s.servers() if len(servers) > 0: # Acquire a new maximum offset to work with offset = prev_offset + len(servers) if offset >= index: # we can pop an element from our config stack fn = s.pop if isinstance(s, ConfigBase) \ else s.server_pop return fn(index if prev_offset == -1 else (index - prev_offset - 1)) else: offset = prev_offset + 1 if offset == index: return self.servers.pop(idx) # Update our old offset prev_offset = offset # If we reach here, then we indexed out of range raise IndexError('list index out of range') def __getitem__(self, index): """ Returns the indexed server entry of a loaded notification server """ # Tracking variables prev_offset = -1 offset = prev_offset for idx, s in enumerate(self.servers): if isinstance(s, (ConfigBase, AppriseConfig)): # Get our list of servers associate with our config object servers = s.servers() if len(servers) > 0: # Acquire a new maximum offset to work with offset = prev_offset + len(servers) if offset >= index: return servers[index if prev_offset == -1 else (index - prev_offset - 1)] else: offset = prev_offset + 1 if offset == index: return self.servers[idx] # Update our old offset prev_offset = offset # If we reach here, then we indexed out of range raise IndexError('list index out of range') def __getstate__(self): """ Pickle Support dumps() """ attributes = { 'asset': self.asset, # Prepare our URL list as we need to extract the associated tags # and asset details associated with it 'urls': [{ 'url': server.url(privacy=False), 'tag': server.tags if server.tags else None, 'asset': server.asset} for server in self.servers], 'locale': self.locale, 'debug': self.debug, 'location': self.location, } return attributes def __setstate__(self, state): """ Pickle Support loads() 
""" self.servers = list() self.asset = state['asset'] self.locale = state['locale'] self.location = state['location'] for entry in state['urls']: self.add(entry['url'], asset=entry['asset'], tag=entry['tag']) def __bool__(self): """ Allows the Apprise object to be wrapped in an 'if statement'. True is returned if at least one service has been loaded. """ return len(self) > 0 def __iter__(self): """ Returns an iterator to each of our servers loaded. This includes those found inside configuration. """ return chain(*[[s] if not isinstance(s, (ConfigBase, AppriseConfig)) else iter(s.servers()) for s in self.servers]) def __len__(self): """ Returns the number of servers loaded; this includes those found within loaded configuration. This funtion nnever actually counts the Config entry themselves (if they exist), only what they contain. """ return sum([1 if not isinstance(s, (ConfigBase, AppriseConfig)) else len(s.servers()) for s in self.servers])
(servers=None, asset=None, location=None, debug=False)
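A short usage sketch for the Apprise manager above; the service URL is illustrative:

    from apprise import Apprise

    apobj = Apprise()

    # Any supported service URL can be loaded; mailto:// is one example.
    apobj.add('mailto://user:password@gmail.com')

    # notify() returns True only if every loaded service reported success.
    apobj.notify(body='A test message', title='Apprise')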
721,897
apprise.apprise
__bool__
Allows the Apprise object to be wrapped in an 'if statement'. True is returned if at least one service has been loaded.
def __bool__(self): """ Allows the Apprise object to be wrapped in an 'if statement'. True is returned if at least one service has been loaded. """ return len(self) > 0
(self)
721,898
apprise.apprise
__getitem__
Returns the indexed server entry of a loaded notification server
def __getitem__(self, index): """ Returns the indexed server entry of a loaded notification server """ # Tracking variables prev_offset = -1 offset = prev_offset for idx, s in enumerate(self.servers): if isinstance(s, (ConfigBase, AppriseConfig)): # Get our list of servers associate with our config object servers = s.servers() if len(servers) > 0: # Acquire a new maximum offset to work with offset = prev_offset + len(servers) if offset >= index: return servers[index if prev_offset == -1 else (index - prev_offset - 1)] else: offset = prev_offset + 1 if offset == index: return self.servers[idx] # Update our old offset prev_offset = offset # If we reach here, then we indexed out of range raise IndexError('list index out of range')
(self, index)
721,899
apprise.apprise
__getstate__
Pickle Support dumps()
def __getstate__(self): """ Pickle Support dumps() """ attributes = { 'asset': self.asset, # Prepare our URL list as we need to extract the associated tags # and asset details associated with it 'urls': [{ 'url': server.url(privacy=False), 'tag': server.tags if server.tags else None, 'asset': server.asset} for server in self.servers], 'locale': self.locale, 'debug': self.debug, 'location': self.location, } return attributes
(self)
721,900
apprise.apprise
__init__
Loads a set of server urls while applying the Asset() module to each if specified. If no asset is provided, then the default asset is used. Optionally specify a global ContentLocation for a more strict means of handling Attachments.
def __init__(self, servers=None, asset=None, location=None, debug=False): """ Loads a set of server urls while applying the Asset() module to each if specified. If no asset is provided, then the default asset is used. Optionally specify a global ContentLocation for a more strict means of handling Attachments. """ # Initialize a server list of URLs self.servers = list() # Assigns a central asset object that will be later passed into each # notification plugin. Assets contain information such as the local # directory images can be found in. It can also identify remote # URL paths that contain the images you want to present to the end # user. If no asset is specified, then the default one is used. self.asset = \ asset if isinstance(asset, AppriseAsset) else AppriseAsset() if servers: self.add(servers) # Initialize our locale object self.locale = AppriseLocale() # Set our debug flag self.debug = debug # Store our hosting location for optional strict rule handling # of Attachments. Setting this to None removes any attachment # restrictions. self.location = location
(self, servers=None, asset=None, location=None, debug=False)
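A sketch of constructing the object with a non-default asset; the AppriseAsset attribute values shown are illustrative:

    from apprise import Apprise, AppriseAsset

    # Override the default asset (the application identity passed to
    # each plugin) and load a server at construction time.
    asset = AppriseAsset(app_id='MyApp', app_desc='My Application')
    apobj = Apprise(servers=['json://localhost'], asset=asset)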
721,901
apprise.apprise
__iter__
Returns an iterator to each of our servers loaded. This includes those found inside configuration.
def __iter__(self): """ Returns an iterator to each of our servers loaded. This includes those found inside configuration. """ return chain(*[[s] if not isinstance(s, (ConfigBase, AppriseConfig)) else iter(s.servers()) for s in self.servers])
(self)
721,902
apprise.apprise
__len__
Returns the number of servers loaded; this includes those found within loaded configuration. This function never actually counts the Config entries themselves (if they exist), only what they contain.
def __len__(self): """ Returns the number of servers loaded; this includes those found within loaded configuration. This function never actually counts the Config entries themselves (if they exist), only what they contain. """ return sum([1 if not isinstance(s, (ConfigBase, AppriseConfig)) else len(s.servers()) for s in self.servers])
(self)
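Taken together, __bool__, __len__, __iter__, and __getitem__ give the object list-like behavior. A combined sketch (URLs illustrative):

    apobj = Apprise()
    apobj.add(['json://localhost', 'xml://localhost'])

    if apobj:                       # __bool__: at least one service loaded
        print(len(apobj))           # __len__: 2, counting servers inside configs
        for server in apobj:        # __iter__: also walks configuration entries
            print(server.url(privacy=True))
        first = apobj[0]            # __getitem__: indexed access across configs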
721,903
apprise.apprise
__setstate__
Pickle Support loads()
def __setstate__(self, state): """ Pickle Support loads() """ self.servers = list() self.asset = state['asset'] self.locale = state['locale'] self.location = state['location'] for entry in state['urls']: self.add(entry['url'], asset=entry['asset'], tag=entry['tag'])
(self, state)
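Since __getstate__ stores each server as its URL, tags, and asset, and __setstate__ re-adds them through add(), a pickle round trip should rebuild the plugin instances. A sketch (URL illustrative):

    import pickle

    apobj = Apprise()
    apobj.add('json://localhost', tag='dev')

    # Round trip: servers are re-instantiated from their saved URLs.
    restored = pickle.loads(pickle.dumps(apobj))
    assert len(restored) == len(apobj)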
721,904
apprise.apprise
_create_notify_calls
Creates notifications for all the plugins loaded. Returns a list of (server, notify() kwargs) tuples for plugins with parallelism disabled and another list for plugins with parallelism enabled.
def _create_notify_calls(self, *args, **kwargs): """ Creates notifications for all the plugins loaded. Returns a list of (server, notify() kwargs) tuples for plugins with parallelism disabled and another list for plugins with parallelism enabled. """ all_calls = list(self._create_notify_gen(*args, **kwargs)) # Split into sequential and parallel notify() calls. sequential, parallel = [], [] for (server, notify_kwargs) in all_calls: if server.asset.async_mode: parallel.append((server, notify_kwargs)) else: sequential.append((server, notify_kwargs)) return sequential, parallel
(self, *args, **kwargs)
721,905
apprise.apprise
_create_notify_gen
Internal generator function for _create_notify_calls().
def _create_notify_gen(self, body, title='', notify_type=common.NotifyType.INFO, body_format=None, tag=common.MATCH_ALL_TAG, match_always=True, attach=None, interpret_escapes=None): """ Internal generator function for _create_notify_calls(). """ if len(self) == 0: # Nothing to notify msg = "There are no service(s) to notify" logger.error(msg) raise TypeError(msg) if not (title or body or attach): msg = "No message content specified to deliver" logger.error(msg) raise TypeError(msg) try: if title and isinstance(title, bytes): title = title.decode(self.asset.encoding) if body and isinstance(body, bytes): body = body.decode(self.asset.encoding) except UnicodeDecodeError: msg = 'The content passed into Apprise was not of encoding ' \ 'type: {}'.format(self.asset.encoding) logger.error(msg) raise TypeError(msg) # Tracks conversions conversion_body_map = dict() conversion_title_map = dict() # Prepare attachments if required if attach is not None and not isinstance(attach, AppriseAttachment): attach = AppriseAttachment( attach, asset=self.asset, location=self.location) # Allow Asset default value body_format = self.asset.body_format \ if body_format is None else body_format # Allow Asset default value interpret_escapes = self.asset.interpret_escapes \ if interpret_escapes is None else interpret_escapes # Iterate over our loaded plugins for server in self.find(tag, match_always=match_always): # If our code reaches here, we either did not define a tag (it # was set to None), or we did define a tag and the logic above # determined we need to notify the service it's associated with # First we need to generate a key we will use to determine if we # need to build our data out. Entries without are merged with # the body at this stage. key = server.notify_format if server.title_maxlen > 0\ else f'_{server.notify_format}' if server.interpret_emojis: # alter our key slightly to handle emojis since their value is # pulled out of the notification key += "-emojis" if key not in conversion_title_map: # Prepare our title conversion_title_map[key] = '' if not title else title # Conversion of title only occurs for services where the title # is blended with the body (title_maxlen <= 0) if conversion_title_map[key] and server.title_maxlen <= 0: conversion_title_map[key] = convert_between( body_format, server.notify_format, content=conversion_title_map[key]) # Our body is always converted no matter what conversion_body_map[key] = \ convert_between( body_format, server.notify_format, content=body) if interpret_escapes: # # Escape our content # try: # Added overhead required due to Python 3 Encoding Bug # identified here: https://bugs.python.org/issue21331 conversion_body_map[key] = \ conversion_body_map[key]\ .encode('ascii', 'backslashreplace')\ .decode('unicode-escape') conversion_title_map[key] = \ conversion_title_map[key]\ .encode('ascii', 'backslashreplace')\ .decode('unicode-escape') except AttributeError: # Must be of string type msg = 'Failed to escape message body' logger.error(msg) raise TypeError(msg) if server.interpret_emojis: # # Convert our :emoji: definitions # conversion_body_map[key] = \ apply_emojis(conversion_body_map[key]) conversion_title_map[key] = \ apply_emojis(conversion_title_map[key]) kwargs = dict( body=conversion_body_map[key], title=conversion_title_map[key], notify_type=notify_type, attach=attach, body_format=body_format ) yield (server, kwargs)
(self, body, title='', notify_type='info', body_format=None, tag='all', match_always=True, attach=None, interpret_escapes=None)
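The generator's conversion cache is keyed so that a single body/title conversion is shared by every server with the same format handling. The key scheme in isolation (attribute names as in the code above):

    def conversion_key(server):
        # Servers that blend the title into the body (title_maxlen <= 0)
        # get a distinct '_'-prefixed key, since their title is converted.
        key = server.notify_format if server.title_maxlen > 0 \
            else f'_{server.notify_format}'
        if server.interpret_emojis:
            # Emoji interpretation changes the rendered content too.
            key += '-emojis'
        return key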
721,906
apprise.apprise
_notify_parallel_asyncio
Process a list of async_notify() calls in parallel and asynchronously.
@staticmethod async def _notify_parallel_asyncio(*servers_kwargs): """ Process a list of async_notify() calls in parallel and asynchronously. """ n_calls = len(servers_kwargs) # 0-length case if n_calls == 0: return True # (Unlike with the thread pool, we don't optimize for the single- # notification case because asyncio can do useful work while waiting # for that thread to complete) # Create log entry logger.info( 'Notifying %d service(s) asynchronously.', len(servers_kwargs)) async def do_call(server, kwargs): return await server.async_notify(**kwargs) cors = (do_call(server, kwargs) for (server, kwargs) in servers_kwargs) results = await asyncio.gather(*cors, return_exceptions=True) if any(isinstance(status, Exception) and not isinstance(status, TypeError) for status in results): # A catch all so we don't have to abort early just because # one of our plugins has a bug in it. logger.exception("Unhandled Notification Exception") return False if any(isinstance(status, TypeError) for status in results): # These are our internally thrown notifications. return False return all(results)
(*servers_kwargs)
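The same gather-with-exceptions pattern in isolation; the coroutine below is a stand-in for a plugin's async_notify():

    import asyncio

    async def task(i):
        await asyncio.sleep(0)
        if i == 2:
            raise TypeError('internal error')  # stands in for an internal failure
        return True

    async def main():
        results = await asyncio.gather(*(task(i) for i in range(4)),
                                       return_exceptions=True)
        # return_exceptions=True returns exceptions as values, so one
        # failing notification cannot cancel the rest of the batch.
        print(all(r is True for r in results))  # False

    asyncio.run(main())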
721,907
apprise.apprise
_notify_parallel_threadpool
Process a list of notify() calls in parallel and synchronously.
@staticmethod def _notify_parallel_threadpool(*servers_kwargs): """ Process a list of notify() calls in parallel and synchronously. """ n_calls = len(servers_kwargs) # 0-length case if n_calls == 0: return True # There's no need to use a thread pool for just a single notification if n_calls == 1: return Apprise._notify_sequential(servers_kwargs[0]) # Create log entry logger.info( 'Notifying %d service(s) with threads.', len(servers_kwargs)) with cf.ThreadPoolExecutor() as executor: success = True futures = [executor.submit(server.notify, **kwargs) for (server, kwargs) in servers_kwargs] for future in cf.as_completed(futures): try: result = future.result() success = success and result except TypeError: # These are our internally thrown notifications. success = False except Exception: # A catch all so we don't have to abort early # just because one of our plugins has a bug in it. logger.exception("Unhandled Notification Exception") success = False return success
(*servers_kwargs)
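The thread-pool fan-out in isolation; send() is a stand-in for server.notify(**kwargs):

    import concurrent.futures as cf

    def send(i):
        return i % 2 == 0  # pretend odd-numbered services fail

    with cf.ThreadPoolExecutor() as executor:
        futures = [executor.submit(send, i) for i in range(4)]
        # as_completed() yields futures as they finish, so one slow
        # service does not serialize the whole batch.
        success = all(f.result() for f in cf.as_completed(futures))

    print(success)  # False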
721,908
apprise.apprise
_notify_sequential
Process a list of notify() calls sequentially and synchronously.
@staticmethod def _notify_sequential(*servers_kwargs): """ Process a list of notify() calls sequentially and synchronously. """ success = True for (server, kwargs) in servers_kwargs: try: # Send notification result = server.notify(**kwargs) success = success and result except TypeError: # These are our internally thrown notifications. success = False except Exception: # A catch all so we don't have to abort early # just because one of our plugins has a bug in it. logger.exception("Unhandled Notification Exception") success = False return success
(*servers_kwargs)
721,909
apprise.apprise
add
Adds one or more server URLs into our list. You can override the global asset if you wish by including it with the server(s) that you add. The tag allows you to associate 1 or more tag values to the server(s) being added. Tagging a service allows you to exclusively access them when calling the notify() function.
def add(self, servers, asset=None, tag=None): """ Adds one or more server URLs into our list. You can override the global asset if you wish by including it with the server(s) that you add. The tag allows you to associate 1 or more tag values to the server(s) being added. tagging a service allows you to exclusively access them when calling the notify() function. """ # Initialize our return status return_status = True if asset is None: # prepare default asset asset = self.asset if isinstance(servers, str): # build our server list servers = parse_urls(servers) if len(servers) == 0: return False elif isinstance(servers, dict): # no problem, we support kwargs, convert it to a list servers = [servers] elif isinstance(servers, (ConfigBase, NotifyBase, AppriseConfig)): # Go ahead and just add our plugin into our list self.servers.append(servers) return True elif not isinstance(servers, (tuple, set, list)): logger.error( "An invalid notification (type={}) was specified.".format( type(servers))) return False for _server in servers: if isinstance(_server, (ConfigBase, NotifyBase, AppriseConfig)): # Go ahead and just add our plugin into our list self.servers.append(_server) continue elif not isinstance(_server, (str, dict)): logger.error( "An invalid notification (type={}) was specified.".format( type(_server))) return_status = False continue # Instantiate ourselves an object, this function throws or # returns None if it fails instance = Apprise.instantiate(_server, asset=asset, tag=tag) if not isinstance(instance, NotifyBase): # No logging is required as instantiate() handles failure # and/or success reasons for us return_status = False continue # Add our initialized plugin to our server listings self.servers.append(instance) # Return our status return return_status
(self, servers, asset=None, tag=None)
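A sketch of the input shapes add() accepts (URLs illustrative):

    apobj = Apprise()

    # A single URL string (parse_urls() also splits multi-URL strings)
    apobj.add('json://localhost')

    # A list of URLs, tagged for later targeting via notify(tag=...)
    apobj.add(['xml://localhost', 'form://localhost'], tag='dev')

    # A dict of pre-parsed components, as documented under instantiate()
    apobj.add({'schema': 'json', 'host': 'localhost'})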
721,910
apprise.apprise
async_notify
Send a notification to all the plugins previously loaded, for asynchronous callers. The arguments are identical to those of Apprise.notify().
async def async_notify(self, *args, **kwargs): """ Send a notification to all the plugins previously loaded, for asynchronous callers. The arguments are identical to those of Apprise.notify(). """ try: # Process arguments and build synchronous and asynchronous calls # (this step can throw internal errors). sequential_calls, parallel_calls = self._create_notify_calls( *args, **kwargs) except TypeError: # No notifications sent, and there was an internal error. return False if not sequential_calls and not parallel_calls: # Nothing to send return None sequential_result = Apprise._notify_sequential(*sequential_calls) parallel_result = \ await Apprise._notify_parallel_asyncio(*parallel_calls) return sequential_result and parallel_result
(self, *args, **kwargs)
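A usage sketch for asynchronous callers (URL illustrative):

    import asyncio

    async def main():
        apobj = Apprise()
        apobj.add('json://localhost')
        # Identical arguments to notify(); plugins running in async mode
        # are awaited in parallel via _notify_parallel_asyncio().
        await apobj.async_notify(body='hello', title='async')

    asyncio.run(main())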
721,911
apprise.apprise
clear
Empties our server list
def clear(self): """ Empties our server list """ self.servers[:] = []
(self)
721,912
apprise.apprise
details
Returns the details associated with the Apprise object
def details(self, lang=None, show_requirements=False, show_disabled=False): """ Returns the details associated with the Apprise object """ # general object returned response = { # Defines the current version of Apprise 'version': __version__, # Lists all of the currently supported Notifications 'schemas': [], # Includes the configured asset details 'asset': self.asset.details(), } for plugin in N_MGR.plugins(): # Iterate over our hashed plugins and dynamically build details on # their status: content = { 'service_name': getattr(plugin, 'service_name', None), 'service_url': getattr(plugin, 'service_url', None), 'setup_url': getattr(plugin, 'setup_url', None), # Placeholder - populated below 'details': None, # Let upstream service know of the plugins that support # attachments 'attachment_support': getattr( plugin, 'attachment_support', False), # Differentiate between what is a custom loaded plugin and # which is native. 'category': getattr(plugin, 'category', None) } # Standard protocol(s) should be None or a tuple enabled = getattr(plugin, 'enabled', True) if not show_disabled and not enabled: # Do not show inactive plugins continue elif show_disabled: # Add current state to response content['enabled'] = enabled # Standard protocol(s) should be None or a tuple protocols = getattr(plugin, 'protocol', None) if isinstance(protocols, str): protocols = (protocols, ) # Secure protocol(s) should be None or a tuple secure_protocols = getattr(plugin, 'secure_protocol', None) if isinstance(secure_protocols, str): secure_protocols = (secure_protocols, ) # Add our protocol details to our content content.update({ 'protocols': protocols, 'secure_protocols': secure_protocols, }) if not lang: # Simply return our results content['details'] = plugins.details(plugin) if show_requirements: content['requirements'] = plugins.requirements(plugin) else: # Emulate the specified language when returning our results with self.locale.lang_at(lang): content['details'] = plugins.details(plugin) if show_requirements: content['requirements'] = plugins.requirements(plugin) # Build our response object response['schemas'].append(content) return response
(self, lang=None, show_requirements=False, show_disabled=False)
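A sketch of consuming the response object (apobj is any loaded Apprise instance):

    info = apobj.details(show_requirements=True)
    print(info['version'])
    for schema in info['schemas']:
        # Each entry carries the service name plus its protocol tuples.
        print(schema['service_name'], schema['protocols'],
              schema['secure_protocols'])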
721,913
apprise.apprise
find
Returns a list of all servers matching against the tag specified.
def find(self, tag=common.MATCH_ALL_TAG, match_always=True): """ Returns a list of all servers matching against the tag specified. """ # Build our tag setup # - top level entries are treated as an 'or' # - second level (or more) entries are treated as 'and' # # examples: # tag="tagA, tagB" = tagA or tagB # tag=['tagA', 'tagB'] = tagA or tagB # tag=[('tagA', 'tagC'), 'tagB'] = (tagA and tagC) or tagB # tag=[('tagB', 'tagC')] = tagB and tagC # A match_always flag allows us to pick up on our 'any' keyword # and notify these services under all circumstances match_always = common.MATCH_ALWAYS_TAG if match_always else None # Iterate over our loaded plugins for entry in self.servers: if isinstance(entry, (ConfigBase, AppriseConfig)): # load our servers servers = entry.servers() else: servers = [entry, ] for server in servers: # Apply our tag matching based on our defined logic if is_exclusive_match( logic=tag, data=server.tags, match_all=common.MATCH_ALL_TAG, match_always=match_always): yield server return
(self, tag='all', match_always=True)
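A sketch of the or/and tag logic described in the comments above (URLs illustrative):

    apobj = Apprise()
    apobj.add('json://localhost', tag='devops')
    apobj.add('xml://localhost', tag=['devops', 'urgent'])

    # Top-level entries are OR'd; entries inside a tuple are AND'd.
    list(apobj.find(tag='devops'))                # matches both servers
    list(apobj.find(tag=[('devops', 'urgent')]))  # matches only the second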