# File: qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mysql/gaerdbms.py
# mysql/gaerdbms.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: mysql+gaerdbms
:name: Google Cloud SQL
:dbapi: rdbms
:connectstring: mysql+gaerdbms:///<dbname>?instance=<instancename>
:url: https://developers.google.com/appengine/docs/python/cloud-sql/developers-guide
This dialect is based primarily on the :mod:`.mysql.mysqldb` dialect with
minimal changes.
.. deprecated:: 1.0 This dialect is **no longer necessary** for
Google Cloud SQL; the MySQLdb dialect can be used directly.
Cloud SQL now recommends creating connections via the
mysql dialect using the URL format
``mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename>``
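For example, a minimal sketch using the recommended URL format (the
database, project and instance names are placeholders)::
from sqlalchemy import create_engine
engine = create_engine(
"mysql+mysqldb://root@/<dbname>"
"?unix_socket=/cloudsql/<projectid>:<instancename>"
)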
Pooling
-------
Google App Engine connections appear to be randomly recycled,
so the dialect does not pool connections. The :class:`.NullPool`
implementation is installed within the :class:`_engine.Engine` by
default.
""" # noqa
import os
import re
from sqlalchemy.util import warn_deprecated
from .mysqldb import MySQLDialect_mysqldb
from ...pool import NullPool
def _is_dev_environment():
return os.environ.get("SERVER_SOFTWARE", "").startswith("Development/")
class MySQLDialect_gaerdbms(MySQLDialect_mysqldb):
@classmethod
def dbapi(cls):
warn_deprecated(
"Google Cloud SQL now recommends creating connections via the "
"MySQLdb dialect directly, using the URL format "
"mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/"
"<projectid>:<instancename>"
)
# from django:
# http://code.google.com/p/googleappengine/source/
# browse/trunk/python/google/storage/speckle/
# python/django/backend/base.py#118
# see also [ticket:2649]
# see also http://stackoverflow.com/q/14224679/34549
from google.appengine.api import apiproxy_stub_map
if _is_dev_environment():
from google.appengine.api import rdbms_mysqldb
return rdbms_mysqldb
elif apiproxy_stub_map.apiproxy.GetStub("rdbms"):
from google.storage.speckle.python.api import rdbms_apiproxy
return rdbms_apiproxy
else:
from google.storage.speckle.python.api import rdbms_googleapi
return rdbms_googleapi
@classmethod
def get_pool_class(cls, url):
# Cloud SQL connections die at any moment
return NullPool
def create_connect_args(self, url):
opts = url.translate_connect_args()
if not _is_dev_environment():
# 'dsn' and 'instance' are because we are skipping
# the traditional google.api.rdbms wrapper
opts["dsn"] = ""
opts["instance"] = url.query["instance"]
return [], opts
def _extract_error_code(self, exception):
match = re.compile(r"^(\d+)L?:|^\((\d+)L?,").match(str(exception))
# The rdbms api will wrap then re-raise some types of errors
# making this regex return no matches.
code = (match.group(1) or match.group(2)) if match else None
if code:
return int(code)
dialect = MySQLDialect_gaerdbms
# File: qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mysql/dml.py
from ... import exc
from ... import util
from ...sql.base import _generative
from ...sql.dml import Insert as StandardInsert
from ...sql.elements import ClauseElement
from ...sql.expression import alias
from ...util.langhelpers import public_factory
__all__ = ("Insert", "insert")
class Insert(StandardInsert):
"""MySQL-specific implementation of INSERT.
Adds methods for MySQL-specific syntaxes such as ON DUPLICATE KEY UPDATE.
The :class:`~.mysql.Insert` object is created using the
:func:`sqlalchemy.dialects.mysql.insert` function.
.. versionadded:: 1.2
"""
@property
def inserted(self):
"""Provide the "inserted" namespace for an ON DUPLICATE KEY UPDATE statement
MySQL's ON DUPLICATE KEY UPDATE clause allows reference to the row
that would be inserted, via a special function called ``VALUES()``.
This attribute provides all columns in this row to be referenceable
such that they will render within a ``VALUES()`` function inside the
ON DUPLICATE KEY UPDATE clause. The attribute is named ``.inserted``
so as not to conflict with the existing
:meth:`_expression.Insert.values` method.
.. seealso::
:ref:`mysql_insert_on_duplicate_key_update` - example of how
to use :attr:`_expression.Insert.inserted`
"""
return self.inserted_alias.columns
@util.memoized_property
def inserted_alias(self):
return alias(self.table, name="inserted")
@_generative
def on_duplicate_key_update(self, *args, **kw):
r"""
Specifies the ON DUPLICATE KEY UPDATE clause.
:param \**kw: Column keys linked to UPDATE values. The
values may be any SQL expression or supported literal Python
values.
.. warning:: This dictionary does **not** take into account
Python-specified default UPDATE values or generation functions,
e.g. those specified using :paramref:`_schema.Column.onupdate`.
These values will not be exercised for an ON DUPLICATE KEY UPDATE
style of UPDATE, unless values are manually specified here.
:param \*args: As an alternative to passing key/value parameters,
a dictionary or list of 2-tuples can be passed as a single positional
argument.
Passing a single dictionary is equivalent to the keyword argument
form::
insert().on_duplicate_key_update({"name": "some name"})
Passing a list of 2-tuples indicates that the parameter assignments
in the UPDATE clause should be ordered as sent, in a manner similar
to that described for the :class:`_expression.Update`
construct overall
in :ref:`updates_order_parameters`::
insert().on_duplicate_key_update(
[("name", "some name"), ("value", "some value")])
.. versionchanged:: 1.3 parameters can be specified as a dictionary
or list of 2-tuples; the latter form provides for parameter
ordering.
.. versionadded:: 1.2
.. seealso::
:ref:`mysql_insert_on_duplicate_key_update`
"""
if args and kw:
raise exc.ArgumentError(
"Can't pass kwargs and positional arguments simultaneously"
)
if args:
if len(args) > 1:
raise exc.ArgumentError(
"Only a single dictionary or list of tuples "
"is accepted positionally."
)
values = args[0]
else:
values = kw
inserted_alias = getattr(self, "inserted_alias", None)
self._post_values_clause = OnDuplicateClause(inserted_alias, values)
return self
insert = public_factory(
Insert, ".dialects.mysql.insert", ".dialects.mysql.Insert"
)
class OnDuplicateClause(ClauseElement):
__visit_name__ = "on_duplicate_key_update"
_parameter_ordering = None
def __init__(self, inserted_alias, update):
self.inserted_alias = inserted_alias
# auto-detect that parameters should be ordered. This is copied from
# Update._process_colparams(), however we don't look for a special flag
# in this case since we are not disambiguating from other use cases as
# we are in Update.values().
if isinstance(update, list) and (
update and isinstance(update[0], tuple)
):
self._parameter_ordering = [key for key, value in update]
update = dict(update)
if not update or not isinstance(update, dict):
raise ValueError("update parameter must be a non-empty dictionary")
self.update = update
# File: qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mysql/json.py
# mysql/json.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
from ... import types as sqltypes
class JSON(sqltypes.JSON):
"""MySQL JSON type.
MySQL supports JSON as of version 5.7. Note that MariaDB does **not**
support JSON at the time of this writing.
The :class:`.mysql.JSON` type supports persistence of JSON values
as well as the core index operations provided by :class:`_types.JSON`
datatype, by adapting the operations to render the ``JSON_EXTRACT``
function at the database level.
.. versionadded:: 1.1
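E.g., a minimal sketch of declaring a column of this type (table and
column names are illustrative)::
from sqlalchemy import Column, Integer, MetaData, Table
from sqlalchemy.dialects.mysql import JSON
metadata = MetaData()
data_table = Table(
"data_table", metadata,
Column("id", Integer, primary_key=True),
Column("data", JSON),
)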
"""
pass
class _FormatTypeMixin(object):
def _format_value(self, value):
raise NotImplementedError()
def bind_processor(self, dialect):
super_proc = self.string_bind_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
def literal_processor(self, dialect):
super_proc = self.string_literal_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
class JSONIndexType(_FormatTypeMixin, sqltypes.JSON.JSONIndexType):
def _format_value(self, value):
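# an integer index addresses a JSON array element; a string addresses
# an object member, quoted inside the rendered path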
if isinstance(value, int):
value = "$[%s]" % value
else:
value = '$."%s"' % value
return value
class JSONPathType(_FormatTypeMixin, sqltypes.JSON.JSONPathType):
def _format_value(self, value):
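# e.g. the path elements [1, "key"] render as the JSON path '$[1]."key"'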
return "$%s" % (
"".join(
[
"[%s]" % elem if isinstance(elem, int) else '."%s"' % elem
for elem in value
]
)
)
# File: qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mysql/pyodbc.py
# mysql/pyodbc.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: mysql+pyodbc
:name: PyODBC
:dbapi: pyodbc
:connectstring: mysql+pyodbc://<username>:<password>@<dsnname>
:url: http://pypi.python.org/pypi/pyodbc/
.. note:: The PyODBC for MySQL dialect is not well supported, and
is subject to unresolved character encoding issues
which exist within the current ODBC drivers available.
(see http://code.google.com/p/pyodbc/issues/detail?id=25).
Other dialects for MySQL are recommended.
Pass through exact pyodbc connection string::
import urllib
connection_string = (
'DRIVER=MySQL ODBC 8.0 ANSI Driver;'
'SERVER=localhost;'
'PORT=3307;'
'DATABASE=mydb;'
'UID=root;'
'PWD=(whatever);'
'charset=utf8mb4;'
)
params = urllib.parse.quote_plus(connection_string)
connection_uri = "mysql+pyodbc:///?odbc_connect=%s" % params
""" # noqa
import re
from .base import MySQLDialect
from .base import MySQLExecutionContext
from .types import TIME
from ... import util
from ...connectors.pyodbc import PyODBCConnector
from ...sql.sqltypes import Time
class _pyodbcTIME(TIME):
def result_processor(self, dialect, coltype):
def process(value):
# pyodbc returns a datetime.time object; no need to convert
return value
return process
class MySQLExecutionContext_pyodbc(MySQLExecutionContext):
def get_lastrowid(self):
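# pyodbc exposes no lastrowid for MySQL, so ask the server for the most
# recently generated AUTO_INCREMENT value on this connection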
cursor = self.create_cursor()
cursor.execute("SELECT LAST_INSERT_ID()")
lastrowid = cursor.fetchone()[0]
cursor.close()
return lastrowid
class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect):
colspecs = util.update_copy(MySQLDialect.colspecs, {Time: _pyodbcTIME})
supports_unicode_statements = False
execution_ctx_cls = MySQLExecutionContext_pyodbc
pyodbc_driver_name = "MySQL"
def __init__(self, **kw):
# deal with http://code.google.com/p/pyodbc/issues/detail?id=25
kw.setdefault("convert_unicode", True)
super(MySQLDialect_pyodbc, self).__init__(**kw)
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
# Prefer 'character_set_results' for the current connection over the
# value in the driver. SET NAMES or individual variable SETs will
# change the charset without updating the driver's view of the world.
#
# If it's decided that issuing that sort of SQL leaves you SOL, then
# this can prefer the driver value.
rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
opts = {row[0]: row[1] for row in self._compat_fetchall(rs)}
for key in ("character_set_connection", "character_set"):
if opts.get(key, None):
return opts[key]
util.warn(
"Could not detect the connection character set. "
"Assuming latin1."
)
return "latin1"
def _extract_error_code(self, exception):
m = re.compile(r"\((\d+)\)").search(str(exception.args))
# the regex may not match for exceptions that carry no parenthesized
# numeric code; guard before accessing the match object
if m is None:
return None
return int(m.group(1))
dialect = MySQLDialect_pyodbc
# File: qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mysql/base.py
# mysql/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: mysql
:name: MySQL
Supported Versions and Features
-------------------------------
SQLAlchemy supports MySQL starting with version 4.1 through modern releases.
However, no heroic measures are taken to work around major missing
SQL features - if your server version does not support sub-selects, for
example, they won't work in SQLAlchemy either.
See the official MySQL documentation for detailed information about features
supported in any given server release.
.. _mysql_connection_timeouts:
Connection Timeouts and Disconnects
-----------------------------------
MySQL features an automatic connection close behavior, for connections that
have been idle for a fixed period of time, defaulting to eight hours.
To avoid this issue, use
the :paramref:`_sa.create_engine.pool_recycle` option which ensures that
a connection will be discarded and replaced with a new one if it has been
present in the pool for a fixed number of seconds::
engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)
For more comprehensive disconnect detection of pooled connections, including
accommodation of server restarts and network issues, a pre-ping approach may
be employed. See :ref:`pool_disconnects` for current approaches.
.. seealso::
:ref:`pool_disconnects` - Background on several techniques for dealing
with timed out connections as well as database restarts.
.. _mysql_storage_engines:
CREATE TABLE arguments including Storage Engines
------------------------------------------------
MySQL's CREATE TABLE syntax includes a wide array of special options,
including ``ENGINE``, ``CHARSET``, ``MAX_ROWS``, ``ROW_FORMAT``,
``INSERT_METHOD``, and many more.
To accommodate the rendering of these arguments, specify the form
``mysql_argument_name="value"``. For example, to specify a table with
``ENGINE`` of ``InnoDB``, ``CHARSET`` of ``utf8mb4``, and ``KEY_BLOCK_SIZE``
of ``1024``::
Table('mytable', metadata,
Column('data', String(32)),
mysql_engine='InnoDB',
mysql_charset='utf8mb4',
mysql_key_block_size="1024"
)
The MySQL dialect will normally transfer any keyword specified as
``mysql_keyword_name`` to be rendered as ``KEYWORD_NAME`` in the
``CREATE TABLE`` statement. A handful of these names will render with a space
instead of an underscore; to support this, the MySQL dialect has awareness of
these particular names, which include ``DATA DIRECTORY``
(e.g. ``mysql_data_directory``), ``CHARACTER SET`` (e.g.
``mysql_character_set``) and ``INDEX DIRECTORY`` (e.g.
``mysql_index_directory``).
The most common argument is ``mysql_engine``, which refers to the storage
engine for the table. Historically, MySQL server installations would default
to ``MyISAM`` for this value, although newer versions may be defaulting
to ``InnoDB``. The ``InnoDB`` engine is typically preferred for its support
of transactions and foreign keys.
A :class:`_schema.Table`
that is created in a MySQL database with a storage engine
of ``MyISAM`` will be essentially non-transactional, meaning any
INSERT/UPDATE/DELETE statement referring to this table will be invoked as
autocommit. It also will have no support for foreign key constraints; while
the ``CREATE TABLE`` statement accepts foreign key options, when using the
``MyISAM`` storage engine these arguments are discarded. Reflecting such a
table will also produce no foreign key constraint information.
For fully atomic transactions as well as support for foreign key
constraints, all participating ``CREATE TABLE`` statements must specify a
transactional engine, which in the vast majority of cases is ``InnoDB``.
.. seealso::
`The InnoDB Storage Engine
<http://dev.mysql.com/doc/refman/5.0/en/innodb-storage-engine.html>`_ -
on the MySQL website.
Case Sensitivity and Table Reflection
-------------------------------------
MySQL has inconsistent support for case-sensitive identifier
names, basing support on specific details of the underlying
operating system. However, it has been observed that no matter
what case sensitivity behavior is present, the names of tables in
foreign key declarations are *always* received from the database
as all-lower case, making it impossible to accurately reflect a
schema where inter-related tables use mixed-case identifier names.
Therefore it is strongly advised that table names be declared as
all lower case both within SQLAlchemy as well as on the MySQL
database itself, especially if database reflection features are
to be used.
.. _mysql_isolation_level:
Transaction Isolation Level
---------------------------
All MySQL dialects support setting of transaction isolation level both via a
dialect-specific parameter :paramref:`_sa.create_engine.isolation_level`
accepted
by :func:`_sa.create_engine`, as well as the
:paramref:`.Connection.execution_options.isolation_level` argument as passed to
:meth:`_engine.Connection.execution_options`.
This feature works by issuing the
command ``SET SESSION TRANSACTION ISOLATION LEVEL <level>`` for each new
connection. For the special AUTOCOMMIT isolation level, DBAPI-specific
techniques are used.
To set isolation level using :func:`_sa.create_engine`::
engine = create_engine(
"mysql://scott:tiger@localhost/test",
isolation_level="READ UNCOMMITTED"
)
To set using per-connection execution options::
connection = engine.connect()
connection = connection.execution_options(
isolation_level="READ COMMITTED"
)
Valid values for ``isolation_level`` include:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT``
The special ``AUTOCOMMIT`` value makes use of the various "autocommit"
attributes provided by specific DBAPIs, and is currently supported by
MySQLdb, MySQL-Client, MySQL-Connector Python, and PyMySQL. Using it,
the MySQL connection will return true for the value of
``SELECT @@autocommit;``.
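For example, a minimal sketch of selecting this level engine-wide::
engine = create_engine(
"mysql+pymysql://scott:tiger@localhost/test",
isolation_level="AUTOCOMMIT"
)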
.. versionadded:: 1.1 - added support for the AUTOCOMMIT isolation level.
AUTO_INCREMENT Behavior
-----------------------
When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on
the first :class:`.Integer` primary key column which is not marked as a
foreign key::
>>> t = Table('mytable', metadata,
... Column('mytable_id', Integer, primary_key=True)
... )
>>> t.create()
CREATE TABLE mytable (
mytable_id INTEGER NOT NULL AUTO_INCREMENT,
PRIMARY KEY (mytable_id)
)
You can disable this behavior by passing ``False`` to the
:paramref:`_schema.Column.autoincrement` argument of :class:`_schema.Column`.
This flag
can also be used to enable auto-increment on a secondary column in a
multi-column key for some storage engines::
Table('mytable', metadata,
Column('gid', Integer, primary_key=True, autoincrement=False),
Column('id', Integer, primary_key=True)
)
.. _mysql_ss_cursors:
Server Side Cursors
-------------------
Server-side cursor support is available for the MySQLdb and PyMySQL dialects.
From a MySQL point of view this means that the ``MySQLdb.cursors.SSCursor`` or
``pymysql.cursors.SSCursor`` class is used when building up the cursor which
will receive results. The most typical way of invoking this feature is via the
:paramref:`.Connection.execution_options.stream_results` connection execution
option. Server side cursors can also be enabled for all SELECT statements
unconditionally by passing ``server_side_cursors=True`` to
:func:`_sa.create_engine`.
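For example, a sketch of enabling streaming for a single statement, where
``my_table`` is an illustrative table::
conn = engine.connect()
result = conn.execution_options(stream_results=True).execute(
my_table.select()
)
for row in result:
print(row)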
.. versionadded:: 1.1.4 - added server-side cursor support.
.. _mysql_unicode:
Unicode
-------
Charset Selection
~~~~~~~~~~~~~~~~~
Most MySQL DBAPIs offer the option to set the client character set for
a connection. This is typically delivered using the ``charset`` parameter
in the URL, such as::
e = create_engine(
"mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4")
This charset is the **client character set** for the connection. Some
MySQL DBAPIs will default this to a value such as ``latin1``, and some
will make use of the ``default-character-set`` setting in the ``my.cnf``
file as well. Documentation for the DBAPI in use should be consulted
for specific behavior.
The encoding used for Unicode has traditionally been ``'utf8'``. However,
for MySQL versions 5.5.3 on forward, a new MySQL-specific encoding
``'utf8mb4'`` has been introduced, and as of MySQL 8.0 a warning is emitted
by the server if plain ``utf8`` is specified within any server-side
directives, replaced with ``utf8mb3``. The rationale for this new encoding
is due to the fact that MySQL's legacy utf-8 encoding only supports
codepoints up to three bytes instead of four. Therefore,
when communicating with a MySQL database
that includes codepoints more than three bytes in size,
this new charset is preferred, if supported by both the database as well
as the client DBAPI, as in::
e = create_engine(
"mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4")
All modern DBAPIs should support the ``utf8mb4`` charset.
In order to use ``utf8mb4`` encoding for a schema that was created with legacy
``utf8``, changes to the MySQL schema and/or server configuration may be
required.
.. seealso::
`The utf8mb4 Character Set \
<http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html>`_ - \
in the MySQL documentation
.. _mysql_binary_introducer:
Dealing with Binary Data Warnings and Unicode
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MySQL versions 5.6, 5.7 and later (not MariaDB at the time of this writing) now
emit a warning when attempting to pass binary data to the database, while a
character set encoding is also in place, when the binary data itself is not
valid for that encoding::
default.py:509: Warning: (1300, "Invalid utf8mb4 character string:
'F9876A'")
cursor.execute(statement, parameters)
This warning is due to the fact that the MySQL client library is attempting to
interpret the binary string as a unicode object even if a datatype such
as :class:`.LargeBinary` is in use. To resolve this, the SQL statement requires
a binary "character set introducer" be present before any non-NULL value
that renders like this::
INSERT INTO table (data) VALUES (_binary %s)
These character set introducers are provided by the DBAPI driver, assuming the
use of mysqlclient or PyMySQL (both of which are recommended). Add the query
string parameter ``binary_prefix=true`` to the URL to repair this warning::
# mysqlclient
engine = create_engine(
"mysql+mysqldb://scott:tiger@localhost/test?charset=utf8mb4&binary_prefix=true")
# PyMySQL
engine = create_engine(
"mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4&binary_prefix=true")
The ``binary_prefix`` flag may or may not be supported by other MySQL drivers.
SQLAlchemy itself cannot render this ``_binary`` prefix reliably, as it does
not work with the NULL value, which is valid to be sent as a bound parameter.
As the MySQL driver renders parameters directly into the SQL string, it's the
most efficient place for this additional keyword to be passed.
.. seealso::
`Character set introducers <https://dev.mysql.com/doc/refman/5.7/en/charset-introducer.html>`_ - on the MySQL website
ANSI Quoting Style
------------------
MySQL features two varieties of identifier "quoting style", one using
backticks and the other using quotes, e.g. ```some_identifier``` vs.
``"some_identifier"``. All MySQL dialects detect which version
is in use by checking the value of ``sql_mode`` when a connection is first
established with a particular :class:`_engine.Engine`.
This quoting style comes
into play when rendering table and column names as well as when reflecting
existing database structures. The detection is entirely automatic and
no special configuration is needed to use either quoting style.
MySQL SQL Extensions
--------------------
Many of the MySQL SQL extensions are handled through SQLAlchemy's generic
function and operator support::
table.select(table.c.password==func.md5('plaintext'))
table.select(table.c.username.op('regexp')('^[a-d]'))
And of course any valid MySQL statement can be executed as a string as well.
Some limited direct support for MySQL extensions to SQL is currently
available.
* INSERT..ON DUPLICATE KEY UPDATE: See
:ref:`mysql_insert_on_duplicate_key_update`
* SELECT pragma, use :meth:`_expression.Select.prefix_with` and
:meth:`_query.Query.prefix_with`::
select(...).prefix_with(['HIGH_PRIORITY', 'SQL_SMALL_RESULT'])
* UPDATE with LIMIT::
update(..., mysql_limit=10)
* optimizer hints, use :meth:`_expression.Select.prefix_with` and
:meth:`_query.Query.prefix_with`::
select(...).prefix_with("/*+ NO_RANGE_OPTIMIZATION(t4 PRIMARY) */")
* index hints, use :meth:`_expression.Select.with_hint` and
:meth:`_query.Query.with_hint`::
select(...).with_hint(some_table, "USE INDEX xyz")
.. _mysql_insert_on_duplicate_key_update:
INSERT...ON DUPLICATE KEY UPDATE (Upsert)
------------------------------------------
MySQL allows "upserts" (update or insert)
of rows into a table via the ``ON DUPLICATE KEY UPDATE`` clause of the
``INSERT`` statement. A candidate row will only be inserted if that row does
not match an existing primary or unique key in the table; otherwise, an UPDATE
will be performed. The statement allows for separate specification of the
values to INSERT versus the values for UPDATE.
SQLAlchemy provides ``ON DUPLICATE KEY UPDATE`` support via the MySQL-specific
:func:`.mysql.insert()` function, which provides
the generative method :meth:`~.mysql.Insert.on_duplicate_key_update`::
from sqlalchemy.dialects.mysql import insert
insert_stmt = insert(my_table).values(
id='some_existing_id',
data='inserted value')
on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
data=insert_stmt.inserted.data,
status='U'
)
conn.execute(on_duplicate_key_stmt)
Unlike PostgreSQL's "ON CONFLICT" phrase, the "ON DUPLICATE KEY UPDATE"
phrase will always match on any primary key or unique key, and will always
perform an UPDATE if there's a match; there are no options for it to raise
an error or to skip performing an UPDATE.
``ON DUPLICATE KEY UPDATE`` is used to perform an update of the already
existing row, using any combination of new values as well as values
from the proposed insertion. These values are normally specified using
keyword arguments passed to the
:meth:`~.mysql.Insert.on_duplicate_key_update` method, with column key
values (usually the name of the column, unless it specifies
:paramref:`_schema.Column.key`) as keys and literal or SQL expressions
as values::
on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
data="some data",
updated_at=func.current_timestamp(),
)
In a manner similar to that of :meth:`.UpdateBase.values`, other parameter
forms are accepted, including a single dictionary::
on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
{"data": "some data", "updated_at": func.current_timestamp()},
)
as well as a list of 2-tuples, which will automatically provide
a parameter-ordered UPDATE statement in a manner similar to that described
at :ref:`updates_order_parameters`. Unlike the :class:`_expression.Update`
object,
no special flag is needed to specify the intent since the argument form in
this context is unambiguous::
on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
[
("data", "some data"),
("updated_at", func.current_timestamp()),
],
)
.. versionchanged:: 1.3 support for parameter-ordered UPDATE clause within
MySQL ON DUPLICATE KEY UPDATE
.. warning::
The :meth:`_expression.Insert.on_duplicate_key_update`
method does **not** take into
account Python-side default UPDATE values or generation functions,
e.g. those specified using :paramref:`_schema.Column.onupdate`.
These values will not be exercised for an ON DUPLICATE KEY style of UPDATE,
unless they are manually specified explicitly in the parameters.
In order to refer to the proposed insertion row, the special alias
:attr:`~.mysql.Insert.inserted` is available as an attribute on
the :class:`.mysql.Insert` object; this object is a
:class:`_expression.ColumnCollection` which contains all columns of the target
table::
from sqlalchemy.dialects.mysql import insert
stmt = insert(my_table).values(
id='some_id',
data='inserted value',
author='jlh')
do_update_stmt = stmt.on_duplicate_key_update(
data="updated value",
author=stmt.inserted.author
)
conn.execute(do_update_stmt)
When rendered, the "inserted" namespace will produce the expression
``VALUES(<columnname>)``.
.. versionadded:: 1.2 Added support for MySQL ON DUPLICATE KEY UPDATE clause
rowcount Support
----------------
SQLAlchemy standardizes the DBAPI ``cursor.rowcount`` attribute to be the
usual definition of "number of rows matched by an UPDATE or DELETE" statement.
This is in contradiction to the default setting on most MySQL DBAPI drivers,
which is "number of rows actually modified/deleted". For this reason, the
SQLAlchemy MySQL dialects always add the ``constants.CLIENT.FOUND_ROWS``
flag, or whatever is equivalent for the target dialect, upon connection.
This setting is currently hardcoded.
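For example, with FOUND_ROWS in effect an UPDATE reports rows *matched*
rather than rows actually changed (a sketch; names are illustrative)::
result = conn.execute(
my_table.update().where(my_table.c.id == 5).values(data="same value")
)
assert result.rowcount == 1 # one row matched, even if nothing changed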
.. seealso::
:attr:`_engine.ResultProxy.rowcount`
CAST Support
------------
MySQL documents the CAST operator as available in version 4.0.2. When using
the SQLAlchemy :func:`.cast` function, SQLAlchemy
will not render the CAST token on MySQL before this version, based on server
version detection, instead rendering the internal expression directly.
CAST may still not be desirable on an early MySQL version post-4.0.2, as it
didn't add all datatype support until 4.1.1. If your application falls into
this narrow area, the behavior of CAST can be controlled using the
:ref:`sqlalchemy.ext.compiler_toplevel` system, as per the recipe below::
from sqlalchemy.sql.expression import Cast
from sqlalchemy.ext.compiler import compiles
@compiles(Cast, 'mysql')
def _check_mysql_version(element, compiler, **kw):
if compiler.dialect.server_version_info < (4, 1, 0):
return compiler.process(element.clause, **kw)
else:
return compiler.visit_cast(element, **kw)
The above function, which only needs to be declared once
within an application, overrides the compilation of the
:func:`.cast` construct to check for version 4.1.0 before
fully rendering CAST; else the internal element of the
construct is rendered directly.
.. _mysql_indexes:
MySQL Specific Index Options
----------------------------
MySQL-specific extensions to the :class:`.Index` construct are available.
Index Length
~~~~~~~~~~~~~
MySQL provides an option to create index entries with a certain length, where
"length" refers to the number of characters or bytes in each value which will
become part of the index. SQLAlchemy provides this feature via the
``mysql_length`` parameter::
Index('my_index', my_table.c.data, mysql_length=10)
Index('a_b_idx', my_table.c.a, my_table.c.b, mysql_length={'a': 4,
'b': 9})
Prefix lengths are given in characters for nonbinary string types and in bytes
for binary string types. The value passed to the keyword argument *must* be
either an integer (and, thus, specify the same prefix length value for all
columns of the index) or a dict in which keys are column names and values are
prefix length values for corresponding columns. MySQL only allows a length
for an index column if it is of type CHAR, VARCHAR, TEXT, BINARY, VARBINARY
or BLOB.
Index Prefixes
~~~~~~~~~~~~~~
MySQL storage engines permit you to specify an index prefix when creating
an index. SQLAlchemy provides this feature via the
``mysql_prefix`` parameter on :class:`.Index`::
Index('my_index', my_table.c.data, mysql_prefix='FULLTEXT')
The value passed to the keyword argument will be simply passed through to the
underlying CREATE INDEX, so it *must* be a valid index prefix for your MySQL
storage engine.
.. versionadded:: 1.1.5
.. seealso::
`CREATE INDEX <http://dev.mysql.com/doc/refman/5.0/en/create-index.html>`_ - MySQL documentation
Index Types
~~~~~~~~~~~~~
Some MySQL storage engines permit you to specify an index type when creating
an index or primary key constraint. SQLAlchemy provides this feature via the
``mysql_using`` parameter on :class:`.Index`::
Index('my_index', my_table.c.data, mysql_using='hash')
As well as the ``mysql_using`` parameter on :class:`.PrimaryKeyConstraint`::
PrimaryKeyConstraint("data", mysql_using='hash')
The value passed to the keyword argument will be simply passed through to the
underlying CREATE INDEX or PRIMARY KEY clause, so it *must* be a valid index
type for your MySQL storage engine.
More information can be found at:
http://dev.mysql.com/doc/refman/5.0/en/create-index.html
http://dev.mysql.com/doc/refman/5.0/en/create-table.html
Index Parsers
~~~~~~~~~~~~~
CREATE FULLTEXT INDEX in MySQL also supports a "WITH PARSER" option. This
is available using the keyword argument ``mysql_with_parser``::
Index(
'my_index', my_table.c.data,
mysql_prefix='FULLTEXT', mysql_with_parser="ngram")
.. versionadded:: 1.3
.. _mysql_foreign_keys:
MySQL Foreign Keys
------------------
MySQL's behavior regarding foreign keys has some important caveats.
Foreign Key Arguments to Avoid
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MySQL does not support the foreign key arguments "DEFERRABLE", "INITIALLY",
or "MATCH". Using the ``deferrable`` or ``initially`` keyword argument with
:class:`_schema.ForeignKeyConstraint` or :class:`_schema.ForeignKey`
will have the effect of
these keywords being rendered in a DDL expression, which will then raise an
error on MySQL. In order to use these keywords on a foreign key while having
them ignored on a MySQL backend, use a custom compile rule::
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.schema import ForeignKeyConstraint
@compiles(ForeignKeyConstraint, "mysql")
def process(element, compiler, **kw):
element.deferrable = element.initially = None
return compiler.visit_foreign_key_constraint(element, **kw)
.. versionchanged:: 0.9.0 - the MySQL backend no longer silently ignores
the ``deferrable`` or ``initially`` keyword arguments of
:class:`_schema.ForeignKeyConstraint` and :class:`_schema.ForeignKey`.
The "MATCH" keyword is in fact more insidious, and is explicitly disallowed
by SQLAlchemy in conjunction with the MySQL backend. This argument is
silently ignored by MySQL, but in addition has the effect of ON UPDATE and ON
DELETE options also being ignored by the backend. Therefore MATCH should
never be used with the MySQL backend; as is the case with DEFERRABLE and
INITIALLY, custom compilation rules can be used to correct a MySQL
ForeignKeyConstraint at DDL definition time.
.. versionadded:: 0.9.0 - the MySQL backend will raise a
:class:`.CompileError` when the ``match`` keyword is used with
:class:`_schema.ForeignKeyConstraint` or :class:`_schema.ForeignKey`.
Reflection of Foreign Key Constraints
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Not all MySQL storage engines support foreign keys. When using the
very common ``MyISAM`` MySQL storage engine, the information loaded by table
reflection will not include foreign keys. For these tables, you may supply a
:class:`~sqlalchemy.ForeignKeyConstraint` at reflection time::
Table('mytable', metadata,
ForeignKeyConstraint(['other_id'], ['othertable.other_id']),
autoload=True
)
.. seealso::
:ref:`mysql_storage_engines`
.. _mysql_unique_constraints:
MySQL Unique Constraints and Reflection
---------------------------------------
SQLAlchemy supports both the :class:`.Index` construct with the
flag ``unique=True``, indicating a UNIQUE index, as well as the
:class:`.UniqueConstraint` construct, representing a UNIQUE constraint.
Both objects/syntaxes are supported by MySQL when emitting DDL to create
these constraints. However, MySQL does not have a unique constraint
construct that is separate from a unique index; that is, the "UNIQUE"
constraint on MySQL is equivalent to creating a "UNIQUE INDEX".
When reflecting these constructs, the
:meth:`_reflection.Inspector.get_indexes`
and the :meth:`_reflection.Inspector.get_unique_constraints`
methods will **both**
return an entry for a UNIQUE index in MySQL. However, when performing
full table reflection using ``Table(..., autoload=True)``,
the :class:`.UniqueConstraint` construct is
**not** part of the fully reflected :class:`_schema.Table` construct under any
circumstances; this construct is always represented by a :class:`.Index`
with the ``unique=True`` setting present in the :attr:`_schema.Table.indexes`
collection.
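For example, either of the following sketches emits an equivalent UNIQUE
index on MySQL (table and column names are illustrative)::
my_table = Table(
"my_table", metadata,
Column("data", String(50)),
UniqueConstraint("data", name="uq_data")
)
Index("ix_data", my_table.c.data, unique=True)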
.. _mysql_timestamp_null:
TIMESTAMP Columns and NULL
--------------------------
MySQL historically enforces that a column which specifies the
TIMESTAMP datatype implicitly includes a default value of
CURRENT_TIMESTAMP, even though this is not stated, and additionally
sets the column as NOT NULL, the opposite behavior vs. that of all
other datatypes::
mysql> CREATE TABLE ts_test (
-> a INTEGER,
-> b INTEGER NOT NULL,
-> c TIMESTAMP,
-> d TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-> e TIMESTAMP NULL);
Query OK, 0 rows affected (0.03 sec)
mysql> SHOW CREATE TABLE ts_test;
+---------+-----------------------------------------------------
| Table | Create Table
+---------+-----------------------------------------------------
| ts_test | CREATE TABLE `ts_test` (
`a` int(11) DEFAULT NULL,
`b` int(11) NOT NULL,
`c` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`d` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`e` timestamp NULL DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
Above, we see that an INTEGER column defaults to NULL, unless it is specified
with NOT NULL. But when the column is of type TIMESTAMP, an implicit
default of CURRENT_TIMESTAMP is generated which also coerces the column
to be a NOT NULL, even though we did not specify it as such.
This behavior of MySQL can be changed on the MySQL side using the
`explicit_defaults_for_timestamp
<http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html
#sysvar_explicit_defaults_for_timestamp>`_ configuration flag introduced in
MySQL 5.6. With this server setting enabled, TIMESTAMP columns behave like
any other datatype on the MySQL side with regards to defaults and nullability.
However, to accommodate the vast majority of MySQL databases that do not
specify this new flag, SQLAlchemy emits the "NULL" specifier explicitly with
any TIMESTAMP column that does not specify ``nullable=False``. In order to
accommodate newer databases that specify ``explicit_defaults_for_timestamp``,
SQLAlchemy also emits NOT NULL for TIMESTAMP columns that do specify
``nullable=False``. The following example illustrates::
from sqlalchemy import MetaData, Integer, Table, Column, text
from sqlalchemy.dialects.mysql import TIMESTAMP
m = MetaData()
t = Table('ts_test', m,
Column('a', Integer),
Column('b', Integer, nullable=False),
Column('c', TIMESTAMP),
Column('d', TIMESTAMP, nullable=False)
)
from sqlalchemy import create_engine
e = create_engine("mysql://scott:tiger@localhost/test", echo=True)
m.create_all(e)
output::
CREATE TABLE ts_test (
a INTEGER,
b INTEGER NOT NULL,
c TIMESTAMP NULL,
d TIMESTAMP NOT NULL
)
.. versionchanged:: 1.0.0 - SQLAlchemy now renders NULL or NOT NULL in all
cases for TIMESTAMP columns, to accommodate
``explicit_defaults_for_timestamp``. Prior to this version, it would
not render "NOT NULL" for a TIMESTAMP column that is ``nullable=False``.
""" # noqa
from array import array as _array
from collections import defaultdict
import re
import sys
from sqlalchemy import literal_column
from sqlalchemy.sql import visitors
from . import reflection as _reflection
from .enumerated import ENUM
from .enumerated import SET
from .json import JSON
from .json import JSONIndexType
from .json import JSONPathType
from .types import _FloatType
from .types import _IntegerType
from .types import _MatchType
from .types import _NumericType
from .types import _StringType
from .types import BIGINT
from .types import BIT
from .types import CHAR
from .types import DATETIME
from .types import DECIMAL
from .types import DOUBLE
from .types import FLOAT
from .types import INTEGER
from .types import LONGBLOB
from .types import LONGTEXT
from .types import MEDIUMBLOB
from .types import MEDIUMINT
from .types import MEDIUMTEXT
from .types import NCHAR
from .types import NUMERIC
from .types import NVARCHAR
from .types import REAL
from .types import SMALLINT
from .types import TEXT
from .types import TIME
from .types import TIMESTAMP
from .types import TINYBLOB
from .types import TINYINT
from .types import TINYTEXT
from .types import VARCHAR
from .types import YEAR
from ... import exc
from ... import log
from ... import schema as sa_schema
from ... import sql
from ... import types as sqltypes
from ... import util
from ...engine import default
from ...engine import reflection
from ...sql import compiler
from ...sql import elements
from ...sql import util as sql_util
from ...types import BINARY
from ...types import BLOB
from ...types import BOOLEAN
from ...types import DATE
from ...types import VARBINARY
from ...util import topological
RESERVED_WORDS = set(
[
"accessible",
"add",
"admin",
"all",
"alter",
"analyze",
"and",
"array", # 8.0
"as",
"asc",
"asensitive",
"before",
"between",
"bigint",
"binary",
"blob",
"both",
"by",
"call",
"cascade",
"case",
"change",
"char",
"character",
"check",
"collate",
"column",
"columns",
"condition",
"constraint",
"continue",
"convert",
"create",
"cross",
"cume_dist",
"current_date",
"current_time",
"current_timestamp",
"current_user",
"cursor",
"database",
"databases",
"day_hour",
"day_microsecond",
"day_minute",
"day_second",
"dec",
"decimal",
"declare",
"default",
"delayed",
"delete",
"desc",
"describe",
"deterministic",
"distinct",
"distinctrow",
"div",
"double",
"drop",
"dual",
"each",
"else",
"elseif",
"empty",
"enclosed",
"escaped",
"except",
"exists",
"exit",
"explain",
"false",
"fetch",
"fields",
"first_value",
"float",
"float4",
"float8",
"for",
"force",
"foreign",
"from",
"fulltext",
"function",
"general",
"generated",
"get",
"grant",
"group",
"grouping",
"groups",
"having",
"high_priority",
"hour_microsecond",
"hour_minute",
"hour_second",
"if",
"ignore",
"ignore_server_ids",
"in",
"index",
"infile",
"inner",
"inout",
"insensitive",
"insert",
"int",
"int1",
"int2",
"int3",
"int4",
"int8",
"integer",
"interval",
"into",
"io_after_gtids",
"io_before_gtids",
"is",
"iterate",
"join",
"json_table",
"key",
"keys",
"kill",
"last_value",
"leading",
"leave",
"left",
"like",
"limit",
"linear",
"linear",
"lines",
"load",
"localtime",
"localtimestamp",
"lock",
"long",
"longblob",
"longtext",
"loop",
"low_priority",
"master_bind",
"master_heartbeat_period",
"master_ssl_verify_server_cert",
"master_ssl_verify_server_cert",
"match",
"maxvalue",
"mediumblob",
"mediumint",
"mediumtext",
"member", # 8.0
"middleint",
"minute_microsecond",
"minute_second",
"mod",
"modifies",
"natural",
"no_write_to_binlog",
"not",
"nth_value",
"ntile",
"null",
"numeric",
"of",
"on",
"one_shot",
"optimize",
"optimizer_costs",
"option",
"optionally",
"or",
"order",
"out",
"outer",
"outfile",
"over",
"partition",
"percent_rank",
"persist",
"persist_only",
"precision",
"primary",
"privileges",
"procedure",
"purge",
"range",
"range",
"rank",
"read",
"read_only",
"read_only",
"read_write",
"read_write", # 5.1
"reads",
"real",
"recursive",
"references",
"regexp",
"release",
"rename",
"repeat",
"replace",
"require",
"resignal",
"restrict",
"return",
"revoke",
"right",
"rlike",
"role",
"row",
"row_number",
"rows",
"schema",
"schemas",
"second_microsecond",
"select",
"sensitive",
"separator",
"set",
"show",
"signal",
"slow", # 5.5
"smallint",
"soname",
"spatial",
"specific",
"sql",
"sql_after_gtids",
"sql_before_gtids", # 5.6
"sql_big_result",
"sql_calc_found_rows",
"sql_small_result",
"sqlexception",
"sqlstate",
"sqlwarning",
"ssl",
"starting",
"stored",
"straight_join",
"system",
"table",
"tables", # 4.1
"terminated",
"then",
"tinyblob",
"tinyint",
"tinytext",
"to",
"trailing",
"trigger",
"true",
"undo",
"union",
"unique",
"unlock",
"unsigned",
"update",
"usage",
"use",
"using",
"utc_date",
"utc_time",
"utc_timestamp",
"values",
"varbinary",
"varchar",
"varcharacter",
"varying",
"virtual", # 5.7
"when",
"where",
"while",
"window", # 8.0
"with",
"write",
"x509",
"xor",
"year_month",
"zerofill", # 5.0
]
)
AUTOCOMMIT_RE = re.compile(
r"\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|LOAD +DATA|REPLACE)",
re.I | re.UNICODE,
)
SET_RE = re.compile(
r"\s*SET\s+(?:(?:GLOBAL|SESSION)\s+)?\w", re.I | re.UNICODE
)
# old names
MSTime = TIME
MSSet = SET
MSEnum = ENUM
MSLongBlob = LONGBLOB
MSMediumBlob = MEDIUMBLOB
MSTinyBlob = TINYBLOB
MSBlob = BLOB
MSBinary = BINARY
MSVarBinary = VARBINARY
MSNChar = NCHAR
MSNVarChar = NVARCHAR
MSChar = CHAR
MSString = VARCHAR
MSLongText = LONGTEXT
MSMediumText = MEDIUMTEXT
MSTinyText = TINYTEXT
MSText = TEXT
MSYear = YEAR
MSTimeStamp = TIMESTAMP
MSBit = BIT
MSSmallInteger = SMALLINT
MSTinyInteger = TINYINT
MSMediumInteger = MEDIUMINT
MSBigInteger = BIGINT
MSNumeric = NUMERIC
MSDecimal = DECIMAL
MSDouble = DOUBLE
MSReal = REAL
MSFloat = FLOAT
MSInteger = INTEGER
colspecs = {
_IntegerType: _IntegerType,
_NumericType: _NumericType,
_FloatType: _FloatType,
sqltypes.Numeric: NUMERIC,
sqltypes.Float: FLOAT,
sqltypes.Time: TIME,
sqltypes.Enum: ENUM,
sqltypes.MatchType: _MatchType,
sqltypes.JSON: JSON,
sqltypes.JSON.JSONIndexType: JSONIndexType,
sqltypes.JSON.JSONPathType: JSONPathType,
}
# Everything 3.23 through 5.1 excepting OpenGIS types.
ischema_names = {
"bigint": BIGINT,
"binary": BINARY,
"bit": BIT,
"blob": BLOB,
"boolean": BOOLEAN,
"char": CHAR,
"date": DATE,
"datetime": DATETIME,
"decimal": DECIMAL,
"double": DOUBLE,
"enum": ENUM,
"fixed": DECIMAL,
"float": FLOAT,
"int": INTEGER,
"integer": INTEGER,
"json": JSON,
"longblob": LONGBLOB,
"longtext": LONGTEXT,
"mediumblob": MEDIUMBLOB,
"mediumint": MEDIUMINT,
"mediumtext": MEDIUMTEXT,
"nchar": NCHAR,
"nvarchar": NVARCHAR,
"numeric": NUMERIC,
"set": SET,
"smallint": SMALLINT,
"text": TEXT,
"time": TIME,
"timestamp": TIMESTAMP,
"tinyblob": TINYBLOB,
"tinyint": TINYINT,
"tinytext": TINYTEXT,
"varbinary": VARBINARY,
"varchar": VARCHAR,
"year": YEAR,
}
class MySQLExecutionContext(default.DefaultExecutionContext):
def should_autocommit_text(self, statement):
return AUTOCOMMIT_RE.match(statement)
def create_server_side_cursor(self):
if self.dialect.supports_server_side_cursors:
return self._dbapi_connection.cursor(self.dialect._sscursor)
else:
raise NotImplementedError()
class MySQLCompiler(compiler.SQLCompiler):
render_table_with_column_in_update_from = True
"""Overridden from base SQLCompiler value"""
extract_map = compiler.SQLCompiler.extract_map.copy()
extract_map.update({"milliseconds": "millisecond"})
def visit_random_func(self, fn, **kw):
return "rand%s" % self.function_argspec(fn)
def visit_sysdate_func(self, fn, **kw):
return "SYSDATE()"
def _render_json_extract_from_binary(self, binary, operator, **kw):
# note we are intentionally calling upon the process() calls in the
# order in which they appear in the SQL String as this is used
# by positional parameter rendering
if binary.type._type_affinity is sqltypes.JSON:
return "JSON_EXTRACT(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
# for non-JSON, MySQL doesn't handle JSON null at all so it has to
# be explicit
case_expression = "CASE JSON_EXTRACT(%s, %s) WHEN 'null' THEN NULL" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
if binary.type._type_affinity is sqltypes.Integer:
type_expression = (
"ELSE CAST(JSON_EXTRACT(%s, %s) AS SIGNED INTEGER)"
% (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
)
elif binary.type._type_affinity is sqltypes.Numeric:
# FLOAT / REAL not added in MySQL until 8.0.17
type_expression = (
"ELSE CAST(JSON_EXTRACT(%s, %s) AS DECIMAL(10, 6))"
% (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
)
elif binary.type._type_affinity is sqltypes.Boolean:
# the NULL handling is particularly weird with boolean, so
# explicitly return true/false constants
type_expression = "WHEN true THEN true ELSE false"
elif binary.type._type_affinity is sqltypes.String:
# this fails with a JSON value that's a four byte unicode
# string. SQLite has the same problem at the moment
type_expression = "ELSE JSON_UNQUOTE(JSON_EXTRACT(%s, %s))" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
else:
# other affinity....this is not expected right now
type_expression = "ELSE JSON_EXTRACT(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
return case_expression + " " + type_expression + " END"
def visit_json_getitem_op_binary(self, binary, operator, **kw):
return self._render_json_extract_from_binary(binary, operator, **kw)
def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
return self._render_json_extract_from_binary(binary, operator, **kw)
def visit_on_duplicate_key_update(self, on_duplicate, **kw):
if on_duplicate._parameter_ordering:
parameter_ordering = [
elements._column_as_key(key)
for key in on_duplicate._parameter_ordering
]
ordered_keys = set(parameter_ordering)
cols = [
self.statement.table.c[key]
for key in parameter_ordering
if key in self.statement.table.c
] + [
c for c in self.statement.table.c if c.key not in ordered_keys
]
else:
cols = self.statement.table.c
clauses = []
# traverses through all table columns to preserve table column order
for column in (col for col in cols if col.key in on_duplicate.update):
val = on_duplicate.update[column.key]
if elements._is_literal(val):
val = elements.BindParameter(None, val, type_=column.type)
value_text = self.process(val.self_group(), use_schema=False)
else:
def replace(obj):
if (
isinstance(obj, elements.BindParameter)
and obj.type._isnull
):
obj = obj._clone()
obj.type = column.type
return obj
elif (
isinstance(obj, elements.ColumnClause)
and obj.table is on_duplicate.inserted_alias
):
obj = literal_column(
"VALUES(" + self.preparer.quote(column.name) + ")"
)
return obj
else:
# element is not replaced
return None
val = visitors.replacement_traverse(val, {}, replace)
value_text = self.process(val.self_group(), use_schema=False)
name_text = self.preparer.quote(column.name)
clauses.append("%s = %s" % (name_text, value_text))
non_matching = set(on_duplicate.update) - set(c.key for c in cols)
if non_matching:
util.warn(
"Additional column names not matching "
"any column keys in table '%s': %s"
% (
self.statement.table.name,
(", ".join("'%s'" % c for c in non_matching)),
)
)
return "ON DUPLICATE KEY UPDATE " + ", ".join(clauses)
def visit_concat_op_binary(self, binary, operator, **kw):
return "concat(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_match_op_binary(self, binary, operator, **kw):
return "MATCH (%s) AGAINST (%s IN BOOLEAN MODE)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def get_from_hint_text(self, table, text):
return text
def visit_typeclause(self, typeclause, type_=None, **kw):
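# map the expression's type to one of the limited type names that MySQL's
# CAST accepts; returning None signals visit_cast() to skip the CAST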
if type_ is None:
type_ = typeclause.type.dialect_impl(self.dialect)
if isinstance(type_, sqltypes.TypeDecorator):
return self.visit_typeclause(typeclause, type_.impl, **kw)
elif isinstance(type_, sqltypes.Integer):
if getattr(type_, "unsigned", False):
return "UNSIGNED INTEGER"
else:
return "SIGNED INTEGER"
elif isinstance(type_, sqltypes.TIMESTAMP):
return "DATETIME"
elif isinstance(
type_,
(
sqltypes.DECIMAL,
sqltypes.DateTime,
sqltypes.Date,
sqltypes.Time,
),
):
return self.dialect.type_compiler.process(type_)
elif isinstance(type_, sqltypes.String) and not isinstance(
type_, (ENUM, SET)
):
adapted = CHAR._adapt_string_for_cast(type_)
return self.dialect.type_compiler.process(adapted)
elif isinstance(type_, sqltypes._Binary):
return "BINARY"
elif isinstance(type_, sqltypes.JSON):
return "JSON"
elif isinstance(type_, sqltypes.NUMERIC):
return self.dialect.type_compiler.process(type_).replace(
"NUMERIC", "DECIMAL"
)
else:
return None
def visit_cast(self, cast, **kw):
# No cast until 4, no decimals until 5.
if not self.dialect._supports_cast:
util.warn(
"Current MySQL version does not support "
"CAST; the CAST will be skipped."
)
return self.process(cast.clause.self_group(), **kw)
type_ = self.process(cast.typeclause)
if type_ is None:
util.warn(
"Datatype %s does not support CAST on MySQL; "
"the CAST will be skipped."
% self.dialect.type_compiler.process(cast.typeclause.type)
)
return self.process(cast.clause.self_group(), **kw)
return "CAST(%s AS %s)" % (self.process(cast.clause, **kw), type_)
def render_literal_value(self, value, type_):
value = super(MySQLCompiler, self).render_literal_value(value, type_)
if self.dialect._backslash_escapes:
value = value.replace("\\", "\\\\")
return value
# override native_boolean=False behavior here, as
# MySQL still supports native boolean
def visit_true(self, element, **kw):
return "true"
def visit_false(self, element, **kw):
return "false"
def get_select_precolumns(self, select, **kw):
"""Add special MySQL keywords in place of DISTINCT.
.. note::
this usage is deprecated. :meth:`_expression.Select.prefix_with`
should be used for special keywords at the start
of a SELECT.
"""
if isinstance(select._distinct, util.string_types):
return select._distinct.upper() + " "
elif select._distinct:
return "DISTINCT "
else:
return ""
def visit_join(self, join, asfrom=False, **kwargs):
if join.full:
join_type = " FULL OUTER JOIN "
elif join.isouter:
join_type = " LEFT OUTER JOIN "
else:
join_type = " INNER JOIN "
return "".join(
(
self.process(join.left, asfrom=True, **kwargs),
join_type,
self.process(join.right, asfrom=True, **kwargs),
" ON ",
self.process(join.onclause, **kwargs),
)
)
def for_update_clause(self, select, **kw):
if select._for_update_arg.read:
tmp = " LOCK IN SHARE MODE"
else:
tmp = " FOR UPDATE"
if select._for_update_arg.of and self.dialect.supports_for_update_of:
tables = util.OrderedSet()
for c in select._for_update_arg.of:
tables.update(sql_util.surface_selectables_only(c))
tmp += " OF " + ", ".join(
self.process(table, ashint=True, use_schema=False, **kw)
for table in tables
)
if select._for_update_arg.nowait:
tmp += " NOWAIT"
if select._for_update_arg.skip_locked and self.dialect._is_mysql:
tmp += " SKIP LOCKED"
return tmp
def limit_clause(self, select, **kw):
# MySQL supports:
# LIMIT <limit>
# LIMIT <offset>, <limit>
# and in server versions > 3.3:
# LIMIT <limit> OFFSET <offset>
# The latter is more readable for offsets but we're stuck with the
# former until we can refine dialects by server revision.
limit_clause, offset_clause = (
select._limit_clause,
select._offset_clause,
)
if limit_clause is None and offset_clause is None:
return ""
elif offset_clause is not None:
# As suggested by the MySQL docs, need to apply an
# artificial limit if one wasn't provided
# http://dev.mysql.com/doc/refman/5.0/en/select.html
if limit_clause is None:
# hardwire the upper limit. Currently
# needed by OurSQL with Python 3
# (https://bugs.launchpad.net/oursql/+bug/686232),
# but also is consistent with the usage of the upper
# bound as part of MySQL's "syntax" for OFFSET with
# no LIMIT
return " \n LIMIT %s, %s" % (
self.process(offset_clause, **kw),
"18446744073709551615",
)
else:
return " \n LIMIT %s, %s" % (
self.process(offset_clause, **kw),
self.process(limit_clause, **kw),
)
else:
# No offset provided, so just use the limit
return " \n LIMIT %s" % (self.process(limit_clause, **kw),)
def update_limit_clause(self, update_stmt):
limit = update_stmt.kwargs.get("%s_limit" % self.dialect.name, None)
if limit:
return "LIMIT %s" % limit
else:
return None
def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw):
return ", ".join(
t._compiler_dispatch(self, asfrom=True, **kw)
for t in [from_table] + list(extra_froms)
)
def update_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
return None
def delete_table_clause(self, delete_stmt, from_table, extra_froms):
"""If we have extra froms make sure we render any alias as hint."""
ashint = False
if extra_froms:
ashint = True
return from_table._compiler_dispatch(
self, asfrom=True, iscrud=True, ashint=ashint
)
def delete_extra_from_clause(
self, delete_stmt, from_table, extra_froms, from_hints, **kw
):
"""Render the DELETE .. USING clause specific to MySQL."""
return "USING " + ", ".join(
t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
for t in [from_table] + extra_froms
)
def visit_empty_set_expr(self, element_types):
return (
"SELECT %(outer)s FROM (SELECT %(inner)s) "
"as _empty_set WHERE 1!=1"
% {
"inner": ", ".join(
"1 AS _in_%s" % idx
for idx, type_ in enumerate(element_types)
),
"outer": ", ".join(
"_in_%s" % idx for idx, type_ in enumerate(element_types)
),
}
)
def visit_is_distinct_from_binary(self, binary, operator, **kw):
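# <=> is MySQL's NULL-safe equality operator; IS DISTINCT FROM is
# therefore rendered as its negation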
return "NOT (%s <=> %s)" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_isnot_distinct_from_binary(self, binary, operator, **kw):
return "%s <=> %s" % (
self.process(binary.left),
self.process(binary.right),
)
class MySQLDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kw):
"""Builds column DDL."""
colspec = [
self.preparer.format_column(column),
self.dialect.type_compiler.process(
column.type, type_expression=column
),
]
if column.computed is not None:
colspec.append(self.process(column.computed))
is_timestamp = isinstance(
column.type._unwrapped_dialect_impl(self.dialect),
sqltypes.TIMESTAMP,
)
if not column.nullable:
colspec.append("NOT NULL")
# see: http://docs.sqlalchemy.org/en/latest/dialects/
# mysql.html#mysql_timestamp_null
elif column.nullable and is_timestamp:
colspec.append("NULL")
default = self.get_column_default_string(column)
if default is not None:
colspec.append("DEFAULT " + default)
comment = column.comment
if comment is not None:
literal = self.sql_compiler.render_literal_value(
comment, sqltypes.String()
)
colspec.append("COMMENT " + literal)
if (
column.table is not None
and column is column.table._autoincrement_column
and column.server_default is None
):
colspec.append("AUTO_INCREMENT")
return " ".join(colspec)
def post_create_table(self, table):
"""Build table-level CREATE options like ENGINE and COLLATE."""
table_opts = []
opts = dict(
(k[len(self.dialect.name) + 1 :].upper(), v)
for k, v in table.kwargs.items()
if k.startswith("%s_" % self.dialect.name)
)
if table.comment is not None:
opts["COMMENT"] = table.comment
partition_options = [
"PARTITION_BY",
"PARTITIONS",
"SUBPARTITIONS",
"SUBPARTITION_BY",
]
nonpart_options = set(opts).difference(partition_options)
part_options = set(opts).intersection(partition_options)
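# each tuple passed to topological.sort is an ordering dependency:
# the first option must be rendered before the second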
for opt in topological.sort(
[
("DEFAULT_CHARSET", "COLLATE"),
("DEFAULT_CHARACTER_SET", "COLLATE"),
],
nonpart_options,
):
arg = opts[opt]
if opt in _reflection._options_of_type_string:
arg = self.sql_compiler.render_literal_value(
arg, sqltypes.String()
)
if opt in (
"DATA_DIRECTORY",
"INDEX_DIRECTORY",
"DEFAULT_CHARACTER_SET",
"CHARACTER_SET",
"DEFAULT_CHARSET",
"DEFAULT_COLLATE",
):
opt = opt.replace("_", " ")
joiner = "="
if opt in (
"TABLESPACE",
"DEFAULT CHARACTER SET",
"CHARACTER SET",
"COLLATE",
):
joiner = " "
table_opts.append(joiner.join((opt, arg)))
for opt in topological.sort(
[
("PARTITION_BY", "PARTITIONS"),
("PARTITION_BY", "SUBPARTITION_BY"),
("PARTITION_BY", "SUBPARTITIONS"),
("PARTITIONS", "SUBPARTITIONS"),
("PARTITIONS", "SUBPARTITION_BY"),
("SUBPARTITION_BY", "SUBPARTITIONS"),
],
part_options,
):
arg = opts[opt]
if opt in _reflection._options_of_type_string:
arg = self.sql_compiler.render_literal_value(
arg, sqltypes.String()
)
opt = opt.replace("_", " ")
joiner = " "
table_opts.append(joiner.join((opt, arg)))
return " ".join(table_opts)
def visit_create_index(self, create, **kw):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
table = preparer.format_table(index.table)
columns = [
self.sql_compiler.process(
expr, include_table=False, literal_binds=True
)
for expr in index.expressions
]
name = self._prepared_index_name(index)
text = "CREATE "
if index.unique:
text += "UNIQUE "
index_prefix = index.kwargs.get("mysql_prefix", None)
if index_prefix:
text += index_prefix + " "
text += "INDEX %s ON %s " % (name, table)
length = index.dialect_options["mysql"]["length"]
if length is not None:
if isinstance(length, dict):
# length value can be a (column_name --> integer value)
# mapping specifying the prefix length for each column of the
# index
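# e.g. a hypothetical Index("idx", tbl.c.data, tbl.c.misc,
# mysql_length={"data": 10, "misc": 5}) renders "data(10), misc(5)"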
columns = ", ".join(
"%s(%d)" % (expr, length[col.name])
if col.name in length
else (
"%s(%d)" % (expr, length[expr])
if expr in length
else "%s" % expr
)
for col, expr in zip(index.expressions, columns)
)
else:
# or can be an integer value specifying the same
# prefix length for all columns of the index
columns = ", ".join(
"%s(%d)" % (col, length) for col in columns
)
else:
columns = ", ".join(columns)
text += "(%s)" % columns
parser = index.dialect_options["mysql"]["with_parser"]
if parser is not None:
text += " WITH PARSER %s" % (parser,)
using = index.dialect_options["mysql"]["using"]
if using is not None:
text += " USING %s" % (preparer.quote(using))
return text
def visit_primary_key_constraint(self, constraint):
text = super(MySQLDDLCompiler, self).visit_primary_key_constraint(
constraint
)
using = constraint.dialect_options["mysql"]["using"]
if using:
text += " USING %s" % (self.preparer.quote(using))
return text
def visit_drop_index(self, drop):
index = drop.element
return "\nDROP INDEX %s ON %s" % (
self._prepared_index_name(index, include_schema=False),
self.preparer.format_table(index.table),
)
def visit_drop_constraint(self, drop):
constraint = drop.element
if isinstance(constraint, sa_schema.ForeignKeyConstraint):
qual = "FOREIGN KEY "
const = self.preparer.format_constraint(constraint)
elif isinstance(constraint, sa_schema.PrimaryKeyConstraint):
qual = "PRIMARY KEY "
const = ""
elif isinstance(constraint, sa_schema.UniqueConstraint):
qual = "INDEX "
const = self.preparer.format_constraint(constraint)
elif isinstance(constraint, sa_schema.CheckConstraint):
if self.dialect._is_mariadb:
qual = "CONSTRAINT "
else:
qual = "CHECK "
const = self.preparer.format_constraint(constraint)
else:
qual = ""
const = self.preparer.format_constraint(constraint)
return "ALTER TABLE %s DROP %s%s" % (
self.preparer.format_table(constraint.table),
qual,
const,
)
def define_constraint_match(self, constraint):
if constraint.match is not None:
raise exc.CompileError(
"MySQL ignores the 'MATCH' keyword while at the same time "
"causes ON UPDATE/ON DELETE clauses to be ignored."
)
return ""
def visit_set_table_comment(self, create):
return "ALTER TABLE %s COMMENT %s" % (
self.preparer.format_table(create.element),
self.sql_compiler.render_literal_value(
create.element.comment, sqltypes.String()
),
)
def visit_drop_table_comment(self, create):
return "ALTER TABLE %s COMMENT ''" % (
self.preparer.format_table(create.element)
)
def visit_set_column_comment(self, create):
return "ALTER TABLE %s CHANGE %s %s" % (
self.preparer.format_table(create.element.table),
self.preparer.format_column(create.element),
self.get_column_specification(create.element),
)
class MySQLTypeCompiler(compiler.GenericTypeCompiler):
def _extend_numeric(self, type_, spec):
"Extend a numeric-type declaration with MySQL specific extensions."
if not self._mysql_type(type_):
return spec
if type_.unsigned:
spec += " UNSIGNED"
if type_.zerofill:
spec += " ZEROFILL"
return spec
def _extend_string(self, type_, defaults, spec):
"""Extend a string-type declaration with standard SQL CHARACTER SET /
COLLATE annotations and MySQL-specific extensions.
"""
def attr(name):
return getattr(type_, name, defaults.get(name))
if attr("charset"):
charset = "CHARACTER SET %s" % attr("charset")
elif attr("ascii"):
charset = "ASCII"
elif attr("unicode"):
charset = "UNICODE"
else:
charset = None
if attr("collation"):
collation = "COLLATE %s" % type_.collation
elif attr("binary"):
collation = "BINARY"
else:
collation = None
if attr("national"):
# NATIONAL (aka NCHAR/NVARCHAR) trumps charsets.
return " ".join(
[c for c in ("NATIONAL", spec, collation) if c is not None]
)
return " ".join(
[c for c in (spec, charset, collation) if c is not None]
)
def _mysql_type(self, type_):
return isinstance(type_, (_StringType, _NumericType))
def visit_NUMERIC(self, type_, **kw):
if type_.precision is None:
return self._extend_numeric(type_, "NUMERIC")
elif type_.scale is None:
return self._extend_numeric(
type_,
"NUMERIC(%(precision)s)" % {"precision": type_.precision},
)
else:
return self._extend_numeric(
type_,
"NUMERIC(%(precision)s, %(scale)s)"
% {"precision": type_.precision, "scale": type_.scale},
)
def visit_DECIMAL(self, type_, **kw):
if type_.precision is None:
return self._extend_numeric(type_, "DECIMAL")
elif type_.scale is None:
return self._extend_numeric(
type_,
"DECIMAL(%(precision)s)" % {"precision": type_.precision},
)
else:
return self._extend_numeric(
type_,
"DECIMAL(%(precision)s, %(scale)s)"
% {"precision": type_.precision, "scale": type_.scale},
)
def visit_DOUBLE(self, type_, **kw):
if type_.precision is not None and type_.scale is not None:
return self._extend_numeric(
type_,
"DOUBLE(%(precision)s, %(scale)s)"
% {"precision": type_.precision, "scale": type_.scale},
)
else:
return self._extend_numeric(type_, "DOUBLE")
def visit_REAL(self, type_, **kw):
if type_.precision is not None and type_.scale is not None:
return self._extend_numeric(
type_,
"REAL(%(precision)s, %(scale)s)"
% {"precision": type_.precision, "scale": type_.scale},
)
else:
return self._extend_numeric(type_, "REAL")
def visit_FLOAT(self, type_, **kw):
if (
self._mysql_type(type_)
and type_.scale is not None
and type_.precision is not None
):
return self._extend_numeric(
type_, "FLOAT(%s, %s)" % (type_.precision, type_.scale)
)
elif type_.precision is not None:
return self._extend_numeric(
type_, "FLOAT(%s)" % (type_.precision,)
)
else:
return self._extend_numeric(type_, "FLOAT")
def visit_INTEGER(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_,
"INTEGER(%(display_width)s)"
% {"display_width": type_.display_width},
)
else:
return self._extend_numeric(type_, "INTEGER")
def visit_BIGINT(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_,
"BIGINT(%(display_width)s)"
% {"display_width": type_.display_width},
)
else:
return self._extend_numeric(type_, "BIGINT")
def visit_MEDIUMINT(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_,
"MEDIUMINT(%(display_width)s)"
% {"display_width": type_.display_width},
)
else:
return self._extend_numeric(type_, "MEDIUMINT")
def visit_TINYINT(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_, "TINYINT(%s)" % type_.display_width
)
else:
return self._extend_numeric(type_, "TINYINT")
def visit_SMALLINT(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_,
"SMALLINT(%(display_width)s)"
% {"display_width": type_.display_width},
)
else:
return self._extend_numeric(type_, "SMALLINT")
def visit_BIT(self, type_, **kw):
if type_.length is not None:
return "BIT(%s)" % type_.length
else:
return "BIT"
def visit_DATETIME(self, type_, **kw):
if getattr(type_, "fsp", None):
return "DATETIME(%d)" % type_.fsp
else:
return "DATETIME"
def visit_DATE(self, type_, **kw):
return "DATE"
def visit_TIME(self, type_, **kw):
if getattr(type_, "fsp", None):
return "TIME(%d)" % type_.fsp
else:
return "TIME"
def visit_TIMESTAMP(self, type_, **kw):
if getattr(type_, "fsp", None):
return "TIMESTAMP(%d)" % type_.fsp
else:
return "TIMESTAMP"
def visit_YEAR(self, type_, **kw):
if type_.display_width is None:
return "YEAR"
else:
return "YEAR(%s)" % type_.display_width
def visit_TEXT(self, type_, **kw):
if type_.length:
return self._extend_string(type_, {}, "TEXT(%d)" % type_.length)
else:
return self._extend_string(type_, {}, "TEXT")
def visit_TINYTEXT(self, type_, **kw):
return self._extend_string(type_, {}, "TINYTEXT")
def visit_MEDIUMTEXT(self, type_, **kw):
return self._extend_string(type_, {}, "MEDIUMTEXT")
def visit_LONGTEXT(self, type_, **kw):
return self._extend_string(type_, {}, "LONGTEXT")
def visit_VARCHAR(self, type_, **kw):
if type_.length:
return self._extend_string(type_, {}, "VARCHAR(%d)" % type_.length)
else:
raise exc.CompileError(
"VARCHAR requires a length on dialect %s" % self.dialect.name
)
def visit_CHAR(self, type_, **kw):
if type_.length:
return self._extend_string(
type_, {}, "CHAR(%(length)s)" % {"length": type_.length}
)
else:
return self._extend_string(type_, {}, "CHAR")
def visit_NVARCHAR(self, type_, **kw):
# We'll actually generate the equiv. "NATIONAL VARCHAR" instead
# of "NVARCHAR".
if type_.length:
return self._extend_string(
type_,
{"national": True},
"VARCHAR(%(length)s)" % {"length": type_.length},
)
else:
raise exc.CompileError(
"NVARCHAR requires a length on dialect %s" % self.dialect.name
)
def visit_NCHAR(self, type_, **kw):
# We'll actually generate the equiv.
# "NATIONAL CHAR" instead of "NCHAR".
if type_.length:
return self._extend_string(
type_,
{"national": True},
"CHAR(%(length)s)" % {"length": type_.length},
)
else:
return self._extend_string(type_, {"national": True}, "CHAR")
def visit_VARBINARY(self, type_, **kw):
return "VARBINARY(%d)" % type_.length
def visit_JSON(self, type_, **kw):
return "JSON"
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_)
def visit_enum(self, type_, **kw):
if not type_.native_enum:
return super(MySQLTypeCompiler, self).visit_enum(type_)
else:
return self._visit_enumerated_values("ENUM", type_, type_.enums)
def visit_BLOB(self, type_, **kw):
if type_.length:
return "BLOB(%d)" % type_.length
else:
return "BLOB"
def visit_TINYBLOB(self, type_, **kw):
return "TINYBLOB"
def visit_MEDIUMBLOB(self, type_, **kw):
return "MEDIUMBLOB"
def visit_LONGBLOB(self, type_, **kw):
return "LONGBLOB"
def _visit_enumerated_values(self, name, type_, enumerated_values):
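# e.g. values ["a", "b'c"] render as ENUM('a','b''c')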
quoted_enums = []
for e in enumerated_values:
quoted_enums.append("'%s'" % e.replace("'", "''"))
return self._extend_string(
type_, {}, "%s(%s)" % (name, ",".join(quoted_enums))
)
def visit_ENUM(self, type_, **kw):
return self._visit_enumerated_values(
"ENUM", type_, type_._enumerated_values
)
def visit_SET(self, type_, **kw):
return self._visit_enumerated_values(
"SET", type_, type_._enumerated_values
)
def visit_BOOLEAN(self, type_, **kw):
return "BOOL"
class MySQLIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
def __init__(self, dialect, server_ansiquotes=False, **kw):
if not server_ansiquotes:
quote = "`"
else:
quote = '"'
super(MySQLIdentifierPreparer, self).__init__(
dialect, initial_quote=quote, escape_quote=quote
)
def _quote_free_identifiers(self, *ids):
"""Unilaterally identifier-quote any number of strings."""
return tuple([self.quote_identifier(i) for i in ids if i is not None])
@log.class_logger
class MySQLDialect(default.DefaultDialect):
"""Details of the MySQL dialect.
Not used directly in application code.
"""
name = "mysql"
supports_alter = True
# MySQL has no true "boolean" type; we
# allow for the "true" and "false" keywords, however
supports_native_boolean = False
# identifiers are limited to 64 characters, however aliases can be 255...
max_identifier_length = 255
max_index_name_length = 64
supports_native_enum = True
supports_for_update_of = False # default for MySQL ...
# ... may be updated to True for MySQL 8+ in initialize()
supports_sane_rowcount = True
supports_sane_multi_rowcount = False
supports_multivalues_insert = True
supports_comments = True
inline_comments = True
default_paramstyle = "format"
colspecs = colspecs
cte_follows_insert = True
statement_compiler = MySQLCompiler
ddl_compiler = MySQLDDLCompiler
type_compiler = MySQLTypeCompiler
ischema_names = ischema_names
preparer = MySQLIdentifierPreparer
# default SQL compilation settings -
# these are modified upon initialize(),
# i.e. first connect
_backslash_escapes = True
_server_ansiquotes = False
construct_arguments = [
(sa_schema.Table, {"*": None}),
(sql.Update, {"limit": None}),
(sa_schema.PrimaryKeyConstraint, {"using": None}),
(
sa_schema.Index,
{
"using": None,
"length": None,
"prefix": None,
"with_parser": None,
},
),
]
def __init__(
self,
isolation_level=None,
json_serializer=None,
json_deserializer=None,
**kwargs
):
kwargs.pop("use_ansiquotes", None) # legacy
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
self._json_serializer = json_serializer
self._json_deserializer = json_deserializer
def on_connect(self):
if self.isolation_level is not None:
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
return connect
else:
return None
_isolation_lookup = set(
[
"SERIALIZABLE",
"READ UNCOMMITTED",
"READ COMMITTED",
"REPEATABLE READ",
]
)
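# a hedged usage sketch: these level names are typically supplied at
# engine creation time, e.g.
# create_engine("mysql://scott:tiger@localhost/test",
#               isolation_level="READ COMMITTED")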
def set_isolation_level(self, connection, level):
level = level.replace("_", " ")
# adjust for ConnectionFairy being present
# allows attribute set e.g. "connection.autocommit = True"
# to work properly
if hasattr(connection, "connection"):
connection = connection.connection
self._set_isolation_level(connection, level)
def _set_isolation_level(self, connection, level):
if level not in self._isolation_lookup:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s"
% (level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
cursor.execute("SET SESSION TRANSACTION ISOLATION LEVEL %s" % level)
cursor.execute("COMMIT")
cursor.close()
def get_isolation_level(self, connection):
cursor = connection.cursor()
if self._is_mysql and self.server_version_info >= (5, 7, 20):
cursor.execute("SELECT @@transaction_isolation")
else:
cursor.execute("SELECT @@tx_isolation")
row = cursor.fetchone()
if row is None:
util.warn(
"Could not retrieve transaction isolation level for MySQL "
"connection."
)
raise NotImplementedError()
val = row[0]
cursor.close()
if util.py3k and isinstance(val, bytes):
val = val.decode()
return val.upper().replace("-", " ")
def _get_server_version_info(self, connection):
# get database server version info explicitly over the wire
# to avoid proxy servers like MaxScale getting in the
# way with their own values, see #4205
dbapi_con = connection.connection
cursor = dbapi_con.cursor()
cursor.execute("SELECT VERSION()")
val = cursor.fetchone()[0]
cursor.close()
if util.py3k and isinstance(val, bytes):
val = val.decode()
return self._parse_server_version(val)
def _parse_server_version(self, val):
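# illustrative inputs: "5.7.20-log" -> (5, 7, 20, "log");
# "10.2.9-MariaDB" -> (10, 2, 9, "MariaDB")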
version = []
r = re.compile(r"[.\-]")
for n in r.split(val):
try:
version.append(int(n))
except ValueError:
mariadb = re.match(r"(.*)(MariaDB)(.*)", n)
if mariadb:
version.extend(g for g in mariadb.groups() if g)
else:
version.append(n)
return tuple(version)
def do_commit(self, dbapi_connection):
"""Execute a COMMIT."""
# COMMIT/ROLLBACK were introduced in 3.23.15.
# Yes, we have at least one user who has to talk to these old
# versions!
#
# Ignore commit/rollback if support isn't present, otherwise even
# basic operations via autocommit fail.
try:
dbapi_connection.commit()
except Exception:
if self.server_version_info < (3, 23, 15):
args = sys.exc_info()[1].args
if args and args[0] == 1064:
return
raise
def do_rollback(self, dbapi_connection):
"""Execute a ROLLBACK."""
try:
dbapi_connection.rollback()
except Exception:
if self.server_version_info < (3, 23, 15):
args = sys.exc_info()[1].args
if args and args[0] == 1064:
return
raise
def do_begin_twophase(self, connection, xid):
connection.execute(sql.text("XA BEGIN :xid"), xid=xid)
def do_prepare_twophase(self, connection, xid):
connection.execute(sql.text("XA END :xid"), xid=xid)
connection.execute(sql.text("XA PREPARE :xid"), xid=xid)
def do_rollback_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if not is_prepared:
connection.execute(sql.text("XA END :xid"), xid=xid)
connection.execute(sql.text("XA ROLLBACK :xid"), xid=xid)
def do_commit_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if not is_prepared:
self.do_prepare_twophase(connection, xid)
connection.execute(sql.text("XA COMMIT :xid"), xid=xid)
def do_recover_twophase(self, connection):
resultset = connection.execute("XA RECOVER")
return [row["data"][0 : row["gtrid_length"]] for row in resultset]
def is_disconnect(self, e, connection, cursor):
if isinstance(
e, (self.dbapi.OperationalError, self.dbapi.ProgrammingError)
):
return self._extract_error_code(e) in (
2006,
2013,
2014,
2045,
2055,
)
elif isinstance(
e, (self.dbapi.InterfaceError, self.dbapi.InternalError)
):
# if underlying connection is closed,
# this is the error you get
return "(0, '')" in str(e)
else:
return False
def _compat_fetchall(self, rp, charset=None):
"""Proxy result rows to smooth over MySQL-Python driver
inconsistencies."""
return [_DecodingRowProxy(row, charset) for row in rp.fetchall()]
def _compat_fetchone(self, rp, charset=None):
"""Proxy a result row to smooth over MySQL-Python driver
inconsistencies."""
row = rp.fetchone()
if row:
return _DecodingRowProxy(row, charset)
else:
return None
def _compat_first(self, rp, charset=None):
"""Proxy a result row to smooth over MySQL-Python driver
inconsistencies."""
row = rp.first()
if row:
return _DecodingRowProxy(row, charset)
else:
return None
def _extract_error_code(self, exception):
raise NotImplementedError()
def _get_default_schema_name(self, connection):
return connection.execute("SELECT DATABASE()").scalar()
def has_table(self, connection, table_name, schema=None):
# SHOW TABLE STATUS LIKE and SHOW TABLES LIKE do not function properly
# on macosx (and maybe win?) with multibyte table names.
#
# TODO: if this is not a problem on win, make the strategy swappable
# based on platform. DESCRIBE is slower.
# [ticket:726]
# full_name = self.identifier_preparer.format_table(table,
# use_schema=True)
full_name = ".".join(
self.identifier_preparer._quote_free_identifiers(
schema, table_name
)
)
st = "DESCRIBE %s" % full_name
rs = None
try:
try:
rs = connection.execution_options(
skip_user_error_events=True
).execute(st)
have = rs.fetchone() is not None
rs.close()
return have
except exc.DBAPIError as e:
if self._extract_error_code(e.orig) == 1146:
return False
raise
finally:
if rs:
rs.close()
def initialize(self, connection):
self._connection_charset = self._detect_charset(connection)
self._detect_sql_mode(connection)
self._detect_ansiquotes(connection)
self._detect_casing(connection)
if self._server_ansiquotes:
# if ansiquotes == True, build a new IdentifierPreparer
# with the new setting
self.identifier_preparer = self.preparer(
self, server_ansiquotes=self._server_ansiquotes
)
default.DefaultDialect.initialize(self, connection)
self.supports_for_update_of = (
self._is_mysql and self.server_version_info >= (8,)
)
self._needs_correct_for_88718_96365 = (
not self._is_mariadb and self.server_version_info >= (8,)
)
self._warn_for_known_db_issues()
def _warn_for_known_db_issues(self):
if self._is_mariadb:
mdb_version = self._mariadb_normalized_version_info
if mdb_version > (10, 2) and mdb_version < (10, 2, 9):
util.warn(
"MariaDB %r before 10.2.9 has known issues regarding "
"CHECK constraints, which impact handling of NULL values "
"with SQLAlchemy's boolean datatype (MDEV-13596). An "
"additional issue prevents proper migrations of columns "
"with CHECK constraints (MDEV-11114). Please upgrade to "
"MariaDB 10.2.9 or greater, or use the MariaDB 10.1 "
"series, to avoid these issues." % (mdb_version,)
)
@property
def _is_mariadb(self):
return (
self.server_version_info and "MariaDB" in self.server_version_info
)
@property
def _is_mysql(self):
return not self._is_mariadb
@property
def _is_mariadb_102(self):
return self._is_mariadb and self._mariadb_normalized_version_info > (
10,
2,
)
@property
def _mariadb_normalized_version_info(self):
# MariaDB's wire-protocol prepends the server_version with
# the string "5.5"; now that we use @@version we no longer see this.
if self._is_mariadb:
idx = self.server_version_info.index("MariaDB")
return self.server_version_info[idx - 3 : idx]
else:
return self.server_version_info
@property
def _supports_cast(self):
return (
self.server_version_info is None
or self.server_version_info >= (4, 0, 2)
)
@reflection.cache
def get_schema_names(self, connection, **kw):
rp = connection.execute("SHOW schemas")
return [r[0] for r in rp]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
"""Return a Unicode SHOW TABLES from a given schema."""
if schema is not None:
current_schema = schema
else:
current_schema = self.default_schema_name
charset = self._connection_charset
if self.server_version_info < (5, 0, 2):
rp = connection.execute(
"SHOW TABLES FROM %s"
% self.identifier_preparer.quote_identifier(current_schema)
)
return [
row[0] for row in self._compat_fetchall(rp, charset=charset)
]
else:
rp = connection.execute(
"SHOW FULL TABLES FROM %s"
% self.identifier_preparer.quote_identifier(current_schema)
)
return [
row[0]
for row in self._compat_fetchall(rp, charset=charset)
if row[1] == "BASE TABLE"
]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if self.server_version_info < (5, 0, 2):
raise NotImplementedError
if schema is None:
schema = self.default_schema_name
charset = self._connection_charset
rp = connection.execute(
"SHOW FULL TABLES FROM %s"
% self.identifier_preparer.quote_identifier(schema)
)
return [
row[0]
for row in self._compat_fetchall(rp, charset=charset)
if row[1] in ("VIEW", "SYSTEM VIEW")
]
@reflection.cache
def get_table_options(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
return parsed_state.table_options
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
return parsed_state.columns
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
for key in parsed_state.keys:
if key["type"] == "PRIMARY":
# There can be only one.
cols = [s[0] for s in key["columns"]]
return {"constrained_columns": cols, "name": None}
return {"constrained_columns": [], "name": None}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
default_schema = None
fkeys = []
for spec in parsed_state.fk_constraints:
ref_name = spec["table"][-1]
ref_schema = len(spec["table"]) > 1 and spec["table"][-2] or schema
if not ref_schema:
if default_schema is None:
default_schema = connection.dialect.default_schema_name
if schema == default_schema:
ref_schema = schema
loc_names = spec["local"]
ref_names = spec["foreign"]
con_kw = {}
for opt in ("onupdate", "ondelete"):
if spec.get(opt, False):
con_kw[opt] = spec[opt]
fkey_d = {
"name": spec["name"],
"constrained_columns": loc_names,
"referred_schema": ref_schema,
"referred_table": ref_name,
"referred_columns": ref_names,
"options": con_kw,
}
fkeys.append(fkey_d)
if self._needs_correct_for_88718_96365:
self._correct_for_mysql_bugs_88718_96365(fkeys, connection)
return fkeys
def _correct_for_mysql_bugs_88718_96365(self, fkeys, connection):
# Foreign key is always in lower case (MySQL 8.0)
# https://bugs.mysql.com/bug.php?id=88718
# issue #4344 for SQLAlchemy
# table name also for MySQL 8.0
# https://bugs.mysql.com/bug.php?id=96365
# issue #4751 for SQLAlchemy
# for lower_case_table_names=2, information_schema.columns
# preserves the original table/schema casing, but SHOW CREATE
# TABLE does not. this problem is not in lower_case_table_names=1,
# but use case-insensitive matching for these two modes in any case.
if self._casing in (1, 2):
def lower(s):
return s.lower()
else:
# if on case sensitive, there can be two tables referenced
# with the same name different casing, so we need to use
# case-sensitive matching.
def lower(s):
return s
default_schema_name = connection.dialect.default_schema_name
col_tuples = [
(
lower(rec["referred_schema"] or default_schema_name),
lower(rec["referred_table"]),
col_name,
)
for rec in fkeys
for col_name in rec["referred_columns"]
]
if col_tuples:
correct_for_wrong_fk_case = connection.execute(
sql.text(
"""
select table_schema, table_name, column_name
from information_schema.columns
where (table_schema, table_name, lower(column_name)) in
:table_data;
"""
).bindparams(sql.bindparam("table_data", expanding=True)),
table_data=col_tuples,
)
# in casing=0, table name and schema name come back in their
# exact case.
# in casing=1, table name and schema name come back in lower
# case.
# in casing=2, table name and schema name come back from the
# information_schema.columns view in the case
# that was used in CREATE DATABASE and CREATE TABLE, but
# SHOW CREATE TABLE converts them to *lower case*, therefore
# not matching. So for this case, case-insensitive lookup
# is necessary
d = defaultdict(dict)
for schema, tname, cname in correct_for_wrong_fk_case:
d[(lower(schema), lower(tname))]["SCHEMANAME"] = schema
d[(lower(schema), lower(tname))]["TABLENAME"] = tname
d[(lower(schema), lower(tname))][cname.lower()] = cname
for fkey in fkeys:
rec = d[
(
lower(fkey["referred_schema"] or default_schema_name),
lower(fkey["referred_table"]),
)
]
fkey["referred_table"] = rec["TABLENAME"]
if fkey["referred_schema"] is not None:
fkey["referred_schema"] = rec["SCHEMANAME"]
fkey["referred_columns"] = [
rec[col.lower()] for col in fkey["referred_columns"]
]
@reflection.cache
def get_check_constraints(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
return [
{"name": spec["name"], "sqltext": spec["sqltext"]}
for spec in parsed_state.ck_constraints
]
@reflection.cache
def get_table_comment(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
return {"text": parsed_state.table_options.get("mysql_comment", None)}
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
indexes = []
for spec in parsed_state.keys:
dialect_options = {}
unique = False
flavor = spec["type"]
if flavor == "PRIMARY":
continue
if flavor == "UNIQUE":
unique = True
elif flavor in ("FULLTEXT", "SPATIAL"):
dialect_options["mysql_prefix"] = flavor
elif flavor is None:
pass
else:
self.logger.info(
"Converting unknown KEY type %s to a plain KEY", flavor
)
if spec["parser"]:
dialect_options["mysql_with_parser"] = spec["parser"]
index_d = {}
if dialect_options:
index_d["dialect_options"] = dialect_options
index_d["name"] = spec["name"]
index_d["column_names"] = [s[0] for s in spec["columns"]]
index_d["unique"] = unique
if flavor:
index_d["type"] = flavor
indexes.append(index_d)
return indexes
@reflection.cache
def get_unique_constraints(
self, connection, table_name, schema=None, **kw
):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
return [
{
"name": key["name"],
"column_names": [col[0] for col in key["columns"]],
"duplicates_index": key["name"],
}
for key in parsed_state.keys
if key["type"] == "UNIQUE"
]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
charset = self._connection_charset
full_name = ".".join(
self.identifier_preparer._quote_free_identifiers(schema, view_name)
)
sql = self._show_create_table(
connection, None, charset, full_name=full_name
)
return sql
def _parsed_state_or_create(
self, connection, table_name, schema=None, **kw
):
return self._setup_parser(
connection,
table_name,
schema,
info_cache=kw.get("info_cache", None),
)
@util.memoized_property
def _tabledef_parser(self):
"""return the MySQLTableDefinitionParser, generate if needed.
The deferred creation ensures that the dialect has
retrieved server version information first.
"""
if self.server_version_info < (4, 1) and self._server_ansiquotes:
# ANSI_QUOTES doesn't affect SHOW CREATE TABLE on < 4.1
preparer = self.preparer(self, server_ansiquotes=False)
else:
preparer = self.identifier_preparer
return _reflection.MySQLTableDefinitionParser(self, preparer)
@reflection.cache
def _setup_parser(self, connection, table_name, schema=None, **kw):
charset = self._connection_charset
parser = self._tabledef_parser
full_name = ".".join(
self.identifier_preparer._quote_free_identifiers(
schema, table_name
)
)
sql = self._show_create_table(
connection, None, charset, full_name=full_name
)
if re.match(r"^CREATE (?:ALGORITHM)?.* VIEW", sql):
# Adapt views to something table-like.
columns = self._describe_table(
connection, None, charset, full_name=full_name
)
sql = parser._describe_to_create(table_name, columns)
return parser.parse(sql, charset)
def _detect_charset(self, connection):
raise NotImplementedError()
def _detect_casing(self, connection):
"""Sniff out identifier case sensitivity.
Cached per-connection. This value cannot change without a server
restart.
"""
# http://dev.mysql.com/doc/refman/5.0/en/name-case-sensitivity.html
charset = self._connection_charset
row = self._compat_first(
connection.execute("SHOW VARIABLES LIKE 'lower_case_table_names'"),
charset=charset,
)
if not row:
cs = 0
else:
# 4.0.15 returns OFF or ON according to [ticket:489]
# 3.23 doesn't, 4.0.27 doesn't..
if row[1] == "OFF":
cs = 0
elif row[1] == "ON":
cs = 1
else:
cs = int(row[1])
self._casing = cs
return cs
def _detect_collations(self, connection):
"""Pull the active COLLATIONS list from the server.
Cached per-connection.
"""
collations = {}
if self.server_version_info < (4, 1, 0):
pass
else:
charset = self._connection_charset
rs = connection.execute("SHOW COLLATION")
for row in self._compat_fetchall(rs, charset):
collations[row[0]] = row[1]
return collations
def _detect_sql_mode(self, connection):
row = self._compat_first(
connection.execute("SHOW VARIABLES LIKE 'sql_mode'"),
charset=self._connection_charset,
)
if not row:
util.warn(
"Could not retrieve SQL_MODE; please ensure the "
"MySQL user has permissions to SHOW VARIABLES"
)
self._sql_mode = ""
else:
self._sql_mode = row[1] or ""
def _detect_ansiquotes(self, connection):
"""Detect and adjust for the ANSI_QUOTES sql mode."""
mode = self._sql_mode
if not mode:
mode = ""
elif mode.isdigit():
mode_no = int(mode)
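# in the legacy numeric sql_mode representation, the bit with
# value 4 corresponds to ANSI_QUOTES, hence the bitwise test below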
mode = (mode_no | 4 == mode_no) and "ANSI_QUOTES" or ""
self._server_ansiquotes = "ANSI_QUOTES" in mode
# as of MySQL 5.0.1
self._backslash_escapes = "NO_BACKSLASH_ESCAPES" not in mode
def _show_create_table(
self, connection, table, charset=None, full_name=None
):
"""Run SHOW CREATE TABLE for a ``Table``."""
if full_name is None:
full_name = self.identifier_preparer.format_table(table)
st = "SHOW CREATE TABLE %s" % full_name
rp = None
try:
rp = connection.execution_options(
skip_user_error_events=True
).execute(st)
except exc.DBAPIError as e:
if self._extract_error_code(e.orig) == 1146:
util.raise_(exc.NoSuchTableError(full_name), replace_context=e)
else:
raise
row = self._compat_first(rp, charset=charset)
if not row:
raise exc.NoSuchTableError(full_name)
return row[1].strip()
def _describe_table(self, connection, table, charset=None, full_name=None):
"""Run DESCRIBE for a ``Table`` and return processed rows."""
if full_name is None:
full_name = self.identifier_preparer.format_table(table)
st = "DESCRIBE %s" % full_name
rp, rows = None, None
try:
try:
rp = connection.execution_options(
skip_user_error_events=True
).execute(st)
except exc.DBAPIError as e:
code = self._extract_error_code(e.orig)
if code == 1146:
util.raise_(
exc.NoSuchTableError(full_name), replace_context=e
)
elif code == 1356:
util.raise_(
exc.UnreflectableTableError(
"Table or view named %s could not be "
"reflected: %s" % (full_name, e)
),
replace_context=e,
)
else:
raise
rows = self._compat_fetchall(rp, charset=charset)
finally:
if rp:
rp.close()
return rows
class _DecodingRowProxy(object):
"""Return unicode-decoded values based on type inspection.
Smooth over data type issues (esp. with alpha driver versions) and
normalize strings as Unicode regardless of user-configured driver
encoding settings.
"""
# Some MySQL-python versions can return some columns as
# sets.Set(['value']) (seriously) but thankfully that doesn't
# seem to come up in DDL queries.
_encoding_compat = {
"koi8r": "koi8_r",
"koi8u": "koi8_u",
"utf16": "utf-16-be", # MySQL's uft16 is always bigendian
"utf8mb4": "utf8", # real utf8
"eucjpms": "ujis",
}
def __init__(self, rowproxy, charset):
self.rowproxy = rowproxy
self.charset = self._encoding_compat.get(charset, charset)
def __getitem__(self, index):
item = self.rowproxy[index]
if isinstance(item, _array):
item = item.tostring()
if self.charset and isinstance(item, util.binary_type):
return item.decode(self.charset)
else:
return item
def __getattr__(self, attr):
item = getattr(self.rowproxy, attr)
if isinstance(item, _array):
item = item.tostring()
if self.charset and isinstance(item, util.binary_type):
return item.decode(self.charset)
else:
return item
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mysql/cymysql.py
# mysql/cymysql.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: mysql+cymysql
:name: CyMySQL
:dbapi: cymysql
:connectstring: mysql+cymysql://<username>:<password>@<host>/<dbname>[?<options>]
:url: https://github.com/nakagami/CyMySQL
.. note::
The CyMySQL dialect is **not tested as part of SQLAlchemy's continuous
integration** and may have unresolved issues. The recommended MySQL
dialects are mysqlclient and PyMySQL.
""" # noqa
from .base import BIT
from .base import MySQLDialect
from .mysqldb import MySQLDialect_mysqldb
from ... import util
class _cymysqlBIT(BIT):
def result_processor(self, dialect, coltype):
"""Convert a MySQL's 64 bit, variable length binary string to a long.
"""
def process(value):
if value is not None:
v = 0
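# accumulate bytes most-significant-first (big-endian)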
for i in util.iterbytes(value):
v = v << 8 | i
return v
return value
return process
class MySQLDialect_cymysql(MySQLDialect_mysqldb):
driver = "cymysql"
description_encoding = None
supports_sane_rowcount = True
supports_sane_multi_rowcount = False
supports_unicode_statements = True
colspecs = util.update_copy(MySQLDialect.colspecs, {BIT: _cymysqlBIT})
@classmethod
def dbapi(cls):
return __import__("cymysql")
def _detect_charset(self, connection):
return connection.connection.charset
def _extract_error_code(self, exception):
return exception.errno
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.OperationalError):
return self._extract_error_code(e) in (
2006,
2013,
2014,
2045,
2055,
)
elif isinstance(e, self.dbapi.InterfaceError):
# if underlying connection is closed,
# this is the error you get
return True
else:
return False
dialect = MySQLDialect_cymysql
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mssql/mxodbc.py
# mssql/mxodbc.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+mxodbc
:name: mxODBC
:dbapi: mxodbc
:connectstring: mssql+mxodbc://<username>:<password>@<dsnname>
:url: http://www.egenix.com/
Execution Modes
---------------
mxODBC features two styles of statement execution, using the
``cursor.execute()`` and ``cursor.executedirect()`` methods (the second being
an extension to the DBAPI specification). The former makes use of a particular
API call specific to the SQL Server Native Client ODBC driver known as
SQLDescribeParam, while the latter does not.
mxODBC apparently only makes repeated use of a single prepared statement
when SQLDescribeParam is used. The advantage to prepared statement reuse is
one of performance. The disadvantage is that SQLDescribeParam has a limited
set of scenarios in which bind parameters are understood, including that they
cannot be placed within the argument lists of function calls, anywhere outside
the FROM, or even within subqueries within the FROM clause - making the usage
of bind parameters within SELECT statements impossible for all but the most
simplistic statements.
For this reason, the mxODBC dialect uses the "native" mode by default only for
INSERT, UPDATE, and DELETE statements, and uses the escaped string mode for
all other statements.
This behavior can be controlled via
:meth:`~sqlalchemy.sql.expression.Executable.execution_options` using the
``native_odbc_execute`` flag with a value of ``True`` or ``False``, where a
value of ``True`` will unconditionally use native bind parameters and a value
of ``False`` will unconditionally use string-escaped parameters.
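For example, a hedged sketch forcing string-escaped parameters for a
single statement (``native_odbc_execute`` is the execution option
described above)::

    stmt = table.select().execution_options(native_odbc_execute=False)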
"""
from .base import _MSDate
from .base import _MSDateTime
from .base import _MSTime
from .base import MSDialect
from .base import VARBINARY
from .pyodbc import _MSNumeric_pyodbc
from .pyodbc import MSExecutionContext_pyodbc
from ... import types as sqltypes
from ...connectors.mxodbc import MxODBCConnector
class _MSNumeric_mxodbc(_MSNumeric_pyodbc):
"""Include pyodbc's numeric processor.
"""
class _MSDate_mxodbc(_MSDate):
def bind_processor(self, dialect):
def process(value):
if value is not None:
return "%s-%s-%s" % (value.year, value.month, value.day)
else:
return None
return process
class _MSTime_mxodbc(_MSTime):
def bind_processor(self, dialect):
def process(value):
if value is not None:
return "%s:%s:%s" % (value.hour, value.minute, value.second)
else:
return None
return process
class _VARBINARY_mxodbc(VARBINARY):
"""
mxODBC Support for VARBINARY column types.
This handles the special case of null VARBINARY values,
mapping None values to the mx.ODBC.Manager.BinaryNull symbol.
"""
def bind_processor(self, dialect):
if dialect.dbapi is None:
return None
DBAPIBinary = dialect.dbapi.Binary
def process(value):
if value is not None:
return DBAPIBinary(value)
else:
# should pull from mx.ODBC.Manager.BinaryNull
return dialect.dbapi.BinaryNull
return process
class MSExecutionContext_mxodbc(MSExecutionContext_pyodbc):
"""
The pyodbc execution context is useful for enabling
SELECT SCOPE_IDENTITY in cases where the OUTPUT clause
does not work (tables with insert triggers).
"""
# todo - investigate whether the pyodbc execution context
# is really only being used in cases where OUTPUT
# won't work.
class MSDialect_mxodbc(MxODBCConnector, MSDialect):
# this is only needed if "native ODBC" mode is used,
# which is now disabled by default.
# statement_compiler = MSSQLStrictCompiler
execution_ctx_cls = MSExecutionContext_mxodbc
# flag used by _MSNumeric_mxodbc
_need_decimal_fix = True
colspecs = {
sqltypes.Numeric: _MSNumeric_mxodbc,
sqltypes.DateTime: _MSDateTime,
sqltypes.Date: _MSDate_mxodbc,
sqltypes.Time: _MSTime_mxodbc,
VARBINARY: _VARBINARY_mxodbc,
sqltypes.LargeBinary: _VARBINARY_mxodbc,
}
def __init__(self, description_encoding=None, **params):
super(MSDialect_mxodbc, self).__init__(**params)
self.description_encoding = description_encoding
dialect = MSDialect_mxodbc
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mssql/zxjdbc.py
# mssql/zxjdbc.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: mssql+zxjdbc://user:pass@host:port/dbname[?key=value&key=value...]
:driverurl: http://jtds.sourceforge.net/
.. note:: Jython is not supported by current versions of SQLAlchemy. The
zxjdbc dialect should be considered as experimental.
""" # noqa
from .base import MSDialect
from .base import MSExecutionContext
from ... import engine
from ...connectors.zxJDBC import ZxJDBCConnector
class MSExecutionContext_zxjdbc(MSExecutionContext):
_embedded_scope_identity = False
def pre_exec(self):
super(MSExecutionContext_zxjdbc, self).pre_exec()
# scope_identity after the fact returns null in jTDS so we must
# embed it
if self._select_lastrowid and self.dialect.use_scope_identity:
self._embedded_scope_identity = True
self.statement += "; SELECT scope_identity()"
def post_exec(self):
if self._embedded_scope_identity:
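# jTDS may deliver the row from the embedded
# "SELECT scope_identity()" in a trailing result set; advance
# with nextset() until it can be fetched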
while True:
try:
row = self.cursor.fetchall()[0]
break
except self.dialect.dbapi.Error:
self.cursor.nextset()
self._lastrowid = int(row[0])
if (
self.isinsert or self.isupdate or self.isdelete
) and self.compiled.returning:
self._result_proxy = engine.FullyBufferedResultProxy(self)
if self._enable_identity_insert:
table = self.dialect.identifier_preparer.format_table(
self.compiled.statement.table
)
self.cursor.execute("SET IDENTITY_INSERT %s OFF" % table)
class MSDialect_zxjdbc(ZxJDBCConnector, MSDialect):
jdbc_db_name = "jtds:sqlserver"
jdbc_driver_name = "net.sourceforge.jtds.jdbc.Driver"
execution_ctx_cls = MSExecutionContext_zxjdbc
def _get_server_version_info(self, connection):
return tuple(
int(x) for x in connection.connection.dbversion.split(".")
)
dialect = MSDialect_zxjdbc
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mssql/__init__.py
# mssql/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import adodbapi # noqa
from . import base # noqa
from . import mxodbc # noqa
from . import pymssql # noqa
from . import pyodbc # noqa
from . import zxjdbc # noqa
from .base import BIGINT
from .base import BINARY
from .base import BIT
from .base import CHAR
from .base import DATE
from .base import DATETIME
from .base import DATETIME2
from .base import DATETIMEOFFSET
from .base import DECIMAL
from .base import FLOAT
from .base import IMAGE
from .base import INTEGER
from .base import MONEY
from .base import NCHAR
from .base import NTEXT
from .base import NUMERIC
from .base import NVARCHAR
from .base import REAL
from .base import ROWVERSION
from .base import SMALLDATETIME
from .base import SMALLINT
from .base import SMALLMONEY
from .base import SQL_VARIANT
from .base import TEXT
from .base import TIME
from .base import TIMESTAMP
from .base import TINYINT
from .base import try_cast
from .base import UNIQUEIDENTIFIER
from .base import VARBINARY
from .base import VARCHAR
from .base import XML
base.dialect = dialect = pyodbc.dialect
__all__ = (
"INTEGER",
"BIGINT",
"SMALLINT",
"TINYINT",
"VARCHAR",
"NVARCHAR",
"CHAR",
"NCHAR",
"TEXT",
"NTEXT",
"DECIMAL",
"NUMERIC",
"FLOAT",
"DATETIME",
"DATETIME2",
"DATETIMEOFFSET",
"DATE",
"TIME",
"SMALLDATETIME",
"BINARY",
"VARBINARY",
"BIT",
"REAL",
"IMAGE",
"TIMESTAMP",
"ROWVERSION",
"MONEY",
"SMALLMONEY",
"UNIQUEIDENTIFIER",
"SQL_VARIANT",
"XML",
"dialect",
"try_cast",
)
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mssql/pymssql.py
# mssql/pymssql.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+pymssql
:name: pymssql
:dbapi: pymssql
:connectstring: mssql+pymssql://<username>:<password>@<freetds_name>/?charset=utf8
pymssql is a Python module that provides a Python DBAPI interface around
`FreeTDS <http://www.freetds.org/>`_.
.. note::
pymssql is currently not included in SQLAlchemy's continuous integration
(CI) testing.
Modern versions of this driver worked very well with SQL Server and FreeTDS
from Linux and were highly recommended. However, pymssql is currently
unmaintained and has fallen behind the progress of the Microsoft ODBC driver in
its support for newer features of SQL Server. The latest official release of
pymssql at the time of this writing is version 2.1.4 (August 2018), and it
lacks support for:
1. table-valued parameters (TVPs),
2. ``datetimeoffset`` columns using timezone-aware ``datetime`` objects
(values are sent and retrieved as strings), and
3. encrypted connections (e.g., to Azure SQL), when pymssql is installed from
the pre-built wheels. Support for encrypted connections requires building
pymssql from source, which can be a nuisance, especially under Windows.
The above features are all supported by mssql+pyodbc when using Microsoft's
ODBC Driver for SQL Server (msodbcsql), which is now available for Windows,
(several flavors of) Linux, and macOS.
""" # noqa
import re
from .base import MSDialect
from .base import MSIdentifierPreparer
from ... import processors
from ... import types as sqltypes
from ... import util
class _MSNumeric_pymssql(sqltypes.Numeric):
def result_processor(self, dialect, type_):
if not self.asdecimal:
return processors.to_float
else:
return sqltypes.Numeric.result_processor(self, dialect, type_)
class MSIdentifierPreparer_pymssql(MSIdentifierPreparer):
def __init__(self, dialect):
super(MSIdentifierPreparer_pymssql, self).__init__(dialect)
# pymssql has the very unusual behavior that it uses pyformat
# yet does not require that percent signs be doubled
self._double_percents = False
class MSDialect_pymssql(MSDialect):
supports_native_decimal = True
driver = "pymssql"
preparer = MSIdentifierPreparer_pymssql
colspecs = util.update_copy(
MSDialect.colspecs,
{sqltypes.Numeric: _MSNumeric_pymssql, sqltypes.Float: sqltypes.Float},
)
@classmethod
def dbapi(cls):
module = __import__("pymssql")
# pymssql < 2.1.1 doesn't have a Binary method; we use a string fallback
client_ver = tuple(int(x) for x in module.__version__.split("."))
if client_ver < (2, 1, 1):
# TODO: monkeypatching here is less than ideal
module.Binary = lambda x: x if hasattr(x, "decode") else str(x)
if client_ver < (1,):
util.warn(
"The pymssql dialect expects at least "
"the 1.0 series of the pymssql DBAPI."
)
return module
def _get_server_version_info(self, connection):
vers = connection.scalar("select @@version")
m = re.match(r"Microsoft .*? - (\d+).(\d+).(\d+).(\d+)", vers)
if m:
return tuple(int(x) for x in m.group(1, 2, 3, 4))
else:
return None
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
opts.update(url.query)
port = opts.pop("port", None)
if port and "host" in opts:
opts["host"] = "%s:%s" % (opts["host"], port)
return [[], opts]
def is_disconnect(self, e, connection, cursor):
for msg in (
"Adaptive Server connection timed out",
"Net-Lib error during Connection reset by peer",
"message 20003", # connection timeout
"Error 10054",
"Not connected to any MS SQL server",
"Connection is closed",
"message 20006", # Write to the server failed
"message 20017", # Unexpected EOF from the server
"message 20047", # DBPROCESS is dead or not enabled
):
if msg in str(e):
return True
else:
return False
def set_isolation_level(self, connection, level):
if level == "AUTOCOMMIT":
connection.autocommit(True)
else:
connection.autocommit(False)
super(MSDialect_pymssql, self).set_isolation_level(
connection, level
)
dialect = MSDialect_pymssql
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mssql/adodbapi.py
# mssql/adodbapi.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+adodbapi
:name: adodbapi
:dbapi: adodbapi
:connectstring: mssql+adodbapi://<username>:<password>@<dsnname>
:url: http://adodbapi.sourceforge.net/
.. note::
The adodbapi dialect is not implemented in SQLAlchemy versions 0.6 and
above at this time.
"""
import datetime
import sys
from sqlalchemy import types as sqltypes
from sqlalchemy import util
from sqlalchemy.dialects.mssql.base import MSDateTime
from sqlalchemy.dialects.mssql.base import MSDialect
class MSDateTime_adodbapi(MSDateTime):
def result_processor(self, dialect, coltype):
def process(value):
# adodbapi will return datetimes with empty time
# values as datetime.date() objects.
# Promote them back to full datetime.datetime()
if type(value) is datetime.date:
return datetime.datetime(value.year, value.month, value.day)
return value
return process
class MSDialect_adodbapi(MSDialect):
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
supports_unicode = sys.maxunicode == 65535
supports_unicode_statements = True
driver = "adodbapi"
@classmethod
def import_dbapi(cls):
import adodbapi as module
return module
colspecs = util.update_copy(
MSDialect.colspecs, {sqltypes.DateTime: MSDateTime_adodbapi}
)
def create_connect_args(self, url):
def check_quote(token):
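# a ";" inside a value would end its key/value pair in the ADO
# connection string, so quote such values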
if ";" in str(token):
token = "'%s'" % token
return token
keys = dict((k, check_quote(v)) for k, v in url.query.items())
connectors = ["Provider=SQLOLEDB"]
if "port" in keys:
connectors.append(
"Data Source=%s, %s" % (keys.get("host"), keys.get("port"))
)
else:
connectors.append("Data Source=%s" % keys.get("host"))
connectors.append("Initial Catalog=%s" % keys.get("database"))
user = keys.get("user")
if user:
connectors.append("User Id=%s" % user)
connectors.append("Password=%s" % keys.get("password", ""))
else:
connectors.append("Integrated Security=SSPI")
return [[";".join(connectors)], {}]
def is_disconnect(self, e, connection, cursor):
return isinstance(
e, self.dbapi.adodbapi.DatabaseError
) and "'connection failure'" in str(e)
dialect = MSDialect_adodbapi
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mssql/provision.py
from ... import create_engine
from ... import exc
from ...testing.provision import create_db
from ...testing.provision import drop_db
from ...testing.provision import log
from ...testing.provision import run_reap_dbs
from ...testing.provision import update_db_opts
@update_db_opts.for_db("mssql")
def _mssql_update_db_opts(db_url, db_opts):
db_opts["legacy_schema_aliasing"] = False
@create_db.for_db("mssql")
def _mssql_create_db(cfg, eng, ident):
with eng.connect().execution_options(isolation_level="AUTOCOMMIT") as conn:
conn.execute("create database %s" % ident)
conn.execute(
"ALTER DATABASE %s SET ALLOW_SNAPSHOT_ISOLATION ON" % ident
)
conn.execute(
"ALTER DATABASE %s SET READ_COMMITTED_SNAPSHOT ON" % ident
)
conn.execute("use %s" % ident)
conn.execute("create schema test_schema")
conn.execute("create schema test_schema_2")
@drop_db.for_db("mssql")
def _mssql_drop_db(cfg, eng, ident):
with eng.connect().execution_options(isolation_level="AUTOCOMMIT") as conn:
_mssql_drop_ignore(conn, ident)
def _mssql_drop_ignore(conn, ident):
try:
# typically when this happens, we can't KILL the session anyway,
# so let the cleanup process drop the DBs
# for row in conn.execute(
# "select session_id from sys.dm_exec_sessions "
# "where database_id=db_id('%s')" % ident):
# log.info("killing SQL server sesssion %s", row['session_id'])
# conn.execute("kill %s" % row['session_id'])
conn.execute("drop database %s" % ident)
log.info("Reaped db: %s", ident)
return True
except exc.DatabaseError as err:
log.warning("couldn't drop db: %s", err)
return False
@run_reap_dbs.for_db("mssql")
def _reap_mssql_dbs(url, idents):
log.info("db reaper connecting to %r", url)
eng = create_engine(url)
with eng.connect().execution_options(isolation_level="AUTOCOMMIT") as conn:
log.info("identifiers in file: %s", ", ".join(idents))
to_reap = conn.execute(
"select d.name from sys.databases as d where name "
"like 'TEST_%' and not exists (select session_id "
"from sys.dm_exec_sessions "
"where database_id=d.database_id)"
)
all_names = {dbname.lower() for (dbname,) in to_reap}
to_drop = set()
for name in all_names:
if name in idents:
to_drop.add(name)
dropped = total = 0
for total, dbname in enumerate(to_drop, 1):
if _mssql_drop_ignore(conn, dbname):
dropped += 1
log.info(
"Dropped %d out of %d stale databases detected", dropped, total
)
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mssql/information_schema.py
# mssql/information_schema.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
# TODO: should be using the sys. catalog with SQL Server, not information
# schema
from ... import cast
from ... import Column
from ... import MetaData
from ... import Table
from ... import util
from ...ext.compiler import compiles
from ...sql import expression
from ...types import Boolean
from ...types import Integer
from ...types import String
from ...types import TypeDecorator
from ...types import Unicode
ischema = MetaData()
class CoerceUnicode(TypeDecorator):
impl = Unicode
def process_bind_param(self, value, dialect):
if util.py2k and isinstance(value, util.binary_type):
value = value.decode(dialect.encoding)
return value
def bind_expression(self, bindvalue):
return _cast_on_2005(bindvalue)
class _cast_on_2005(expression.ColumnElement):
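"""Represent a bound parameter that is CAST to NVARCHAR on SQL Server
2005 and above; older servers receive the plain bound value."""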
def __init__(self, bindvalue):
self.bindvalue = bindvalue
@compiles(_cast_on_2005)
def _compile(element, compiler, **kw):
from . import base
if (
compiler.dialect.server_version_info is None
or compiler.dialect.server_version_info < base.MS_2005_VERSION
):
return compiler.process(element.bindvalue, **kw)
else:
return compiler.process(cast(element.bindvalue, Unicode), **kw)
schemata = Table(
"SCHEMATA",
ischema,
Column("CATALOG_NAME", CoerceUnicode, key="catalog_name"),
Column("SCHEMA_NAME", CoerceUnicode, key="schema_name"),
Column("SCHEMA_OWNER", CoerceUnicode, key="schema_owner"),
schema="INFORMATION_SCHEMA",
)
tables = Table(
"TABLES",
ischema,
Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"),
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("TABLE_TYPE", CoerceUnicode, key="table_type"),
schema="INFORMATION_SCHEMA",
)
columns = Table(
"COLUMNS",
ischema,
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
Column("IS_NULLABLE", Integer, key="is_nullable"),
Column("DATA_TYPE", String, key="data_type"),
Column("ORDINAL_POSITION", Integer, key="ordinal_position"),
Column(
"CHARACTER_MAXIMUM_LENGTH", Integer, key="character_maximum_length"
),
Column("NUMERIC_PRECISION", Integer, key="numeric_precision"),
Column("NUMERIC_SCALE", Integer, key="numeric_scale"),
Column("COLUMN_DEFAULT", Integer, key="column_default"),
Column("COLLATION_NAME", String, key="collation_name"),
schema="INFORMATION_SCHEMA",
)
constraints = Table(
"TABLE_CONSTRAINTS",
ischema,
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
Column("CONSTRAINT_TYPE", CoerceUnicode, key="constraint_type"),
schema="INFORMATION_SCHEMA",
)
column_constraints = Table(
"CONSTRAINT_COLUMN_USAGE",
ischema,
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
schema="INFORMATION_SCHEMA",
)
key_constraints = Table(
"KEY_COLUMN_USAGE",
ischema,
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
Column("CONSTRAINT_SCHEMA", CoerceUnicode, key="constraint_schema"),
Column("ORDINAL_POSITION", Integer, key="ordinal_position"),
schema="INFORMATION_SCHEMA",
)
ref_constraints = Table(
"REFERENTIAL_CONSTRAINTS",
ischema,
Column("CONSTRAINT_CATALOG", CoerceUnicode, key="constraint_catalog"),
Column("CONSTRAINT_SCHEMA", CoerceUnicode, key="constraint_schema"),
Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
# TODO: is CATLOG misspelled ?
Column(
"UNIQUE_CONSTRAINT_CATLOG",
CoerceUnicode,
key="unique_constraint_catalog",
),
Column(
"UNIQUE_CONSTRAINT_SCHEMA",
CoerceUnicode,
key="unique_constraint_schema",
),
Column(
"UNIQUE_CONSTRAINT_NAME", CoerceUnicode, key="unique_constraint_name"
),
Column("MATCH_OPTION", String, key="match_option"),
Column("UPDATE_RULE", String, key="update_rule"),
Column("DELETE_RULE", String, key="delete_rule"),
schema="INFORMATION_SCHEMA",
)
views = Table(
"VIEWS",
ischema,
Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"),
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("VIEW_DEFINITION", CoerceUnicode, key="view_definition"),
Column("CHECK_OPTION", String, key="check_option"),
Column("IS_UPDATABLE", String, key="is_updatable"),
schema="INFORMATION_SCHEMA",
)
computed_columns = Table(
"computed_columns",
ischema,
Column("object_id", Integer),
Column("name", CoerceUnicode),
Column("is_computed", Boolean),
Column("is_persisted", Boolean),
Column("definition", CoerceUnicode),
schema="sys",
)
# mssql/pyodbc.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: mssql+pyodbc
:name: PyODBC
:dbapi: pyodbc
:connectstring: mssql+pyodbc://<username>:<password>@<dsnname>
:url: http://pypi.python.org/pypi/pyodbc/
Connecting to PyODBC
--------------------
The URL here is translated to a PyODBC connection string, as
detailed in `ConnectionStrings <https://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_.
DSN Connections
^^^^^^^^^^^^^^^
A DSN connection in ODBC means that a pre-existing ODBC datasource is
configured on the client machine. The application then specifies the name
of this datasource, which encompasses details such as the specific ODBC driver
in use as well as the network address of the database. Assuming a datasource
is configured on the client, a basic DSN-based connection looks like::
engine = create_engine("mssql+pyodbc://scott:tiger@some_dsn")
The URL above will pass the following connection string to PyODBC::
dsn=mydsn;UID=user;PWD=pass
If the username and password are omitted, the DSN form will also add
the ``Trusted_Connection=yes`` directive to the ODBC string.
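For example, a URL with no credentials (a minimal sketch; ``some_dsn``
is a hypothetical pre-configured datasource)::
engine = create_engine("mssql+pyodbc://some_dsn")
would pass ``dsn=some_dsn;Trusted_Connection=yes`` to PyODBC.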
Hostname Connections
^^^^^^^^^^^^^^^^^^^^
Hostname-based connections are also supported by pyodbc. These are often
easier to use than a DSN and have the additional advantage that the specific
database name to connect towards may be specified locally in the URL, rather
than it being fixed as part of a datasource configuration.
When using a hostname connection, the driver name must also be specified in the
query parameters of the URL. As these names usually have spaces in them, the
name must be URL encoded which means using plus signs for spaces::
engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0")
Other keywords interpreted by the Pyodbc dialect to be passed to
``pyodbc.connect()`` in both the DSN and hostname cases include:
``odbc_autotranslate``, ``ansi``, ``unicode_results``, ``autocommit``.
Note that in order for the dialect to recognize these keywords
(including the ``driver`` keyword above) they must be all lowercase.
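As an illustrative sketch, passing the ``autocommit`` keyword through
the URL (``some_dsn`` is again a hypothetical datasource)::
engine = create_engine("mssql+pyodbc://scott:tiger@some_dsn?autocommit=true")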
Pass through exact Pyodbc string
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A PyODBC connection string can also be sent in pyodbc's format directly, as
specified in `ConnectionStrings
<https://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_ into the driver
using the parameter ``odbc_connect``. The delimiters must be URL encoded, as
illustrated below using ``urllib.parse.quote_plus``::
import urllib
params = urllib.parse.quote_plus("DRIVER={SQL Server Native Client 10.0};SERVER=dagger;DATABASE=test;UID=user;PWD=password")
engine = create_engine("mssql+pyodbc:///?odbc_connect=%s" % params)
Driver / Unicode Support
-------------------------
PyODBC works best with Microsoft ODBC drivers, particularly in the area
of Unicode support on both Python 2 and Python 3.
Using the FreeTDS ODBC drivers on Linux or OSX with PyODBC is **not**
recommended; there have been historically many Unicode-related issues
in this area, including before Microsoft offered ODBC drivers for Linux
and OSX. Now that Microsoft offers drivers for all platforms, they are
the recommended choice for PyODBC. FreeTDS remains relevant for
non-ODBC drivers such as pymssql, where it works very well.
Rowcount Support
----------------
Pyodbc only has partial support for rowcount. See the notes at
:ref:`mssql_rowcount_versioning` for important notes when using ORM
versioning.
.. _mssql_pyodbc_fastexecutemany:
Fast Executemany Mode
---------------------
The Pyodbc driver has added support for a "fast executemany" mode of execution
which greatly reduces round trips for a DBAPI ``executemany()`` call when using
Microsoft ODBC drivers. The feature is enabled by setting the flag
``.fast_executemany`` on the DBAPI cursor when an executemany call is to be
used. The SQLAlchemy pyodbc SQL Server dialect supports setting this flag
automatically when the ``.fast_executemany`` flag is passed to
:func:`_sa.create_engine`
; note that the ODBC driver must be the Microsoft driver
in order to use this flag::
engine = create_engine(
"mssql+pyodbc://scott:tiger@mssql2017:1433/test?driver=ODBC+Driver+13+for+SQL+Server",
fast_executemany=True)
.. versionadded:: 1.3
.. seealso::
`fast executemany <https://github.com/mkleehammer/pyodbc/wiki/Features-beyond-the-DB-API#fast_executemany>`_
- on github
""" # noqa
import datetime
import decimal
import re
import struct
from .base import BINARY
from .base import DATETIMEOFFSET
from .base import MSDialect
from .base import MSExecutionContext
from .base import VARBINARY
from ... import exc
from ... import types as sqltypes
from ... import util
from ...connectors.pyodbc import PyODBCConnector
class _ms_numeric_pyodbc(object):
"""Turns Decimals with adjusted() < 0 or > 7 into strings.
The routines here are needed for older pyodbc versions
as well as current mxODBC versions.
"""
def bind_processor(self, dialect):
super_process = super(_ms_numeric_pyodbc, self).bind_processor(dialect)
if not dialect._need_decimal_fix:
return super_process
def process(value):
if self.asdecimal and isinstance(value, decimal.Decimal):
adjusted = value.adjusted()
if adjusted < 0:
return self._small_dec_to_string(value)
elif adjusted > 7:
return self._large_dec_to_string(value)
if super_process:
return super_process(value)
else:
return value
return process
# these routines needed for older versions of pyodbc.
# as of 2.1.8 this logic is integrated.
def _small_dec_to_string(self, value):
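# e.g. Decimal("1E-6") -> "0.000001"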
return "%s0.%s%s" % (
(value < 0 and "-" or ""),
"0" * (abs(value.adjusted()) - 1),
"".join([str(nint) for nint in value.as_tuple()[1]]),
)
def _large_dec_to_string(self, value):
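# e.g. Decimal("1E+10") -> "10000000000"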
_int = value.as_tuple()[1]
if "E" in str(value):
result = "%s%s%s" % (
(value < 0 and "-" or ""),
"".join([str(s) for s in _int]),
"0" * (value.adjusted() - (len(_int) - 1)),
)
else:
if (len(_int) - 1) > value.adjusted():
result = "%s%s.%s" % (
(value < 0 and "-" or ""),
"".join([str(s) for s in _int][0 : value.adjusted() + 1]),
"".join([str(s) for s in _int][value.adjusted() + 1 :]),
)
else:
result = "%s%s" % (
(value < 0 and "-" or ""),
"".join([str(s) for s in _int][0 : value.adjusted() + 1]),
)
return result
class _MSNumeric_pyodbc(_ms_numeric_pyodbc, sqltypes.Numeric):
pass
class _MSFloat_pyodbc(_ms_numeric_pyodbc, sqltypes.Float):
pass
class _ms_binary_pyodbc(object):
"""Wraps binary values in dialect-specific Binary wrapper.
If the value is null, return a pyodbc-specific BinaryNull
object to prevent pyODBC [and FreeTDS] from defaulting binary
NULL types to SQLWCHAR and causing implicit conversion errors.
"""
def bind_processor(self, dialect):
if dialect.dbapi is None:
return None
DBAPIBinary = dialect.dbapi.Binary
def process(value):
if value is not None:
return DBAPIBinary(value)
else:
# pyodbc-specific
return dialect.dbapi.BinaryNull
return process
class _ODBCDateTimeOffset(DATETIMEOFFSET):
def bind_processor(self, dialect):
def process(value):
if value is None:
return None
elif isinstance(value, util.string_types):
# if a string was passed directly, allow it through
return value
else:
# Convert to string format required by T-SQL
dto_string = value.strftime("%Y-%m-%d %H:%M:%S.%f %z")
# offset needs a colon, e.g., -0700 -> -07:00
# "UTC offset in the form (+-)HHMM[SS[.ffffff]]"
# backend currently rejects seconds / fractional seconds
dto_string = re.sub(
r"([\+\-]\d{2})([\d\.]+)$", r"\1:\2", dto_string
)
return dto_string
return process
class _VARBINARY_pyodbc(_ms_binary_pyodbc, VARBINARY):
pass
class _BINARY_pyodbc(_ms_binary_pyodbc, BINARY):
pass
class MSExecutionContext_pyodbc(MSExecutionContext):
_embedded_scope_identity = False
def pre_exec(self):
"""where appropriate, issue "select scope_identity()" in the same
statement.
Background on why "scope_identity()" is preferable to "@@identity":
http://msdn.microsoft.com/en-us/library/ms190315.aspx
Background on why we attempt to embed "scope_identity()" into the same
statement as the INSERT:
http://code.google.com/p/pyodbc/wiki/FAQs#How_do_I_retrieve_autogenerated/identity_values?
"""
super(MSExecutionContext_pyodbc, self).pre_exec()
# don't embed the scope_identity select into an
# "INSERT .. DEFAULT VALUES"
if (
self._select_lastrowid
and self.dialect.use_scope_identity
and len(self.parameters[0])
):
self._embedded_scope_identity = True
self.statement += "; select scope_identity()"
def post_exec(self):
if self._embedded_scope_identity:
# Fetch the last inserted id from the manipulated statement
# We may have to skip over a number of result sets with
# no data (due to triggers, etc.)
while True:
try:
# fetchall() ensures the cursor is consumed
# without closing it (FreeTDS particularly)
row = self.cursor.fetchall()[0]
break
except self.dialect.dbapi.Error:
# no way around this - nextset() consumes the previous set
# so we need to just keep flipping
self.cursor.nextset()
self._lastrowid = int(row[0])
else:
super(MSExecutionContext_pyodbc, self).post_exec()
class MSDialect_pyodbc(PyODBCConnector, MSDialect):
# mssql still has problems with this on Linux
supports_sane_rowcount_returning = False
execution_ctx_cls = MSExecutionContext_pyodbc
colspecs = util.update_copy(
MSDialect.colspecs,
{
sqltypes.Numeric: _MSNumeric_pyodbc,
sqltypes.Float: _MSFloat_pyodbc,
BINARY: _BINARY_pyodbc,
DATETIMEOFFSET: _ODBCDateTimeOffset,
# SQL Server dialect has a VARBINARY that is just to support
# "deprecate_large_types" w/ VARBINARY(max), but also we must
# handle the usual SQL standard VARBINARY
VARBINARY: _VARBINARY_pyodbc,
sqltypes.VARBINARY: _VARBINARY_pyodbc,
sqltypes.LargeBinary: _VARBINARY_pyodbc,
},
)
def __init__(
self, description_encoding=None, fast_executemany=False, **params
):
if "description_encoding" in params:
self.description_encoding = params.pop("description_encoding")
super(MSDialect_pyodbc, self).__init__(**params)
self.use_scope_identity = (
self.use_scope_identity
and self.dbapi
and hasattr(self.dbapi.Cursor, "nextset")
)
self._need_decimal_fix = self.dbapi and self._dbapi_version() < (
2,
1,
8,
)
self.fast_executemany = fast_executemany
def _get_server_version_info(self, connection):
try:
# "Version of the instance of SQL Server, in the form
# of 'major.minor.build.revision'"
raw = connection.scalar(
"SELECT CAST(SERVERPROPERTY('ProductVersion') AS VARCHAR)"
)
except exc.DBAPIError:
# SQL Server docs indicate this function isn't present prior to
# 2008. Before we had the VARCHAR cast above, pyodbc would also
# fail on this query.
return super(MSDialect_pyodbc, self)._get_server_version_info(
connection, allow_chars=False
)
else:
version = []
r = re.compile(r"[.\-]")
for n in r.split(raw):
try:
version.append(int(n))
except ValueError:
pass
return tuple(version)
def on_connect(self):
super_ = super(MSDialect_pyodbc, self).on_connect()
def on_connect(conn):
if super_ is not None:
super_(conn)
self._setup_timestampoffset_type(conn)
return on_connect
def _setup_timestampoffset_type(self, connection):
# output converter function for datetimeoffset
def _handle_datetimeoffset(dto_value):
tup = struct.unpack("<6hI2h", dto_value)
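# packed layout: six shorts for year/month/day/hour/minute/second,
# an unsigned int carrying fractional seconds in nanoseconds
# (converted to microseconds below), then two shorts for the
# timezone offset hours and minutes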
return datetime.datetime(
tup[0],
tup[1],
tup[2],
tup[3],
tup[4],
tup[5],
tup[6] // 1000,
util.timezone(
datetime.timedelta(hours=tup[7], minutes=tup[8])
),
)
odbc_SQL_SS_TIMESTAMPOFFSET = -155 # as defined in SQLNCLI.h
connection.add_output_converter(
odbc_SQL_SS_TIMESTAMPOFFSET, _handle_datetimeoffset
)
def do_executemany(self, cursor, statement, parameters, context=None):
if self.fast_executemany:
cursor.fast_executemany = True
super(MSDialect_pyodbc, self).do_executemany(
cursor, statement, parameters, context=context
)
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.Error):
code = e.args[0]
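# ODBC SQLSTATE and driver-specific codes that indicate a dead or
# unusable connection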
if code in (
"08S01",
"01002",
"08003",
"08007",
"08S02",
"08001",
"HYT00",
"HY010",
"10054",
):
return True
return super(MSDialect_pyodbc, self).is_disconnect(
e, connection, cursor
)
dialect = MSDialect_pyodbc
# mssql/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql
:name: Microsoft SQL Server
.. _mssql_identity:
Auto Increment Behavior / IDENTITY Columns
------------------------------------------
SQL Server provides so-called "auto incrementing" behavior using the
``IDENTITY`` construct, which can be placed on any single integer column in a
table. SQLAlchemy considers ``IDENTITY`` within its default "autoincrement"
behavior for an integer primary key column, described at
:paramref:`_schema.Column.autoincrement`. This means that by default,
the first
integer primary key column in a :class:`_schema.Table`
will be considered to be the
identity column and will generate DDL as such::
from sqlalchemy import Table, MetaData, Column, Integer
m = MetaData()
t = Table('t', m,
Column('id', Integer, primary_key=True),
Column('x', Integer))
m.create_all(engine)
The above example will generate DDL as:
.. sourcecode:: sql
CREATE TABLE t (
id INTEGER NOT NULL IDENTITY(1,1),
x INTEGER NULL,
PRIMARY KEY (id)
)
For the case where this default generation of ``IDENTITY`` is not desired,
specify ``False`` for the :paramref:`_schema.Column.autoincrement` flag,
on the first integer primary key column::
m = MetaData()
t = Table('t', m,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('x', Integer))
m.create_all(engine)
To add the ``IDENTITY`` keyword to a non-primary key column, specify
``True`` for the :paramref:`_schema.Column.autoincrement` flag on the desired
:class:`_schema.Column` object, and ensure that
:paramref:`_schema.Column.autoincrement`
is set to ``False`` on any integer primary key column::
m = MetaData()
t = Table('t', m,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('x', Integer, autoincrement=True))
m.create_all(engine)
.. versionchanged:: 1.3 Added ``mssql_identity_start`` and
``mssql_identity_increment`` parameters to :class:`_schema.Column`.
These replace
the use of the :class:`.Sequence` object in order to specify these values.
.. deprecated:: 1.3
The use of :class:`.Sequence` to specify IDENTITY characteristics is
deprecated and will be removed in a future release. Please use
the ``mssql_identity_start`` and ``mssql_identity_increment`` parameters
documented at :ref:`mssql_identity`.
.. note::
There can only be one IDENTITY column on the table. When using
``autoincrement=True`` to enable the IDENTITY keyword, SQLAlchemy does not
guard against multiple columns specifying the option simultaneously. The
SQL Server database will instead reject the ``CREATE TABLE`` statement.
.. note::
An INSERT statement which attempts to provide a value for a column that is
marked with IDENTITY will be rejected by SQL Server. In order for the
value to be accepted, a session-level option "SET IDENTITY_INSERT" must be
enabled. The SQLAlchemy SQL Server dialect will perform this operation
automatically when using a core :class:`_expression.Insert`
construct; if the
execution specifies a value for the IDENTITY column, the "IDENTITY_INSERT"
option will be enabled for the span of that statement's invocation. However,
this scenario does not perform well and should not be relied upon for
normal use. If a table doesn't actually require IDENTITY behavior in its
integer primary key column, the keyword should be disabled when creating
the table by ensuring that ``autoincrement=False`` is set.
Controlling "Start" and "Increment"
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Specific control over the "start" and "increment" values for
the ``IDENTITY`` generator are provided using the
``mssql_identity_start`` and ``mssql_identity_increment`` parameters
passed to the :class:`_schema.Column` object::
from sqlalchemy import Table, Integer, Column
test = Table(
'test', metadata,
Column(
'id', Integer, primary_key=True, mssql_identity_start=100,
mssql_identity_increment=10
),
Column('name', String(20))
)
The CREATE TABLE for the above :class:`_schema.Table` object would be:
.. sourcecode:: sql
CREATE TABLE test (
id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
name VARCHAR(20) NULL,
)
.. versionchanged:: 1.3 The ``mssql_identity_start`` and
``mssql_identity_increment`` parameters are now used to affect the
``IDENTITY`` generator for a :class:`_schema.Column` under SQL Server.
Previously, the :class:`.Sequence` object was used. As SQL Server now
supports real sequences as a separate construct, :class:`.Sequence` will be
functional in the normal way in a future SQLAlchemy version.
INSERT behavior
^^^^^^^^^^^^^^^^
Handling of the ``IDENTITY`` column at INSERT time involves two key
techniques. The most common is being able to fetch the "last inserted value"
for a given ``IDENTITY`` column, a process which SQLAlchemy performs
implicitly in many cases, most importantly within the ORM.
The process for fetching this value has several variants:
* In the vast majority of cases, RETURNING is used in conjunction with INSERT
statements on SQL Server in order to get newly generated primary key values:
.. sourcecode:: sql
INSERT INTO t (x) OUTPUT inserted.id VALUES (?)
* When RETURNING is not available or has been disabled via
``implicit_returning=False``, either the ``scope_identity()`` function or
the ``@@identity`` variable is used; behavior varies by backend:
* when using PyODBC, the phrase ``; select scope_identity()`` will be
appended to the end of the INSERT statement; a second result set will be
fetched in order to receive the value. Given a table as::
t = Table('t', m, Column('id', Integer, primary_key=True),
Column('x', Integer),
implicit_returning=False)
an INSERT will look like:
.. sourcecode:: sql
INSERT INTO t (x) VALUES (?); select scope_identity()
* Other dialects such as pymssql will call upon
``SELECT scope_identity() AS lastrowid`` subsequent to an INSERT
statement. If the flag ``use_scope_identity=False`` is passed to
:func:`_sa.create_engine`,
the statement ``SELECT @@identity AS lastrowid``
is used instead.
A table that contains an ``IDENTITY`` column will prohibit an INSERT statement
that refers to the identity column explicitly. The SQLAlchemy dialect will
detect when an INSERT construct, created using a core
:func:`_expression.insert`
construct (not a plain string SQL), refers to the identity column, and
in this case will emit ``SET IDENTITY_INSERT ON`` prior to the insert
statement proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the
execution. Given this example::
m = MetaData()
t = Table('t', m, Column('id', Integer, primary_key=True),
Column('x', Integer))
m.create_all(engine)
with engine.begin() as conn:
conn.execute(t.insert(), {'id': 1, 'x':1}, {'id':2, 'x':2})
The above column will be created with IDENTITY, however the INSERT statement
we emit is specifying explicit values. In the echo output we can see
how SQLAlchemy handles this:
.. sourcecode:: sql
CREATE TABLE t (
id INTEGER NOT NULL IDENTITY(1,1),
x INTEGER NULL,
PRIMARY KEY (id)
)
COMMIT
SET IDENTITY_INSERT t ON
INSERT INTO t (id, x) VALUES (?, ?)
((1, 1), (2, 2))
SET IDENTITY_INSERT t OFF
COMMIT
This
is an auxiliary use case suitable for testing and bulk insert scenarios.
MAX on VARCHAR / NVARCHAR
-------------------------
SQL Server supports the special string "MAX" within the
:class:`_types.VARCHAR` and :class:`_types.NVARCHAR` datatypes,
to indicate "maximum length possible". The dialect currently handles this as
a length of "None" in the base type, rather than supplying a
dialect-specific version of these types, so that a base type
specified such as ``VARCHAR(None)`` can assume "unlengthed" behavior on
more than one backend without using dialect-specific types.
To build a SQL Server VARCHAR or NVARCHAR with MAX length, use None::
my_table = Table(
'my_table', metadata,
Column('my_data', VARCHAR(None)),
Column('my_n_data', NVARCHAR(None))
)
Collation Support
-----------------
Character collations are supported by the base string types,
specified by the string argument "collation"::
from sqlalchemy import VARCHAR
Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))
When such a column is associated with a :class:`_schema.Table`, the
CREATE TABLE statement for this column will yield::
login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL
LIMIT/OFFSET Support
--------------------
MSSQL has no support for the LIMIT or OFFSET keywords. LIMIT is
supported directly through the ``TOP`` Transact SQL keyword::
select.limit
will yield::
SELECT TOP n
If using SQL Server 2005 or above, LIMIT with OFFSET
support is available through the ``ROW_NUMBER OVER`` construct.
For versions below 2005, LIMIT with OFFSET usage will fail.
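As an illustrative sketch, given a hypothetical table ``t`` with an
integer column ``x``, a statement such as::
stmt = select([t]).order_by(t.c.x).limit(10).offset(20)
is rendered for SQL Server 2005 and above as a subquery that adds a
``ROW_NUMBER() OVER (ORDER BY x)`` label and filters on the row number
range. An ORDER BY is required whenever OFFSET or a non-simple LIMIT is
present; otherwise a ``CompileError`` is raised.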
.. _mssql_isolation_level:
Transaction Isolation Level
---------------------------
All SQL Server dialects support setting of transaction isolation level
both via a dialect-specific parameter
:paramref:`_sa.create_engine.isolation_level`
accepted by :func:`_sa.create_engine`,
as well as the :paramref:`.Connection.execution_options.isolation_level`
argument as passed to
:meth:`_engine.Connection.execution_options`.
This feature works by issuing the
command ``SET TRANSACTION ISOLATION LEVEL <level>`` for
each new connection.
To set isolation level using :func:`_sa.create_engine`::
engine = create_engine(
"mssql+pyodbc://scott:tiger@ms_2008",
isolation_level="REPEATABLE READ"
)
To set using per-connection execution options::
connection = engine.connect()
connection = connection.execution_options(
isolation_level="READ COMMITTED"
)
Valid values for ``isolation_level`` include:
* ``AUTOCOMMIT`` - pyodbc / pymssql-specific
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``SNAPSHOT`` - specific to SQL Server
.. versionadded:: 1.1 support for isolation level setting on Microsoft
SQL Server.
.. versionadded:: 1.2 added AUTOCOMMIT isolation level setting
Nullability
-----------
MSSQL has support for three levels of column nullability. The default
nullability allows nulls and is explicit in the CREATE TABLE
construct::
name VARCHAR(20) NULL
If ``nullable=None`` is specified then no specification is made. In
other words the database's configured default is used. This will
render::
name VARCHAR(20)
If ``nullable`` is ``True`` or ``False`` then the column will be
``NULL`` or ``NOT NULL`` respectively.
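As a brief sketch, the three settings render as::
Column('a', String(20), nullable=True)   # a VARCHAR(20) NULL
Column('b', String(20), nullable=False)  # b VARCHAR(20) NOT NULL
Column('c', String(20), nullable=None)   # c VARCHAR(20)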
Date / Time Handling
--------------------
DATE and TIME are supported. Bind parameters are converted
to datetime.datetime() objects as required by most MSSQL drivers,
and results are processed from strings if needed.
The DATE and TIME types are not available for MSSQL 2005 and
previous - if a server version below 2008 is detected, DDL
for these types will be issued as DATETIME.
.. _mssql_large_type_deprecation:
Large Text/Binary Type Deprecation
----------------------------------
Per
`SQL Server 2012/2014 Documentation <http://technet.microsoft.com/en-us/library/ms187993.aspx>`_,
the ``NTEXT``, ``TEXT`` and ``IMAGE`` datatypes are to be removed from SQL
Server in a future release. SQLAlchemy normally relates these types to the
:class:`.UnicodeText`, :class:`_expression.TextClause` and
:class:`.LargeBinary` datatypes.
In order to accommodate this change, a new flag ``deprecate_large_types``
is added to the dialect, which will be automatically set based on detection
of the server version in use, if not otherwise set by the user. The
behavior of this flag is as follows:
* When this flag is ``True``, the :class:`.UnicodeText`,
:class:`_expression.TextClause` and
:class:`.LargeBinary` datatypes, when used to render DDL, will render the
types ``NVARCHAR(max)``, ``VARCHAR(max)``, and ``VARBINARY(max)``,
respectively. This is a new behavior as of the addition of this flag.
* When this flag is ``False``, the :class:`.UnicodeText`,
:class:`_expression.TextClause` and
:class:`.LargeBinary` datatypes, when used to render DDL, will render the
types ``NTEXT``, ``TEXT``, and ``IMAGE``,
respectively. This is the long-standing behavior of these types.
* The flag begins with the value ``None``, before a database connection is
established. If the dialect is used to render DDL without the flag being
set, it is interpreted the same as ``False``.
* On first connection, the dialect detects if SQL Server version 2012 or
greater is in use; if the flag is still at ``None``, it sets it to ``True``
or ``False`` based on whether 2012 or greater is detected.
* The flag can be set to either ``True`` or ``False`` when the dialect
is created, typically via :func:`_sa.create_engine`::
eng = create_engine("mssql+pymssql://user:pass@host/db",
deprecate_large_types=True)
* Complete control over whether the "old" or "new" types are rendered is
available in all SQLAlchemy versions by using the UPPERCASE type objects
instead: :class:`_types.NVARCHAR`, :class:`_types.VARCHAR`,
:class:`_types.VARBINARY`, :class:`_types.TEXT`, :class:`_mssql.NTEXT`,
:class:`_mssql.IMAGE`
will always remain fixed and always output exactly that
type, as in the sketch below.
.. versionadded:: 1.0.0
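For example, using the uppercase ``NTEXT`` type renders the legacy type
unconditionally (a minimal sketch)::
from sqlalchemy.dialects.mssql import NTEXT
Column('notes', NTEXT)   # always renders "notes NTEXT"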
.. _multipart_schema_names:
Multipart Schema Names
----------------------
SQL Server schemas sometimes require multiple parts to their "schema"
qualifier, that is, including the database name and owner name as separate
tokens, such as ``mydatabase.dbo.some_table``. These multipart names can be set
at once using the :paramref:`_schema.Table.schema` argument of
:class:`_schema.Table`::
Table(
"some_table", metadata,
Column("q", String(50)),
schema="mydatabase.dbo"
)
When performing operations such as table or component reflection, a schema
argument that contains a dot will be split into separate
"database" and "owner" components in order to correctly query the SQL
Server information schema tables, as these two values are stored separately.
Additionally, when rendering the schema name for DDL or SQL, the two
components will be quoted separately for case sensitive names and other
special characters. Given an argument as below::
Table(
"some_table", metadata,
Column("q", String(50)),
schema="MyDataBase.dbo"
)
The above schema would be rendered as ``[MyDataBase].dbo``, and also in
reflection, would be reflected using "dbo" as the owner and "MyDataBase"
as the database name.
To control how the schema name is broken into database / owner,
specify brackets (which in SQL Server are quoting characters) in the name.
Below, the "owner" will be considered as ``MyDataBase.dbo`` and the
"database" will be None::
Table(
"some_table", metadata,
Column("q", String(50)),
schema="[MyDataBase.dbo]"
)
To individually specify both database and owner name with special characters
or embedded dots, use two sets of brackets::
Table(
"some_table", metadata,
Column("q", String(50)),
schema="[MyDataBase.Period].[MyOwner.Dot]"
)
.. versionchanged:: 1.2 the SQL Server dialect now treats brackets as
identifier delimiters, splitting the schema into separate database
and owner tokens, to allow dots within either name itself.
.. _legacy_schema_rendering:
Legacy Schema Mode
------------------
Very old versions of the MSSQL dialect introduced the behavior such that a
schema-qualified table would be auto-aliased when used in a
SELECT statement; given a table::
account_table = Table(
'account', metadata,
Column('id', Integer, primary_key=True),
Column('info', String(100)),
schema="customer_schema"
)
this legacy mode of rendering would assume that "customer_schema.account"
would not be accepted by all parts of the SQL statement, as illustrated
below::
>>> eng = create_engine("mssql+pymssql://mydsn", legacy_schema_aliasing=True)
>>> print(account_table.select().compile(eng))
SELECT account_1.id, account_1.info
FROM customer_schema.account AS account_1
This mode of behavior is now off by default, as it appears to have served
no purpose; however in the case that legacy applications rely upon it,
it is available using the ``legacy_schema_aliasing`` argument to
:func:`_sa.create_engine` as illustrated above.
.. versionchanged:: 1.1 the ``legacy_schema_aliasing`` flag introduced
in version 1.0.5 to allow disabling of legacy mode for schemas now
defaults to False.
.. _mssql_indexes:
Clustered Index Support
-----------------------
The MSSQL dialect supports clustered indexes (and primary keys) via the
``mssql_clustered`` option. This option is available to :class:`.Index`,
:class:`.UniqueConstraint`, and :class:`.PrimaryKeyConstraint`.
To generate a clustered index::
Index("my_index", table.c.x, mssql_clustered=True)
which renders the index as ``CREATE CLUSTERED INDEX my_index ON table (x)``.
To generate a clustered primary key use::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=True))
which will render the table, for example, as::
CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
PRIMARY KEY CLUSTERED (x, y))
Similarly, we can generate a clustered unique constraint using::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x"),
UniqueConstraint("y", mssql_clustered=True),
)
To explicitly request a non-clustered primary key (for example, when
a separate clustered index is desired), use::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=False))
which will render the table, for example, as::
CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
PRIMARY KEY NONCLUSTERED (x, y))
.. versionchanged:: 1.1 the ``mssql_clustered`` option now defaults
to None, rather than False. ``mssql_clustered=False`` now explicitly
renders the NONCLUSTERED clause, whereas None omits the CLUSTERED
clause entirely, allowing SQL Server defaults to take effect.
MSSQL-Specific Index Options
-----------------------------
In addition to clustering, the MSSQL dialect supports other special options
for :class:`.Index`.
INCLUDE
^^^^^^^
The ``mssql_include`` option renders INCLUDE(colname) for the given string
names::
Index("my_index", table.c.x, mssql_include=['y'])
would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)``
.. _mssql_index_where:
Filtered Indexes
^^^^^^^^^^^^^^^^
The ``mssql_where`` option renders a WHERE clause using the given
criterion::
Index("my_index", table.c.x, mssql_where=table.c.x > 10)
would render the index as ``CREATE INDEX my_index ON table (x) WHERE x > 10``.
.. versionadded:: 1.3.4
Index ordering
^^^^^^^^^^^^^^
Index ordering is available via functional expressions, such as::
Index("my_index", table.c.x.desc())
would render the index as ``CREATE INDEX my_index ON table (x DESC)``
.. seealso::
:ref:`schema_indexes_functional`
Compatibility Levels
--------------------
MSSQL supports the notion of setting compatibility levels at the
database level. This allows, for instance, a database that is
compatible with SQL2000 to run on a SQL2005 database
server. ``server_version_info`` will always return the database
server version information (in this case SQL2005) and not the
compatibility level information. Because of this, if running under
a backwards compatibility mode, SQLAlchemy may attempt to use T-SQL
statements that the database server cannot parse.
Triggers
--------
SQLAlchemy by default uses OUTPUT INSERTED to get at newly
generated primary key values via IDENTITY columns or other
server side defaults. MS-SQL does not
allow the usage of OUTPUT INSERTED on tables that have triggers.
To disable the usage of OUTPUT INSERTED on a per-table basis,
specify ``implicit_returning=False`` for each :class:`_schema.Table`
which has triggers::
Table('mytable', metadata,
Column('id', Integer, primary_key=True),
# ...,
implicit_returning=False
)
Declarative form::
class MyClass(Base):
# ...
__table_args__ = {'implicit_returning':False}
This option can also be specified engine-wide using the
``implicit_returning=False`` argument on :func:`_sa.create_engine`.
.. _mssql_rowcount_versioning:
Rowcount Support / ORM Versioning
---------------------------------
The SQL Server drivers may have limited ability to return the number
of rows updated from an UPDATE or DELETE statement.
As of this writing, the PyODBC driver is not able to return a rowcount when
OUTPUT INSERTED is used. This impacts the SQLAlchemy ORM's versioning feature
in many cases where server-side value generators are in use: while the
versioning operations can succeed, the ORM cannot always check that an UPDATE
or DELETE statement matched the number of rows expected, which is how it
verifies that the version identifier matched. When this condition occurs, a
warning will be emitted but the operation will proceed.
The use of OUTPUT INSERTED can be disabled by setting the
:paramref:`_schema.Table.implicit_returning` flag to ``False`` on a particular
:class:`_schema.Table`, which in declarative looks like::
class MyTable(Base):
__tablename__ = 'mytable'
id = Column(Integer, primary_key=True)
stuff = Column(String(10))
timestamp = Column(TIMESTAMP(), default=text('DEFAULT'))
__mapper_args__ = {
'version_id_col': timestamp,
'version_id_generator': False,
}
__table_args__ = {
'implicit_returning': False
}
Enabling Snapshot Isolation
---------------------------
SQL Server has a default transaction
isolation mode that locks entire tables, and causes even mildly concurrent
applications to have long held locks and frequent deadlocks.
Enabling snapshot isolation for the database as a whole is recommended
for modern levels of concurrency support. This is accomplished via the
following ALTER DATABASE commands executed at the SQL prompt::
ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON
ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON
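The same commands may be emitted from SQLAlchemy itself; a minimal
sketch, assuming an existing ``engine`` (AUTOCOMMIT is used, as ALTER
DATABASE cannot run inside a user transaction)::
conn = engine.connect().execution_options(isolation_level="AUTOCOMMIT")
conn.execute("ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON")
conn.execute("ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON")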
Background on SQL Server snapshot isolation is available at
http://msdn.microsoft.com/en-us/library/ms175095.aspx.
""" # noqa
import codecs
import datetime
import operator
import re
from . import information_schema as ischema
from ... import engine
from ... import exc
from ... import schema as sa_schema
from ... import sql
from ... import types as sqltypes
from ... import util
from ...engine import default
from ...engine import reflection
from ...sql import compiler
from ...sql import expression
from ...sql import func
from ...sql import quoted_name
from ...sql import util as sql_util
from ...types import BIGINT
from ...types import BINARY
from ...types import CHAR
from ...types import DATE
from ...types import DATETIME
from ...types import DECIMAL
from ...types import FLOAT
from ...types import INTEGER
from ...types import NCHAR
from ...types import NUMERIC
from ...types import NVARCHAR
from ...types import SMALLINT
from ...types import TEXT
from ...types import VARCHAR
from ...util import update_wrapper
from ...util.langhelpers import public_factory
# http://sqlserverbuilds.blogspot.com/
MS_2016_VERSION = (13,)
MS_2014_VERSION = (12,)
MS_2012_VERSION = (11,)
MS_2008_VERSION = (10,)
MS_2005_VERSION = (9,)
MS_2000_VERSION = (8,)
RESERVED_WORDS = set(
[
"add",
"all",
"alter",
"and",
"any",
"as",
"asc",
"authorization",
"backup",
"begin",
"between",
"break",
"browse",
"bulk",
"by",
"cascade",
"case",
"check",
"checkpoint",
"close",
"clustered",
"coalesce",
"collate",
"column",
"commit",
"compute",
"constraint",
"contains",
"containstable",
"continue",
"convert",
"create",
"cross",
"current",
"current_date",
"current_time",
"current_timestamp",
"current_user",
"cursor",
"database",
"dbcc",
"deallocate",
"declare",
"default",
"delete",
"deny",
"desc",
"disk",
"distinct",
"distributed",
"double",
"drop",
"dump",
"else",
"end",
"errlvl",
"escape",
"except",
"exec",
"execute",
"exists",
"exit",
"external",
"fetch",
"file",
"fillfactor",
"for",
"foreign",
"freetext",
"freetexttable",
"from",
"full",
"function",
"goto",
"grant",
"group",
"having",
"holdlock",
"identity",
"identity_insert",
"identitycol",
"if",
"in",
"index",
"inner",
"insert",
"intersect",
"into",
"is",
"join",
"key",
"kill",
"left",
"like",
"lineno",
"load",
"merge",
"national",
"nocheck",
"nonclustered",
"not",
"null",
"nullif",
"of",
"off",
"offsets",
"on",
"open",
"opendatasource",
"openquery",
"openrowset",
"openxml",
"option",
"or",
"order",
"outer",
"over",
"percent",
"pivot",
"plan",
"precision",
"primary",
"print",
"proc",
"procedure",
"public",
"raiserror",
"read",
"readtext",
"reconfigure",
"references",
"replication",
"restore",
"restrict",
"return",
"revert",
"revoke",
"right",
"rollback",
"rowcount",
"rowguidcol",
"rule",
"save",
"schema",
"securityaudit",
"select",
"session_user",
"set",
"setuser",
"shutdown",
"some",
"statistics",
"system_user",
"table",
"tablesample",
"textsize",
"then",
"to",
"top",
"tran",
"transaction",
"trigger",
"truncate",
"tsequal",
"union",
"unique",
"unpivot",
"update",
"updatetext",
"use",
"user",
"values",
"varying",
"view",
"waitfor",
"when",
"where",
"while",
"with",
"writetext",
]
)
class REAL(sqltypes.REAL):
__visit_name__ = "REAL"
def __init__(self, **kw):
# REAL is a synonym for FLOAT(24) on SQL server.
# it is only accepted as the word "REAL" in DDL, the numeric
# precision value is not allowed to be present
kw.setdefault("precision", 24)
super(REAL, self).__init__(**kw)
class TINYINT(sqltypes.Integer):
__visit_name__ = "TINYINT"
# MSSQL DATE/TIME types have varied behavior, sometimes returning
# strings. MSDate/TIME check for everything, and always
# filter bind parameters into datetime objects (required by pyodbc,
# not sure about other dialects).
class _MSDate(sqltypes.Date):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
_reg = re.compile(r"(\d+)-(\d+)-(\d+)")
def result_processor(self, dialect, coltype):
def process(value):
if isinstance(value, datetime.datetime):
return value.date()
elif isinstance(value, util.string_types):
m = self._reg.match(value)
if not m:
raise ValueError(
"could not parse %r as a date value" % (value,)
)
return datetime.date(*[int(x or 0) for x in m.groups()])
else:
return value
return process
class TIME(sqltypes.TIME):
def __init__(self, precision=None, **kwargs):
self.precision = precision
super(TIME, self).__init__()
__zero_date = datetime.date(1900, 1, 1)
def bind_processor(self, dialect):
def process(value):
if isinstance(value, datetime.datetime):
value = datetime.datetime.combine(
self.__zero_date, value.time()
)
elif isinstance(value, datetime.time):
""" issue #5339
per: https://github.com/mkleehammer/pyodbc/wiki/Tips-and-Tricks-by-Database-Platform#time-columns
pass TIME value as string
""" # noqa
value = str(value)
return value
return process
_reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d{0,6}))?")
def result_processor(self, dialect, coltype):
def process(value):
if isinstance(value, datetime.datetime):
return value.time()
elif isinstance(value, util.string_types):
m = self._reg.match(value)
if not m:
raise ValueError(
"could not parse %r as a time value" % (value,)
)
return datetime.time(*[int(x or 0) for x in m.groups()])
else:
return value
return process
_MSTime = TIME
class _DateTimeBase(object):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
class _MSDateTime(_DateTimeBase, sqltypes.DateTime):
pass
class SMALLDATETIME(_DateTimeBase, sqltypes.DateTime):
__visit_name__ = "SMALLDATETIME"
class DATETIME2(_DateTimeBase, sqltypes.DateTime):
__visit_name__ = "DATETIME2"
def __init__(self, precision=None, **kw):
super(DATETIME2, self).__init__(**kw)
self.precision = precision
# TODO: is this not an Interval ?
class DATETIMEOFFSET(sqltypes.TypeEngine):
__visit_name__ = "DATETIMEOFFSET"
def __init__(self, precision=None, **kwargs):
self.precision = precision
class _UnicodeLiteral(object):
def literal_processor(self, dialect):
def process(value):
value = value.replace("'", "''")
if dialect.identifier_preparer._double_percents:
value = value.replace("%", "%%")
return "N'%s'" % value
return process
class _MSUnicode(_UnicodeLiteral, sqltypes.Unicode):
pass
class _MSUnicodeText(_UnicodeLiteral, sqltypes.UnicodeText):
pass
class TIMESTAMP(sqltypes._Binary):
"""Implement the SQL Server TIMESTAMP type.
Note this is **completely different** than the SQL Standard
TIMESTAMP type, which is not supported by SQL Server. It
is a read-only datatype that does not support INSERT of values.
.. versionadded:: 1.2
.. seealso::
:class:`_mssql.ROWVERSION`
"""
__visit_name__ = "TIMESTAMP"
# expected by _Binary to be present
length = None
def __init__(self, convert_int=False):
"""Construct a TIMESTAMP or ROWVERSION type.
:param convert_int: if True, binary integer values will
be converted to integers on read.
.. versionadded:: 1.2
"""
self.convert_int = convert_int
def result_processor(self, dialect, coltype):
super_ = super(TIMESTAMP, self).result_processor(dialect, coltype)
if self.convert_int:
def process(value):
value = super_(value)
if value is not None:
# https://stackoverflow.com/a/30403242/34549
value = int(codecs.encode(value, "hex"), 16)
return value
return process
else:
return super_
class ROWVERSION(TIMESTAMP):
"""Implement the SQL Server ROWVERSION type.
The ROWVERSION datatype is a SQL Server synonym for the TIMESTAMP
datatype, however current SQL Server documentation suggests using
ROWVERSION for new datatypes going forward.
The ROWVERSION datatype does **not** reflect (e.g. introspect) from the
database as itself; the returned datatype will be
:class:`_mssql.TIMESTAMP`.
This is a read-only datatype that does not support INSERT of values.
.. versionadded:: 1.2
.. seealso::
:class:`_mssql.TIMESTAMP`
"""
__visit_name__ = "ROWVERSION"
class NTEXT(sqltypes.UnicodeText):
"""MSSQL NTEXT type, for variable-length unicode text up to 2^30
characters."""
__visit_name__ = "NTEXT"
class VARBINARY(sqltypes.VARBINARY, sqltypes.LargeBinary):
"""The MSSQL VARBINARY type.
This type is present to support "deprecate_large_types" mode where
either ``VARBINARY(max)`` or IMAGE is rendered. Otherwise, this type
object is redundant vs. :class:`_types.VARBINARY`.
.. versionadded:: 1.0.0
.. seealso::
:ref:`mssql_large_type_deprecation`
"""
__visit_name__ = "VARBINARY"
class IMAGE(sqltypes.LargeBinary):
__visit_name__ = "IMAGE"
class XML(sqltypes.Text):
"""MSSQL XML type.
This is a placeholder type for reflection purposes that does not include
any Python-side datatype support. It also does not currently support
additional arguments, such as "CONTENT", "DOCUMENT",
"xml_schema_collection".
.. versionadded:: 1.1.11
"""
__visit_name__ = "XML"
class BIT(sqltypes.TypeEngine):
__visit_name__ = "BIT"
class MONEY(sqltypes.TypeEngine):
__visit_name__ = "MONEY"
class SMALLMONEY(sqltypes.TypeEngine):
__visit_name__ = "SMALLMONEY"
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
__visit_name__ = "UNIQUEIDENTIFIER"
class SQL_VARIANT(sqltypes.TypeEngine):
__visit_name__ = "SQL_VARIANT"
class TryCast(sql.elements.Cast):
"""Represent a SQL Server TRY_CAST expression.
"""
__visit_name__ = "try_cast"
def __init__(self, *arg, **kw):
"""Create a TRY_CAST expression.
:class:`.TryCast` is a subclass of SQLAlchemy's :class:`.Cast`
construct, and works in the same way, except that the SQL expression
rendered is "TRY_CAST" rather than "CAST"::
from sqlalchemy import select
from sqlalchemy import Numeric
from sqlalchemy.dialects.mssql import try_cast
stmt = select([
try_cast(product_table.c.unit_price, Numeric(10, 4))
])
The above would render::
SELECT TRY_CAST (product_table.unit_price AS NUMERIC(10, 4))
FROM product_table
.. versionadded:: 1.3.7
"""
super(TryCast, self).__init__(*arg, **kw)
try_cast = public_factory(TryCast, ".dialects.mssql.try_cast")
# old names.
MSDateTime = _MSDateTime
MSDate = _MSDate
MSReal = REAL
MSTinyInteger = TINYINT
MSTime = TIME
MSSmallDateTime = SMALLDATETIME
MSDateTime2 = DATETIME2
MSDateTimeOffset = DATETIMEOFFSET
MSText = TEXT
MSNText = NTEXT
MSString = VARCHAR
MSNVarchar = NVARCHAR
MSChar = CHAR
MSNChar = NCHAR
MSBinary = BINARY
MSVarBinary = VARBINARY
MSImage = IMAGE
MSBit = BIT
MSMoney = MONEY
MSSmallMoney = SMALLMONEY
MSUniqueIdentifier = UNIQUEIDENTIFIER
MSVariant = SQL_VARIANT
ischema_names = {
"int": INTEGER,
"bigint": BIGINT,
"smallint": SMALLINT,
"tinyint": TINYINT,
"varchar": VARCHAR,
"nvarchar": NVARCHAR,
"char": CHAR,
"nchar": NCHAR,
"text": TEXT,
"ntext": NTEXT,
"decimal": DECIMAL,
"numeric": NUMERIC,
"float": FLOAT,
"datetime": DATETIME,
"datetime2": DATETIME2,
"datetimeoffset": DATETIMEOFFSET,
"date": DATE,
"time": TIME,
"smalldatetime": SMALLDATETIME,
"binary": BINARY,
"varbinary": VARBINARY,
"bit": BIT,
"real": REAL,
"image": IMAGE,
"xml": XML,
"timestamp": TIMESTAMP,
"money": MONEY,
"smallmoney": SMALLMONEY,
"uniqueidentifier": UNIQUEIDENTIFIER,
"sql_variant": SQL_VARIANT,
}
class MSTypeCompiler(compiler.GenericTypeCompiler):
def _extend(self, spec, type_, length=None):
"""Extend a string-type declaration with standard SQL
COLLATE annotations.
"""
if getattr(type_, "collation", None):
collation = "COLLATE %s" % type_.collation
else:
collation = None
if not length:
length = type_.length
if length:
spec = spec + "(%s)" % length
return " ".join([c for c in (spec, collation) if c is not None])
def visit_FLOAT(self, type_, **kw):
precision = getattr(type_, "precision", None)
if precision is None:
return "FLOAT"
else:
return "FLOAT(%(precision)s)" % {"precision": precision}
def visit_TINYINT(self, type_, **kw):
return "TINYINT"
def visit_DATETIMEOFFSET(self, type_, **kw):
if type_.precision is not None:
return "DATETIMEOFFSET(%s)" % type_.precision
else:
return "DATETIMEOFFSET"
def visit_TIME(self, type_, **kw):
precision = getattr(type_, "precision", None)
if precision is not None:
return "TIME(%s)" % precision
else:
return "TIME"
def visit_TIMESTAMP(self, type_, **kw):
return "TIMESTAMP"
def visit_ROWVERSION(self, type_, **kw):
return "ROWVERSION"
def visit_DATETIME2(self, type_, **kw):
precision = getattr(type_, "precision", None)
if precision is not None:
return "DATETIME2(%s)" % precision
else:
return "DATETIME2"
def visit_SMALLDATETIME(self, type_, **kw):
return "SMALLDATETIME"
def visit_unicode(self, type_, **kw):
return self.visit_NVARCHAR(type_, **kw)
def visit_text(self, type_, **kw):
if self.dialect.deprecate_large_types:
return self.visit_VARCHAR(type_, **kw)
else:
return self.visit_TEXT(type_, **kw)
def visit_unicode_text(self, type_, **kw):
if self.dialect.deprecate_large_types:
return self.visit_NVARCHAR(type_, **kw)
else:
return self.visit_NTEXT(type_, **kw)
def visit_NTEXT(self, type_, **kw):
return self._extend("NTEXT", type_)
def visit_TEXT(self, type_, **kw):
return self._extend("TEXT", type_)
def visit_VARCHAR(self, type_, **kw):
return self._extend("VARCHAR", type_, length=type_.length or "max")
def visit_CHAR(self, type_, **kw):
return self._extend("CHAR", type_)
def visit_NCHAR(self, type_, **kw):
return self._extend("NCHAR", type_)
def visit_NVARCHAR(self, type_, **kw):
return self._extend("NVARCHAR", type_, length=type_.length or "max")
def visit_date(self, type_, **kw):
if self.dialect.server_version_info < MS_2008_VERSION:
return self.visit_DATETIME(type_, **kw)
else:
return self.visit_DATE(type_, **kw)
def visit_time(self, type_, **kw):
if self.dialect.server_version_info < MS_2008_VERSION:
return self.visit_DATETIME(type_, **kw)
else:
return self.visit_TIME(type_, **kw)
def visit_large_binary(self, type_, **kw):
if self.dialect.deprecate_large_types:
return self.visit_VARBINARY(type_, **kw)
else:
return self.visit_IMAGE(type_, **kw)
def visit_IMAGE(self, type_, **kw):
return "IMAGE"
def visit_XML(self, type_, **kw):
return "XML"
def visit_VARBINARY(self, type_, **kw):
return self._extend("VARBINARY", type_, length=type_.length or "max")
def visit_boolean(self, type_, **kw):
return self.visit_BIT(type_)
def visit_BIT(self, type_, **kw):
return "BIT"
def visit_MONEY(self, type_, **kw):
return "MONEY"
def visit_SMALLMONEY(self, type_, **kw):
return "SMALLMONEY"
def visit_UNIQUEIDENTIFIER(self, type_, **kw):
return "UNIQUEIDENTIFIER"
def visit_SQL_VARIANT(self, type_, **kw):
return "SQL_VARIANT"
class MSExecutionContext(default.DefaultExecutionContext):
_enable_identity_insert = False
_select_lastrowid = False
_result_proxy = None
_lastrowid = None
def _opt_encode(self, statement):
if not self.dialect.supports_unicode_statements:
return self.dialect._encoder(statement)[0]
else:
return statement
def pre_exec(self):
"""Activate IDENTITY_INSERT if needed."""
if self.isinsert:
tbl = self.compiled.statement.table
seq_column = tbl._autoincrement_column
insert_has_sequence = seq_column is not None
if insert_has_sequence:
self._enable_identity_insert = (
seq_column.key in self.compiled_parameters[0]
) or (
self.compiled.statement.parameters
and (
(
self.compiled.statement._has_multi_parameters
and (
seq_column.key
in self.compiled.statement.parameters[0]
or seq_column
in self.compiled.statement.parameters[0]
)
)
or (
not self.compiled.statement._has_multi_parameters
and (
seq_column.key
in self.compiled.statement.parameters
or seq_column
in self.compiled.statement.parameters
)
)
)
)
else:
self._enable_identity_insert = False
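# lastrowid is fetched only for a single-row, non-inline INSERT on a
# table with an IDENTITY column, when no RETURNING / OUTPUT clause is
# in use and no explicit value was supplied for the IDENTITY column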
self._select_lastrowid = (
not self.compiled.inline
and insert_has_sequence
and not self.compiled.returning
and not self._enable_identity_insert
and not self.executemany
)
if self._enable_identity_insert:
self.root_connection._cursor_execute(
self.cursor,
self._opt_encode(
"SET IDENTITY_INSERT %s ON"
% self.dialect.identifier_preparer.format_table(tbl)
),
(),
self,
)
def post_exec(self):
"""Disable IDENTITY_INSERT if enabled."""
conn = self.root_connection
if self._select_lastrowid:
if self.dialect.use_scope_identity:
conn._cursor_execute(
self.cursor,
"SELECT scope_identity() AS lastrowid",
(),
self,
)
else:
conn._cursor_execute(
self.cursor, "SELECT @@identity AS lastrowid", (), self
)
# fetchall() ensures the cursor is consumed without closing it
row = self.cursor.fetchall()[0]
self._lastrowid = int(row[0])
if (
self.isinsert or self.isupdate or self.isdelete
) and self.compiled.returning:
self._result_proxy = engine.FullyBufferedResultProxy(self)
if self._enable_identity_insert:
conn._cursor_execute(
self.cursor,
self._opt_encode(
"SET IDENTITY_INSERT %s OFF"
% self.dialect.identifier_preparer.format_table(
self.compiled.statement.table
)
),
(),
self,
)
def get_lastrowid(self):
return self._lastrowid
def handle_dbapi_exception(self, e):
if self._enable_identity_insert:
try:
self.cursor.execute(
self._opt_encode(
"SET IDENTITY_INSERT %s OFF"
% self.dialect.identifier_preparer.format_table(
self.compiled.statement.table
)
)
)
except Exception:
pass
def get_result_proxy(self):
if self._result_proxy:
return self._result_proxy
else:
return engine.ResultProxy(self)
class MSSQLCompiler(compiler.SQLCompiler):
returning_precedes_values = True
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
"doy": "dayofyear",
"dow": "weekday",
"milliseconds": "millisecond",
"microseconds": "microsecond",
},
)
def __init__(self, *args, **kwargs):
self.tablealiases = {}
super(MSSQLCompiler, self).__init__(*args, **kwargs)
def _with_legacy_schema_aliasing(fn):
def decorate(self, *arg, **kw):
if self.dialect.legacy_schema_aliasing:
return fn(self, *arg, **kw)
else:
super_ = getattr(super(MSSQLCompiler, self), fn.__name__)
return super_(*arg, **kw)
return decorate
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_current_date_func(self, fn, **kw):
return "GETDATE()"
def visit_length_func(self, fn, **kw):
return "LEN%s" % self.function_argspec(fn, **kw)
def visit_char_length_func(self, fn, **kw):
return "LEN%s" % self.function_argspec(fn, **kw)
def visit_concat_op_binary(self, binary, operator, **kw):
return "%s + %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_true(self, expr, **kw):
return "1"
def visit_false(self, expr, **kw):
return "0"
def visit_match_op_binary(self, binary, operator, **kw):
return "CONTAINS (%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def get_select_precolumns(self, select, **kw):
""" MS-SQL puts TOP, it's version of LIMIT here """
s = ""
if select._distinct:
s += "DISTINCT "
if select._simple_int_limit and (
select._offset_clause is None
or (select._simple_int_offset and select._offset == 0)
):
# ODBC drivers and possibly others
# don't support bind params in the SELECT clause on SQL Server.
# so have to use literal here.
s += "TOP %d " % select._limit
if s:
return s
else:
return compiler.SQLCompiler.get_select_precolumns(
self, select, **kw
)
def get_from_hint_text(self, table, text):
return text
def get_crud_hint_text(self, table, text):
return text
def limit_clause(self, select, **kw):
# Limit in mssql is after the select keyword
return ""
def visit_try_cast(self, element, **kw):
return "TRY_CAST (%s AS %s)" % (
self.process(element.clause, **kw),
self.process(element.typeclause, **kw),
)
def visit_select(self, select, **kwargs):
"""Look for ``LIMIT`` and OFFSET in a select statement, and if
so tries to wrap it in a subquery with ``row_number()`` criterion.
"""
if (
(not select._simple_int_limit and select._limit_clause is not None)
or (
select._offset_clause is not None
and not select._simple_int_offset
or select._offset
)
) and not getattr(select, "_mssql_visit", None):
# to use ROW_NUMBER(), an ORDER BY is required.
if not select._order_by_clause.clauses:
raise exc.CompileError(
"MSSQL requires an order_by when "
"using an OFFSET or a non-simple "
"LIMIT clause"
)
_order_by_clauses = [
sql_util.unwrap_label_reference(elem)
for elem in select._order_by_clause.clauses
]
limit_clause = select._limit_clause
offset_clause = select._offset_clause
kwargs["select_wraps_for"] = select
select = select._generate()
select._mssql_visit = True
select = (
select.column(
sql.func.ROW_NUMBER()
.over(order_by=_order_by_clauses)
.label("mssql_rn")
)
.order_by(None)
.alias()
)
mssql_rn = sql.column("mssql_rn")
limitselect = sql.select(
[c for c in select.c if c.key != "mssql_rn"]
)
if offset_clause is not None:
limitselect.append_whereclause(mssql_rn > offset_clause)
if limit_clause is not None:
limitselect.append_whereclause(
mssql_rn <= (limit_clause + offset_clause)
)
else:
limitselect.append_whereclause(mssql_rn <= (limit_clause))
return self.process(limitselect, **kwargs)
else:
return compiler.SQLCompiler.visit_select(self, select, **kwargs)
@_with_legacy_schema_aliasing
def visit_table(self, table, mssql_aliased=False, iscrud=False, **kwargs):
if mssql_aliased is table or iscrud:
return super(MSSQLCompiler, self).visit_table(table, **kwargs)
# alias schema-qualified tables
alias = self._schema_aliased_table(table)
if alias is not None:
return self.process(alias, mssql_aliased=table, **kwargs)
else:
return super(MSSQLCompiler, self).visit_table(table, **kwargs)
@_with_legacy_schema_aliasing
def visit_alias(self, alias, **kw):
# translate for schema-qualified table aliases
kw["mssql_aliased"] = alias.original
return super(MSSQLCompiler, self).visit_alias(alias, **kw)
@_with_legacy_schema_aliasing
def visit_column(self, column, add_to_result_map=None, **kw):
if (
column.table is not None
and (not self.isupdate and not self.isdelete)
or self.is_subquery()
):
# translate for schema-qualified table aliases
t = self._schema_aliased_table(column.table)
if t is not None:
converted = expression._corresponding_column_or_error(
t, column
)
if add_to_result_map is not None:
add_to_result_map(
column.name,
column.name,
(column, column.name, column.key),
column.type,
)
return super(MSSQLCompiler, self).visit_column(converted, **kw)
return super(MSSQLCompiler, self).visit_column(
column, add_to_result_map=add_to_result_map, **kw
)
def _schema_aliased_table(self, table):
if getattr(table, "schema", None) is not None:
if table not in self.tablealiases:
self.tablealiases[table] = table.alias()
return self.tablealiases[table]
else:
return None
def visit_extract(self, extract, **kw):
field = self.extract_map.get(extract.field, extract.field)
return "DATEPART(%s, %s)" % (field, self.process(extract.expr, **kw))
def visit_savepoint(self, savepoint_stmt):
return "SAVE TRANSACTION %s" % self.preparer.format_savepoint(
savepoint_stmt
)
def visit_rollback_to_savepoint(self, savepoint_stmt):
return "ROLLBACK TRANSACTION %s" % self.preparer.format_savepoint(
savepoint_stmt
)
def visit_binary(self, binary, **kwargs):
"""Move bind parameters to the right-hand side of an operator, where
possible.
"""
if (
isinstance(binary.left, expression.BindParameter)
and binary.operator == operator.eq
and not isinstance(binary.right, expression.BindParameter)
):
return self.process(
expression.BinaryExpression(
binary.right, binary.left, binary.operator
),
**kwargs
)
return super(MSSQLCompiler, self).visit_binary(binary, **kwargs)
def returning_clause(self, stmt, returning_cols):
if self.isinsert or self.isupdate:
target = stmt.table.alias("inserted")
else:
target = stmt.table.alias("deleted")
adapter = sql_util.ClauseAdapter(target)
columns = [
self._label_select_column(
None, adapter.traverse(c), True, False, {}
)
for c in expression._select_iterables(returning_cols)
]
return "OUTPUT " + ", ".join(columns)
def get_cte_preamble(self, recursive):
# SQL Server finds it too inconvenient to accept
# an entirely optional, SQL standard specified,
# "RECURSIVE" word with their "WITH",
# so here we go
return "WITH"
def label_select_column(self, select, column, asfrom):
if isinstance(column, expression.Function):
return column.label(None)
else:
return super(MSSQLCompiler, self).label_select_column(
select, column, asfrom
)
def for_update_clause(self, select):
# "FOR UPDATE" is only allowed on "DECLARE CURSOR" which
# SQLAlchemy doesn't use
return ""
def order_by_clause(self, select, **kw):
# MSSQL only allows ORDER BY in subqueries if there is a LIMIT
if self.is_subquery() and not select._limit:
# avoid processing the order by clause if we won't end up
# using it, because we don't want all the bind params tacked
# onto the positional list if that is what the dbapi requires
return ""
order_by = self.process(select._order_by_clause, **kw)
if order_by:
return " ORDER BY " + order_by
else:
return ""
def update_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
"""Render the UPDATE..FROM clause specific to MSSQL.
In MSSQL, if the UPDATE statement involves an alias of the table to
be updated, then the table itself must be added to the FROM list as
well. Otherwise, it is optional. Here, we add it regardless.
"""
return "FROM " + ", ".join(
t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
for t in [from_table] + extra_froms
)
def delete_table_clause(self, delete_stmt, from_table, extra_froms):
"""If we have extra froms make sure we render any alias as hint."""
ashint = False
if extra_froms:
ashint = True
return from_table._compiler_dispatch(
self, asfrom=True, iscrud=True, ashint=ashint
)
def delete_extra_from_clause(
self, delete_stmt, from_table, extra_froms, from_hints, **kw
):
"""Render the DELETE .. FROM clause specific to MSSQL.
Yes, it has the FROM keyword twice.
"""
return "FROM " + ", ".join(
t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
for t in [from_table] + extra_froms
)
def visit_empty_set_expr(self, type_):
return "SELECT 1 WHERE 1!=1"
def visit_is_distinct_from_binary(self, binary, operator, **kw):
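        # INTERSECT treats NULLs as equal, so wrapping both sides in
        # SELECT ... INTERSECT SELECT ... yields a NULL-safe comparison.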
return "NOT EXISTS (SELECT %s INTERSECT SELECT %s)" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_isnot_distinct_from_binary(self, binary, operator, **kw):
return "EXISTS (SELECT %s INTERSECT SELECT %s)" % (
self.process(binary.left),
self.process(binary.right),
)
class MSSQLStrictCompiler(MSSQLCompiler):
"""A subclass of MSSQLCompiler which disables the usage of bind
parameters where not allowed natively by MS-SQL.
A dialect may use this compiler on a platform where native
binds are used.
"""
ansi_bind_rules = True
def visit_in_op_binary(self, binary, operator, **kw):
kw["literal_binds"] = True
return "%s IN %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_notin_op_binary(self, binary, operator, **kw):
kw["literal_binds"] = True
return "%s NOT IN %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def render_literal_value(self, value, type_):
"""
For date and datetime values, convert to a string
format acceptable to MSSQL. That seems to be the
so-called ODBC canonical date format which looks
like this:
yyyy-mm-dd hh:mi:ss.mmm(24h)
For other data types, call the base class implementation.
"""
# datetime and date are both subclasses of datetime.date
if issubclass(type(value), datetime.date):
# SQL Server wants single quotes around the date string.
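            # e.g. datetime.date(2020, 1, 2) renders as '2020-01-02'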
return "'" + str(value) + "'"
else:
return super(MSSQLStrictCompiler, self).render_literal_value(
value, type_
)
class MSDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column)
# type is not accepted in a computed column
if column.computed is not None:
colspec += " " + self.process(column.computed)
else:
colspec += " " + self.dialect.type_compiler.process(
column.type, type_expression=column
)
if column.nullable is not None:
if (
not column.nullable
or column.primary_key
or isinstance(column.default, sa_schema.Sequence)
or column.autoincrement is True
):
colspec += " NOT NULL"
elif column.computed is None:
# don't specify "NULL" for computed columns
colspec += " NULL"
if column.table is None:
raise exc.CompileError(
"mssql requires Table-bound columns "
"in order to generate DDL"
)
        # emit an IDENTITY clause if we have either a Sequence or an
        # implicit IDENTITY column
if isinstance(column.default, sa_schema.Sequence):
if (
column.default.start is not None
or column.default.increment is not None
or column is not column.table._autoincrement_column
):
util.warn_deprecated(
"Use of Sequence with SQL Server in order to affect the "
"parameters of the IDENTITY value is deprecated, as "
"Sequence "
"will correspond to an actual SQL Server "
"CREATE SEQUENCE in "
"a future release. Please use the mssql_identity_start "
"and mssql_identity_increment parameters."
)
if column.default.start == 0:
start = 0
else:
start = column.default.start or 1
colspec += " IDENTITY(%s,%s)" % (
start,
column.default.increment or 1,
)
elif (
column is column.table._autoincrement_column
or column.autoincrement is True
):
start = column.dialect_options["mssql"]["identity_start"]
increment = column.dialect_options["mssql"]["identity_increment"]
colspec += " IDENTITY(%s,%s)" % (start, increment)
else:
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
return colspec
def visit_create_index(self, create, include_schema=False):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
# handle clustering option
clustered = index.dialect_options["mssql"]["clustered"]
if clustered is not None:
if clustered:
text += "CLUSTERED "
else:
text += "NONCLUSTERED "
text += "INDEX %s ON %s (%s)" % (
self._prepared_index_name(index, include_schema=include_schema),
preparer.format_table(index.table),
", ".join(
self.sql_compiler.process(
expr, include_table=False, literal_binds=True
)
for expr in index.expressions
),
)
whereclause = index.dialect_options["mssql"]["where"]
if whereclause is not None:
where_compiled = self.sql_compiler.process(
whereclause, include_table=False, literal_binds=True
)
text += " WHERE " + where_compiled
# handle other included columns
if index.dialect_options["mssql"]["include"]:
inclusions = [
index.table.c[col]
if isinstance(col, util.string_types)
else col
for col in index.dialect_options["mssql"]["include"]
]
text += " INCLUDE (%s)" % ", ".join(
[preparer.quote(c.name) for c in inclusions]
)
return text
def visit_drop_index(self, drop):
return "\nDROP INDEX %s ON %s" % (
self._prepared_index_name(drop.element, include_schema=False),
self.preparer.format_table(drop.element.table),
)
def visit_primary_key_constraint(self, constraint):
if len(constraint) == 0:
return ""
text = ""
if constraint.name is not None:
text += "CONSTRAINT %s " % self.preparer.format_constraint(
constraint
)
text += "PRIMARY KEY "
clustered = constraint.dialect_options["mssql"]["clustered"]
if clustered is not None:
if clustered:
text += "CLUSTERED "
else:
text += "NONCLUSTERED "
text += "(%s)" % ", ".join(
self.preparer.quote(c.name) for c in constraint
)
text += self.define_constraint_deferrability(constraint)
return text
def visit_unique_constraint(self, constraint):
if len(constraint) == 0:
return ""
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "UNIQUE "
clustered = constraint.dialect_options["mssql"]["clustered"]
if clustered is not None:
if clustered:
text += "CLUSTERED "
else:
text += "NONCLUSTERED "
text += "(%s)" % ", ".join(
self.preparer.quote(c.name) for c in constraint
)
text += self.define_constraint_deferrability(constraint)
return text
def visit_computed_column(self, generated):
text = "AS (%s)" % self.sql_compiler.process(
generated.sqltext, include_table=False, literal_binds=True
)
# explicitly check for True|False since None means server default
if generated.persisted is True:
text += " PERSISTED"
return text
class MSIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
def __init__(self, dialect):
super(MSIdentifierPreparer, self).__init__(
dialect,
initial_quote="[",
final_quote="]",
quote_case_sensitive_collations=False,
)
def _escape_identifier(self, value):
return value
def quote_schema(self, schema, force=None):
"""Prepare a quoted table and schema name."""
# need to re-implement the deprecation warning entirely
if force is not None:
# not using the util.deprecated_params() decorator in this
# case because of the additional function call overhead on this
# very performance-critical spot.
util.warn_deprecated(
"The IdentifierPreparer.quote_schema.force parameter is "
"deprecated and will be removed in a future release. This "
"flag has no effect on the behavior of the "
"IdentifierPreparer.quote method; please refer to "
"quoted_name()."
)
dbname, owner = _schema_elements(schema)
if dbname:
result = "%s.%s" % (self.quote(dbname), self.quote(owner))
elif owner:
result = self.quote(owner)
else:
result = ""
return result
def _db_plus_owner_listing(fn):
def wrap(dialect, connection, schema=None, **kw):
dbname, owner = _owner_plus_db(dialect, schema)
return _switch_db(
dbname,
connection,
fn,
dialect,
connection,
dbname,
owner,
schema,
**kw
)
return update_wrapper(wrap, fn)
def _db_plus_owner(fn):
def wrap(dialect, connection, tablename, schema=None, **kw):
dbname, owner = _owner_plus_db(dialect, schema)
return _switch_db(
dbname,
connection,
fn,
dialect,
connection,
tablename,
dbname,
owner,
schema,
**kw
)
return update_wrapper(wrap, fn)
def _switch_db(dbname, connection, fn, *arg, **kw):
if dbname:
current_db = connection.scalar("select db_name()")
if current_db != dbname:
connection.execute(
"use %s" % connection.dialect.identifier_preparer.quote(dbname)
)
try:
return fn(*arg, **kw)
finally:
if dbname and current_db != dbname:
connection.execute(
"use %s"
% connection.dialect.identifier_preparer.quote(current_db)
)
def _owner_plus_db(dialect, schema):
if not schema:
return None, dialect.default_schema_name
elif "." in schema:
return _schema_elements(schema)
else:
return None, schema
_memoized_schema = util.LRUCache()
def _schema_elements(schema):
if isinstance(schema, quoted_name) and schema.quote:
return None, schema
if schema in _memoized_schema:
return _memoized_schema[schema]
# tests for this function are in:
# test/dialect/mssql/test_reflection.py ->
# OwnerPlusDBTest.test_owner_database_pairs
# test/dialect/mssql/test_compiler.py -> test_force_schema_*
# test/dialect/mssql/test_compiler.py -> test_schema_many_tokens_*
#
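    # e.g. "mydb.dbo"    -> ("mydb", "dbo")
    #      "[my.db].dbo" -> ("my.db", "dbo")   (brackets are stripped)
    #      "dbo"         -> (None, "dbo")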
push = []
symbol = ""
bracket = False
has_brackets = False
for token in re.split(r"(\[|\]|\.)", schema):
if not token:
continue
if token == "[":
bracket = True
has_brackets = True
elif token == "]":
bracket = False
elif not bracket and token == ".":
if has_brackets:
push.append("[%s]" % symbol)
else:
push.append(symbol)
symbol = ""
has_brackets = False
else:
symbol += token
if symbol:
push.append(symbol)
if len(push) > 1:
dbname, owner = ".".join(push[0:-1]), push[-1]
# test for internal brackets
if re.match(r".*\].*\[.*", dbname[1:-1]):
dbname = quoted_name(dbname, quote=False)
else:
dbname = dbname.lstrip("[").rstrip("]")
elif len(push):
dbname, owner = None, push[0]
else:
dbname, owner = None, None
_memoized_schema[schema] = dbname, owner
return dbname, owner
class MSDialect(default.DefaultDialect):
name = "mssql"
supports_default_values = True
supports_empty_insert = False
execution_ctx_cls = MSExecutionContext
use_scope_identity = True
max_identifier_length = 128
schema_name = "dbo"
colspecs = {
sqltypes.DateTime: _MSDateTime,
sqltypes.Date: _MSDate,
sqltypes.Time: TIME,
sqltypes.Unicode: _MSUnicode,
sqltypes.UnicodeText: _MSUnicodeText,
}
engine_config_types = default.DefaultDialect.engine_config_types.union(
[("legacy_schema_aliasing", util.asbool)]
)
ischema_names = ischema_names
supports_native_boolean = False
non_native_boolean_check_constraint = False
supports_unicode_binds = True
postfetch_lastrowid = True
_supports_nvarchar_max = False
server_version_info = ()
statement_compiler = MSSQLCompiler
ddl_compiler = MSDDLCompiler
type_compiler = MSTypeCompiler
preparer = MSIdentifierPreparer
construct_arguments = [
(sa_schema.PrimaryKeyConstraint, {"clustered": None}),
(sa_schema.UniqueConstraint, {"clustered": None}),
(sa_schema.Index, {"clustered": None, "include": None, "where": None}),
(sa_schema.Column, {"identity_start": 1, "identity_increment": 1}),
]
def __init__(
self,
query_timeout=None,
use_scope_identity=True,
schema_name="dbo",
isolation_level=None,
deprecate_large_types=None,
legacy_schema_aliasing=False,
**opts
):
self.query_timeout = int(query_timeout or 0)
self.schema_name = schema_name
self.use_scope_identity = use_scope_identity
self.deprecate_large_types = deprecate_large_types
self.legacy_schema_aliasing = legacy_schema_aliasing
super(MSDialect, self).__init__(**opts)
self.isolation_level = isolation_level
def do_savepoint(self, connection, name):
# give the DBAPI a push
connection.execute("IF @@TRANCOUNT = 0 BEGIN TRANSACTION")
super(MSDialect, self).do_savepoint(connection, name)
def do_release_savepoint(self, connection, name):
# SQL Server does not support RELEASE SAVEPOINT
pass
_isolation_lookup = set(
[
"SERIALIZABLE",
"READ UNCOMMITTED",
"READ COMMITTED",
"REPEATABLE READ",
"SNAPSHOT",
]
)
def set_isolation_level(self, connection, level):
level = level.replace("_", " ")
if level not in self._isolation_lookup:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s"
% (level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
cursor.execute("SET TRANSACTION ISOLATION LEVEL %s" % level)
cursor.close()
if level == "SNAPSHOT":
connection.commit()
def get_isolation_level(self, connection):
if self.server_version_info < MS_2005_VERSION:
raise NotImplementedError(
"Can't fetch isolation level prior to SQL Server 2005"
)
last_error = None
views = ("sys.dm_exec_sessions", "sys.dm_pdw_nodes_exec_sessions")
for view in views:
cursor = connection.cursor()
try:
cursor.execute(
"""
SELECT CASE transaction_isolation_level
WHEN 0 THEN NULL
WHEN 1 THEN 'READ UNCOMMITTED'
WHEN 2 THEN 'READ COMMITTED'
WHEN 3 THEN 'REPEATABLE READ'
WHEN 4 THEN 'SERIALIZABLE'
WHEN 5 THEN 'SNAPSHOT' END AS TRANSACTION_ISOLATION_LEVEL
FROM %s
where session_id = @@SPID
"""
% view
)
val = cursor.fetchone()[0]
except self.dbapi.Error as err:
# Python3 scoping rules
last_error = err
continue
else:
return val.upper()
finally:
cursor.close()
else:
# note that the NotImplementedError is caught by
# DefaultDialect, so the warning here is all that displays
util.warn(
"Could not fetch transaction isolation level, "
"tried views: %s; final error was: %s" % (views, last_error)
)
raise NotImplementedError(
"Can't fetch isolation level on this particular "
"SQL Server version. tried views: %s; final error was: %s"
% (views, last_error)
)
def initialize(self, connection):
super(MSDialect, self).initialize(connection)
self._setup_version_attributes()
self._setup_supports_nvarchar_max(connection)
def on_connect(self):
if self.isolation_level is not None:
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
return connect
else:
return None
def _setup_version_attributes(self):
if self.server_version_info[0] not in list(range(8, 17)):
util.warn(
"Unrecognized server version info '%s'. Some SQL Server "
"features may not function properly."
% ".".join(str(x) for x in self.server_version_info)
)
if (
self.server_version_info >= MS_2005_VERSION
and "implicit_returning" not in self.__dict__
):
self.implicit_returning = True
if self.server_version_info >= MS_2008_VERSION:
self.supports_multivalues_insert = True
if self.deprecate_large_types is None:
self.deprecate_large_types = (
self.server_version_info >= MS_2012_VERSION
)
def _setup_supports_nvarchar_max(self, connection):
try:
connection.scalar(
sql.text("SELECT CAST('test max support' AS NVARCHAR(max))")
)
except exc.DBAPIError:
self._supports_nvarchar_max = False
else:
self._supports_nvarchar_max = True
def _get_default_schema_name(self, connection):
if self.server_version_info < MS_2005_VERSION:
return self.schema_name
else:
query = sql.text("SELECT schema_name()")
default_schema_name = connection.scalar(query)
if default_schema_name is not None:
# guard against the case where the default_schema_name is being
# fed back into a table reflection function.
return quoted_name(default_schema_name, quote=True)
else:
return self.schema_name
@_db_plus_owner
def has_table(self, connection, tablename, dbname, owner, schema):
columns = ischema.columns
whereclause = columns.c.table_name == tablename
if owner:
whereclause = sql.and_(
whereclause, columns.c.table_schema == owner
)
s = sql.select([columns], whereclause)
c = connection.execute(s)
return c.first() is not None
@reflection.cache
def get_schema_names(self, connection, **kw):
s = sql.select(
[ischema.schemata.c.schema_name],
order_by=[ischema.schemata.c.schema_name],
)
schema_names = [r[0] for r in connection.execute(s)]
return schema_names
@reflection.cache
@_db_plus_owner_listing
def get_table_names(self, connection, dbname, owner, schema, **kw):
tables = ischema.tables
s = sql.select(
[tables.c.table_name],
sql.and_(
tables.c.table_schema == owner,
tables.c.table_type == "BASE TABLE",
),
order_by=[tables.c.table_name],
)
table_names = [r[0] for r in connection.execute(s)]
return table_names
@reflection.cache
@_db_plus_owner_listing
def get_view_names(self, connection, dbname, owner, schema, **kw):
tables = ischema.tables
s = sql.select(
[tables.c.table_name],
sql.and_(
tables.c.table_schema == owner, tables.c.table_type == "VIEW"
),
order_by=[tables.c.table_name],
)
view_names = [r[0] for r in connection.execute(s)]
return view_names
@reflection.cache
@_db_plus_owner
def get_indexes(self, connection, tablename, dbname, owner, schema, **kw):
        # index reflection here relies on system catalogs, which aren't
        # queryable below MS 2005
if self.server_version_info < MS_2005_VERSION:
return []
rp = connection.execute(
sql.text(
"select ind.index_id, ind.is_unique, ind.name "
"from sys.indexes as ind join sys.tables as tab on "
"ind.object_id=tab.object_id "
"join sys.schemas as sch on sch.schema_id=tab.schema_id "
"where tab.name = :tabname "
"and sch.name=:schname "
"and ind.is_primary_key=0 and ind.type != 0"
)
.bindparams(
sql.bindparam("tabname", tablename, ischema.CoerceUnicode()),
sql.bindparam("schname", owner, ischema.CoerceUnicode()),
)
.columns(name=sqltypes.Unicode())
)
indexes = {}
for row in rp:
indexes[row["index_id"]] = {
"name": row["name"],
"unique": row["is_unique"] == 1,
"column_names": [],
}
rp = connection.execute(
sql.text(
"select ind_col.index_id, ind_col.object_id, col.name "
"from sys.columns as col "
"join sys.tables as tab on tab.object_id=col.object_id "
"join sys.index_columns as ind_col on "
"(ind_col.column_id=col.column_id and "
"ind_col.object_id=tab.object_id) "
"join sys.schemas as sch on sch.schema_id=tab.schema_id "
"where tab.name=:tabname "
"and sch.name=:schname"
)
.bindparams(
sql.bindparam("tabname", tablename, ischema.CoerceUnicode()),
sql.bindparam("schname", owner, ischema.CoerceUnicode()),
)
.columns(name=sqltypes.Unicode())
)
for row in rp:
if row["index_id"] in indexes:
indexes[row["index_id"]]["column_names"].append(row["name"])
return list(indexes.values())
@reflection.cache
@_db_plus_owner
def get_view_definition(
self, connection, viewname, dbname, owner, schema, **kw
):
rp = connection.execute(
sql.text(
"select definition from sys.sql_modules as mod, "
"sys.views as views, "
"sys.schemas as sch"
" where "
"mod.object_id=views.object_id and "
"views.schema_id=sch.schema_id and "
"views.name=:viewname and sch.name=:schname"
).bindparams(
sql.bindparam("viewname", viewname, ischema.CoerceUnicode()),
sql.bindparam("schname", owner, ischema.CoerceUnicode()),
)
)
if rp:
view_def = rp.scalar()
return view_def
@reflection.cache
@_db_plus_owner
def get_columns(self, connection, tablename, dbname, owner, schema, **kw):
# Get base columns
columns = ischema.columns
computed_cols = ischema.computed_columns
if owner:
whereclause = sql.and_(
columns.c.table_name == tablename,
columns.c.table_schema == owner,
)
table_fullname = "%s.%s" % (owner, tablename)
full_name = columns.c.table_schema + "." + columns.c.table_name
join_on = computed_cols.c.object_id == func.object_id(full_name)
else:
whereclause = columns.c.table_name == tablename
table_fullname = tablename
join_on = computed_cols.c.object_id == func.object_id(
columns.c.table_name
)
join_on = sql.and_(
join_on, columns.c.column_name == computed_cols.c.name
)
join = columns.join(computed_cols, onclause=join_on, isouter=True)
if self._supports_nvarchar_max:
computed_definition = computed_cols.c.definition
else:
# tds_version 4.2 does not support NVARCHAR(MAX)
computed_definition = sql.cast(
computed_cols.c.definition, NVARCHAR(4000)
)
s = sql.select(
[columns, computed_definition, computed_cols.c.is_persisted],
whereclause,
from_obj=join,
order_by=[columns.c.ordinal_position],
)
c = connection.execute(s)
cols = []
while True:
row = c.fetchone()
if row is None:
break
name = row[columns.c.column_name]
type_ = row[columns.c.data_type]
nullable = row[columns.c.is_nullable] == "YES"
charlen = row[columns.c.character_maximum_length]
numericprec = row[columns.c.numeric_precision]
numericscale = row[columns.c.numeric_scale]
default = row[columns.c.column_default]
collation = row[columns.c.collation_name]
definition = row[computed_definition]
is_persisted = row[computed_cols.c.is_persisted]
coltype = self.ischema_names.get(type_, None)
kwargs = {}
if coltype in (
MSString,
MSChar,
MSNVarchar,
MSNChar,
MSText,
MSNText,
MSBinary,
MSVarBinary,
sqltypes.LargeBinary,
):
if charlen == -1:
charlen = None
kwargs["length"] = charlen
if collation:
kwargs["collation"] = collation
if coltype is None:
util.warn(
"Did not recognize type '%s' of column '%s'"
% (type_, name)
)
coltype = sqltypes.NULLTYPE
else:
if issubclass(coltype, sqltypes.Numeric):
kwargs["precision"] = numericprec
if not issubclass(coltype, sqltypes.Float):
kwargs["scale"] = numericscale
coltype = coltype(**kwargs)
cdict = {
"name": name,
"type": coltype,
"nullable": nullable,
"default": default,
"autoincrement": False,
}
if definition is not None and is_persisted is not None:
cdict["computed"] = {
"sqltext": definition,
"persisted": is_persisted,
}
cols.append(cdict)
# autoincrement and identity
colmap = {}
for col in cols:
colmap[col["name"]] = col
# We also run an sp_columns to check for identity columns:
cursor = connection.execute(
"sp_columns @table_name = '%s', "
"@table_owner = '%s'" % (tablename, owner)
)
ic = None
while True:
row = cursor.fetchone()
if row is None:
break
(col_name, type_name) = row[3], row[5]
if type_name.endswith("identity") and col_name in colmap:
ic = col_name
colmap[col_name]["autoincrement"] = True
colmap[col_name]["dialect_options"] = {
"mssql_identity_start": 1,
"mssql_identity_increment": 1,
}
break
cursor.close()
if ic is not None and self.server_version_info >= MS_2005_VERSION:
table_fullname = "%s.%s" % (owner, tablename)
cursor = connection.execute(
"select ident_seed('%s'), ident_incr('%s')"
% (table_fullname, table_fullname)
)
row = cursor.first()
if row is not None and row[0] is not None:
colmap[ic]["dialect_options"].update(
{
"mssql_identity_start": int(row[0]),
"mssql_identity_increment": int(row[1]),
}
)
return cols
@reflection.cache
@_db_plus_owner
def get_pk_constraint(
self, connection, tablename, dbname, owner, schema, **kw
):
pkeys = []
TC = ischema.constraints
C = ischema.key_constraints.alias("C")
# Primary key constraints
s = sql.select(
[C.c.column_name, TC.c.constraint_type, C.c.constraint_name],
sql.and_(
TC.c.constraint_name == C.c.constraint_name,
TC.c.table_schema == C.c.table_schema,
C.c.table_name == tablename,
C.c.table_schema == owner,
),
)
c = connection.execute(s)
constraint_name = None
for row in c:
if "PRIMARY" in row[TC.c.constraint_type.name]:
pkeys.append(row[0])
if constraint_name is None:
constraint_name = row[C.c.constraint_name.name]
return {"constrained_columns": pkeys, "name": constraint_name}
@reflection.cache
@_db_plus_owner
def get_foreign_keys(
self, connection, tablename, dbname, owner, schema, **kw
):
RR = ischema.ref_constraints
C = ischema.key_constraints.alias("C")
R = ischema.key_constraints.alias("R")
# Foreign key constraints
s = sql.select(
[
C.c.column_name,
R.c.table_schema,
R.c.table_name,
R.c.column_name,
RR.c.constraint_name,
RR.c.match_option,
RR.c.update_rule,
RR.c.delete_rule,
],
sql.and_(
C.c.table_name == tablename,
C.c.table_schema == owner,
RR.c.constraint_schema == C.c.table_schema,
C.c.constraint_name == RR.c.constraint_name,
R.c.constraint_name == RR.c.unique_constraint_name,
R.c.constraint_schema == RR.c.unique_constraint_schema,
C.c.ordinal_position == R.c.ordinal_position,
),
order_by=[RR.c.constraint_name, R.c.ordinal_position],
)
# group rows by constraint ID, to handle multi-column FKs
fkeys = []
def fkey_rec():
return {
"name": None,
"constrained_columns": [],
"referred_schema": None,
"referred_table": None,
"referred_columns": [],
}
fkeys = util.defaultdict(fkey_rec)
for r in connection.execute(s).fetchall():
scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r
rec = fkeys[rfknm]
rec["name"] = rfknm
if not rec["referred_table"]:
rec["referred_table"] = rtbl
if schema is not None or owner != rschema:
if dbname:
rschema = dbname + "." + rschema
rec["referred_schema"] = rschema
local_cols, remote_cols = (
rec["constrained_columns"],
rec["referred_columns"],
)
local_cols.append(scol)
remote_cols.append(rcol)
return list(fkeys.values())
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/firebird/__init__.py
|
# firebird/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy.dialects.firebird.base import BIGINT
from sqlalchemy.dialects.firebird.base import BLOB
from sqlalchemy.dialects.firebird.base import CHAR
from sqlalchemy.dialects.firebird.base import DATE
from sqlalchemy.dialects.firebird.base import FLOAT
from sqlalchemy.dialects.firebird.base import NUMERIC
from sqlalchemy.dialects.firebird.base import SMALLINT
from sqlalchemy.dialects.firebird.base import TEXT
from sqlalchemy.dialects.firebird.base import TIME
from sqlalchemy.dialects.firebird.base import TIMESTAMP
from sqlalchemy.dialects.firebird.base import VARCHAR
from . import base # noqa
from . import fdb # noqa
from . import kinterbasdb # noqa
base.dialect = dialect = fdb.dialect
__all__ = (
"SMALLINT",
"BIGINT",
"FLOAT",
"FLOAT",
"DATE",
"TIME",
"TEXT",
"NUMERIC",
"FLOAT",
"TIMESTAMP",
"VARCHAR",
"CHAR",
"BLOB",
"dialect",
)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/firebird/fdb.py
|
# firebird/fdb.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: firebird+fdb
:name: fdb
    :dbapi: fdb
:connectstring: firebird+fdb://user:password@host:port/path/to/db[?key=value&key=value...]
:url: http://pypi.python.org/pypi/fdb/
fdb is a kinterbasdb-compatible DBAPI for Firebird.
.. versionchanged:: 0.9 - The fdb dialect is now the default dialect
under the ``firebird://`` URL space, as ``fdb`` is now the official
Python driver for Firebird.
Arguments
----------
The ``fdb`` dialect is based on the
:mod:`sqlalchemy.dialects.firebird.kinterbasdb` dialect, but it does not
accept every argument that Kinterbasdb does.
* ``enable_rowcount`` - True by default, setting this to False disables
the usage of "cursor.rowcount" with the
Kinterbasdb dialect, which SQLAlchemy ordinarily calls upon automatically
after any UPDATE or DELETE statement. When disabled, SQLAlchemy's
ResultProxy will return -1 for result.rowcount. The rationale here is
that Kinterbasdb requires a second round trip to the database when
.rowcount is called - since SQLA's resultproxy automatically closes
the cursor after a non-result-returning statement, rowcount must be
called, if at all, before the result object is returned. Additionally,
cursor.rowcount may not return correct results with older versions
of Firebird, and setting this flag to False will also cause the
SQLAlchemy ORM to ignore its usage. The behavior can also be controlled on a
per-execution basis using the ``enable_rowcount`` option with
:meth:`_engine.Connection.execution_options`::
conn = engine.connect().execution_options(enable_rowcount=True)
r = conn.execute(stmt)
print(r.rowcount)
* ``retaining`` - False by default. Setting this to True will pass the
``retaining=True`` keyword argument to the ``.commit()`` and ``.rollback()``
methods of the DBAPI connection, which can improve performance in some
situations, but apparently with significant caveats.
Please read the fdb and/or kinterbasdb DBAPI documentation in order to
understand the implications of this flag.
.. versionchanged:: 0.9.0 - the ``retaining`` flag defaults to ``False``.
In 0.8 it defaulted to ``True``.
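For example, a minimal, hypothetical engine setup passing the
``retaining`` flag (the connection details are placeholders)::

    from sqlalchemy import create_engine

    engine = create_engine(
        "firebird+fdb://user:password@localhost/path/to/db",
        retaining=True,
    )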
.. seealso::
http://pythonhosted.org/fdb/usage-guide.html#retaining-transactions
- information on the "retaining" flag.
""" # noqa
from .kinterbasdb import FBDialect_kinterbasdb
from ... import util
class FBDialect_fdb(FBDialect_kinterbasdb):
def __init__(self, enable_rowcount=True, retaining=False, **kwargs):
super(FBDialect_fdb, self).__init__(
enable_rowcount=enable_rowcount, retaining=retaining, **kwargs
)
@classmethod
def dbapi(cls):
return __import__("fdb")
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
if opts.get("port"):
opts["host"] = "%s/%s" % (opts["host"], opts["port"])
del opts["port"]
opts.update(url.query)
util.coerce_kw_type(opts, "type_conv", int)
return ([], opts)
def _get_server_version_info(self, connection):
"""Get the version of the Firebird server used by a connection.
Returns a tuple of (`major`, `minor`, `build`), three integers
representing the version of the attached server.
"""
# This is the simpler approach (the other uses the services api),
# that for backward compatibility reasons returns a string like
# LI-V6.3.3.12981 Firebird 2.0
# where the first version is a fake one resembling the old
# Interbase signature.
isc_info_firebird_version = 103
fbconn = connection.connection
version = fbconn.db_info(isc_info_firebird_version)
return self._parse_version_info(version)
dialect = FBDialect_fdb
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/firebird/kinterbasdb.py
|
# firebird/kinterbasdb.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: firebird+kinterbasdb
:name: kinterbasdb
:dbapi: kinterbasdb
:connectstring: firebird+kinterbasdb://user:password@host:port/path/to/db[?key=value&key=value...]
:url: http://firebirdsql.org/index.php?op=devel&sub=python
Arguments
----------
The Kinterbasdb backend accepts the ``enable_rowcount`` and ``retaining``
arguments accepted by the :mod:`sqlalchemy.dialects.firebird.fdb` dialect.
In addition, it also accepts the following:
* ``type_conv`` - select the kind of mapping done on the types: by default
SQLAlchemy uses 200 with Unicode, datetime and decimal support. See
the linked documents below for further information.
* ``concurrency_level`` - set the backend policy with regards to threading
issues: by default SQLAlchemy uses policy 1. See the linked documents
below for further information.
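For example, both options can be passed directly to
:func:`~sqlalchemy.create_engine` (a sketch; the values shown are the
defaults, and the connection details are placeholders)::

    from sqlalchemy import create_engine

    engine = create_engine(
        "firebird+kinterbasdb://user:password@localhost/path/to/db",
        type_conv=200,
        concurrency_level=1,
    )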
.. seealso::
http://sourceforge.net/projects/kinterbasdb
http://kinterbasdb.sourceforge.net/dist_docs/usage.html#adv_param_conv_dynamic_type_translation
http://kinterbasdb.sourceforge.net/dist_docs/usage.html#special_issue_concurrency
""" # noqa
import decimal
from re import match
from .base import FBDialect
from .base import FBExecutionContext
from ... import types as sqltypes
from ... import util
class _kinterbasdb_numeric(object):
def bind_processor(self, dialect):
def process(value):
if isinstance(value, decimal.Decimal):
return str(value)
else:
return value
return process
class _FBNumeric_kinterbasdb(_kinterbasdb_numeric, sqltypes.Numeric):
pass
class _FBFloat_kinterbasdb(_kinterbasdb_numeric, sqltypes.Float):
pass
class FBExecutionContext_kinterbasdb(FBExecutionContext):
@property
def rowcount(self):
if self.execution_options.get(
"enable_rowcount", self.dialect.enable_rowcount
):
return self.cursor.rowcount
else:
return -1
class FBDialect_kinterbasdb(FBDialect):
driver = "kinterbasdb"
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
execution_ctx_cls = FBExecutionContext_kinterbasdb
supports_native_decimal = True
colspecs = util.update_copy(
FBDialect.colspecs,
{
sqltypes.Numeric: _FBNumeric_kinterbasdb,
sqltypes.Float: _FBFloat_kinterbasdb,
},
)
def __init__(
self,
type_conv=200,
concurrency_level=1,
enable_rowcount=True,
retaining=False,
**kwargs
):
super(FBDialect_kinterbasdb, self).__init__(**kwargs)
self.enable_rowcount = enable_rowcount
self.type_conv = type_conv
self.concurrency_level = concurrency_level
self.retaining = retaining
if enable_rowcount:
self.supports_sane_rowcount = True
@classmethod
def dbapi(cls):
return __import__("kinterbasdb")
def do_execute(self, cursor, statement, parameters, context=None):
# kinterbase does not accept a None, but wants an empty list
# when there are no arguments.
cursor.execute(statement, parameters or [])
def do_rollback(self, dbapi_connection):
dbapi_connection.rollback(self.retaining)
def do_commit(self, dbapi_connection):
dbapi_connection.commit(self.retaining)
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
if opts.get("port"):
opts["host"] = "%s/%s" % (opts["host"], opts["port"])
del opts["port"]
opts.update(url.query)
util.coerce_kw_type(opts, "type_conv", int)
type_conv = opts.pop("type_conv", self.type_conv)
concurrency_level = opts.pop(
"concurrency_level", self.concurrency_level
)
if self.dbapi is not None:
initialized = getattr(self.dbapi, "initialized", None)
if initialized is None:
# CVS rev 1.96 changed the name of the attribute:
# http://kinterbasdb.cvs.sourceforge.net/viewvc/kinterbasdb/
# Kinterbasdb-3.0/__init__.py?r1=1.95&r2=1.96
initialized = getattr(self.dbapi, "_initialized", False)
if not initialized:
self.dbapi.init(
type_conv=type_conv, concurrency_level=concurrency_level
)
return ([], opts)
def _get_server_version_info(self, connection):
"""Get the version of the Firebird server used by a connection.
Returns a tuple of (`major`, `minor`, `build`), three integers
representing the version of the attached server.
"""
# This is the simpler approach (the other uses the services api),
# that for backward compatibility reasons returns a string like
# LI-V6.3.3.12981 Firebird 2.0
# where the first version is a fake one resembling the old
# Interbase signature.
fbconn = connection.connection
version = fbconn.server_version
return self._parse_version_info(version)
def _parse_version_info(self, version):
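        # e.g. "LI-V6.3.3.12981 Firebird 2.0" -> (2, 0, 12981, "firebird")
        #      "LI-V6.3.3.12981"              -> (6, 3, 3, "interbase")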
m = match(
r"\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+)( \w+ (\d+)\.(\d+))?", version
)
if not m:
raise AssertionError(
"Could not determine version from string '%s'" % version
)
        if m.group(5) is not None:
return tuple([int(x) for x in m.group(6, 7, 4)] + ["firebird"])
else:
return tuple([int(x) for x in m.group(1, 2, 3)] + ["interbase"])
def is_disconnect(self, e, connection, cursor):
if isinstance(
e, (self.dbapi.OperationalError, self.dbapi.ProgrammingError)
):
msg = str(e)
return (
"Error writing data to the connection" in msg
or "Unable to complete network request to host" in msg
or "Invalid connection state" in msg
or "Invalid cursor state" in msg
or "connection shutdown" in msg
)
else:
return False
dialect = FBDialect_kinterbasdb
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/firebird/base.py
|
# firebird/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: firebird
:name: Firebird
.. note::
The Firebird dialect within SQLAlchemy **is not currently supported**.
It is not tested within continuous integration and is likely to have
many issues and caveats not currently handled. Consider using the
`external dialect <https://github.com/pauldex/sqlalchemy-firebird>`_
instead.
Firebird Dialects
-----------------
Firebird offers two distinct dialects_ (not to be confused with a
SQLAlchemy ``Dialect``):
dialect 1
This is the old syntax and behaviour, inherited from Interbase pre-6.0.
dialect 3
This is the newer and supported syntax, introduced in Interbase 6.0.
The SQLAlchemy Firebird dialect detects these versions and
adjusts its representation of SQL accordingly. However,
support for dialect 1 is not well tested and probably has
incompatibilities.
Locking Behavior
----------------
Firebird locks tables aggressively. For this reason, a DROP TABLE may
hang until other transactions are released. SQLAlchemy does its best
to release transactions as quickly as possible. The most common cause
of hanging transactions is a non-fully consumed result set, i.e.::
result = engine.execute("select * from table")
row = result.fetchone()
return
Where above, the ``ResultProxy`` has not been fully consumed. The
connection will be returned to the pool and the transactional state
rolled back once the Python garbage collector reclaims the objects
which hold onto the connection, which often occurs asynchronously.
The above use case can be alleviated by calling ``first()`` on the
``ResultProxy`` which will fetch the first row and immediately close
all remaining cursor/connection resources.
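For example::

    result = engine.execute("select * from table")
    row = result.first()  # fetches one row and releases the cursor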
RETURNING support
-----------------
Firebird 2.0 supports returning a result set from inserts, and 2.1
extends that to deletes and updates. This is generically exposed by
the SQLAlchemy ``returning()`` method, such as::
# INSERT..RETURNING
result = table.insert().returning(table.c.col1, table.c.col2).\
values(name='foo')
print(result.fetchall())
# UPDATE..RETURNING
raises = empl.update().returning(empl.c.id, empl.c.salary).\
where(empl.c.sales>100).\
values(dict(salary=empl.c.salary * 1.1))
print(raises.fetchall())
.. _dialects: http://mc-computing.com/Databases/Firebird/SQL_Dialect.html
"""
import datetime
from sqlalchemy import exc
from sqlalchemy import sql
from sqlalchemy import types as sqltypes
from sqlalchemy import util
from sqlalchemy.engine import default
from sqlalchemy.engine import reflection
from sqlalchemy.sql import compiler
from sqlalchemy.sql import expression
from sqlalchemy.types import BIGINT
from sqlalchemy.types import BLOB
from sqlalchemy.types import DATE
from sqlalchemy.types import FLOAT
from sqlalchemy.types import INTEGER
from sqlalchemy.types import Integer
from sqlalchemy.types import NUMERIC
from sqlalchemy.types import SMALLINT
from sqlalchemy.types import TEXT
from sqlalchemy.types import TIME
from sqlalchemy.types import TIMESTAMP
RESERVED_WORDS = set(
[
"active",
"add",
"admin",
"after",
"all",
"alter",
"and",
"any",
"as",
"asc",
"ascending",
"at",
"auto",
"avg",
"before",
"begin",
"between",
"bigint",
"bit_length",
"blob",
"both",
"by",
"case",
"cast",
"char",
"character",
"character_length",
"char_length",
"check",
"close",
"collate",
"column",
"commit",
"committed",
"computed",
"conditional",
"connect",
"constraint",
"containing",
"count",
"create",
"cross",
"cstring",
"current",
"current_connection",
"current_date",
"current_role",
"current_time",
"current_timestamp",
"current_transaction",
"current_user",
"cursor",
"database",
"date",
"day",
"dec",
"decimal",
"declare",
"default",
"delete",
"desc",
"descending",
"disconnect",
"distinct",
"do",
"domain",
"double",
"drop",
"else",
"end",
"entry_point",
"escape",
"exception",
"execute",
"exists",
"exit",
"external",
"extract",
"fetch",
"file",
"filter",
"float",
"for",
"foreign",
"from",
"full",
"function",
"gdscode",
"generator",
"gen_id",
"global",
"grant",
"group",
"having",
"hour",
"if",
"in",
"inactive",
"index",
"inner",
"input_type",
"insensitive",
"insert",
"int",
"integer",
"into",
"is",
"isolation",
"join",
"key",
"leading",
"left",
"length",
"level",
"like",
"long",
"lower",
"manual",
"max",
"maximum_segment",
"merge",
"min",
"minute",
"module_name",
"month",
"names",
"national",
"natural",
"nchar",
"no",
"not",
"null",
"numeric",
"octet_length",
"of",
"on",
"only",
"open",
"option",
"or",
"order",
"outer",
"output_type",
"overflow",
"page",
"pages",
"page_size",
"parameter",
"password",
"plan",
"position",
"post_event",
"precision",
"primary",
"privileges",
"procedure",
"protected",
"rdb$db_key",
"read",
"real",
"record_version",
"recreate",
"recursive",
"references",
"release",
"reserv",
"reserving",
"retain",
"returning_values",
"returns",
"revoke",
"right",
"rollback",
"rows",
"row_count",
"savepoint",
"schema",
"second",
"segment",
"select",
"sensitive",
"set",
"shadow",
"shared",
"singular",
"size",
"smallint",
"snapshot",
"some",
"sort",
"sqlcode",
"stability",
"start",
"starting",
"starts",
"statistics",
"sub_type",
"sum",
"suspend",
"table",
"then",
"time",
"timestamp",
"to",
"trailing",
"transaction",
"trigger",
"trim",
"uncommitted",
"union",
"unique",
"update",
"upper",
"user",
"using",
"value",
"values",
"varchar",
"variable",
"varying",
"view",
"wait",
"when",
"where",
"while",
"with",
"work",
"write",
"year",
]
)
class _StringType(sqltypes.String):
"""Base for Firebird string types."""
def __init__(self, charset=None, **kw):
self.charset = charset
super(_StringType, self).__init__(**kw)
class VARCHAR(_StringType, sqltypes.VARCHAR):
"""Firebird VARCHAR type"""
__visit_name__ = "VARCHAR"
def __init__(self, length=None, **kwargs):
super(VARCHAR, self).__init__(length=length, **kwargs)
class CHAR(_StringType, sqltypes.CHAR):
"""Firebird CHAR type"""
__visit_name__ = "CHAR"
def __init__(self, length=None, **kwargs):
super(CHAR, self).__init__(length=length, **kwargs)
class _FBDateTime(sqltypes.DateTime):
def bind_processor(self, dialect):
def process(value):
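            # exact-type check on purpose: datetime.datetime subclasses
            # datetime.date and must pass through unchanged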
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
colspecs = {sqltypes.DateTime: _FBDateTime}
ischema_names = {
"SHORT": SMALLINT,
"LONG": INTEGER,
"QUAD": FLOAT,
"FLOAT": FLOAT,
"DATE": DATE,
"TIME": TIME,
"TEXT": TEXT,
"INT64": BIGINT,
"DOUBLE": FLOAT,
"TIMESTAMP": TIMESTAMP,
"VARYING": VARCHAR,
"CSTRING": CHAR,
"BLOB": BLOB,
}
# TODO: date conversion types (should be implemented as _FBDateTime,
# _FBDate, etc. as bind/result functionality is required)
class FBTypeCompiler(compiler.GenericTypeCompiler):
def visit_boolean(self, type_, **kw):
return self.visit_SMALLINT(type_, **kw)
def visit_datetime(self, type_, **kw):
return self.visit_TIMESTAMP(type_, **kw)
def visit_TEXT(self, type_, **kw):
return "BLOB SUB_TYPE 1"
def visit_BLOB(self, type_, **kw):
return "BLOB SUB_TYPE 0"
def _extend_string(self, type_, basic):
charset = getattr(type_, "charset", None)
if charset is None:
return basic
else:
return "%s CHARACTER SET %s" % (basic, charset)
def visit_CHAR(self, type_, **kw):
basic = super(FBTypeCompiler, self).visit_CHAR(type_, **kw)
return self._extend_string(type_, basic)
def visit_VARCHAR(self, type_, **kw):
if not type_.length:
raise exc.CompileError(
"VARCHAR requires a length on dialect %s" % self.dialect.name
)
basic = super(FBTypeCompiler, self).visit_VARCHAR(type_, **kw)
return self._extend_string(type_, basic)
class FBCompiler(sql.compiler.SQLCompiler):
"""Firebird specific idiosyncrasies"""
ansi_bind_rules = True
    # def visit_contains_op_binary(self, binary, operator, **kw):
    #     can't use CONTAINING because it's case insensitive.
    # def visit_notcontains_op_binary(self, binary, operator, **kw):
    #     can't use NOT CONTAINING because it's case insensitive.
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_startswith_op_binary(self, binary, operator, **kw):
return "%s STARTING WITH %s" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
)
def visit_notstartswith_op_binary(self, binary, operator, **kw):
return "%s NOT STARTING WITH %s" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
)
def visit_mod_binary(self, binary, operator, **kw):
return "mod(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_alias(self, alias, asfrom=False, **kwargs):
if self.dialect._version_two:
return super(FBCompiler, self).visit_alias(
alias, asfrom=asfrom, **kwargs
)
else:
# Override to not use the AS keyword which FB 1.5 does not like
if asfrom:
alias_name = (
isinstance(alias.name, expression._truncated_label)
and self._truncated_identifier("alias", alias.name)
or alias.name
)
return (
self.process(alias.original, asfrom=asfrom, **kwargs)
+ " "
+ self.preparer.format_alias(alias, alias_name)
)
else:
return self.process(alias.original, **kwargs)
def visit_substring_func(self, func, **kw):
s = self.process(func.clauses.clauses[0])
start = self.process(func.clauses.clauses[1])
if len(func.clauses.clauses) > 2:
length = self.process(func.clauses.clauses[2])
return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
else:
return "SUBSTRING(%s FROM %s)" % (s, start)
def visit_length_func(self, function, **kw):
if self.dialect._version_two:
return "char_length" + self.function_argspec(function)
else:
return "strlen" + self.function_argspec(function)
visit_char_length_func = visit_length_func
def function_argspec(self, func, **kw):
# TODO: this probably will need to be
# narrowed to a fixed list, some no-arg functions
# may require parens - see similar example in the oracle
# dialect
if func.clauses is not None and len(func.clauses):
return self.process(func.clause_expr, **kw)
else:
return ""
def default_from(self):
return " FROM rdb$database"
def visit_sequence(self, seq, **kw):
return "gen_id(%s, 1)" % self.preparer.format_sequence(seq)
def get_select_precolumns(self, select, **kw):
"""Called when building a ``SELECT`` statement, position is just
before column list Firebird puts the limit and offset right
after the ``SELECT``...
"""
result = ""
if select._limit_clause is not None:
result += "FIRST %s " % self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
result += "SKIP %s " % self.process(select._offset_clause, **kw)
if select._distinct:
result += "DISTINCT "
return result
def limit_clause(self, select, **kw):
"""Already taken care of in the `get_select_precolumns` method."""
return ""
def returning_clause(self, stmt, returning_cols):
columns = [
self._label_select_column(None, c, True, False, {})
for c in expression._select_iterables(returning_cols)
]
return "RETURNING " + ", ".join(columns)
class FBDDLCompiler(sql.compiler.DDLCompiler):
"""Firebird syntactic idiosyncrasies"""
def visit_create_sequence(self, create):
"""Generate a ``CREATE GENERATOR`` statement for the sequence."""
# no syntax for these
# http://www.firebirdsql.org/manual/generatorguide-sqlsyntax.html
if create.element.start is not None:
raise NotImplementedError(
"Firebird SEQUENCE doesn't support START WITH"
)
if create.element.increment is not None:
raise NotImplementedError(
"Firebird SEQUENCE doesn't support INCREMENT BY"
)
if self.dialect._version_two:
return "CREATE SEQUENCE %s" % self.preparer.format_sequence(
create.element
)
else:
return "CREATE GENERATOR %s" % self.preparer.format_sequence(
create.element
)
def visit_drop_sequence(self, drop):
"""Generate a ``DROP GENERATOR`` statement for the sequence."""
if self.dialect._version_two:
return "DROP SEQUENCE %s" % self.preparer.format_sequence(
drop.element
)
else:
return "DROP GENERATOR %s" % self.preparer.format_sequence(
drop.element
)
def visit_computed_column(self, generated):
if generated.persisted is not None:
raise exc.CompileError(
"Firebird computed columns do not support a persistence "
"method setting; set the 'persisted' flag to None for "
"Firebird support."
)
return "GENERATED ALWAYS AS (%s)" % self.sql_compiler.process(
generated.sqltext, include_table=False, literal_binds=True
)
class FBIdentifierPreparer(sql.compiler.IdentifierPreparer):
"""Install Firebird specific reserved words."""
reserved_words = RESERVED_WORDS
illegal_initial_characters = compiler.ILLEGAL_INITIAL_CHARACTERS.union(
["_"]
)
def __init__(self, dialect):
super(FBIdentifierPreparer, self).__init__(dialect, omit_schema=True)
class FBExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq, type_):
"""Get the next value from the sequence using ``gen_id()``."""
return self._execute_scalar(
"SELECT gen_id(%s, 1) FROM rdb$database"
% self.dialect.identifier_preparer.format_sequence(seq),
type_,
)
class FBDialect(default.DefaultDialect):
"""Firebird dialect"""
name = "firebird"
max_identifier_length = 31
supports_sequences = True
sequences_optional = False
supports_default_values = True
postfetch_lastrowid = False
supports_native_boolean = False
requires_name_normalize = True
supports_empty_insert = False
statement_compiler = FBCompiler
ddl_compiler = FBDDLCompiler
preparer = FBIdentifierPreparer
type_compiler = FBTypeCompiler
execution_ctx_cls = FBExecutionContext
colspecs = colspecs
ischema_names = ischema_names
construct_arguments = []
# defaults to dialect ver. 3,
# will be autodetected off upon
# first connect
_version_two = True
def initialize(self, connection):
super(FBDialect, self).initialize(connection)
self._version_two = (
"firebird" in self.server_version_info
and self.server_version_info >= (2,)
) or (
"interbase" in self.server_version_info
and self.server_version_info >= (6,)
)
if not self._version_two:
# TODO: whatever other pre < 2.0 stuff goes here
self.ischema_names = ischema_names.copy()
self.ischema_names["TIMESTAMP"] = sqltypes.DATE
self.colspecs = {sqltypes.DateTime: sqltypes.DATE}
self.implicit_returning = self._version_two and self.__dict__.get(
"implicit_returning", True
)
def has_table(self, connection, table_name, schema=None):
"""Return ``True`` if the given table exists, ignoring
the `schema`."""
tblqry = """
SELECT 1 AS has_table FROM rdb$database
WHERE EXISTS (SELECT rdb$relation_name
FROM rdb$relations
WHERE rdb$relation_name=?)
"""
c = connection.execute(tblqry, [self.denormalize_name(table_name)])
return c.first() is not None
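# Illustrative sketch (added, hypothetical connection URL): this method
# backs the public table-existence check, e.g.:
#
#   engine = create_engine("firebird://user:pw@host/path/to/db")
#   engine.has_table("employees")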
def has_sequence(self, connection, sequence_name, schema=None):
"""Return ``True`` if the given sequence (generator) exists."""
genqry = """
SELECT 1 AS has_sequence FROM rdb$database
WHERE EXISTS (SELECT rdb$generator_name
FROM rdb$generators
WHERE rdb$generator_name=?)
"""
c = connection.execute(genqry, [self.denormalize_name(sequence_name)])
return c.first() is not None
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
# there are two queries commonly mentioned for this.
# this one, using view_blr, is at the Firebird FAQ among other places:
# http://www.firebirdfaq.org/faq174/
s = """
select rdb$relation_name
from rdb$relations
where rdb$view_blr is null
and (rdb$system_flag is null or rdb$system_flag = 0);
"""
# the other query is this one. It's not clear if there's really
# any difference between these two. This link:
# http://www.alberton.info/firebird_sql_meta_info.html#.Ur3vXfZGni8
# states them as interchangeable. Some discussion at [ticket:2898]
# SELECT DISTINCT rdb$relation_name
# FROM rdb$relation_fields
# WHERE rdb$system_flag=0 AND rdb$view_context IS NULL
return [self.normalize_name(row[0]) for row in connection.execute(s)]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
# see http://www.firebirdfaq.org/faq174/
s = """
select rdb$relation_name
from rdb$relations
where rdb$view_blr is not null
and (rdb$system_flag is null or rdb$system_flag = 0);
"""
return [self.normalize_name(row[0]) for row in connection.execute(s)]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
qry = """
SELECT rdb$view_source AS view_source
FROM rdb$relations
WHERE rdb$relation_name=?
"""
rp = connection.execute(qry, [self.denormalize_name(view_name)])
row = rp.first()
if row:
return row["view_source"]
else:
return None
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
# Query to extract the PK/FK constrained fields of the given table
keyqry = """
SELECT se.rdb$field_name AS fname
FROM rdb$relation_constraints rc
JOIN rdb$index_segments se ON rc.rdb$index_name=se.rdb$index_name
WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
"""
tablename = self.denormalize_name(table_name)
# get primary key fields
c = connection.execute(keyqry, ["PRIMARY KEY", tablename])
pkfields = [self.normalize_name(r["fname"]) for r in c.fetchall()]
return {"constrained_columns": pkfields, "name": None}
@reflection.cache
def get_column_sequence(
self, connection, table_name, column_name, schema=None, **kw
):
tablename = self.denormalize_name(table_name)
colname = self.denormalize_name(column_name)
# Heuristic query to determine the generator associated with a PK field
genqry = """
SELECT trigdep.rdb$depended_on_name AS fgenerator
FROM rdb$dependencies tabdep
JOIN rdb$dependencies trigdep
ON tabdep.rdb$dependent_name=trigdep.rdb$dependent_name
AND trigdep.rdb$depended_on_type=14
AND trigdep.rdb$dependent_type=2
JOIN rdb$triggers trig ON
trig.rdb$trigger_name=tabdep.rdb$dependent_name
WHERE tabdep.rdb$depended_on_name=?
AND tabdep.rdb$depended_on_type=0
AND trig.rdb$trigger_type=1
AND tabdep.rdb$field_name=?
AND (SELECT count(*)
FROM rdb$dependencies trigdep2
WHERE trigdep2.rdb$dependent_name = trigdep.rdb$dependent_name) = 2
"""
genr = connection.execute(genqry, [tablename, colname]).first()
if genr is not None:
return dict(name=self.normalize_name(genr["fgenerator"]))
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
# Query to extract the details of all the fields of the given table
tblqry = """
SELECT r.rdb$field_name AS fname,
r.rdb$null_flag AS null_flag,
t.rdb$type_name AS ftype,
f.rdb$field_sub_type AS stype,
f.rdb$field_length/
COALESCE(cs.rdb$bytes_per_character,1) AS flen,
f.rdb$field_precision AS fprec,
f.rdb$field_scale AS fscale,
COALESCE(r.rdb$default_source,
f.rdb$default_source) AS fdefault
FROM rdb$relation_fields r
JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
JOIN rdb$types t
ON t.rdb$type=f.rdb$field_type AND
t.rdb$field_name='RDB$FIELD_TYPE'
LEFT JOIN rdb$character_sets cs ON
f.rdb$character_set_id=cs.rdb$character_set_id
WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
ORDER BY r.rdb$field_position
"""
# get the PK, used to determine any associated sequence
pk_constraint = self.get_pk_constraint(connection, table_name)
pkey_cols = pk_constraint["constrained_columns"]
tablename = self.denormalize_name(table_name)
# get all of the fields for this table
c = connection.execute(tblqry, [tablename])
cols = []
while True:
row = c.fetchone()
if row is None:
break
name = self.normalize_name(row["fname"])
orig_colname = row["fname"]
# get the data type
colspec = row["ftype"].rstrip()
coltype = self.ischema_names.get(colspec)
if coltype is None:
util.warn(
"Did not recognize type '%s' of column '%s'"
% (colspec, name)
)
coltype = sqltypes.NULLTYPE
elif issubclass(coltype, Integer) and row["fprec"] != 0:
coltype = NUMERIC(
precision=row["fprec"], scale=row["fscale"] * -1
)
elif colspec in ("VARYING", "CSTRING"):
coltype = coltype(row["flen"])
elif colspec == "TEXT":
coltype = TEXT(row["flen"])
elif colspec == "BLOB":
if row["stype"] == 1:
coltype = TEXT()
else:
coltype = BLOB()
else:
coltype = coltype()
# does it have a default value?
defvalue = None
if row["fdefault"] is not None:
# the value comes down as "DEFAULT 'value'": there may be
# more than one whitespace around the "DEFAULT" keyword
# and it may also be lower case
# (see also http://tracker.firebirdsql.org/browse/CORE-356)
defexpr = row["fdefault"].lstrip()
assert defexpr[:8].rstrip().upper() == "DEFAULT", (
"Unrecognized default value: %s" % defexpr
)
defvalue = defexpr[8:].strip()
if defvalue == "NULL":
# Redundant
defvalue = None
col_d = {
"name": name,
"type": coltype,
"nullable": not bool(row["null_flag"]),
"default": defvalue,
"autoincrement": "auto",
}
if orig_colname.lower() == orig_colname:
col_d["quote"] = True
# if the PK is a single field, try to see if it's linked to
# a sequence through a trigger
if len(pkey_cols) == 1 and name == pkey_cols[0]:
seq_d = self.get_column_sequence(connection, tablename, name)
if seq_d is not None:
col_d["sequence"] = seq_d
cols.append(col_d)
return cols
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
# Query to extract the details of each UK/FK of the given table
fkqry = """
SELECT rc.rdb$constraint_name AS cname,
cse.rdb$field_name AS fname,
ix2.rdb$relation_name AS targetrname,
se.rdb$field_name AS targetfname
FROM rdb$relation_constraints rc
JOIN rdb$indices ix1 ON ix1.rdb$index_name=rc.rdb$index_name
JOIN rdb$indices ix2 ON ix2.rdb$index_name=ix1.rdb$foreign_key
JOIN rdb$index_segments cse ON
cse.rdb$index_name=ix1.rdb$index_name
JOIN rdb$index_segments se
ON se.rdb$index_name=ix2.rdb$index_name
AND se.rdb$field_position=cse.rdb$field_position
WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
ORDER BY se.rdb$index_name, se.rdb$field_position
"""
tablename = self.denormalize_name(table_name)
c = connection.execute(fkqry, ["FOREIGN KEY", tablename])
fks = util.defaultdict(
lambda: {
"name": None,
"constrained_columns": [],
"referred_schema": None,
"referred_table": None,
"referred_columns": [],
}
)
for row in c:
cname = self.normalize_name(row["cname"])
fk = fks[cname]
if not fk["name"]:
fk["name"] = cname
fk["referred_table"] = self.normalize_name(row["targetrname"])
fk["constrained_columns"].append(self.normalize_name(row["fname"]))
fk["referred_columns"].append(
self.normalize_name(row["targetfname"])
)
return list(fks.values())
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
qry = """
SELECT ix.rdb$index_name AS index_name,
ix.rdb$unique_flag AS unique_flag,
ic.rdb$field_name AS field_name
FROM rdb$indices ix
JOIN rdb$index_segments ic
ON ix.rdb$index_name=ic.rdb$index_name
LEFT OUTER JOIN rdb$relation_constraints
ON rdb$relation_constraints.rdb$index_name =
ic.rdb$index_name
WHERE ix.rdb$relation_name=? AND ix.rdb$foreign_key IS NULL
AND rdb$relation_constraints.rdb$constraint_type IS NULL
ORDER BY index_name, ic.rdb$field_position
"""
c = connection.execute(qry, [self.denormalize_name(table_name)])
indexes = util.defaultdict(dict)
for row in c:
indexrec = indexes[row["index_name"]]
if "name" not in indexrec:
indexrec["name"] = self.normalize_name(row["index_name"])
indexrec["column_names"] = []
indexrec["unique"] = bool(row["unique_flag"])
indexrec["column_names"].append(
self.normalize_name(row["field_name"])
)
return list(indexes.values())
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/sync.py
|
# orm/sync.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used for copying data
between instances based on join conditions.
"""
from . import attributes
from . import exc
from . import util as orm_util
from .. import util
def populate(
source,
source_mapper,
dest,
dest_mapper,
synchronize_pairs,
uowcommit,
flag_cascaded_pks,
):
source_dict = source.dict
dest_dict = dest.dict
for l, r in synchronize_pairs:
try:
# inline of source_mapper._get_state_attr_by_column
prop = source_mapper._columntoproperty[l]
value = source.manager[prop.key].impl.get(
source, source_dict, attributes.PASSIVE_OFF
)
except exc.UnmappedColumnError as err:
_raise_col_to_prop(False, source_mapper, l, dest_mapper, r, err)
try:
# inline of dest_mapper._set_state_attr_by_column
prop = dest_mapper._columntoproperty[r]
dest.manager[prop.key].impl.set(dest, dest_dict, value, None)
except exc.UnmappedColumnError as err:
_raise_col_to_prop(True, source_mapper, l, dest_mapper, r, err)
# technically the "r.primary_key" check isn't
# needed here, but we check for this condition to limit
# how often this logic is invoked for memory/performance
# reasons, since we only need this info for a primary key
# destination.
if (
flag_cascaded_pks
and l.primary_key
and r.primary_key
and r.references(l)
):
uowcommit.attributes[("pk_cascaded", dest, r)] = True
def bulk_populate_inherit_keys(source_dict, source_mapper, synchronize_pairs):
# a simplified version of populate() used by bulk insert mode
for l, r in synchronize_pairs:
try:
prop = source_mapper._columntoproperty[l]
value = source_dict[prop.key]
except exc.UnmappedColumnError as err:
_raise_col_to_prop(False, source_mapper, l, source_mapper, r, err)
try:
prop = source_mapper._columntoproperty[r]
source_dict[prop.key] = value
except exc.UnmappedColumnError:
_raise_col_to_prop(True, source_mapper, l, source_mapper, r)
def clear(dest, dest_mapper, synchronize_pairs):
for l, r in synchronize_pairs:
if (
r.primary_key
and dest_mapper._get_state_attr_by_column(dest, dest.dict, r)
not in orm_util._none_set
):
raise AssertionError(
"Dependency rule tried to blank-out primary key "
"column '%s' on instance '%s'" % (r, orm_util.state_str(dest))
)
try:
dest_mapper._set_state_attr_by_column(dest, dest.dict, r, None)
except exc.UnmappedColumnError as err:
_raise_col_to_prop(True, None, l, dest_mapper, r, err)
def update(source, source_mapper, dest, old_prefix, synchronize_pairs):
for l, r in synchronize_pairs:
try:
oldvalue = source_mapper._get_committed_attr_by_column(
source.obj(), l
)
value = source_mapper._get_state_attr_by_column(
source, source.dict, l, passive=attributes.PASSIVE_OFF
)
except exc.UnmappedColumnError as err:
_raise_col_to_prop(False, source_mapper, l, None, r, err)
dest[r.key] = value
dest[old_prefix + r.key] = oldvalue
def populate_dict(source, source_mapper, dict_, synchronize_pairs):
for l, r in synchronize_pairs:
try:
value = source_mapper._get_state_attr_by_column(
source, source.dict, l, passive=attributes.PASSIVE_OFF
)
except exc.UnmappedColumnError as err:
_raise_col_to_prop(False, source_mapper, l, None, r, err)
dict_[r.key] = value
def source_modified(uowcommit, source, source_mapper, synchronize_pairs):
"""return true if the source object has changes from an old to a
new value on the given synchronize pairs
"""
for l, r in synchronize_pairs:
try:
prop = source_mapper._columntoproperty[l]
except exc.UnmappedColumnError as err:
_raise_col_to_prop(False, source_mapper, l, None, r, err)
history = uowcommit.get_attribute_history(
source, prop.key, attributes.PASSIVE_NO_INITIALIZE
)
if bool(history.deleted):
return True
else:
return False
def _raise_col_to_prop(
isdest, source_mapper, source_column, dest_mapper, dest_column, err
):
if isdest:
util.raise_(
exc.UnmappedColumnError(
"Can't execute sync rule for "
"destination column '%s'; mapper '%s' does not map "
"this column. Try using an explicit `foreign_keys` "
"collection which does not include this column (or use "
"a viewonly=True relation)." % (dest_column, dest_mapper)
),
replace_context=err,
)
else:
util.raise_(
exc.UnmappedColumnError(
"Can't execute sync rule for "
"source column '%s'; mapper '%s' does not map this "
"column. Try using an explicit `foreign_keys` "
"collection which does not include destination column "
"'%s' (or use a viewonly=True relation)."
% (source_column, source_mapper, dest_column)
),
replace_context=err,
)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/interfaces.py
|
# orm/interfaces.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Contains various base classes used throughout the ORM.
Defines some key base classes prominent within the internals,
as well as the now-deprecated ORM extension classes.
Other than the deprecated extensions, this module and the
classes within are mostly private, though some attributes
are exposed when inspecting mappings.
"""
from __future__ import absolute_import
import collections
from . import exc as orm_exc
from . import path_registry
from .base import _MappedAttribute # noqa
from .base import EXT_CONTINUE
from .base import EXT_SKIP
from .base import EXT_STOP
from .base import InspectionAttr # noqa
from .base import InspectionAttrInfo # noqa
from .base import MANYTOMANY
from .base import MANYTOONE
from .base import NOT_EXTENSION
from .base import ONETOMANY
from .. import inspect
from .. import util
from ..sql import operators
# imported later
MapperExtension = SessionExtension = AttributeExtension = None
__all__ = (
"AttributeExtension",
"EXT_CONTINUE",
"EXT_STOP",
"EXT_SKIP",
"ONETOMANY",
"MANYTOMANY",
"MANYTOONE",
"NOT_EXTENSION",
"LoaderStrategy",
"MapperExtension",
"MapperOption",
"MapperProperty",
"PropComparator",
"SessionExtension",
"StrategizedProperty",
)
class MapperProperty(_MappedAttribute, InspectionAttr, util.MemoizedSlots):
"""Represent a particular class attribute mapped by :class:`_orm.Mapper`.
The most common occurrences of :class:`.MapperProperty` are the
mapped :class:`_schema.Column`, which is represented in a mapping as
an instance of :class:`.ColumnProperty`,
and a reference to another class produced by :func:`_orm.relationship`,
represented in the mapping as an instance of
:class:`.RelationshipProperty`.
"""
__slots__ = (
"_configure_started",
"_configure_finished",
"parent",
"key",
"info",
)
cascade = frozenset()
"""The set of 'cascade' attribute names.
This collection is checked before the 'cascade_iterator' method is called.
The collection typically only applies to a RelationshipProperty.
"""
is_property = True
"""Part of the InspectionAttr interface; states this object is a
mapper property.
"""
def _memoized_attr_info(self):
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.InspectionAttr`.
The dictionary is generated when first accessed. Alternatively,
it can be specified as a constructor argument to the
:func:`.column_property`, :func:`_orm.relationship`, or
:func:`.composite`
functions.
.. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also
available on extension types via the
:attr:`.InspectionAttrInfo.info` attribute, so that it can apply
to a wider variety of ORM and extension constructs.
.. seealso::
:attr:`.QueryableAttribute.info`
:attr:`.SchemaItem.info`
"""
return {}
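# Illustrative sketch (added, hypothetical "name_column"): supplying the
# info dictionary at configuration time instead of mutating it later:
#
#   from sqlalchemy.orm import column_property
#   name = column_property(name_column, info={"sensitive": True})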
def setup(self, context, query_entity, path, adapter, **kwargs):
"""Called by Query for the purposes of constructing a SQL statement.
Each MapperProperty associated with the target mapper processes the
statement referenced by the query context, adding columns and/or
criterion as appropriate.
"""
def create_row_processor(
self, context, path, mapper, result, adapter, populators
):
"""Produce row processing functions and append to the given
set of populators lists.
"""
def cascade_iterator(
self, type_, state, visited_instances=None, halt_on=None
):
"""Iterate through instances related to the given instance for
a particular 'cascade', starting with this MapperProperty.
Return an iterator of 3-tuples (instance, mapper, state).
Note that the 'cascade' collection on this MapperProperty is
checked first for the given type before cascade_iterator is called.
This method typically only applies to RelationshipProperty.
"""
return iter(())
def set_parent(self, parent, init):
"""Set the parent mapper that references this MapperProperty.
This method is overridden by some subclasses to perform extra
setup when the mapper is first known.
"""
self.parent = parent
def instrument_class(self, mapper):
"""Hook called by the Mapper to the property to initiate
instrumentation of the class attribute managed by this
MapperProperty.
The MapperProperty here will typically call out to the
attributes module to set up an InstrumentedAttribute.
This step is the first of two steps to set up an InstrumentedAttribute,
and is called early in the mapper setup process.
The second step is typically the init_class_attribute step,
called from StrategizedProperty via the post_instrument_class()
hook. This step assigns additional state to the InstrumentedAttribute
(specifically the "impl") which has been determined after the
MapperProperty has determined what kind of persistence
management it needs to do (e.g. scalar, object, collection, etc).
"""
def __init__(self):
self._configure_started = False
self._configure_finished = False
def init(self):
"""Called after all mappers are created to assemble
relationships between mappers and perform other post-mapper-creation
initialization steps.
"""
self._configure_started = True
self.do_init()
self._configure_finished = True
@property
def class_attribute(self):
"""Return the class-bound descriptor corresponding to this
:class:`.MapperProperty`.
This is basically a ``getattr()`` call::
return getattr(self.parent.class_, self.key)
I.e. if this :class:`.MapperProperty` were named ``addresses``,
and the class to which it is mapped is ``User``, this sequence
is possible::
>>> from sqlalchemy import inspect
>>> mapper = inspect(User)
>>> addresses_property = mapper.attrs.addresses
>>> addresses_property.class_attribute is User.addresses
True
>>> User.addresses.property is addresses_property
True
"""
return getattr(self.parent.class_, self.key)
def do_init(self):
"""Perform subclass-specific initialization post-mapper-creation
steps.
This is a template method called by the ``MapperProperty``
object's init() method.
"""
def post_instrument_class(self, mapper):
"""Perform instrumentation adjustments that need to occur
after init() has completed.
The given Mapper is the Mapper invoking the operation, which
may not be the same Mapper as self.parent in an inheritance
scenario; however, Mapper will always at least be a sub-mapper of
self.parent.
This method is typically used by StrategizedProperty, which delegates
it to LoaderStrategy.init_class_attribute() to perform final setup
on the class-bound InstrumentedAttribute.
"""
def merge(
self,
session,
source_state,
source_dict,
dest_state,
dest_dict,
load,
_recursive,
_resolve_conflict_map,
):
"""Merge the attribute represented by this ``MapperProperty``
from source to destination object.
"""
def __repr__(self):
return "<%s at 0x%x; %s>" % (
self.__class__.__name__,
id(self),
getattr(self, "key", "no key"),
)
class PropComparator(operators.ColumnOperators):
r"""Defines SQL operators for :class:`.MapperProperty` objects.
SQLAlchemy allows for operators to
be redefined at both the Core and ORM level. :class:`.PropComparator`
is the base class of operator redefinition for ORM-level operations,
including those of :class:`.ColumnProperty`,
:class:`.RelationshipProperty`, and :class:`.CompositeProperty`.
.. note:: With the advent of Hybrid properties introduced in SQLAlchemy
0.7, as well as Core-level operator redefinition in
SQLAlchemy 0.8, the use case for user-defined :class:`.PropComparator`
instances is extremely rare. See :ref:`hybrids_toplevel` as well
as :ref:`types_operators`.
User-defined subclasses of :class:`.PropComparator` may be created. The
built-in Python comparison and math operator methods, such as
:meth:`.operators.ColumnOperators.__eq__`,
:meth:`.operators.ColumnOperators.__lt__`, and
:meth:`.operators.ColumnOperators.__add__`, can be overridden to provide
new operator behavior. The custom :class:`.PropComparator` is passed to
the :class:`.MapperProperty` instance via the ``comparator_factory``
argument. In each case,
the appropriate subclass of :class:`.PropComparator` should be used::
# definition of custom PropComparator subclasses
from sqlalchemy.orm.properties import \
ColumnProperty,\
CompositeProperty,\
RelationshipProperty
class MyColumnComparator(ColumnProperty.Comparator):
def __eq__(self, other):
return self.__clause_element__() == other
class MyRelationshipComparator(RelationshipProperty.Comparator):
def any(self, expression):
"define the 'any' operation"
# ...
class MyCompositeComparator(CompositeProperty.Comparator):
def __gt__(self, other):
"redefine the 'greater than' operation"
return sql.and_(*[a>b for a, b in
zip(self.__clause_element__().clauses,
other.__composite_values__())])
# application of custom PropComparator subclasses
from sqlalchemy.orm import column_property, relationship, composite
from sqlalchemy import Column, String
class SomeMappedClass(Base):
some_column = column_property(Column("some_column", String),
comparator_factory=MyColumnComparator)
some_relationship = relationship(SomeOtherClass,
comparator_factory=MyRelationshipComparator)
some_composite = composite(
Column("a", String), Column("b", String),
comparator_factory=MyCompositeComparator
)
Note that for column-level operator redefinition, it's usually
simpler to define the operators at the Core level, using the
:attr:`.TypeEngine.comparator_factory` attribute. See
:ref:`types_operators` for more detail.
.. seealso::
:class:`.ColumnProperty.Comparator`
:class:`.RelationshipProperty.Comparator`
:class:`.CompositeProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
__slots__ = "prop", "property", "_parententity", "_adapt_to_entity"
def __init__(self, prop, parentmapper, adapt_to_entity=None):
self.prop = self.property = prop
self._parententity = adapt_to_entity or parentmapper
self._adapt_to_entity = adapt_to_entity
def __clause_element__(self):
raise NotImplementedError("%r" % self)
def _query_clause_element(self):
return self.__clause_element__()
def _bulk_update_tuples(self, value):
return [(self.__clause_element__(), value)]
def adapt_to_entity(self, adapt_to_entity):
"""Return a copy of this PropComparator which will use the given
:class:`.AliasedInsp` to produce corresponding expressions.
"""
return self.__class__(self.prop, self._parententity, adapt_to_entity)
@property
def _parentmapper(self):
"""legacy; this is renamed to _parententity to be
compatible with QueryableAttribute."""
return inspect(self._parententity).mapper
@property
def adapter(self):
"""Produce a callable that adapts column expressions
to suit an aliased version of this comparator.
"""
if self._adapt_to_entity is None:
return None
else:
return self._adapt_to_entity._adapt_element
@property
def info(self):
return self.property.info
@staticmethod
def any_op(a, b, **kwargs):
return a.any(b, **kwargs)
@staticmethod
def has_op(a, b, **kwargs):
return a.has(b, **kwargs)
@staticmethod
def of_type_op(a, class_):
return a.of_type(class_)
def of_type(self, class_):
r"""Redefine this object in terms of a polymorphic subclass,
:func:`.with_polymorphic` construct, or :func:`.aliased` construct.
Returns a new PropComparator from which further criterion can be
evaluated.
e.g.::
query.join(Company.employees.of_type(Engineer)).\
filter(Engineer.name=='foo')
:param \class_: a class or mapper indicating that criterion will be
against this specific subclass.
.. seealso::
:ref:`inheritance_of_type`
"""
return self.operate(PropComparator.of_type_op, class_)
def any(self, criterion=None, **kwargs):
r"""Return true if this collection contains any member that meets the
given criterion.
The usual implementation of ``any()`` is
:meth:`.RelationshipProperty.Comparator.any`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.any_op, criterion, **kwargs)
def has(self, criterion=None, **kwargs):
r"""Return true if this element references a member which meets the
given criterion.
The usual implementation of ``has()`` is
:meth:`.RelationshipProperty.Comparator.has`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.has_op, criterion, **kwargs)
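# Illustrative sketch (added, hypothetical User/Address mapping): the
# any()/has() protocols above are what make expressions like these work
# at query time:
#
#   session.query(User).filter(User.addresses.any(Address.email == "x"))
#   session.query(Address).filter(Address.user.has(name="ed"))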
class StrategizedProperty(MapperProperty):
"""A MapperProperty which uses selectable strategies to affect
loading behavior.
There is a single strategy selected by default. Alternate
strategies can be selected at Query time through the usage of
``StrategizedOption`` objects via the Query.options() method.
The mechanics of StrategizedProperty are used for every Query
invocation for every mapped attribute participating in that Query,
to determine first how the attribute will be rendered in SQL
and secondly how the attribute will retrieve a value from a result
row and apply it to a mapped object. The routines here are very
performance-critical.
"""
__slots__ = (
"_strategies",
"strategy",
"_wildcard_token",
"_default_path_loader_key",
)
strategy_wildcard_key = None
def _memoized_attr__wildcard_token(self):
return (
"%s:%s"
% (self.strategy_wildcard_key, path_registry._WILDCARD_TOKEN),
)
def _memoized_attr__default_path_loader_key(self):
return (
"loader",
(
"%s:%s"
% (self.strategy_wildcard_key, path_registry._DEFAULT_TOKEN),
),
)
def _get_context_loader(self, context, path):
load = None
search_path = path[self]
# search among: exact match, "attr.*", "default" strategy
# if any.
for path_key in (
search_path._loader_key,
search_path._wildcard_path_loader_key,
search_path._default_path_loader_key,
):
if path_key in context.attributes:
load = context.attributes[path_key]
break
return load
def _get_strategy(self, key):
try:
return self._strategies[key]
except KeyError:
pass
# run outside to prevent transfer of exception context
cls = self._strategy_lookup(self, *key)
self._strategies[key] = self._strategies[cls] = strategy = cls(
self, key
)
return strategy
def setup(self, context, query_entity, path, adapter, **kwargs):
loader = self._get_context_loader(context, path)
if loader and loader.strategy:
strat = self._get_strategy(loader.strategy)
else:
strat = self.strategy
strat.setup_query(
context, query_entity, path, loader, adapter, **kwargs
)
def create_row_processor(
self, context, path, mapper, result, adapter, populators
):
loader = self._get_context_loader(context, path)
if loader and loader.strategy:
strat = self._get_strategy(loader.strategy)
else:
strat = self.strategy
strat.create_row_processor(
context, path, loader, mapper, result, adapter, populators
)
def do_init(self):
self._strategies = {}
self.strategy = self._get_strategy(self.strategy_key)
def post_instrument_class(self, mapper):
if (
not self.parent.non_primary
and not mapper.class_manager._attr_has_impl(self.key)
):
self.strategy.init_class_attribute(mapper)
_all_strategies = collections.defaultdict(dict)
@classmethod
def strategy_for(cls, **kw):
def decorate(dec_cls):
# ensure each subclass of the strategy has its
# own _strategy_keys collection
if "_strategy_keys" not in dec_cls.__dict__:
dec_cls._strategy_keys = []
key = tuple(sorted(kw.items()))
cls._all_strategies[cls][key] = dec_cls
dec_cls._strategy_keys.append(key)
return dec_cls
return decorate
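# Illustrative sketch (added, simplified from sqlalchemy.orm.strategies):
# this decorator is how the concrete loader strategies register
# themselves against a property class:
#
#   @ColumnProperty.strategy_for(deferred=True, instrument=True)
#   class DeferredColumnLoader(LoaderStrategy):
#       ...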
@classmethod
def _strategy_lookup(cls, requesting_property, *key):
for prop_cls in cls.__mro__:
if prop_cls in cls._all_strategies:
strategies = cls._all_strategies[prop_cls]
try:
return strategies[key]
except KeyError:
pass
for property_type, strats in cls._all_strategies.items():
if key in strats:
intended_property_type = property_type
actual_strategy = strats[key]
break
else:
intended_property_type = None
actual_strategy = None
raise orm_exc.LoaderStrategyException(
cls,
requesting_property,
intended_property_type,
actual_strategy,
key,
)
class MapperOption(object):
"""Describe a modification to a Query."""
propagate_to_loaders = False
"""if True, indicate this option should be carried along
to "secondary" Query objects produced during lazy loads
or refresh operations.
"""
def process_query(self, query):
"""Apply a modification to the given :class:`_query.Query`."""
def process_query_conditionally(self, query):
"""same as process_query(), except that this option may not
apply to the given query.
This is typically used during a lazy load or scalar refresh
operation to propagate options stated in the original Query to the
new Query being used for the load. It occurs for those options that
specify propagate_to_loaders=True.
"""
self.process_query(query)
def _generate_cache_key(self, path):
"""Used by the "baked lazy loader" to see if this option can be cached.
The "baked lazy loader" refers to the :class:`_query.Query` that is
produced during a lazy load operation for a mapped relationship.
It does not yet apply to the "lazy" load operation for deferred
or expired column attributes, however this may change in the future.
This loader generates SQL for a query only once and attempts to cache
it; from that point on, if the SQL has been cached it will no longer
run the :meth:`_query.Query.options` method of the
:class:`_query.Query`. The
:class:`.MapperOption` object that wishes to participate within a lazy
load operation therefore needs to tell the baked loader that it either
needs to forego this caching, or that it needs to include the state of
the :class:`.MapperOption` itself as part of its cache key, otherwise
SQL or other query state that has been affected by the
:class:`.MapperOption` may be cached in place of a query that does not
include these modifications, or the option may not be invoked at all.
By default, this method returns the value ``False``, which means
the :class:`.BakedQuery` generated by the lazy loader will
not cache the SQL when this :class:`.MapperOption` is present.
This is the safest option and ensures both that the option is
invoked every time, and also that the cache isn't filled up with
an unlimited number of :class:`_query.Query` objects for an unlimited
number of :class:`.MapperOption` objects.
.. versionchanged:: 1.2.8 the default return value of
:meth:`.MapperOption._generate_cache_key` is False; previously it
was ``None`` indicating "safe to cache, don't include as part of
the cache key"
To enable caching of :class:`_query.Query` objects within lazy
loaders, a given :class:`.MapperOption` that returns a cache key
must return a key
that uniquely identifies the complete state of this option, which will
match any other :class:`.MapperOption` that itself retains the
identical state. This includes path options, flags, etc. It should
be a state that is repeatable and part of a limited set of possible
options.
If the :class:`.MapperOption` does not apply to the given path and
would not affect query results on such a path, it should return None,
indicating the :class:`_query.Query` is safe to cache for this given
loader path and that this :class:`.MapperOption` need not be
part of the cache key.
"""
return False
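# Illustrative sketch (added, hypothetical): a minimal custom option;
# inheriting the default _generate_cache_key() of False means the baked
# lazy loader will not cache queries carrying it:
#
#   class AuditOption(MapperOption):
#       propagate_to_loaders = True
#
#       def process_query(self, query):
#           query._attributes[("audited", None)] = True  # internal dict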
class LoaderStrategy(object):
"""Describe the loading behavior of a StrategizedProperty object.
The ``LoaderStrategy`` interacts with the querying process in three
ways:
* it controls the configuration of the ``InstrumentedAttribute``
placed on a class to handle the behavior of the attribute. This
may involve setting up class-level callable functions to fire
off a select operation when the attribute is first accessed
(i.e. a lazy load)
* it processes the ``QueryContext`` at statement construction time,
where it can modify the SQL statement that is being produced.
For example, simple column attributes will add their represented
column to the list of selected columns, a joined eager loader
may establish join clauses to add to the statement.
* It produces "row processor" functions at result fetching time.
These "row processor" functions populate a particular attribute
on a particular mapped instance.
"""
__slots__ = (
"parent_property",
"is_class_level",
"parent",
"key",
"strategy_key",
"strategy_opts",
)
def __init__(self, parent, strategy_key):
self.parent_property = parent
self.is_class_level = False
self.parent = self.parent_property.parent
self.key = self.parent_property.key
self.strategy_key = strategy_key
self.strategy_opts = dict(strategy_key)
def init_class_attribute(self, mapper):
pass
def setup_query(
self, context, query_entity, path, loadopt, adapter, **kwargs
):
"""Establish column and other state for a given QueryContext.
This method fulfills the contract specified by MapperProperty.setup().
StrategizedProperty delegates its setup() method
directly to this method.
"""
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
"""Establish row processing functions for a given QueryContext.
This method fulfills the contract specified by
MapperProperty.create_row_processor().
StrategizedProperty delegates its create_row_processor() method
directly to this method.
"""
def __str__(self):
return str(self.parent_property)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/instrumentation.py
|
# orm/instrumentation.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines SQLAlchemy's system of class instrumentation.
This module is usually not directly visible to user applications, but
defines a large part of the ORM's interactivity.
instrumentation.py deals with registration of end-user classes
for state tracking. It interacts closely with state.py
and attributes.py which establish per-instance and per-class-attribute
instrumentation, respectively.
The class instrumentation system can be customized on a per-class
or global basis using the :mod:`sqlalchemy.ext.instrumentation`
module, which provides the means to build and specify
alternate instrumentation forms.
.. versionchanged:: 0.8
The instrumentation extension system was moved out of the
ORM and into the external :mod:`sqlalchemy.ext.instrumentation`
package. When that package is imported, it installs
itself within sqlalchemy.orm so that its more comprehensive
resolution mechanics take effect.
"""
from . import base
from . import collections
from . import exc
from . import interfaces
from . import state
from .. import util
_memoized_key_collection = util.group_expirable_memoized_property()
class ClassManager(dict):
"""tracks state information at the class level."""
MANAGER_ATTR = base.DEFAULT_MANAGER_ATTR
STATE_ATTR = base.DEFAULT_STATE_ATTR
_state_setter = staticmethod(util.attrsetter(STATE_ATTR))
deferred_scalar_loader = None
original_init = object.__init__
factory = None
def __init__(self, class_):
self.class_ = class_
self.info = {}
self.new_init = None
self.local_attrs = {}
self.originals = {}
self._bases = [
mgr
for mgr in [
manager_of_class(base)
for base in self.class_.__bases__
if isinstance(base, type)
]
if mgr is not None
]
for base_ in self._bases:
self.update(base_)
self.dispatch._events._new_classmanager_instance(class_, self)
# events._InstanceEventsHold.populate(class_, self)
for basecls in class_.__mro__:
mgr = manager_of_class(basecls)
if mgr is not None:
self.dispatch._update(mgr.dispatch)
self.manage()
self._instrument_init()
if "__del__" in class_.__dict__:
util.warn(
"__del__() method on class %s will "
"cause unreachable cycles and memory leaks, "
"as SQLAlchemy instrumentation often creates "
"reference cycles. Please remove this method." % class_
)
def __hash__(self):
return id(self)
def __eq__(self, other):
return other is self
@property
def is_mapped(self):
return "mapper" in self.__dict__
@_memoized_key_collection
def _all_key_set(self):
return frozenset(self)
@_memoized_key_collection
def _collection_impl_keys(self):
return frozenset(
[attr.key for attr in self.values() if attr.impl.collection]
)
@_memoized_key_collection
def _scalar_loader_impls(self):
return frozenset(
[
attr.impl
for attr in self.values()
if attr.impl.accepts_scalar_loader
]
)
@util.memoized_property
def mapper(self):
# raises unless self.mapper has been assigned
raise exc.UnmappedClassError(self.class_)
def _all_sqla_attributes(self, exclude=None):
"""return an iterator of all classbound attributes that are
implement :class:`.InspectionAttr`.
This includes :class:`.QueryableAttribute` as well as extension
types such as :class:`.hybrid_property` and
:class:`.AssociationProxy`.
"""
if exclude is None:
exclude = set()
for supercls in self.class_.__mro__:
for key in set(supercls.__dict__).difference(exclude):
exclude.add(key)
val = supercls.__dict__[key]
if (
isinstance(val, interfaces.InspectionAttr)
and val.is_attribute
):
yield key, val
def _get_class_attr_mro(self, key, default=None):
"""return an attribute on the class without tripping it."""
for supercls in self.class_.__mro__:
if key in supercls.__dict__:
return supercls.__dict__[key]
else:
return default
def _attr_has_impl(self, key):
"""Return True if the given attribute is fully initialized.
i.e. has an impl.
"""
return key in self and self[key].impl is not None
def _subclass_manager(self, cls):
"""Create a new ClassManager for a subclass of this ClassManager's
class.
This is called automatically when attributes are instrumented so that
the attributes can be propagated to subclasses against their own
class-local manager, without the need for mappers etc. to have already
pre-configured managers for the full class hierarchy. Mappers
can post-configure the auto-generated ClassManager when needed.
"""
manager = manager_of_class(cls)
if manager is None:
manager = _instrumentation_factory.create_manager_for_cls(cls)
return manager
def _instrument_init(self):
# TODO: self.class_.__init__ is often the already-instrumented
# __init__ from an instrumented superclass. We still need to make
# our own wrapper, but it would
# be nice to wrap the original __init__ and not our existing wrapper
# of such, since this adds method overhead.
self.original_init = self.class_.__init__
self.new_init = _generate_init(self.class_, self)
self.install_member("__init__", self.new_init)
def _uninstrument_init(self):
if self.new_init:
self.uninstall_member("__init__")
self.new_init = None
@util.memoized_property
def _state_constructor(self):
self.dispatch.first_init(self, self.class_)
return state.InstanceState
def manage(self):
"""Mark this instance as the manager for its class."""
setattr(self.class_, self.MANAGER_ATTR, self)
def dispose(self):
"""Dissasociate this manager from its class."""
delattr(self.class_, self.MANAGER_ATTR)
@util.hybridmethod
def manager_getter(self):
return _default_manager_getter
@util.hybridmethod
def state_getter(self):
"""Return a (instance) -> InstanceState callable.
"state getter" callables should raise either KeyError or
AttributeError if no InstanceState could be found for the
instance.
"""
return _default_state_getter
@util.hybridmethod
def dict_getter(self):
return _default_dict_getter
def instrument_attribute(self, key, inst, propagated=False):
if propagated:
if key in self.local_attrs:
return # don't override local attr with inherited attr
else:
self.local_attrs[key] = inst
self.install_descriptor(key, inst)
_memoized_key_collection.expire_instance(self)
self[key] = inst
for cls in self.class_.__subclasses__():
manager = self._subclass_manager(cls)
manager.instrument_attribute(key, inst, True)
def subclass_managers(self, recursive):
for cls in self.class_.__subclasses__():
mgr = manager_of_class(cls)
if mgr is not None and mgr is not self:
yield mgr
if recursive:
for m in mgr.subclass_managers(True):
yield m
def post_configure_attribute(self, key):
_instrumentation_factory.dispatch.attribute_instrument(
self.class_, key, self[key]
)
def uninstrument_attribute(self, key, propagated=False):
if key not in self:
return
if propagated:
if key in self.local_attrs:
return # don't get rid of local attr
else:
del self.local_attrs[key]
self.uninstall_descriptor(key)
_memoized_key_collection.expire_instance(self)
del self[key]
for cls in self.class_.__subclasses__():
manager = manager_of_class(cls)
if manager:
manager.uninstrument_attribute(key, True)
def unregister(self):
"""remove all instrumentation established by this ClassManager."""
self._uninstrument_init()
self.mapper = self.dispatch = None
self.info.clear()
for key in list(self):
if key in self.local_attrs:
self.uninstrument_attribute(key)
def install_descriptor(self, key, inst):
if key in (self.STATE_ATTR, self.MANAGER_ATTR):
raise KeyError(
"%r: requested attribute name conflicts with "
"instrumentation attribute of the same name." % key
)
setattr(self.class_, key, inst)
def uninstall_descriptor(self, key):
delattr(self.class_, key)
def install_member(self, key, implementation):
if key in (self.STATE_ATTR, self.MANAGER_ATTR):
raise KeyError(
"%r: requested attribute name conflicts with "
"instrumentation attribute of the same name." % key
)
self.originals.setdefault(key, getattr(self.class_, key, None))
setattr(self.class_, key, implementation)
def uninstall_member(self, key):
original = self.originals.pop(key, None)
if original is not None:
setattr(self.class_, key, original)
def instrument_collection_class(self, key, collection_class):
return collections.prepare_instrumentation(collection_class)
def initialize_collection(self, key, state, factory):
user_data = factory()
adapter = collections.CollectionAdapter(
self.get_impl(key), state, user_data
)
return adapter, user_data
def is_instrumented(self, key, search=False):
if search:
return key in self
else:
return key in self.local_attrs
def get_impl(self, key):
return self[key].impl
@property
def attributes(self):
return iter(self.values())
# InstanceState management
def new_instance(self, state=None):
instance = self.class_.__new__(self.class_)
if state is None:
state = self._state_constructor(instance, self)
self._state_setter(instance, state)
return instance
def setup_instance(self, instance, state=None):
if state is None:
state = self._state_constructor(instance, self)
self._state_setter(instance, state)
def teardown_instance(self, instance):
delattr(instance, self.STATE_ATTR)
def _serialize(self, state, state_dict):
return _SerializeManager(state, state_dict)
def _new_state_if_none(self, instance):
"""Install a default InstanceState if none is present.
A private convenience method used by the __init__ decorator.
"""
if hasattr(instance, self.STATE_ATTR):
return False
elif self.class_ is not instance.__class__ and self.is_mapped:
# this will create a new ClassManager for the
# subclass, without a mapper. This is likely a
# user error situation but allow the object
# to be constructed, so that it is usable
# in a non-ORM context at least.
return self._subclass_manager(
instance.__class__
)._new_state_if_none(instance)
else:
state = self._state_constructor(instance, self)
self._state_setter(instance, state)
return state
def has_state(self, instance):
return hasattr(instance, self.STATE_ATTR)
def has_parent(self, state, key, optimistic=False):
"""TODO"""
return self.get_impl(key).hasparent(state, optimistic=optimistic)
def __bool__(self):
"""All ClassManagers are non-zero regardless of attribute state."""
return True
__nonzero__ = __bool__
def __repr__(self):
return "<%s of %r at %x>" % (
self.__class__.__name__,
self.class_,
id(self),
)
class _SerializeManager(object):
"""Provide serialization of a :class:`.ClassManager`.
The :class:`.InstanceState` uses ``__init__()`` on serialize
and ``__call__()`` on deserialize.
"""
def __init__(self, state, d):
self.class_ = state.class_
manager = state.manager
manager.dispatch.pickle(state, d)
def __call__(self, state, inst, state_dict):
state.manager = manager = manager_of_class(self.class_)
if manager is None:
raise exc.UnmappedInstanceError(
inst,
"Cannot deserialize object of type %r - "
"no mapper() has "
"been configured for this class within the current "
"Python process!" % self.class_,
)
elif manager.is_mapped and not manager.mapper.configured:
manager.mapper._configure_all()
# setup _sa_instance_state ahead of time so that
# unpickle events can access the object normally.
# see [ticket:2362]
if inst is not None:
manager.setup_instance(inst, state)
manager.dispatch.unpickle(state, state_dict)
class InstrumentationFactory(object):
"""Factory for new ClassManager instances."""
def create_manager_for_cls(self, class_):
assert class_ is not None
assert manager_of_class(class_) is None
# give a more complicated subclass
# a chance to do what it wants here
manager, factory = self._locate_extended_factory(class_)
if factory is None:
factory = ClassManager
manager = factory(class_)
self._check_conflicts(class_, factory)
manager.factory = factory
self.dispatch.class_instrument(class_)
return manager
def _locate_extended_factory(self, class_):
"""Overridden by a subclass to do an extended lookup."""
return None, None
def _check_conflicts(self, class_, factory):
"""Overridden by a subclass to test for conflicting factories."""
return
def unregister(self, class_):
manager = manager_of_class(class_)
manager.unregister()
manager.dispose()
self.dispatch.class_uninstrument(class_)
if ClassManager.MANAGER_ATTR in class_.__dict__:
delattr(class_, ClassManager.MANAGER_ATTR)
# this attribute is replaced by sqlalchemy.ext.instrumentation
# when imported.
_instrumentation_factory = InstrumentationFactory()
# these attributes are replaced by sqlalchemy.ext.instrumentation
# when a non-standard InstrumentationManager class is first
# used to instrument a class.
instance_state = _default_state_getter = base.instance_state
instance_dict = _default_dict_getter = base.instance_dict
manager_of_class = _default_manager_getter = base.manager_of_class
def register_class(class_):
"""Register class instrumentation.
Returns the existing or newly created class manager.
"""
manager = manager_of_class(class_)
if manager is None:
manager = _instrumentation_factory.create_manager_for_cls(class_)
return manager
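# Illustrative sketch (added, hypothetical class "Point"): manual
# registration, normally performed implicitly by mapper():
#
#   manager = register_class(Point)
#   assert manager_of_class(Point) is manager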
def unregister_class(class_):
"""Unregister class instrumentation."""
_instrumentation_factory.unregister(class_)
def is_instrumented(instance, key):
"""Return True if the given attribute on the given instance is
instrumented by the attributes package.
This function may be used regardless of instrumentation
applied directly to the class, i.e. no descriptors are required.
"""
return manager_of_class(instance.__class__).is_instrumented(
key, search=True
)
def _generate_init(class_, class_manager):
"""Build an __init__ decorator that triggers ClassManager events."""
# TODO: we should use the ClassManager's notion of the
# original '__init__' method, once ClassManager is fixed
# to always reference that.
original__init__ = class_.__init__
assert original__init__
# Go through some effort here and don't change the user's __init__
# calling signature, including the unlikely case that it has
# a return value.
# FIXME: need to juggle local names to avoid constructor argument
# clashes.
func_body = """\
def __init__(%(apply_pos)s):
new_state = class_manager._new_state_if_none(%(self_arg)s)
if new_state:
return new_state._initialize_instance(%(apply_kw)s)
else:
return original__init__(%(apply_kw)s)
"""
func_vars = util.format_argspec_init(original__init__, grouped=False)
func_text = func_body % func_vars
if util.py2k:
func = getattr(original__init__, "im_func", original__init__)
func_defaults = getattr(func, "func_defaults", None)
else:
func_defaults = getattr(original__init__, "__defaults__", None)
func_kw_defaults = getattr(original__init__, "__kwdefaults__", None)
env = locals().copy()
exec(func_text, env)
__init__ = env["__init__"]
__init__.__doc__ = original__init__.__doc__
__init__._sa_original_init = original__init__
if func_defaults:
__init__.__defaults__ = func_defaults
if not util.py2k and func_kw_defaults:
__init__.__kwdefaults__ = func_kw_defaults
return __init__
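# Illustrative note (added): for a user __init__ of the form
# "def __init__(self, name=None)", the template above renders roughly:
#
#   def __init__(self, name=None):
#       new_state = class_manager._new_state_if_none(self)
#       if new_state:
#           return new_state._initialize_instance(self, name=name)
#       else:
#           return original__init__(self, name=name)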
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/loading.py
|
# orm/loading.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to convert database
rows into object instances and associated state.
the functions here are called primarily by Query, Mapper,
as well as some of the attribute loading strategies.
"""
from __future__ import absolute_import
import collections
from . import attributes
from . import exc as orm_exc
from . import path_registry
from . import strategy_options
from .base import _DEFER_FOR_STATE
from .base import _SET_DEFERRED_EXPIRED
from .util import _none_set
from .util import state_str
from .. import exc as sa_exc
from .. import util
from ..sql import util as sql_util
_new_runid = util.counter()
def instances(query, cursor, context):
"""Return an ORM result as an iterator."""
context.runid = _new_runid()
context.post_load_paths = {}
filtered = query._has_mapper_entities
single_entity = query.is_single_entity
if filtered:
if single_entity:
filter_fn = id
else:
def filter_fn(row):
return tuple(
id(item) if ent.use_id_for_hash else item
for ent, item in zip(query._entities, row)
)
try:
(process, labels) = list(
zip(
*[
query_entity.row_processor(query, context, cursor)
for query_entity in query._entities
]
)
)
if not single_entity:
keyed_tuple = util.lightweight_named_tuple("result", labels)
while True:
context.partials = {}
if query._yield_per:
fetch = cursor.fetchmany(query._yield_per)
if not fetch:
break
else:
fetch = cursor.fetchall()
if single_entity:
proc = process[0]
rows = [proc(row) for row in fetch]
else:
rows = [
keyed_tuple([proc(row) for proc in process])
for row in fetch
]
for path, post_load in context.post_load_paths.items():
post_load.invoke(context, path)
if filtered:
rows = util.unique_list(rows, filter_fn)
for row in rows:
yield row
if not query._yield_per:
break
except Exception:
with util.safe_reraise():
cursor.close()
@util.dependencies("sqlalchemy.orm.query")
def merge_result(querylib, query, iterator, load=True):
"""Merge a result into this :class:`_query.Query` object's Session."""
session = query.session
if load:
# flush current contents if we expect to load data
session._autoflush()
autoflush = session.autoflush
try:
session.autoflush = False
single_entity = len(query._entities) == 1
if single_entity:
if isinstance(query._entities[0], querylib._MapperEntity):
result = [
session._merge(
attributes.instance_state(instance),
attributes.instance_dict(instance),
load=load,
_recursive={},
_resolve_conflict_map={},
)
for instance in iterator
]
else:
result = list(iterator)
else:
mapped_entities = [
i
for i, e in enumerate(query._entities)
if isinstance(e, querylib._MapperEntity)
]
result = []
keys = [ent._label_name for ent in query._entities]
keyed_tuple = util.lightweight_named_tuple("result", keys)
for row in iterator:
newrow = list(row)
for i in mapped_entities:
if newrow[i] is not None:
newrow[i] = session._merge(
attributes.instance_state(newrow[i]),
attributes.instance_dict(newrow[i]),
load=load,
_recursive={},
_resolve_conflict_map={},
)
result.append(keyed_tuple(newrow))
return iter(result)
finally:
session.autoflush = autoflush
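# Illustrative sketch (added, hypothetical names): this function backs
# Query.merge_result(), e.g. re-attaching rows produced by a cached
# query to the current Session:
#
#   q = session.query(User)
#   merged_rows = q.merge_result(rows_from_cache)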
def get_from_identity(session, mapper, key, passive):
"""Look up the given key in the given session's identity map,
check the object for expired state if found.
"""
instance = session.identity_map.get(key)
if instance is not None:
state = attributes.instance_state(instance)
if mapper.inherits and not state.mapper.isa(mapper):
return attributes.PASSIVE_CLASS_MISMATCH
# expired - ensure it still exists
if state.expired:
if not passive & attributes.SQL_OK:
# TODO: no coverage here
return attributes.PASSIVE_NO_RESULT
elif not passive & attributes.RELATED_OBJECT_OK:
# this mode is used within a flush and the instance's
# expired state will be checked soon enough, if necessary
return instance
try:
state._load_expired(state, passive)
except orm_exc.ObjectDeletedError:
session._remove_newly_deleted([state])
return None
return instance
else:
return None
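# Illustrative note (added): this lookup is the fast path behind
# Query.get(), which can return a persistent object without emitting
# SQL when the identity is already present and unexpired:
#
#   user = session.query(User).get(5)  # "User" is a hypothetical class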
def load_on_ident(
query, key, refresh_state=None, with_for_update=None, only_load_props=None
):
"""Load the given identity key from the database."""
if key is not None:
ident = key[1]
identity_token = key[2]
else:
ident = identity_token = None
return load_on_pk_identity(
query,
ident,
refresh_state=refresh_state,
with_for_update=with_for_update,
only_load_props=only_load_props,
identity_token=identity_token,
)
def load_on_pk_identity(
query,
primary_key_identity,
refresh_state=None,
with_for_update=None,
only_load_props=None,
identity_token=None,
):
"""Load the given primary key identity from the database."""
if refresh_state is None:
q = query._clone()
q._get_condition()
else:
q = query._clone()
if primary_key_identity is not None:
mapper = query._mapper_zero()
(_get_clause, _get_params) = mapper._get_clause
# None present in ident - turn those comparisons
# into "IS NULL"
if None in primary_key_identity:
nones = set(
[
_get_params[col].key
for col, value in zip(
mapper.primary_key, primary_key_identity
)
if value is None
]
)
_get_clause = sql_util.adapt_criterion_to_null(_get_clause, nones)
if len(nones) == len(primary_key_identity):
util.warn(
"fully NULL primary key identity cannot load any "
"object. This condition may raise an error in a future "
"release."
)
_get_clause = q._adapt_clause(_get_clause, True, False)
q._criterion = _get_clause
params = dict(
[
(_get_params[primary_key].key, id_val)
for id_val, primary_key in zip(
primary_key_identity, mapper.primary_key
)
]
)
q._params = params
# with_for_update needs to be query.LockmodeArg()
if with_for_update is not None:
version_check = True
q._for_update_arg = with_for_update
elif query._for_update_arg is not None:
version_check = True
q._for_update_arg = query._for_update_arg
else:
version_check = False
q._get_options(
populate_existing=bool(refresh_state),
version_check=version_check,
only_load_props=only_load_props,
refresh_state=refresh_state,
identity_token=identity_token,
)
q._order_by = None
try:
return q.one()
except orm_exc.NoResultFound:
return None
def _setup_entity_query(
context,
mapper,
query_entity,
path,
adapter,
column_collection,
with_polymorphic=None,
only_load_props=None,
polymorphic_discriminator=None,
**kw
):
if with_polymorphic:
poly_properties = mapper._iterate_polymorphic_properties(
with_polymorphic
)
else:
poly_properties = mapper._polymorphic_properties
quick_populators = {}
path.set(context.attributes, "memoized_setups", quick_populators)
for value in poly_properties:
if only_load_props and value.key not in only_load_props:
continue
value.setup(
context,
query_entity,
path,
adapter,
only_load_props=only_load_props,
column_collection=column_collection,
memoized_populators=quick_populators,
**kw
)
if (
polymorphic_discriminator is not None
and polymorphic_discriminator is not mapper.polymorphic_on
):
if adapter:
pd = adapter.columns[polymorphic_discriminator]
else:
pd = polymorphic_discriminator
column_collection.append(pd)
def _warn_for_runid_changed(state):
util.warn(
"Loading context for %s has changed within a load/refresh "
"handler, suggesting a row refresh operation took place. If this "
"event handler is expected to be "
"emitting row refresh operations within an existing load or refresh "
"operation, set restore_load_context=True when establishing the "
"listener to ensure the context remains unchanged when the event "
"handler completes." % (state_str(state),)
)
def _instance_processor(
mapper,
context,
result,
path,
adapter,
only_load_props=None,
refresh_state=None,
polymorphic_discriminator=None,
_polymorphic_from=None,
):
"""Produce a mapper level row processor callable
which processes rows into mapped instances."""
# note that this method, most of which exists in a closure
# called _instance(), resists being broken out, as
# attempts to do so tend to add significant function
# call overhead. _instance() is the most
# performance-critical section in the whole ORM.
pk_cols = mapper.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
identity_class = mapper._identity_class
populators = collections.defaultdict(list)
props = mapper._prop_set
if only_load_props is not None:
props = props.intersection(mapper._props[k] for k in only_load_props)
quick_populators = path.get(
context.attributes, "memoized_setups", _none_set
)
for prop in props:
if prop in quick_populators:
# this is an inlined path just for column-based attributes.
col = quick_populators[prop]
if col is _DEFER_FOR_STATE:
populators["new"].append(
(prop.key, prop._deferred_column_loader)
)
elif col is _SET_DEFERRED_EXPIRED:
# note that in this path, we are no longer
# searching in the result to see if the column might
# be present in some unexpected way.
populators["expire"].append((prop.key, False))
else:
getter = None
# the "adapter" can be here via different paths,
# e.g. via adapter present at setup_query or adapter
# applied to the query afterwards via eager load subquery.
# If the column here
# were already a product of this adapter, sending it through
# the adapter again can return a totally new expression that
# won't be recognized in the result, and the ColumnAdapter
# currently does not accommodate this. OTOH, if the
# column were never applied through this adapter, we may get
# None back, in which case we still won't get our "getter".
# so try both against result._getter(). See issue #4048
if adapter:
adapted_col = adapter.columns[col]
if adapted_col is not None:
getter = result._getter(adapted_col, False)
if not getter:
getter = result._getter(col, False)
if getter:
populators["quick"].append((prop.key, getter))
else:
# fall back to the ColumnProperty itself, which
# will iterate through all of its columns
# to see if one fits
prop.create_row_processor(
context, path, mapper, result, adapter, populators
)
else:
prop.create_row_processor(
context, path, mapper, result, adapter, populators
)
propagate_options = context.propagate_options
load_path = (
context.query._current_path + path
if context.query._current_path.path
else path
)
session_identity_map = context.session.identity_map
populate_existing = context.populate_existing or mapper.always_refresh
load_evt = bool(mapper.class_manager.dispatch.load)
refresh_evt = bool(mapper.class_manager.dispatch.refresh)
persistent_evt = bool(context.session.dispatch.loaded_as_persistent)
if persistent_evt:
loaded_as_persistent = context.session.dispatch.loaded_as_persistent
instance_state = attributes.instance_state
instance_dict = attributes.instance_dict
session_id = context.session.hash_key
version_check = context.version_check
runid = context.runid
identity_token = context.identity_token
if not refresh_state and _polymorphic_from is not None:
key = ("loader", path.path)
if key in context.attributes and context.attributes[key].strategy == (
("selectinload_polymorphic", True),
):
selectin_load_via = mapper._should_selectin_load(
context.attributes[key].local_opts["entities"],
_polymorphic_from,
)
else:
selectin_load_via = mapper._should_selectin_load(
None, _polymorphic_from
)
if selectin_load_via and selectin_load_via is not _polymorphic_from:
# only_load_props goes w/ refresh_state only, and in a refresh
# we are a single row query for the exact entity; polymorphic
# loading does not apply
assert only_load_props is None
callable_ = _load_subclass_via_in(context, path, selectin_load_via)
PostLoad.callable_for_path(
context,
load_path,
selectin_load_via.mapper,
selectin_load_via,
callable_,
selectin_load_via,
)
post_load = PostLoad.for_context(context, load_path, only_load_props)
if refresh_state:
refresh_identity_key = refresh_state.key
if refresh_identity_key is None:
# super-rare condition; a refresh is being called
# on a non-instance-key instance; this is meant to only
# occur within a flush()
refresh_identity_key = mapper._identity_key_from_state(
refresh_state
)
else:
refresh_identity_key = None
if mapper.allow_partial_pks:
is_not_primary_key = _none_set.issuperset
else:
is_not_primary_key = _none_set.intersection
def _instance(row):
# determine the state that we'll be populating
if refresh_identity_key:
# fixed state that we're refreshing
state = refresh_state
instance = state.obj()
dict_ = instance_dict(instance)
isnew = state.runid != runid
currentload = True
loaded_instance = False
else:
# look at the row, see if that identity is in the
# session, or we have to create a new one
identitykey = (
identity_class,
tuple([row[column] for column in pk_cols]),
identity_token,
)
instance = session_identity_map.get(identitykey)
if instance is not None:
# existing instance
state = instance_state(instance)
dict_ = instance_dict(instance)
isnew = state.runid != runid
currentload = not isnew
loaded_instance = False
if version_check and not currentload:
_validate_version_id(mapper, state, dict_, row, adapter)
else:
# create a new instance
# check for non-NULL values in the primary key columns,
# else no entity is returned for the row
if is_not_primary_key(identitykey[1]):
return None
isnew = True
currentload = True
loaded_instance = True
instance = mapper.class_manager.new_instance()
dict_ = instance_dict(instance)
state = instance_state(instance)
state.key = identitykey
state.identity_token = identity_token
# attach instance to session.
state.session_id = session_id
session_identity_map._add_unpresent(state, identitykey)
# populate. this looks at whether this state is new
# for this load or was existing, and whether or not this
# row is the first row with this identity.
if currentload or populate_existing:
# full population routines. Objects here are either
# just created, or we are doing a populate_existing
# be conservative about setting load_path when populate_existing
# is in effect; want to maintain options from the original
# load. see test_expire->test_refresh_maintains_deferred_options
if isnew and (propagate_options or not populate_existing):
state.load_options = propagate_options
state.load_path = load_path
_populate_full(
context,
row,
state,
dict_,
isnew,
load_path,
loaded_instance,
populate_existing,
populators,
)
if isnew:
# state.runid should be equal to context.runid / runid
# here, however for event checks we are being more conservative
# and checking against existing run id
# assert state.runid == runid
existing_runid = state.runid
if loaded_instance:
if load_evt:
state.manager.dispatch.load(state, context)
if state.runid != existing_runid:
_warn_for_runid_changed(state)
if persistent_evt:
loaded_as_persistent(context.session, state)
if state.runid != existing_runid:
_warn_for_runid_changed(state)
elif refresh_evt:
state.manager.dispatch.refresh(
state, context, only_load_props
)
if state.runid != runid:
_warn_for_runid_changed(state)
if populate_existing or state.modified:
if refresh_state and only_load_props:
state._commit(dict_, only_load_props)
else:
state._commit_all(dict_, session_identity_map)
if post_load:
post_load.add_state(state, True)
else:
# partial population routines, for objects that were already
# in the Session, but a row matches them; apply eager loaders
# on existing objects, etc.
unloaded = state.unloaded
isnew = state not in context.partials
if not isnew or unloaded or populators["eager"]:
# state is having a partial set of its attributes
# refreshed. Populate those attributes,
# and add to the "context.partials" collection.
to_load = _populate_partial(
context,
row,
state,
dict_,
isnew,
load_path,
unloaded,
populators,
)
if isnew:
if refresh_evt:
existing_runid = state.runid
state.manager.dispatch.refresh(state, context, to_load)
if state.runid != existing_runid:
_warn_for_runid_changed(state)
state._commit(dict_, to_load)
if post_load and context.invoke_all_eagers:
post_load.add_state(state, False)
return instance
if mapper.polymorphic_map and not _polymorphic_from and not refresh_state:
# if we are doing polymorphic, dispatch to a different _instance()
# method specific to the subclass mapper
_instance = _decorate_polymorphic_switch(
_instance,
context,
mapper,
result,
path,
polymorphic_discriminator,
adapter,
)
return _instance
def _load_subclass_via_in(context, path, entity):
mapper = entity.mapper
zero_idx = len(mapper.base_mapper.primary_key) == 1
if entity.is_aliased_class:
q, enable_opt, disable_opt = mapper._subclass_load_via_in(entity)
else:
q, enable_opt, disable_opt = mapper._subclass_load_via_in_mapper
def do_load(context, path, states, load_only, effective_entity):
orig_query = context.query
q2 = q._with_lazyload_options(
(enable_opt,) + orig_query._with_options + (disable_opt,),
path.parent,
cache_path=path,
)
if orig_query._populate_existing:
q2.add_criteria(lambda q: q.populate_existing())
q2(context.session).params(
primary_keys=[
state.key[1][0] if zero_idx else state.key[1]
for state, load_attrs in states
]
).all()
return do_load
def _populate_full(
context,
row,
state,
dict_,
isnew,
load_path,
loaded_instance,
populate_existing,
populators,
):
if isnew:
# first time we are seeing a row with this identity.
state.runid = context.runid
for key, getter in populators["quick"]:
dict_[key] = getter(row)
if populate_existing:
for key, set_callable in populators["expire"]:
dict_.pop(key, None)
if set_callable:
state.expired_attributes.add(key)
else:
for key, set_callable in populators["expire"]:
if set_callable:
state.expired_attributes.add(key)
for key, populator in populators["new"]:
populator(state, dict_, row)
for key, populator in populators["delayed"]:
populator(state, dict_, row)
elif load_path != state.load_path:
# new load path, e.g. object is present in more than one
# column position in a series of rows
state.load_path = load_path
# if we have data, and the data isn't in the dict, OK, let's put
# it in.
for key, getter in populators["quick"]:
if key not in dict_:
dict_[key] = getter(row)
# otherwise treat like an "already seen" row
for key, populator in populators["existing"]:
populator(state, dict_, row)
# TODO: allow "existing" populator to know this is
# a new path for the state:
# populator(state, dict_, row, new_path=True)
else:
# have already seen rows with this identity in this same path.
for key, populator in populators["existing"]:
populator(state, dict_, row)
# TODO: same path
# populator(state, dict_, row, new_path=False)
def _populate_partial(
context, row, state, dict_, isnew, load_path, unloaded, populators
):
if not isnew:
to_load = context.partials[state]
for key, populator in populators["existing"]:
if key in to_load:
populator(state, dict_, row)
else:
to_load = unloaded
context.partials[state] = to_load
for key, getter in populators["quick"]:
if key in to_load:
dict_[key] = getter(row)
for key, set_callable in populators["expire"]:
if key in to_load:
dict_.pop(key, None)
if set_callable:
state.expired_attributes.add(key)
for key, populator in populators["new"]:
if key in to_load:
populator(state, dict_, row)
for key, populator in populators["delayed"]:
if key in to_load:
populator(state, dict_, row)
for key, populator in populators["eager"]:
if key not in unloaded:
populator(state, dict_, row)
return to_load
def _validate_version_id(mapper, state, dict_, row, adapter):
version_id_col = mapper.version_id_col
if version_id_col is None:
return
if adapter:
version_id_col = adapter.columns[version_id_col]
if (
mapper._get_state_attr_by_column(state, dict_, mapper.version_id_col)
!= row[version_id_col]
):
raise orm_exc.StaleDataError(
"Instance '%s' has version id '%s' which "
"does not match database-loaded version id '%s'."
% (
state_str(state),
mapper._get_state_attr_by_column(
state, dict_, mapper.version_id_col
),
row[version_id_col],
)
)
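# Editorial note, hedged: _validate_version_id() only applies to mappers
# configured with a version counter; a hypothetical declarative mapping
# that enables it:
#
#     class Widget(Base):
#         __tablename__ = "widget"
#         id = Column(Integer, primary_key=True)
#         name = Column(String(50))
#         version_id = Column(Integer, nullable=False)
#         __mapper_args__ = {"version_id_col": version_id}
#
# A row whose database version no longer matches the in-memory value then
# raises StaleDataError as above.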
def _decorate_polymorphic_switch(
instance_fn,
context,
mapper,
result,
path,
polymorphic_discriminator,
adapter,
):
if polymorphic_discriminator is not None:
polymorphic_on = polymorphic_discriminator
else:
polymorphic_on = mapper.polymorphic_on
if polymorphic_on is None:
return instance_fn
if adapter:
polymorphic_on = adapter.columns[polymorphic_on]
def configure_subclass_mapper(discriminator):
try:
sub_mapper = mapper.polymorphic_map[discriminator]
except KeyError:
raise AssertionError(
"No such polymorphic_identity %r is defined" % discriminator
)
else:
if sub_mapper is mapper:
return None
return _instance_processor(
sub_mapper,
context,
result,
path,
adapter,
_polymorphic_from=mapper,
)
polymorphic_instances = util.PopulateDict(configure_subclass_mapper)
def polymorphic_instance(row):
discriminator = row[polymorphic_on]
if discriminator is not None:
_instance = polymorphic_instances[discriminator]
if _instance:
return _instance(row)
return instance_fn(row)
return polymorphic_instance
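# Editorial sketch, hedged: the discriminator switch above routes each row
# through the sub-mapper registered for its polymorphic_identity.  With a
# hypothetical single-table hierarchy:
#
#     class Employee(Base):
#         __tablename__ = "employee"
#         id = Column(Integer, primary_key=True)
#         type = Column(String(20))
#         __mapper_args__ = {
#             "polymorphic_on": type,
#             "polymorphic_identity": "employee",
#         }
#
#     class Engineer(Employee):
#         __mapper_args__ = {"polymorphic_identity": "engineer"}
#
# rows with type == "engineer" are built by Engineer's _instance()
# callable; an unregistered discriminator raises AssertionError above.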
class PostLoad(object):
"""Track loaders and states for "post load" operations.
"""
__slots__ = "loaders", "states", "load_keys"
def __init__(self):
self.loaders = {}
self.states = util.OrderedDict()
self.load_keys = None
def add_state(self, state, overwrite):
# the states for a polymorphic load here are all shared
# within a single PostLoad object among multiple subtypes.
# Filtering of callables on a per-subclass basis needs to be done at
# the invocation level
self.states[state] = overwrite
def invoke(self, context, path):
if not self.states:
return
path = path_registry.PathRegistry.coerce(path)
for token, limit_to_mapper, loader, arg, kw in self.loaders.values():
states = [
(state, overwrite)
for state, overwrite in self.states.items()
if state.manager.mapper.isa(limit_to_mapper)
]
if states:
loader(context, path, states, self.load_keys, *arg, **kw)
self.states.clear()
@classmethod
def for_context(cls, context, path, only_load_props):
pl = context.post_load_paths.get(path.path)
if pl is not None and only_load_props:
pl.load_keys = only_load_props
return pl
@classmethod
    def path_exists(cls, context, path, key):
return (
path.path in context.post_load_paths
and key in context.post_load_paths[path.path].loaders
)
@classmethod
def callable_for_path(
cls, context, path, limit_to_mapper, token, loader_callable, *arg, **kw
):
if path.path in context.post_load_paths:
pl = context.post_load_paths[path.path]
else:
pl = context.post_load_paths[path.path] = PostLoad()
pl.loaders[token] = (token, limit_to_mapper, loader_callable, arg, kw)
def load_scalar_attributes(mapper, state, attribute_names):
"""initiate a column-based attribute refresh operation."""
# assert mapper is _state_mapper(state)
session = state.session
if not session:
raise orm_exc.DetachedInstanceError(
"Instance %s is not bound to a Session; "
"attribute refresh operation cannot proceed" % (state_str(state))
)
has_key = bool(state.key)
result = False
# in the case of inheritance, particularly concrete and abstract
# concrete inheritance, the class manager might have some keys
# of attributes on the superclass that we didn't actually map.
    # These could be mapped as "concrete, don't load" or could be completely
    # excluded from the mapping and we know nothing about them. Filter them
# here to prevent them from coming through.
if attribute_names:
attribute_names = attribute_names.intersection(mapper.attrs.keys())
if mapper.inherits and not mapper.concrete:
# because we are using Core to produce a select() that we
# pass to the Query, we aren't calling setup() for mapped
# attributes; in 1.0 this means deferred attrs won't get loaded
# by default
statement = mapper._optimized_get_statement(state, attribute_names)
if statement is not None:
result = load_on_ident(
session.query(mapper)
.options(strategy_options.Load(mapper).undefer("*"))
.from_statement(statement),
None,
only_load_props=attribute_names,
refresh_state=state,
)
if result is False:
if has_key:
identity_key = state.key
else:
# this codepath is rare - only valid when inside a flush, and the
# object is becoming persistent but hasn't yet been assigned
# an identity_key.
# check here to ensure we have the attrs we need.
pk_attrs = [
mapper._columntoproperty[col].key for col in mapper.primary_key
]
if state.expired_attributes.intersection(pk_attrs):
raise sa_exc.InvalidRequestError(
"Instance %s cannot be refreshed - it's not "
" persistent and does not "
"contain a full primary key." % state_str(state)
)
identity_key = mapper._identity_key_from_state(state)
if (
_none_set.issubset(identity_key) and not mapper.allow_partial_pks
) or _none_set.issuperset(identity_key):
util.warn_limited(
"Instance %s to be refreshed doesn't "
"contain a full primary key - can't be refreshed "
"(and shouldn't be expired, either).",
state_str(state),
)
return
result = load_on_ident(
session.query(mapper),
identity_key,
refresh_state=state,
only_load_props=attribute_names,
)
# if instance is pending, a refresh operation
# may not complete (even if PK attributes are assigned)
if has_key and result is None:
raise orm_exc.ObjectDeletedError(state)
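# --- Editorial example (not part of the SQLAlchemy source): a hedged
# sketch of the public behavior backed by load_scalar_attributes();
# ``user`` is a hypothetical persistent instance with a ``name`` attribute.
def _example_expired_attribute_refresh(session, user):  # pragma: no cover
    session.expire(user, ["name"])  # mark 'name' as expired on the state
    return user.name                # attribute access triggers the refresh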
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/persistence.py
# orm/persistence.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to emit INSERT, UPDATE
and DELETE statements on behalf of a :class:`_orm.Mapper` and its descending
mappers.
The functions here are called only by the unit of work functions
in unitofwork.py.
"""
from itertools import chain
from itertools import groupby
import operator
from . import attributes
from . import evaluator
from . import exc as orm_exc
from . import loading
from . import sync
from .base import _entity_descriptor
from .base import state_str
from .. import exc as sa_exc
from .. import sql
from .. import util
from ..sql import expression
from ..sql.base import _from_objects
def _bulk_insert(
mapper,
mappings,
session_transaction,
isstates,
return_defaults,
render_nulls,
):
base_mapper = mapper.base_mapper
cached_connections = _cached_connection_dict(base_mapper)
if session_transaction.session.connection_callable:
raise NotImplementedError(
"connection_callable / per-instance sharding "
"not supported in bulk_insert()"
)
if isstates:
if return_defaults:
states = [(state, state.dict) for state in mappings]
mappings = [dict_ for (state, dict_) in states]
else:
mappings = [state.dict for state in mappings]
else:
mappings = list(mappings)
connection = session_transaction.connection(base_mapper)
for table, super_mapper in base_mapper._sorted_tables.items():
if not mapper.isa(super_mapper):
continue
records = (
(
None,
state_dict,
params,
mapper,
connection,
value_params,
has_all_pks,
has_all_defaults,
)
for (
state,
state_dict,
params,
mp,
conn,
value_params,
has_all_pks,
has_all_defaults,
) in _collect_insert_commands(
table,
((None, mapping, mapper, connection) for mapping in mappings),
bulk=True,
return_defaults=return_defaults,
render_nulls=render_nulls,
)
)
_emit_insert_statements(
base_mapper,
None,
cached_connections,
super_mapper,
table,
records,
bookkeeping=return_defaults,
)
if return_defaults and isstates:
identity_cls = mapper._identity_class
identity_props = [p.key for p in mapper._identity_key_props]
for state, dict_ in states:
state.key = (
identity_cls,
tuple([dict_[key] for key in identity_props]),
)
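# --- Editorial example (not part of the SQLAlchemy source): the public
# entry point that funnels into _bulk_insert(); ``User`` is hypothetical.
def _example_bulk_insert(session, User):  # pragma: no cover
    session.bulk_insert_mappings(
        User,
        [{"id": 1, "name": "ed"}, {"id": 2, "name": "wendy"}],
        return_defaults=False,  # skip per-row primary key bookkeeping
        render_nulls=False,     # omit None values so server defaults apply
    )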
def _bulk_update(
mapper, mappings, session_transaction, isstates, update_changed_only
):
base_mapper = mapper.base_mapper
cached_connections = _cached_connection_dict(base_mapper)
search_keys = mapper._primary_key_propkeys
if mapper._version_id_prop:
search_keys = {mapper._version_id_prop.key}.union(search_keys)
def _changed_dict(mapper, state):
return dict(
(k, v)
for k, v in state.dict.items()
if k in state.committed_state or k in search_keys
)
if isstates:
if update_changed_only:
mappings = [_changed_dict(mapper, state) for state in mappings]
else:
mappings = [state.dict for state in mappings]
else:
mappings = list(mappings)
if session_transaction.session.connection_callable:
raise NotImplementedError(
"connection_callable / per-instance sharding "
"not supported in bulk_update()"
)
connection = session_transaction.connection(base_mapper)
for table, super_mapper in base_mapper._sorted_tables.items():
if not mapper.isa(super_mapper):
continue
records = _collect_update_commands(
None,
table,
(
(
None,
mapping,
mapper,
connection,
(
mapping[mapper._version_id_prop.key]
if mapper._version_id_prop
else None
),
)
for mapping in mappings
),
bulk=True,
)
_emit_update_statements(
base_mapper,
None,
cached_connections,
super_mapper,
table,
records,
bookkeeping=False,
)
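# --- Editorial example (not part of the SQLAlchemy source): the public
# entry point that funnels into _bulk_update(); each mapping must carry
# its primary key so the UPDATE can be targeted.  ``User`` is hypothetical.
def _example_bulk_update(session, User):  # pragma: no cover
    session.bulk_update_mappings(
        User,
        [{"id": 1, "name": "ed jones"}, {"id": 2, "name": "wendy w."}],
    )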
def save_obj(base_mapper, states, uowtransaction, single=False):
"""Issue ``INSERT`` and/or ``UPDATE`` statements for a list
of objects.
This is called within the context of a UOWTransaction during a
flush operation, given a list of states to be flushed. The
base mapper in an inheritance hierarchy handles the inserts/
updates for all descendant mappers.
"""
    # if batch=False, call save_obj separately for each object
if not single and not base_mapper.batch:
for state in _sort_states(base_mapper, states):
save_obj(base_mapper, [state], uowtransaction, single=True)
return
states_to_update = []
states_to_insert = []
cached_connections = _cached_connection_dict(base_mapper)
for (
state,
dict_,
mapper,
connection,
has_identity,
row_switch,
update_version_id,
) in _organize_states_for_save(base_mapper, states, uowtransaction):
if has_identity or row_switch:
states_to_update.append(
(state, dict_, mapper, connection, update_version_id)
)
else:
states_to_insert.append((state, dict_, mapper, connection))
for table, mapper in base_mapper._sorted_tables.items():
if table not in mapper._pks_by_table:
continue
insert = _collect_insert_commands(table, states_to_insert)
update = _collect_update_commands(
uowtransaction, table, states_to_update
)
_emit_update_statements(
base_mapper,
uowtransaction,
cached_connections,
mapper,
table,
update,
)
_emit_insert_statements(
base_mapper,
uowtransaction,
cached_connections,
mapper,
table,
insert,
)
_finalize_insert_update_commands(
base_mapper,
uowtransaction,
chain(
(
(state, state_dict, mapper, connection, False)
for (state, state_dict, mapper, connection) in states_to_insert
),
(
(state, state_dict, mapper, connection, True)
for (
state,
state_dict,
mapper,
connection,
update_version_id,
) in states_to_update
),
),
)
def post_update(base_mapper, states, uowtransaction, post_update_cols):
"""Issue UPDATE statements on behalf of a relationship() which
specifies post_update.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_update = list(
_organize_states_for_post_update(base_mapper, states, uowtransaction)
)
for table, mapper in base_mapper._sorted_tables.items():
if table not in mapper._pks_by_table:
continue
update = (
(
state,
state_dict,
sub_mapper,
connection,
mapper._get_committed_state_attr_by_column(
state, state_dict, mapper.version_id_col
)
if mapper.version_id_col is not None
else None,
)
for state, state_dict, sub_mapper, connection in states_to_update
if table in sub_mapper._pks_by_table
)
update = _collect_post_update_commands(
base_mapper, uowtransaction, table, update, post_update_cols
)
_emit_post_update_statements(
base_mapper,
uowtransaction,
cached_connections,
mapper,
table,
update,
)
def delete_obj(base_mapper, states, uowtransaction):
"""Issue ``DELETE`` statements for a list of objects.
This is called within the context of a UOWTransaction during a
flush operation.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_delete = list(
_organize_states_for_delete(base_mapper, states, uowtransaction)
)
table_to_mapper = base_mapper._sorted_tables
for table in reversed(list(table_to_mapper.keys())):
mapper = table_to_mapper[table]
if table not in mapper._pks_by_table:
continue
elif mapper.inherits and mapper.passive_deletes:
continue
delete = _collect_delete_commands(
base_mapper, uowtransaction, table, states_to_delete
)
_emit_delete_statements(
base_mapper,
uowtransaction,
cached_connections,
mapper,
table,
delete,
)
for (
state,
state_dict,
mapper,
connection,
update_version_id,
) in states_to_delete:
mapper.dispatch.after_delete(mapper, connection, state)
def _organize_states_for_save(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for INSERT or
UPDATE.
This includes splitting out into distinct lists for
each, calling before_insert/before_update, obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state,
and the identity flag.
"""
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction, states
):
has_identity = bool(state.key)
instance_key = state.key or mapper._identity_key_from_state(state)
row_switch = update_version_id = None
# call before_XXX extensions
if not has_identity:
mapper.dispatch.before_insert(mapper, connection, state)
else:
mapper.dispatch.before_update(mapper, connection, state)
if mapper._validate_polymorphic_identity:
mapper._validate_polymorphic_identity(mapper, state, dict_)
# detect if we have a "pending" instance (i.e. has
# no instance_key attached to it), and another instance
# with the same identity key already exists as persistent.
# convert to an UPDATE if so.
if (
not has_identity
and instance_key in uowtransaction.session.identity_map
):
instance = uowtransaction.session.identity_map[instance_key]
existing = attributes.instance_state(instance)
if not uowtransaction.was_already_deleted(existing):
if not uowtransaction.is_deleted(existing):
raise orm_exc.FlushError(
"New instance %s with identity key %s conflicts "
"with persistent instance %s"
% (state_str(state), instance_key, state_str(existing))
)
base_mapper._log_debug(
"detected row switch for identity %s. "
"will update %s, remove %s from "
"transaction",
instance_key,
state_str(state),
state_str(existing),
)
# remove the "delete" flag from the existing element
uowtransaction.remove_state_actions(existing)
row_switch = existing
if (has_identity or row_switch) and mapper.version_id_col is not None:
update_version_id = mapper._get_committed_state_attr_by_column(
row_switch if row_switch else state,
row_switch.dict if row_switch else dict_,
mapper.version_id_col,
)
yield (
state,
dict_,
mapper,
connection,
has_identity,
row_switch,
update_version_id,
)
def _organize_states_for_post_update(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for UPDATE
corresponding to post_update.
This includes obtaining key information for each state
including its dictionary, mapper, the connection to use for
the execution per state.
"""
return _connections_for_states(base_mapper, uowtransaction, states)
def _organize_states_for_delete(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for DELETE.
This includes calling out before_delete and obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state.
"""
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction, states
):
mapper.dispatch.before_delete(mapper, connection, state)
if mapper.version_id_col is not None:
update_version_id = mapper._get_committed_state_attr_by_column(
state, dict_, mapper.version_id_col
)
else:
update_version_id = None
yield (state, dict_, mapper, connection, update_version_id)
def _collect_insert_commands(
table,
states_to_insert,
bulk=False,
return_defaults=False,
render_nulls=False,
):
"""Identify sets of values to use in INSERT statements for a
list of states.
"""
for state, state_dict, mapper, connection in states_to_insert:
if table not in mapper._pks_by_table:
continue
params = {}
value_params = {}
propkey_to_col = mapper._propkey_to_col[table]
eval_none = mapper._insert_cols_evaluating_none[table]
for propkey in set(propkey_to_col).intersection(state_dict):
value = state_dict[propkey]
col = propkey_to_col[propkey]
if value is None and col not in eval_none and not render_nulls:
continue
elif not bulk and (
hasattr(value, "__clause_element__")
or isinstance(value, sql.ClauseElement)
):
value_params[col] = (
value.__clause_element__()
if hasattr(value, "__clause_element__")
else value
)
else:
params[col.key] = value
if not bulk:
# for all the columns that have no default and we don't have
# a value and where "None" is not a special value, add
# explicit None to the INSERT. This is a legacy behavior
# which might be worth removing, as it should not be necessary
# and also produces confusion, given that "missing" and None
# now have distinct meanings
for colkey in (
mapper._insert_cols_as_none[table]
.difference(params)
.difference([c.key for c in value_params])
):
params[colkey] = None
if not bulk or return_defaults:
# params are in terms of Column key objects, so
# compare to pk_keys_by_table
has_all_pks = mapper._pk_keys_by_table[table].issubset(params)
if mapper.base_mapper.eager_defaults:
has_all_defaults = mapper._server_default_cols[table].issubset(
params
)
else:
has_all_defaults = True
else:
has_all_defaults = has_all_pks = True
if (
mapper.version_id_generator is not False
and mapper.version_id_col is not None
and mapper.version_id_col in mapper._cols_by_table[table]
):
params[mapper.version_id_col.key] = mapper.version_id_generator(
None
)
yield (
state,
state_dict,
params,
mapper,
connection,
value_params,
has_all_pks,
has_all_defaults,
)
def _collect_update_commands(
uowtransaction, table, states_to_update, bulk=False
):
"""Identify sets of values to use in UPDATE statements for a
list of states.
This function works intricately with the history system
to determine exactly what values should be updated
as well as how the row should be matched within an UPDATE
statement. Includes some tricky scenarios where the primary
key of an object might have been changed.
"""
for (
state,
state_dict,
mapper,
connection,
update_version_id,
) in states_to_update:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
value_params = {}
propkey_to_col = mapper._propkey_to_col[table]
if bulk:
# keys here are mapped attribute keys, so
# look at mapper attribute keys for pk
params = dict(
(propkey_to_col[propkey].key, state_dict[propkey])
for propkey in set(propkey_to_col)
.intersection(state_dict)
.difference(mapper._pk_attr_keys_by_table[table])
)
has_all_defaults = True
else:
params = {}
for propkey in set(propkey_to_col).intersection(
state.committed_state
):
value = state_dict[propkey]
col = propkey_to_col[propkey]
if hasattr(value, "__clause_element__") or isinstance(
value, sql.ClauseElement
):
value_params[col] = (
value.__clause_element__()
if hasattr(value, "__clause_element__")
else value
)
# guard against values that generate non-__nonzero__
# objects for __eq__()
elif (
state.manager[propkey].impl.is_equal(
value, state.committed_state[propkey]
)
is not True
):
params[col.key] = value
if mapper.base_mapper.eager_defaults:
has_all_defaults = (
mapper._server_onupdate_default_cols[table]
).issubset(params)
else:
has_all_defaults = True
if (
update_version_id is not None
and mapper.version_id_col in mapper._cols_by_table[table]
):
if not bulk and not (params or value_params):
# HACK: check for history in other tables, in case the
# history is only in a different table than the one
# where the version_id_col is. This logic was lost
# from 0.9 -> 1.0.0 and restored in 1.0.6.
for prop in mapper._columntoproperty.values():
history = state.manager[prop.key].impl.get_history(
state, state_dict, attributes.PASSIVE_NO_INITIALIZE
)
if history.added:
break
else:
                    # no net change anywhere, skip this state
continue
col = mapper.version_id_col
no_params = not params and not value_params
params[col._label] = update_version_id
if (
bulk or col.key not in params
) and mapper.version_id_generator is not False:
val = mapper.version_id_generator(update_version_id)
params[col.key] = val
elif mapper.version_id_generator is False and no_params:
# no version id generator, no values set on the table,
# and version id wasn't manually incremented.
# set version id to itself so we get an UPDATE
# statement
params[col.key] = update_version_id
elif not (params or value_params):
continue
has_all_pks = True
expect_pk_cascaded = False
if bulk:
# keys here are mapped attribute keys, so
# look at mapper attribute keys for pk
pk_params = dict(
(propkey_to_col[propkey]._label, state_dict.get(propkey))
for propkey in set(propkey_to_col).intersection(
mapper._pk_attr_keys_by_table[table]
)
)
else:
pk_params = {}
for col in pks:
propkey = mapper._columntoproperty[col].key
history = state.manager[propkey].impl.get_history(
state, state_dict, attributes.PASSIVE_OFF
)
if history.added:
if (
not history.deleted
or ("pk_cascaded", state, col)
in uowtransaction.attributes
):
expect_pk_cascaded = True
pk_params[col._label] = history.added[0]
params.pop(col.key, None)
else:
# else, use the old value to locate the row
pk_params[col._label] = history.deleted[0]
if col in value_params:
has_all_pks = False
else:
pk_params[col._label] = history.unchanged[0]
if pk_params[col._label] is None:
raise orm_exc.FlushError(
"Can't update table %s using NULL for primary "
"key value on column %s" % (table, col)
)
if params or value_params:
params.update(pk_params)
yield (
state,
state_dict,
params,
mapper,
connection,
value_params,
has_all_defaults,
has_all_pks,
)
elif expect_pk_cascaded:
# no UPDATE occurs on this table, but we expect that CASCADE rules
# have changed the primary key of the row; propagate this event to
# other columns that expect to have been modified. this normally
# occurs after the UPDATE is emitted however we invoke it here
# explicitly in the absence of our invoking an UPDATE
for m, equated_pairs in mapper._table_to_equated[table]:
sync.populate(
state,
m,
state,
m,
equated_pairs,
uowtransaction,
mapper.passive_updates,
)
def _collect_post_update_commands(
base_mapper, uowtransaction, table, states_to_update, post_update_cols
):
"""Identify sets of values to use in UPDATE statements for a
list of states within a post_update operation.
"""
for (
state,
state_dict,
mapper,
connection,
update_version_id,
) in states_to_update:
# assert table in mapper._pks_by_table
pks = mapper._pks_by_table[table]
params = {}
hasdata = False
for col in mapper._cols_by_table[table]:
if col in pks:
params[col._label] = mapper._get_state_attr_by_column(
state, state_dict, col, passive=attributes.PASSIVE_OFF
)
elif col in post_update_cols or col.onupdate is not None:
prop = mapper._columntoproperty[col]
history = state.manager[prop.key].impl.get_history(
state, state_dict, attributes.PASSIVE_NO_INITIALIZE
)
if history.added:
value = history.added[0]
params[col.key] = value
hasdata = True
if hasdata:
if (
update_version_id is not None
and mapper.version_id_col in mapper._cols_by_table[table]
):
col = mapper.version_id_col
params[col._label] = update_version_id
if (
bool(state.key)
and col.key not in params
and mapper.version_id_generator is not False
):
val = mapper.version_id_generator(update_version_id)
params[col.key] = val
yield state, state_dict, mapper, connection, params
def _collect_delete_commands(
base_mapper, uowtransaction, table, states_to_delete
):
"""Identify values to use in DELETE statements for a list of
states to be deleted."""
for (
state,
state_dict,
mapper,
connection,
update_version_id,
) in states_to_delete:
if table not in mapper._pks_by_table:
continue
params = {}
for col in mapper._pks_by_table[table]:
params[
col.key
] = value = mapper._get_committed_state_attr_by_column(
state, state_dict, col
)
if value is None:
raise orm_exc.FlushError(
"Can't delete from table %s "
"using NULL for primary "
"key value on column %s" % (table, col)
)
if (
update_version_id is not None
and mapper.version_id_col in mapper._cols_by_table[table]
):
params[mapper.version_id_col.key] = update_version_id
yield params, connection
def _emit_update_statements(
base_mapper,
uowtransaction,
cached_connections,
mapper,
table,
update,
bookkeeping=True,
):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_update_commands()."""
needs_version_id = (
mapper.version_id_col is not None
and mapper.version_id_col in mapper._cols_by_table[table]
)
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(
col == sql.bindparam(col._label, type_=col.type)
)
if needs_version_id:
clause.clauses.append(
mapper.version_id_col
== sql.bindparam(
mapper.version_id_col._label,
type_=mapper.version_id_col.type,
)
)
stmt = table.update(clause)
return stmt
cached_stmt = base_mapper._memo(("update", table), update_stmt)
for (
(connection, paramkeys, hasvalue, has_all_defaults, has_all_pks),
records,
) in groupby(
update,
lambda rec: (
rec[4], # connection
set(rec[2]), # set of parameter keys
bool(rec[5]), # whether or not we have "value" parameters
rec[6], # has_all_defaults
rec[7], # has all pks
),
):
rows = 0
records = list(records)
statement = cached_stmt
return_defaults = False
if not has_all_pks:
statement = statement.return_defaults()
return_defaults = True
elif (
bookkeeping
and not has_all_defaults
and mapper.base_mapper.eager_defaults
):
statement = statement.return_defaults()
return_defaults = True
elif mapper.version_id_col is not None:
statement = statement.return_defaults(mapper.version_id_col)
return_defaults = True
assert_singlerow = (
connection.dialect.supports_sane_rowcount
if not return_defaults
else connection.dialect.supports_sane_rowcount_returning
)
assert_multirow = (
assert_singlerow
and connection.dialect.supports_sane_multi_rowcount
)
allow_multirow = has_all_defaults and not needs_version_id
if hasvalue:
for (
state,
state_dict,
params,
mapper,
connection,
value_params,
has_all_defaults,
has_all_pks,
) in records:
c = connection.execute(statement.values(value_params), params)
if bookkeeping:
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
value_params,
True,
)
rows += c.rowcount
check_rowcount = assert_singlerow
else:
if not allow_multirow:
check_rowcount = assert_singlerow
for (
state,
state_dict,
params,
mapper,
connection,
value_params,
has_all_defaults,
has_all_pks,
) in records:
c = cached_connections[connection].execute(
statement, params
)
# TODO: why with bookkeeping=False?
if bookkeeping:
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
value_params,
True,
)
rows += c.rowcount
else:
multiparams = [rec[2] for rec in records]
check_rowcount = assert_multirow or (
assert_singlerow and len(multiparams) == 1
)
c = cached_connections[connection].execute(
statement, multiparams
)
rows += c.rowcount
for (
state,
state_dict,
params,
mapper,
connection,
value_params,
has_all_defaults,
has_all_pks,
) in records:
if bookkeeping:
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
value_params,
True,
)
if check_rowcount:
if rows != len(records):
raise orm_exc.StaleDataError(
"UPDATE statement on table '%s' expected to "
"update %d row(s); %d were matched."
% (table.description, len(records), rows)
)
elif needs_version_id:
util.warn(
"Dialect %s does not support updated rowcount "
"- versioning cannot be verified."
% c.dialect.dialect_description
)
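# Editorial note, hedged: the StaleDataError above is how optimistic
# versioning surfaces; with the hypothetical versioned Widget mapping
# sketched earlier and two sessions racing on the same row:
#
#     w1 = session1.query(Widget).get(1)
#     w2 = session2.query(Widget).get(1)
#     w2.name = "x"; session2.commit()  # bumps version_id to 2
#     w1.name = "y"; session1.commit()  # UPDATE ... WHERE version_id = 1
#                                       # matches 0 rows -> StaleDataError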
def _emit_insert_statements(
base_mapper,
uowtransaction,
cached_connections,
mapper,
table,
insert,
bookkeeping=True,
):
"""Emit INSERT statements corresponding to value lists collected
by _collect_insert_commands()."""
cached_stmt = base_mapper._memo(("insert", table), table.insert)
for (
(connection, pkeys, hasvalue, has_all_pks, has_all_defaults),
records,
) in groupby(
insert,
lambda rec: (
rec[4], # connection
set(rec[2]), # parameter keys
bool(rec[5]), # whether we have "value" parameters
            rec[6],  # has_all_pks
            rec[7],  # has_all_defaults
),
):
statement = cached_stmt
if (
not bookkeeping
or (
has_all_defaults
or not base_mapper.eager_defaults
or not connection.dialect.implicit_returning
)
and has_all_pks
and not hasvalue
):
records = list(records)
multiparams = [rec[2] for rec in records]
c = cached_connections[connection].execute(statement, multiparams)
if bookkeeping:
for (
(
state,
state_dict,
params,
mapper_rec,
conn,
value_params,
has_all_pks,
has_all_defaults,
),
last_inserted_params,
) in zip(records, c.context.compiled_parameters):
if state:
_postfetch(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
c,
last_inserted_params,
value_params,
False,
)
else:
_postfetch_bulk_save(mapper_rec, state_dict, table)
else:
if not has_all_defaults and base_mapper.eager_defaults:
statement = statement.return_defaults()
elif mapper.version_id_col is not None:
statement = statement.return_defaults(mapper.version_id_col)
for (
state,
state_dict,
params,
mapper_rec,
connection,
value_params,
has_all_pks,
has_all_defaults,
) in records:
if value_params:
result = connection.execute(
statement.values(value_params), params
)
else:
result = cached_connections[connection].execute(
statement, params
)
primary_key = result.context.inserted_primary_key
if primary_key is not None:
# set primary key attributes
for pk, col in zip(
primary_key, mapper._pks_by_table[table]
):
prop = mapper_rec._columntoproperty[col]
if pk is not None and (
col in value_params
or state_dict.get(prop.key) is None
):
state_dict[prop.key] = pk
if bookkeeping:
if state:
_postfetch(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
result,
result.context.compiled_parameters[0],
value_params,
False,
)
else:
_postfetch_bulk_save(mapper_rec, state_dict, table)
def _emit_post_update_statements(
base_mapper, uowtransaction, cached_connections, mapper, table, update
):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_post_update_commands()."""
needs_version_id = (
mapper.version_id_col is not None
and mapper.version_id_col in mapper._cols_by_table[table]
)
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(
col == sql.bindparam(col._label, type_=col.type)
)
if needs_version_id:
clause.clauses.append(
mapper.version_id_col
== sql.bindparam(
mapper.version_id_col._label,
type_=mapper.version_id_col.type,
)
)
stmt = table.update(clause)
if mapper.version_id_col is not None:
stmt = stmt.return_defaults(mapper.version_id_col)
return stmt
statement = base_mapper._memo(("post_update", table), update_stmt)
# execute each UPDATE in the order according to the original
# list of states to guarantee row access order, but
# also group them into common (connection, cols) sets
# to support executemany().
for key, records in groupby(
update,
        lambda rec: (rec[3], set(rec[4])),  # (connection, parameter keys)
):
rows = 0
records = list(records)
connection = key[0]
assert_singlerow = (
connection.dialect.supports_sane_rowcount
if mapper.version_id_col is None
else connection.dialect.supports_sane_rowcount_returning
)
assert_multirow = (
assert_singlerow
and connection.dialect.supports_sane_multi_rowcount
)
allow_multirow = not needs_version_id or assert_multirow
if not allow_multirow:
check_rowcount = assert_singlerow
for state, state_dict, mapper_rec, connection, params in records:
c = cached_connections[connection].execute(statement, params)
_postfetch_post_update(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
)
rows += c.rowcount
else:
multiparams = [
params
for state, state_dict, mapper_rec, conn, params in records
]
check_rowcount = assert_multirow or (
assert_singlerow and len(multiparams) == 1
)
c = cached_connections[connection].execute(statement, multiparams)
rows += c.rowcount
for state, state_dict, mapper_rec, connection, params in records:
_postfetch_post_update(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
)
if check_rowcount:
if rows != len(records):
raise orm_exc.StaleDataError(
"UPDATE statement on table '%s' expected to "
"update %d row(s); %d were matched."
% (table.description, len(records), rows)
)
elif needs_version_id:
util.warn(
"Dialect %s does not support updated rowcount "
"- versioning cannot be verified."
% c.dialect.dialect_description
)
def _emit_delete_statements(
base_mapper, uowtransaction, cached_connections, mapper, table, delete
):
"""Emit DELETE statements corresponding to value lists collected
by _collect_delete_commands()."""
need_version_id = (
mapper.version_id_col is not None
and mapper.version_id_col in mapper._cols_by_table[table]
)
def delete_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(
col == sql.bindparam(col.key, type_=col.type)
)
if need_version_id:
clause.clauses.append(
mapper.version_id_col
== sql.bindparam(
mapper.version_id_col.key, type_=mapper.version_id_col.type
)
)
return table.delete(clause)
statement = base_mapper._memo(("delete", table), delete_stmt)
for connection, recs in groupby(delete, lambda rec: rec[1]): # connection
del_objects = [params for params, connection in recs]
connection = cached_connections[connection]
expected = len(del_objects)
rows_matched = -1
only_warn = False
if (
need_version_id
and not connection.dialect.supports_sane_multi_rowcount
):
if connection.dialect.supports_sane_rowcount:
rows_matched = 0
# execute deletes individually so that versioned
# rows can be verified
for params in del_objects:
c = connection.execute(statement, params)
rows_matched += c.rowcount
else:
util.warn(
"Dialect %s does not support deleted rowcount "
"- versioning cannot be verified."
% connection.dialect.dialect_description
)
connection.execute(statement, del_objects)
else:
c = connection.execute(statement, del_objects)
if not need_version_id:
only_warn = True
rows_matched = c.rowcount
if (
base_mapper.confirm_deleted_rows
and rows_matched > -1
and expected != rows_matched
and (
connection.dialect.supports_sane_multi_rowcount
or len(del_objects) == 1
)
):
# TODO: why does this "only warn" if versioning is turned off,
# whereas the UPDATE raises?
if only_warn:
util.warn(
"DELETE statement on table '%s' expected to "
"delete %d row(s); %d were matched. Please set "
"confirm_deleted_rows=False within the mapper "
"configuration to prevent this warning."
% (table.description, expected, rows_matched)
)
else:
raise orm_exc.StaleDataError(
"DELETE statement on table '%s' expected to "
"delete %d row(s); %d were matched. Please set "
"confirm_deleted_rows=False within the mapper "
"configuration to prevent this warning."
% (table.description, expected, rows_matched)
)
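# Editorial note, hedged: the rowcount check above can be disabled per
# mapper when the backend legitimately matches fewer rows than expected
# (e.g. ON DELETE CASCADE already removed them); hypothetical mapping:
#
#     class Widget(Base):
#         __tablename__ = "widget"
#         id = Column(Integer, primary_key=True)
#         __mapper_args__ = {"confirm_deleted_rows": False}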
def _finalize_insert_update_commands(base_mapper, uowtransaction, states):
"""finalize state on states that have been inserted or updated,
including calling after_insert/after_update events.
"""
for state, state_dict, mapper, connection, has_identity in states:
if mapper._readonly_props:
readonly = state.unmodified_intersection(
[
p.key
for p in mapper._readonly_props
if (
p.expire_on_flush
and (not p.deferred or p.key in state.dict)
)
or (
not p.expire_on_flush
and not p.deferred
and p.key not in state.dict
)
]
)
if readonly:
state._expire_attributes(state.dict, readonly)
# if eager_defaults option is enabled, load
# all expired cols. Else if we have a version_id_col, make sure
# it isn't expired.
toload_now = []
if base_mapper.eager_defaults:
toload_now.extend(
state._unloaded_non_object.intersection(
mapper._server_default_plus_onupdate_propkeys
)
)
if (
mapper.version_id_col is not None
and mapper.version_id_generator is False
):
if mapper._version_id_prop.key in state.unloaded:
toload_now.extend([mapper._version_id_prop.key])
if toload_now:
state.key = base_mapper._identity_key_from_state(state)
loading.load_on_ident(
uowtransaction.session.query(mapper),
state.key,
refresh_state=state,
only_load_props=toload_now,
)
# call after_XXX extensions
if not has_identity:
mapper.dispatch.after_insert(mapper, connection, state)
else:
mapper.dispatch.after_update(mapper, connection, state)
if (
mapper.version_id_generator is False
and mapper.version_id_col is not None
):
if state_dict[mapper._version_id_prop.key] is None:
raise orm_exc.FlushError(
"Instance does not contain a non-NULL version value"
)
def _postfetch_post_update(
mapper, uowtransaction, table, state, dict_, result, params
):
if uowtransaction.is_deleted(state):
return
prefetch_cols = result.context.compiled.prefetch
postfetch_cols = result.context.compiled.postfetch
if (
mapper.version_id_col is not None
and mapper.version_id_col in mapper._cols_by_table[table]
):
prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]
refresh_flush = bool(mapper.class_manager.dispatch.refresh_flush)
if refresh_flush:
load_evt_attrs = []
for c in prefetch_cols:
if c.key in params and c in mapper._columntoproperty:
dict_[mapper._columntoproperty[c].key] = params[c.key]
if refresh_flush:
load_evt_attrs.append(mapper._columntoproperty[c].key)
if refresh_flush and load_evt_attrs:
mapper.class_manager.dispatch.refresh_flush(
state, uowtransaction, load_evt_attrs
)
if postfetch_cols:
state._expire_attributes(
state.dict,
[
mapper._columntoproperty[c].key
for c in postfetch_cols
if c in mapper._columntoproperty
],
)
def _postfetch(
mapper,
uowtransaction,
table,
state,
dict_,
result,
params,
value_params,
isupdate,
):
"""Expire attributes in need of newly persisted database state,
after an INSERT or UPDATE statement has proceeded for that
state."""
prefetch_cols = result.context.compiled.prefetch
postfetch_cols = result.context.compiled.postfetch
returning_cols = result.context.compiled.returning
if (
mapper.version_id_col is not None
and mapper.version_id_col in mapper._cols_by_table[table]
):
prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]
refresh_flush = bool(mapper.class_manager.dispatch.refresh_flush)
if refresh_flush:
load_evt_attrs = []
if returning_cols:
row = result.context.returned_defaults
if row is not None:
for col in returning_cols:
# pk cols returned from insert are handled
# distinctly, don't step on the values here
if col.primary_key and result.context.isinsert:
continue
# note that columns can be in the "return defaults" that are
# not mapped to this mapper, typically because they are
# "excluded", which can be specified directly or also occurs
# when using declarative w/ single table inheritance
prop = mapper._columntoproperty.get(col)
if prop:
dict_[prop.key] = row[col]
if refresh_flush:
load_evt_attrs.append(prop.key)
for c in prefetch_cols:
if c.key in params and c in mapper._columntoproperty:
dict_[mapper._columntoproperty[c].key] = params[c.key]
if refresh_flush:
load_evt_attrs.append(mapper._columntoproperty[c].key)
if refresh_flush and load_evt_attrs:
mapper.class_manager.dispatch.refresh_flush(
state, uowtransaction, load_evt_attrs
)
if isupdate and value_params:
# explicitly suit the use case specified by
# [ticket:3801], PK SQL expressions for UPDATE on non-RETURNING
# database which are set to themselves in order to do a version bump.
postfetch_cols.extend(
[
col
for col in value_params
if col.primary_key and col not in returning_cols
]
)
if postfetch_cols:
state._expire_attributes(
state.dict,
[
mapper._columntoproperty[c].key
for c in postfetch_cols
if c in mapper._columntoproperty
],
)
# synchronize newly inserted ids from one table to the next
# TODO: this still goes a little too often. would be nice to
# have definitive list of "columns that changed" here
for m, equated_pairs in mapper._table_to_equated[table]:
sync.populate(
state,
m,
state,
m,
equated_pairs,
uowtransaction,
mapper.passive_updates,
)
def _postfetch_bulk_save(mapper, dict_, table):
for m, equated_pairs in mapper._table_to_equated[table]:
sync.bulk_populate_inherit_keys(dict_, m, equated_pairs)
def _connections_for_states(base_mapper, uowtransaction, states):
"""Return an iterator of (state, state.dict, mapper, connection).
The states are sorted according to _sort_states, then paired
with the connection they should be using for the given
unit of work transaction.
"""
# if session has a connection callable,
# organize individual states with the connection
# to use for update
if uowtransaction.session.connection_callable:
connection_callable = uowtransaction.session.connection_callable
else:
connection = uowtransaction.transaction.connection(base_mapper)
connection_callable = None
for state in _sort_states(base_mapper, states):
if connection_callable:
connection = connection_callable(base_mapper, state.obj())
mapper = state.manager.mapper
yield state, state.dict, mapper, connection
def _cached_connection_dict(base_mapper):
# dictionary of connection->connection_with_cache_options.
return util.PopulateDict(
lambda conn: conn.execution_options(
compiled_cache=base_mapper._compiled_cache
)
)
def _sort_states(mapper, states):
pending = set(states)
persistent = set(s for s in pending if s.key is not None)
pending.difference_update(persistent)
try:
persistent_sorted = sorted(
persistent, key=mapper._persistent_sortkey_fn
)
except TypeError as err:
util.raise_(
sa_exc.InvalidRequestError(
"Could not sort objects by primary key; primary key "
"values must be sortable in Python (was: %s)" % err
),
replace_context=err,
)
return (
sorted(pending, key=operator.attrgetter("insert_order"))
+ persistent_sorted
)
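# Editorial note, hedged: _sort_states() flushes pending objects in
# Session.add() order (insert_order) and persistent objects in primary key
# order, keeping UPDATE/DELETE batches deterministic across flushes.  The
# InvalidRequestError above fires when PK values aren't mutually
# comparable in Python; a hypothetical sketch:
#
#     a1.id, a2.id = "x", 1        # mixed str/int PKs under Python 3
#     a1.data = a2.data = "changed"
#     session.flush()              # "Could not sort objects by primary key"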
class BulkUD(object):
"""Handle bulk update and deletes via a :class:`_query.Query`."""
def __init__(self, query):
self.query = query.enable_eagerloads(False)
self.mapper = self.query._bind_mapper()
self._validate_query_state()
def _validate_query_state(self):
for attr, methname, notset, op in (
("_limit", "limit()", None, operator.is_),
("_offset", "offset()", None, operator.is_),
("_order_by", "order_by()", False, operator.is_),
("_group_by", "group_by()", False, operator.is_),
("_distinct", "distinct()", False, operator.is_),
(
"_from_obj",
"join(), outerjoin(), select_from(), or from_self()",
(),
operator.eq,
),
):
if not op(getattr(self.query, attr), notset):
raise sa_exc.InvalidRequestError(
"Can't call Query.update() or Query.delete() "
"when %s has been called" % (methname,)
)
@property
def session(self):
return self.query.session
@classmethod
def _factory(cls, lookup, synchronize_session, *arg):
try:
klass = lookup[synchronize_session]
except KeyError as err:
util.raise_(
sa_exc.ArgumentError(
"Valid strategies for session synchronization "
"are %s" % (", ".join(sorted(repr(x) for x in lookup)))
),
replace_context=err,
)
else:
return klass(*arg)
def exec_(self):
self._do_before_compile()
self._do_pre()
self._do_pre_synchronize()
self._do_exec()
self._do_post_synchronize()
self._do_post()
def _execute_stmt(self, stmt):
self.result = self.query._execute_crud(stmt, self.mapper)
self.rowcount = self.result.rowcount
def _do_before_compile(self):
raise NotImplementedError()
@util.dependencies("sqlalchemy.orm.query")
def _do_pre(self, querylib):
query = self.query
self.context = querylib.QueryContext(query)
if isinstance(query._entities[0], querylib._ColumnEntity):
# check for special case of query(table)
tables = set()
for ent in query._entities:
if not isinstance(ent, querylib._ColumnEntity):
tables.clear()
break
else:
tables.update(_from_objects(ent.column))
if len(tables) != 1:
raise sa_exc.InvalidRequestError(
"This operation requires only one Table or "
"entity be specified as the target."
)
else:
self.primary_table = tables.pop()
else:
self.primary_table = query._only_entity_zero(
"This operation requires only one Table or "
"entity be specified as the target."
).mapper.local_table
session = query.session
if query._autoflush:
session._autoflush()
def _do_pre_synchronize(self):
pass
def _do_post_synchronize(self):
pass
class BulkEvaluate(BulkUD):
"""BulkUD which does the 'evaluate' method of session state resolution."""
def _additional_evaluators(self, evaluator_compiler):
pass
def _do_pre_synchronize(self):
query = self.query
target_cls = query._mapper_zero().class_
try:
evaluator_compiler = evaluator.EvaluatorCompiler(target_cls)
if query.whereclause is not None:
eval_condition = evaluator_compiler.process(query.whereclause)
else:
def eval_condition(obj):
return True
self._additional_evaluators(evaluator_compiler)
except evaluator.UnevaluatableError as err:
util.raise_(
sa_exc.InvalidRequestError(
'Could not evaluate current criteria in Python: "%s". '
"Specify 'fetch' or False for the "
"synchronize_session parameter." % err
),
from_=err,
)
# TODO: detect when the where clause is a trivial primary key match
self.matched_objects = [
obj
for (
cls,
pk,
identity_token,
), obj in query.session.identity_map.items()
if issubclass(cls, target_cls) and eval_condition(obj)
]
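# Illustrative usage ("User" is a hypothetical mapped class):
#   session.query(User).filter(User.name == "old").update(
#       {User.name: "new"}, synchronize_session="evaluate")
# with "evaluate", the WHERE criteria is compiled by EvaluatorCompiler
# and applied in Python to objects already in the identity map, so no
# extra SELECT is emitted.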
class BulkFetch(BulkUD):
"""BulkUD which does the 'fetch' method of session state resolution."""
def _do_pre_synchronize(self):
query = self.query
session = query.session
context = query._compile_context()
select_stmt = context.statement.with_only_columns(
self.primary_table.primary_key
)
self.matched_rows = session.execute(
select_stmt, mapper=self.mapper, params=query._params
).fetchall()
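# the primary key rows captured above are consumed by the subclasses'
# _do_post_synchronize() to locate the affected objects in the Session.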
class BulkUpdate(BulkUD):
"""BulkUD which handles UPDATEs."""
def __init__(self, query, values, update_kwargs):
super(BulkUpdate, self).__init__(query)
self.values = values
self.update_kwargs = update_kwargs
@classmethod
def factory(cls, query, synchronize_session, values, update_kwargs):
return BulkUD._factory(
{
"evaluate": BulkUpdateEvaluate,
"fetch": BulkUpdateFetch,
False: BulkUpdate,
},
synchronize_session,
query,
values,
update_kwargs,
)
def _do_before_compile(self):
if self.query.dispatch.before_compile_update:
for fn in self.query.dispatch.before_compile_update:
new_query = fn(self.query, self)
if new_query is not None:
self.query = new_query
@property
def _resolved_values(self):
values = []
for k, v in (
self.values.items()
if hasattr(self.values, "items")
else self.values
):
if self.mapper:
if isinstance(k, util.string_types):
desc = _entity_descriptor(self.mapper, k)
values.extend(desc._bulk_update_tuples(v))
elif isinstance(k, attributes.QueryableAttribute):
values.extend(k._bulk_update_tuples(v))
else:
values.append((k, v))
else:
values.append((k, v))
return values
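# _bulk_update_tuples above allows an attribute such as a composite to
# expand into multiple column/value pairs in the normalized list.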
@property
def _resolved_values_keys_as_propnames(self):
values = []
for k, v in self._resolved_values:
if isinstance(k, attributes.QueryableAttribute):
values.append((k.key, v))
continue
elif hasattr(k, "__clause_element__"):
k = k.__clause_element__()
if self.mapper and isinstance(k, expression.ColumnElement):
try:
attr = self.mapper._columntoproperty[k]
except orm_exc.UnmappedColumnError:
pass
else:
values.append((attr.key, v))
else:
raise sa_exc.InvalidRequestError(
"Invalid expression type: %r" % k
)
return values
def _do_exec(self):
values = self._resolved_values
if not self.update_kwargs.get("preserve_parameter_order", False):
values = dict(values)
update_stmt = sql.update(
self.primary_table,
self.context.whereclause,
values,
**self.update_kwargs
)
self._execute_stmt(update_stmt)
def _do_post(self):
session = self.query.session
session.dispatch.after_bulk_update(self)
class BulkDelete(BulkUD):
"""BulkUD which handles DELETEs."""
def __init__(self, query):
super(BulkDelete, self).__init__(query)
@classmethod
def factory(cls, query, synchronize_session):
return BulkUD._factory(
{
"evaluate": BulkDeleteEvaluate,
"fetch": BulkDeleteFetch,
False: BulkDelete,
},
synchronize_session,
query,
)
def _do_before_compile(self):
if self.query.dispatch.before_compile_delete:
for fn in self.query.dispatch.before_compile_delete:
new_query = fn(self.query, self)
if new_query is not None:
self.query = new_query
def _do_exec(self):
delete_stmt = sql.delete(self.primary_table, self.context.whereclause)
self._execute_stmt(delete_stmt)
def _do_post(self):
session = self.query.session
session.dispatch.after_bulk_delete(self)
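# Illustrative usage ("User" is a hypothetical mapped class):
#   session.query(User).filter(User.id > 10).delete(
#       synchronize_session="fetch")
# with "fetch", the matched primary keys are SELECTed first so the
# matched objects can be removed from the Session after the DELETE.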
class BulkUpdateEvaluate(BulkEvaluate, BulkUpdate):
"""BulkUD which handles UPDATEs using the "evaluate"
method of session resolution."""
def _additional_evaluators(self, evaluator_compiler):
self.value_evaluators = {}
values = self._resolved_values_keys_as_propnames
for key, value in values:
self.value_evaluators[key] = evaluator_compiler.process(
expression._literal_as_binds(value)
)
def _do_post_synchronize(self):
session = self.query.session
states = set()
evaluated_keys = list(self.value_evaluators.keys())
for obj in self.matched_objects:
state, dict_ = (
attributes.instance_state(obj),
attributes.instance_dict(obj),
)
# only evaluate unmodified attributes
to_evaluate = state.unmodified.intersection(evaluated_keys)
for key in to_evaluate:
dict_[key] = self.value_evaluators[key](obj)
state.manager.dispatch.refresh(state, None, to_evaluate)
state._commit(dict_, list(to_evaluate))
# expire attributes with pending changes
# (there was no autoflush, so they are overwritten)
state._expire_attributes(
dict_, set(evaluated_keys).difference(to_evaluate)
)
states.add(state)
session._register_altered(states)
class BulkDeleteEvaluate(BulkEvaluate, BulkDelete):
"""BulkUD which handles DELETEs using the "evaluate"
method of session resolution."""
def _do_post_synchronize(self):
self.query.session._remove_newly_deleted(
[attributes.instance_state(obj) for obj in self.matched_objects]
)
class BulkUpdateFetch(BulkFetch, BulkUpdate):
"""BulkUD which handles UPDATEs using the "fetch"
method of session resolution."""
def _do_post_synchronize(self):
session = self.query.session
target_mapper = self.query._mapper_zero()
states = set(
[
attributes.instance_state(session.identity_map[identity_key])
for identity_key in [
target_mapper.identity_key_from_primary_key(
list(primary_key)
)
for primary_key in self.matched_rows
]
if identity_key in session.identity_map
]
)
values = self._resolved_values_keys_as_propnames
attrib = set(k for k, v in values)
for state in states:
to_expire = attrib.intersection(state.dict)
if to_expire:
session._expire_state(state, to_expire)
session._register_altered(states)
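# only attributes actually present in each object's dict are expired
# above; attributes that were never loaded are left alone.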
class BulkDeleteFetch(BulkFetch, BulkDelete):
"""BulkUD which handles DELETEs using the "fetch"
method of session resolution."""
def _do_post_synchronize(self):
session = self.query.session
target_mapper = self.query._mapper_zero()
for primary_key in self.matched_rows:
# TODO: inline this and call remove_newly_deleted
# once
identity_key = target_mapper.identity_key_from_primary_key(
list(primary_key)
)
if identity_key in session.identity_map:
session._remove_newly_deleted(
[
attributes.instance_state(
session.identity_map[identity_key]
)
]
)
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/path_registry.py
# orm/path_registry.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Path tracking utilities, representing mapper graph traversals.
"""
from itertools import chain
import logging
from .base import class_mapper
from .. import exc
from .. import inspection
from .. import util
log = logging.getLogger(__name__)
def _unreduce_path(path):
return PathRegistry.deserialize(path)
_WILDCARD_TOKEN = "*"
_DEFAULT_TOKEN = "_sa_default"
class PathRegistry(object):
"""Represent query load paths and registry functions.
Basically represents structures like:
(<User mapper>, "orders", <Order mapper>, "items", <Item mapper>)
These structures are generated by things like
query options (joinedload(), subqueryload(), etc.) and are
used to compose keys stored in the query._attributes dictionary
for various options.
They are then re-composed at query compile/result row time as
the query is formed and as rows are fetched, where they again
serve to compose keys to look up options in the context.attributes
dictionary, which is copied from query._attributes.
The path structure has a limited amount of caching, where each
"root" ultimately pulls from a fixed registry associated with
the first mapper, that also contains elements for each of its
property keys. However paths longer than two elements, which
are the exception rather than the rule, are generated on an
as-needed basis.
"""
__slots__ = ()
is_token = False
is_root = False
def __eq__(self, other):
try:
return other is not None and self.path == other.path
except AttributeError:
util.warn(
"Comparison of PathRegistry to %r is not supported"
% (type(other))
)
return False
def __ne__(self, other):
try:
return other is None or self.path != other.path
except AttributeError:
util.warn(
"Comparison of PathRegistry to %r is not supported"
% (type(other))
)
return True
def set(self, attributes, key, value):
log.debug("set '%s' on path '%s' to '%s'", key, self, value)
attributes[(key, self.natural_path)] = value
def setdefault(self, attributes, key, value):
log.debug("setdefault '%s' on path '%s' to '%s'", key, self, value)
attributes.setdefault((key, self.natural_path), value)
def get(self, attributes, key, value=None):
key = (key, self.natural_path)
if key in attributes:
return attributes[key]
else:
return value
def __len__(self):
return len(self.path)
@property
def length(self):
return len(self.path)
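# a path tuple alternates entity / property elements:
# (mapper, prop, mapper, prop, ...); pairs() walks it two at a time.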
def pairs(self):
path = self.path
for i in range(0, len(path), 2):
yield path[i], path[i + 1]
def contains_mapper(self, mapper):
for path_mapper in [self.path[i] for i in range(0, len(self.path), 2)]:
if path_mapper.is_mapper and path_mapper.isa(mapper):
return True
else:
return False
def contains(self, attributes, key):
return (key, self.path) in attributes
def __reduce__(self):
return _unreduce_path, (self.serialize(),)
@classmethod
def _serialize_path(cls, path):
return list(
zip(
[m.class_ for m in [path[i] for i in range(0, len(path), 2)]],
[path[i].key for i in range(1, len(path), 2)] + [None],
)
)
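# the serialized form pairs each mapped class with the key of the
# MapperProperty that follows it; the terminal entity is paired with
# None via the trailing "+ [None]" above.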
@classmethod
def _deserialize_path(cls, path):
p = tuple(
chain(
*[
(
class_mapper(mcls),
class_mapper(mcls).attrs[key]
if key is not None
else None,
)
for mcls, key in path
]
)
)
if p and p[-1] is None:
p = p[0:-1]
return p
@classmethod
def serialize_context_dict(cls, dict_, tokens):
return [
((key, cls._serialize_path(path)), value)
for (key, path), value in [
(k, v)
for k, v in dict_.items()
if isinstance(k, tuple) and k[0] in tokens
]
]
@classmethod
def deserialize_context_dict(cls, serialized):
return util.OrderedDict(
((key, tuple(cls._deserialize_path(path))), value)
for (key, path), value in serialized
)
def serialize(self):
path = self.path
return self._serialize_path(path)
@classmethod
def deserialize(cls, path):
if path is None:
return None
p = cls._deserialize_path(path)
return cls.coerce(p)
@classmethod
def per_mapper(cls, mapper):
if mapper.is_mapper:
return CachingEntityRegistry(cls.root, mapper)
else:
return SlotsEntityRegistry(cls.root, mapper)
@classmethod
def coerce(cls, raw):
return util.reduce(lambda prev, next: prev[next], raw, cls.root)
def token(self, token):
if token.endswith(":" + _WILDCARD_TOKEN):
return TokenRegistry(self, token)
elif token.endswith(":" + _DEFAULT_TOKEN):
return TokenRegistry(self.root, token)
else:
raise exc.ArgumentError("invalid token: %s" % token)
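# tokens are strings such as "relationship:*" (a wildcard matching all
# attributes of that type) or ones ending in ":_sa_default"; anything
# else is rejected above.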
def __add__(self, other):
return util.reduce(lambda prev, next: prev[next], other.path, self)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.path)
class RootRegistry(PathRegistry):
"""Root registry, defers to mappers so that
paths are maintained per-root-mapper.
"""
path = natural_path = ()
has_entity = False
is_aliased_class = False
is_root = True
def __getitem__(self, entity):
return entity._path_registry
PathRegistry.root = RootRegistry()
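# PathRegistry.root is the shared singleton from which all path
# traversals begin; RootRegistry.__getitem__ defers to each entity's
# own cached _path_registry.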
class TokenRegistry(PathRegistry):
__slots__ = ("token", "parent", "path", "natural_path")
def __init__(self, parent, token):
self.token = token
self.parent = parent
self.path = parent.path + (token,)
self.natural_path = parent.natural_path + (token,)
has_entity = False
is_token = True
def generate_for_superclasses(self):
if not self.parent.is_aliased_class and not self.parent.is_root:
for ent in self.parent.mapper.iterate_to_root():
yield TokenRegistry(self.parent.parent[ent], self.token)
elif (
self.parent.is_aliased_class
and self.parent.entity._is_with_polymorphic
):
yield self
for ent in self.parent.entity._with_polymorphic_entities:
yield TokenRegistry(self.parent.parent[ent], self.token)
else:
yield self
def __getitem__(self, entity):
raise NotImplementedError()
class PropRegistry(PathRegistry):
is_unnatural = False
def __init__(self, parent, prop):
# restate this path in terms of the
# given MapperProperty's parent.
insp = inspection.inspect(parent[-1])
natural_parent = parent
if not insp.is_aliased_class or insp._use_mapper_path:
parent = natural_parent = parent.parent[prop.parent]
elif (
insp.is_aliased_class
and insp.with_polymorphic_mappers
and prop.parent in insp.with_polymorphic_mappers
):
subclass_entity = parent[-1]._entity_for_mapper(prop.parent)
parent = parent.parent[subclass_entity]
# when building a path where with_polymorphic() is in use, special
# logic is needed to determine the "natural path" when subclass
# entities are used.
#
# here we are trying to distinguish between a path that starts
# on the with_polymorphic entity vs. one that starts on a
# normal entity that introduces a with_polymorphic() in the
# middle using of_type():
#
# # as in test_polymorphic_rel->
# # test_subqueryload_on_subclass_uses_path_correctly
# wp = with_polymorphic(RegularEntity, "*")
# sess.query(wp).options(someload(wp.SomeSubEntity.foos))
#
# vs
#
# # as in test_relationship->JoinedloadWPolyOfTypeContinued
# wp = with_polymorphic(SomeFoo, "*")
# sess.query(RegularEntity).options(
# someload(RegularEntity.foos.of_type(wp))
# .someload(wp.SubFoo.bar)
# )
#
# in the former case, the Query as it generates a path that we
# want to match will be in terms of the with_polymorphic at the
# beginning. in the latter case, Query will generate simple
# paths that don't know about this with_polymorphic, so we must
# use a separate natural path.
#
#
if parent.parent:
natural_parent = parent.parent[subclass_entity.mapper]
self.is_unnatural = True
else:
natural_parent = parent
elif (
natural_parent.parent
and insp.is_aliased_class
and prop.parent # this should always be the case here
is not insp.mapper
and insp.mapper.isa(prop.parent)
):
natural_parent = parent.parent[prop.parent]
self.prop = prop
self.parent = parent
self.path = parent.path + (prop,)
self.natural_path = natural_parent.natural_path + (prop,)
self._wildcard_path_loader_key = (
"loader",
parent.path + self.prop._wildcard_token,
)
self._default_path_loader_key = self.prop._default_path_loader_key
self._loader_key = ("loader", self.path)
def __str__(self):
return " -> ".join(str(elem) for elem in self.path)
@util.memoized_property
def has_entity(self):
return hasattr(self.prop, "mapper")
@util.memoized_property
def entity(self):
return self.prop.mapper
@property
def mapper(self):
return self.entity
@property
def entity_path(self):
return self[self.entity]
def __getitem__(self, entity):
if isinstance(entity, (int, slice)):
return self.path[entity]
else:
return SlotsEntityRegistry(self, entity)
class AbstractEntityRegistry(PathRegistry):
__slots__ = ()
has_entity = True
def __init__(self, parent, entity):
self.key = entity
self.parent = parent
self.is_aliased_class = entity.is_aliased_class
self.entity = entity
self.path = parent.path + (entity,)
# the "natural path" is the path that we get when Query is traversing
# from the lead entities into the various relationships; it corresponds
# to the structure of mappers and relationships. when we are given a
# path that comes from loader options, as of 1.3 it can have ad-hoc
# with_polymorphic() and other AliasedInsp objects inside of it, which
# are usually not present in mappings. So here we track both the
# "enhanced" path in self.path and the "natural" path that doesn't
# include those objects so these two traversals can be matched up.
# the test here for "(self.is_aliased_class or parent.is_unnatural)"
# is to avoid the more expensive conditional logic that follows if we
# know we don't have to do it. This conditional could just as well be
# "if parent.path:"; it just incurs more function calls.
if parent.path and (self.is_aliased_class or parent.is_unnatural):
# this is an infrequent code path used only for loader strategies
# that also make use of of_type().
if entity.mapper.isa(parent.natural_path[-1].entity):
self.natural_path = parent.natural_path + (entity.mapper,)
else:
self.natural_path = parent.natural_path + (
parent.natural_path[-1].entity,
)
else:
self.natural_path = self.path
@property
def entity_path(self):
return self
@property
def mapper(self):
return inspection.inspect(self.entity).mapper
def __bool__(self):
return True
__nonzero__ = __bool__
def __getitem__(self, entity):
if isinstance(entity, (int, slice)):
return self.path[entity]
else:
return PropRegistry(self, entity)
class SlotsEntityRegistry(AbstractEntityRegistry):
# for aliased class, return lightweight, no-cycles created
# version
__slots__ = (
"key",
"parent",
"is_aliased_class",
"entity",
"path",
"natural_path",
)
class CachingEntityRegistry(AbstractEntityRegistry, dict):
# for long lived mapper, return dict based caching
# version that creates reference cycles
def __getitem__(self, entity):
if isinstance(entity, (int, slice)):
return self.path[entity]
else:
return dict.__getitem__(self, entity)
def __missing__(self, key):
self[key] = item = PropRegistry(self, key)
return item
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/query.py
# orm/query.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The Query class and support.
Defines the :class:`_query.Query` class, the central
construct used by the ORM to construct database queries.
The :class:`_query.Query` class should not be confused with the
:class:`_expression.Select` class, which defines database
SELECT operations at the SQL (non-ORM) level. ``Query`` differs from
``Select`` in that it returns ORM-mapped objects and interacts with an
ORM session, whereas the ``Select`` construct interacts directly with the
database to return iterable result sets.
"""
from itertools import chain
from . import attributes
from . import exc as orm_exc
from . import interfaces
from . import loading
from . import persistence
from . import properties
from .base import _entity_descriptor
from .base import _generative
from .base import _is_aliased_class
from .base import _is_mapped_class
from .base import _orm_columns
from .base import InspectionAttr
from .path_registry import PathRegistry
from .util import _entity_corresponds_to
from .util import aliased
from .util import AliasedClass
from .util import join as orm_join
from .util import object_mapper
from .util import ORMAdapter
from .util import with_parent
from .. import exc as sa_exc
from .. import inspect
from .. import inspection
from .. import log
from .. import sql
from .. import util
from ..sql import expression
from ..sql import util as sql_util
from ..sql import visitors
from ..sql.base import ColumnCollection
from ..sql.expression import _interpret_as_from
from ..sql.selectable import ForUpdateArg
__all__ = ["Query", "QueryContext", "aliased"]
_path_registry = PathRegistry.root
@inspection._self_inspects
@log.class_logger
class Query(object):
"""ORM-level SQL construction object.
:class:`_query.Query`
is the source of all SELECT statements generated by the
ORM, both those formulated by end-user query operations as well as by
high level internal operations such as related collection loading. It
features a generative interface whereby successive calls return a new
:class:`_query.Query` object, a copy of the former with additional
criteria and options associated with it.
:class:`_query.Query` objects are normally initially generated using the
:meth:`~.Session.query` method of :class:`.Session`, and in
less common cases by instantiating the :class:`_query.Query` directly and
associating with a :class:`.Session` using the
:meth:`_query.Query.with_session`
method.
For a full walkthrough of :class:`_query.Query` usage, see the
:ref:`ormtutorial_toplevel`.
"""
_only_return_tuples = False
_enable_eagerloads = True
_enable_assertions = True
_with_labels = False
_criterion = None
_yield_per = None
_order_by = False
_group_by = False
_having = None
_distinct = False
_prefixes = None
_suffixes = None
_offset = None
_limit = None
_for_update_arg = None
_statement = None
_correlate = frozenset()
_populate_existing = False
_invoke_all_eagers = True
_version_check = False
_autoflush = True
_only_load_props = None
_refresh_state = None
_refresh_identity_token = None
_from_obj = ()
_join_entities = ()
_select_from_entity = None
_mapper_adapter_map = {}
_filter_aliases = ()
_from_obj_alias = None
_joinpath = _joinpoint = util.immutabledict()
_execution_options = util.immutabledict()
_params = util.immutabledict()
_attributes = util.immutabledict()
_with_options = ()
_with_hints = ()
_enable_single_crit = True
_orm_only_adapt = True
_orm_only_from_obj_alias = True
_current_path = _path_registry
_has_mapper_entities = False
_bake_ok = True
lazy_loaded_from = None
"""An :class:`.InstanceState` that is using this :class:`_query.Query`
for a
lazy load operation.
The primary rationale for this attribute is to support the horizontal
sharding extension, where it is available within specific query
execution time hooks created by this extension. To that end, the
attribute is only intended to be meaningful at **query execution time**,
and importantly not any time prior to that, including query compilation
time.
.. note::
Within the realm of regular :class:`_query.Query` usage,
this attribute is
set by the lazy loader strategy before the query is invoked. However
there is no established hook that is available to reliably intercept
this value programmatically. It is set by the lazy loading strategy
after any mapper option objects would have been applied, and now that
the lazy loading strategy in the ORM makes use of "baked" queries to
cache SQL compilation, the :meth:`.QueryEvents.before_compile` hook is
also not reliable.
Currently, setting the :paramref:`_orm.relationship.bake_queries` to
``False`` on the target :func:`_orm.relationship`,
and then making use of
the :meth:`.QueryEvents.before_compile` event hook, is the only
available programmatic path to intercepting this attribute. In future
releases, there will be new hooks available that allow interception of
the :class:`_query.Query` before it is executed,
rather than before it is
compiled.
.. versionadded:: 1.2.9
"""
def __init__(self, entities, session=None):
"""Construct a :class:`_query.Query` directly.
E.g.::
q = Query([User, Address], session=some_session)
The above is equivalent to::
q = some_session.query(User, Address)
:param entities: a sequence of entities and/or SQL expressions.
:param session: a :class:`.Session` with which the
:class:`_query.Query`
will be associated. Optional; a :class:`_query.Query`
can be associated
with a :class:`.Session` generatively via the
:meth:`_query.Query.with_session` method as well.
.. seealso::
:meth:`.Session.query`
:meth:`_query.Query.with_session`
"""
self.session = session
self._polymorphic_adapters = {}
self._set_entities(entities)
def _set_entities(self, entities, entity_wrapper=None):
if entity_wrapper is None:
entity_wrapper = _QueryEntity
self._entities = []
self._primary_entity = None
self._has_mapper_entities = False
# 1. don't run util.to_list() or _set_entity_selectables
# if no entities were passed - major performance bottleneck
# from lazy loader implementation when it seeks to use Query
# class for an identity lookup, causes test_orm.py to fail
# with thousands of extra function calls, see issue #4228
# for why this use had to be added
# 2. can't use classmethod on Query because session.query_cls
# is an arbitrary callable in some user recipes, not
# necessarily a class, so we don't have the class available.
# see issue #4256
# 3. can't do "if entities is not None" because we usually get here
# from session.query() which takes in *entities.
# 4. can't do "if entities" because users make use of undocumented
# to_list() behavior here and they pass clause expressions that
# can't be evaluated as boolean. See issue #4269.
if entities != ():
for ent in util.to_list(entities):
entity_wrapper(self, ent)
self._set_entity_selectables(self._entities)
def _set_entity_selectables(self, entities):
self._mapper_adapter_map = d = self._mapper_adapter_map.copy()
for ent in entities:
for entity in ent.entities:
if entity not in d:
ext_info = inspect(entity)
if (
not ext_info.is_aliased_class
and ext_info.mapper.with_polymorphic
):
if (
ext_info.mapper.persist_selectable
not in self._polymorphic_adapters
):
self._mapper_loads_polymorphically_with(
ext_info.mapper,
sql_util.ColumnAdapter(
ext_info.selectable,
ext_info.mapper._equivalent_columns,
),
)
aliased_adapter = None
elif ext_info.is_aliased_class:
aliased_adapter = ext_info._adapter
else:
aliased_adapter = None
d[entity] = (ext_info, aliased_adapter)
ent.setup_entity(*d[entity])
def _mapper_loads_polymorphically_with(self, mapper, adapter):
for m2 in mapper._with_polymorphic_mappers or [mapper]:
self._polymorphic_adapters[m2] = adapter
for m in m2.iterate_to_root():
self._polymorphic_adapters[m.local_table] = adapter
def _set_select_from(self, obj, set_base_alias):
fa = []
select_from_alias = None
for from_obj in obj:
info = inspect(from_obj)
if hasattr(info, "mapper") and (
info.is_mapper or info.is_aliased_class
):
self._select_from_entity = info
if set_base_alias and not info.is_aliased_class:
raise sa_exc.ArgumentError(
"A selectable (FromClause) instance is "
"expected when the base alias is being set."
)
fa.append(info.selectable)
elif not info.is_selectable:
raise sa_exc.ArgumentError(
"argument is not a mapped class, mapper, "
"aliased(), or FromClause instance."
)
else:
if isinstance(from_obj, expression.SelectBase):
from_obj = from_obj.alias()
if set_base_alias:
select_from_alias = from_obj
fa.append(from_obj)
self._from_obj = tuple(fa)
if (
set_base_alias
and len(self._from_obj) == 1
and isinstance(select_from_alias, expression.Alias)
):
equivs = self.__all_equivs()
self._from_obj_alias = sql_util.ColumnAdapter(
self._from_obj[0], equivs
)
elif (
set_base_alias
and len(self._from_obj) == 1
and hasattr(info, "mapper")
and info.is_aliased_class
):
self._from_obj_alias = info._adapter
def _reset_polymorphic_adapter(self, mapper):
for m2 in mapper._with_polymorphic_mappers:
self._polymorphic_adapters.pop(m2, None)
for m in m2.iterate_to_root():
self._polymorphic_adapters.pop(m.local_table, None)
def _adapt_polymorphic_element(self, element):
if "parententity" in element._annotations:
search = element._annotations["parententity"]
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
if isinstance(element, expression.FromClause):
search = element
elif hasattr(element, "table"):
search = element.table
else:
return None
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
def _adapt_col_list(self, cols):
return [
self._adapt_clause(
expression._literal_as_label_reference(o), True, True
)
for o in cols
]
@_generative()
def _set_lazyload_from(self, state):
self.lazy_loaded_from = state
@_generative()
def _adapt_all_clauses(self):
self._orm_only_adapt = False
def _adapt_clause(self, clause, as_filter, orm_only):
"""Adapt incoming clauses to transformations which
have been applied within this query."""
adapters = []
# do we adapt all expression elements or only those
# tagged as 'ORM' constructs ?
if not self._orm_only_adapt:
orm_only = False
if as_filter and self._filter_aliases:
for fa in self._filter_aliases:
adapters.append((orm_only, fa.replace))
if self._from_obj_alias:
# for the "from obj" alias, apply extra rule to the
# 'ORM only' check, if this query were generated from a
# subquery of itself, i.e. _from_selectable(), apply adaption
# to all SQL constructs.
adapters.append(
(
orm_only if self._orm_only_from_obj_alias else False,
self._from_obj_alias.replace,
)
)
if self._polymorphic_adapters:
adapters.append((orm_only, self._adapt_polymorphic_element))
if not adapters:
return clause
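# replacement_traverse() calls replace() for each element of the
# clause; the first adapter in the list that returns a non-None
# replacement wins for that element.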
def replace(elem):
is_orm_adapt = (
"_orm_adapt" in elem._annotations
or "parententity" in elem._annotations
)
for _orm_only, adapter in adapters:
if not _orm_only or is_orm_adapt:
e = adapter(elem)
if e is not None:
return e
return visitors.replacement_traverse(clause, {}, replace)
def _query_entity_zero(self):
"""Return the first QueryEntity."""
return self._entities[0]
def _mapper_zero(self):
"""return the Mapper associated with the first QueryEntity."""
return self._entities[0].mapper
def _entity_zero(self):
"""Return the 'entity' (mapper or AliasedClass) associated
with the first QueryEntity, or alternatively the 'select from'
entity if specified."""
return (
self._select_from_entity
if self._select_from_entity is not None
else self._query_entity_zero().entity_zero
)
@property
def _mapper_entities(self):
for ent in self._entities:
if isinstance(ent, _MapperEntity):
yield ent
def _joinpoint_zero(self):
return self._joinpoint.get("_joinpoint_entity", self._entity_zero())
def _bind_mapper(self):
ezero = self._entity_zero()
if ezero is not None:
insp = inspect(ezero)
if not insp.is_clause_element:
return insp.mapper
return None
def _only_full_mapper_zero(self, methname):
if self._entities != [self._primary_entity]:
raise sa_exc.InvalidRequestError(
"%s() can only be used against "
"a single mapped class." % methname
)
return self._primary_entity.entity_zero
def _only_entity_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale
or "This operation requires a Query "
"against a single mapper."
)
return self._entity_zero()
def __all_equivs(self):
equivs = {}
for ent in self._mapper_entities:
equivs.update(ent.mapper._equivalent_columns)
return equivs
def _get_condition(self):
return self._no_criterion_condition(
"get", order_by=False, distinct=False
)
def _get_existing_condition(self):
self._no_criterion_assertion("get", order_by=False, distinct=False)
def _no_criterion_assertion(self, meth, order_by=True, distinct=True):
if not self._enable_assertions:
return
if (
self._criterion is not None
or self._statement is not None
or self._from_obj
or self._limit is not None
or self._offset is not None
or self._group_by
or (order_by and self._order_by)
or (distinct and self._distinct)
):
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth
)
def _no_criterion_condition(self, meth, order_by=True, distinct=True):
self._no_criterion_assertion(meth, order_by, distinct)
self._from_obj = ()
self._statement = self._criterion = None
self._order_by = self._group_by = self._distinct = False
def _no_clauseelement_condition(self, meth):
if not self._enable_assertions:
return
if self._order_by:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth
)
self._no_criterion_condition(meth)
def _no_statement_condition(self, meth):
if not self._enable_assertions:
return
if self._statement is not None:
raise sa_exc.InvalidRequestError(
(
"Query.%s() being called on a Query with an existing full "
"statement - can't apply criterion."
)
% meth
)
def _no_limit_offset(self, meth):
if not self._enable_assertions:
return
if self._limit is not None or self._offset is not None:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a Query which already has LIMIT "
"or OFFSET applied. To modify the row-limited results of a "
" Query, call from_self() first. "
"Otherwise, call %s() before limit() or offset() "
"are applied." % (meth, meth)
)
def _get_options(
self,
populate_existing=None,
version_check=None,
only_load_props=None,
refresh_state=None,
identity_token=None,
):
if populate_existing:
self._populate_existing = populate_existing
if version_check:
self._version_check = version_check
if refresh_state:
self._refresh_state = refresh_state
if only_load_props:
self._only_load_props = set(only_load_props)
if identity_token:
self._refresh_identity_token = identity_token
return self
def _clone(self):
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
return q
@property
def statement(self):
"""The full SELECT statement represented by this Query.
The statement by default will not have disambiguating labels
applied to the construct unless with_labels(True) is called
first.
"""
stmt = self._compile_context(labels=self._with_labels).statement
if self._params:
stmt = stmt.params(self._params)
return stmt
def subquery(self, name=None, with_labels=False, reduce_columns=False):
"""return the full SELECT statement represented by
this :class:`_query.Query`, embedded within an
:class:`_expression.Alias`.
Eager JOIN generation within the query is disabled.
:param name: string name to be assigned as the alias;
this is passed through to :meth:`_expression.FromClause.alias`.
If ``None``, a name will be deterministically generated
at compile time.
:param with_labels: if True, :meth:`.with_labels` will be called
on the :class:`_query.Query` first to apply table-qualified labels
to all columns.
:param reduce_columns: if True,
:meth:`_expression.Select.reduce_columns` will
be called on the resulting :func:`_expression.select` construct,
to remove same-named columns where one also refers to the other
via foreign key or WHERE clause equivalence.
"""
q = self.enable_eagerloads(False)
if with_labels:
q = q.with_labels()
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name)
def cte(self, name=None, recursive=False):
r"""Return the full SELECT statement represented by this
:class:`_query.Query` represented as a common table expression (CTE).
Parameters and usage are the same as those of the
:meth:`_expression.SelectBase.cte` method; see that method for
further details.
Here is the `PostgreSQL WITH
RECURSIVE example
<http://www.postgresql.org/docs/8.4/static/queries-with.html>`_.
Note that, in this example, the ``included_parts`` cte and the
``incl_alias`` alias of it are Core selectables, which
means the columns are accessed via the ``.c.`` attribute. The
``parts_alias`` object is an :func:`_orm.aliased` instance of the
``Part`` entity, so column-mapped attributes are available
directly::
from sqlalchemy.orm import aliased
class Part(Base):
__tablename__ = 'part'
part = Column(String, primary_key=True)
sub_part = Column(String, primary_key=True)
quantity = Column(Integer)
included_parts = session.query(
Part.sub_part,
Part.part,
Part.quantity).\
filter(Part.part=="our part").\
cte(name="included_parts", recursive=True)
incl_alias = aliased(included_parts, name="pr")
parts_alias = aliased(Part, name="p")
included_parts = included_parts.union_all(
session.query(
parts_alias.sub_part,
parts_alias.part,
parts_alias.quantity).\
filter(parts_alias.part==incl_alias.c.sub_part)
)
q = session.query(
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).
label('total_quantity')
).\
group_by(included_parts.c.sub_part)
.. seealso::
:meth:`_expression.HasCTE.cte`
"""
return self.enable_eagerloads(False).statement.cte(
name=name, recursive=recursive
)
def label(self, name):
"""Return the full SELECT statement represented by this
:class:`_query.Query`, converted
to a scalar subquery with a label of the given name.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`.
"""
return self.enable_eagerloads(False).statement.label(name)
def as_scalar(self):
"""Return the full SELECT statement represented by this
:class:`_query.Query`, converted to a scalar subquery.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`.
"""
return self.enable_eagerloads(False).statement.as_scalar()
@property
def selectable(self):
"""Return the :class:`_expression.Select` object emitted by this
:class:`_query.Query`.
Used for :func:`_sa.inspect` compatibility, this is equivalent to::
query.enable_eagerloads(False).with_labels().statement
"""
return self.__clause_element__()
def __clause_element__(self):
return self.enable_eagerloads(False).with_labels().statement
@_generative()
def only_return_tuples(self, value):
"""When set to True, the query results will always be a tuple.
This is specifically for single element queries. The default is False.
.. versionadded:: 1.2.5
.. seealso::
:meth:`_query.Query.is_single_entity`
"""
self._only_return_tuples = value
@property
def is_single_entity(self):
"""Indicates if this :class:`_query.Query`
returns tuples or single entities.
Returns True if this query returns a single entity for each instance
in its result list, and False if this query returns a tuple of entities
for each result.
.. versionadded:: 1.3.11
.. seealso::
:meth:`_query.Query.only_return_tuples`
"""
return (
not self._only_return_tuples
and len(self._entities) == 1
and self._entities[0].supports_single_entity
)
@_generative()
def enable_eagerloads(self, value):
"""Control whether or not eager joins and subqueries are
rendered.
When set to False, the returned Query will not render
eager joins regardless of :func:`~sqlalchemy.orm.joinedload`,
:func:`~sqlalchemy.orm.subqueryload` options
or mapper-level ``lazy='joined'``/``lazy='subquery'``
configurations.
This is used primarily when nesting the Query's
statement into a subquery or other
selectable, or when using :meth:`_query.Query.yield_per`.
"""
self._enable_eagerloads = value
def _no_yield_per(self, message):
raise sa_exc.InvalidRequestError(
"The yield_per Query option is currently not "
"compatible with %s eager loading. Please "
"specify lazyload('*') or query.enable_eagerloads(False) in "
"order to "
"proceed with query.yield_per()." % message
)
@_generative()
def with_labels(self):
"""Apply column labels to the return value of Query.statement.
Indicates that this Query's `statement` accessor should return
a SELECT statement that applies labels to all columns in the
form <tablename>_<columnname>; this is commonly used to
disambiguate columns from multiple tables which have the same
name.
When the `Query` actually issues SQL to load rows, it always
uses column labeling.
.. note:: The :meth:`_query.Query.with_labels` method *only* applies
the output of :attr:`_query.Query.statement`, and *not* to any of
the result-row invoking systems of :class:`_query.Query` itself, e.g.
:meth:`_query.Query.first`, :meth:`_query.Query.all`, etc.
To execute
a query using :meth:`_query.Query.with_labels`, invoke the
:attr:`_query.Query.statement` using :meth:`.Session.execute`::
result = session.execute(query.with_labels().statement)
"""
self._with_labels = True
@_generative()
def enable_assertions(self, value):
"""Control whether assertions are generated.
When set to False, the returned Query will
not assert its state before certain operations,
including that LIMIT/OFFSET has not been applied
when filter() is called, no criterion exists
when get() is called, and no "from_statement()"
exists when filter()/order_by()/group_by() etc.
is called. This more permissive mode is used by
custom Query subclasses to specify criterion or
other modifiers outside of the usual usage patterns.
Care should be taken to ensure that the usage
pattern is even possible. A statement applied
by from_statement() will override any criterion
set by filter() or order_by(), for example.
"""
self._enable_assertions = value
@property
def whereclause(self):
"""A readonly attribute which returns the current WHERE criterion for
this Query.
This returned value is a SQL expression construct, or ``None`` if no
criterion has been established.
"""
return self._criterion
@_generative()
def _with_current_path(self, path):
"""indicate that this query applies to objects loaded
within a certain path.
Used by deferred loaders (see strategies.py) which transfer
query options from an originating query to a newly generated
query intended for the deferred load.
"""
self._current_path = path
@_generative(_no_clauseelement_condition)
def with_polymorphic(
self, cls_or_mappers, selectable=None, polymorphic_on=None
):
"""Load columns for inheriting classes.
:meth:`_query.Query.with_polymorphic` applies transformations
to the "main" mapped class represented by this :class:`_query.Query`.
The "main" mapped class here means the :class:`_query.Query`
object's first argument is a full class, i.e.
``session.query(SomeClass)``. These transformations allow additional
tables to be present in the FROM clause so that columns for a
joined-inheritance subclass are available in the query, both for the
purposes of load-time efficiency as well as the ability to use
these columns at query time.
See the documentation section :ref:`with_polymorphic` for
details on how this method is used.
"""
if not self._primary_entity:
raise sa_exc.InvalidRequestError(
"No primary mapper set up for this Query."
)
entity = self._entities[0]._clone()
self._entities = [entity] + self._entities[1:]
entity.set_with_polymorphic(
self,
cls_or_mappers,
selectable=selectable,
polymorphic_on=polymorphic_on,
)
@_generative()
def yield_per(self, count):
r"""Yield only ``count`` rows at a time.
The purpose of this method is, when fetching very large result sets
(> 10K rows), to batch results in sub-collections and yield them
out partially, so that the Python interpreter doesn't need to declare
very large areas of memory which is both time consuming and leads
to excessive memory use. The performance from fetching hundreds of
thousands of rows can often double when a suitable yield-per setting
(e.g. approximately 1000) is used, even with DBAPIs that buffer
rows (which are most).
The :meth:`_query.Query.yield_per` method **is not compatible with
subqueryload eager loading or joinedload eager loading when
using collections**. It is potentially compatible with "select in"
eager loading, **provided the database driver supports multiple,
independent cursors** (pysqlite and psycopg2 are known to work,
MySQL and SQL Server ODBC drivers do not).
Therefore in some cases, it may be helpful to disable
eager loads, either unconditionally with
:meth:`_query.Query.enable_eagerloads`::
q = sess.query(Object).yield_per(100).enable_eagerloads(False)
Or more selectively using :func:`.lazyload`; such as with
an asterisk to specify the default loader scheme::
q = sess.query(Object).yield_per(100).\
options(lazyload('*'), joinedload(Object.some_related))
.. warning::
Use this method with caution; if the same instance is
present in more than one batch of rows, end-user changes
to attributes will be overwritten.
In particular, it's usually impossible to use this setting
with eagerly loaded collections (i.e. any lazy='joined' or
'subquery') since those collections will be cleared for a
new load when encountered in a subsequent result batch.
In the case of 'subquery' loading, the full result for all
rows is fetched which generally defeats the purpose of
:meth:`~sqlalchemy.orm.query.Query.yield_per`.
Also note that while
:meth:`~sqlalchemy.orm.query.Query.yield_per` will set the
``stream_results`` execution option to True, currently
this is only understood by
:mod:`~sqlalchemy.dialects.postgresql.psycopg2`,
:mod:`~sqlalchemy.dialects.mysql.mysqldb` and
:mod:`~sqlalchemy.dialects.mysql.pymysql` dialects
which will stream results using server side cursors
instead of pre-buffering all rows for this query. Other
DBAPIs **pre-buffer all rows** before making them
available. The memory use of raw database rows is much less
than that of an ORM-mapped object, but should still be taken into
consideration when benchmarking.
.. seealso::
:meth:`_query.Query.enable_eagerloads`
"""
self._yield_per = count
self._execution_options = self._execution_options.union(
{"stream_results": True, "max_row_buffer": count}
)
def get(self, ident):
"""Return an instance based on the given primary key identifier,
or ``None`` if not found.
E.g.::
my_user = session.query(User).get(5)
some_object = session.query(VersionedFoo).get((5, 10))
some_object = session.query(VersionedFoo).get(
{"id": 5, "version_id": 10})
:meth:`_query.Query.get` is special in that it provides direct
access to the identity map of the owning :class:`.Session`.
If the given primary key identifier is present
in the local identity map, the object is returned
directly from this collection and no SQL is emitted,
unless the object has been marked fully expired.
If not present,
a SELECT is performed in order to locate the object.
:meth:`_query.Query.get` also will perform a check if
the object is present in the identity map and
marked as expired - a SELECT
is emitted to refresh the object as well as to
ensure that the row is still present.
If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
:meth:`_query.Query.get` is only used to return a single
mapped instance, not multiple instances or
individual column constructs, and strictly
on a single primary key value. The originating
:class:`_query.Query` must be constructed in this way,
i.e. against a single mapped entity,
with no additional filtering criterion. Loading
options via :meth:`_query.Query.options` may be applied
however, and will be used if the object is not
yet locally present.
A lazy-loading, many-to-one attribute configured
by :func:`_orm.relationship`, using a simple
foreign-key-to-primary-key criterion, will also use an
operation equivalent to :meth:`_query.Query.get` in order to retrieve
the target value from the local identity map
before querying the database. See :doc:`/orm/loading_relationships`
for further details on relationship loading.
:param ident: A scalar, tuple, or dictionary representing the
primary key. For a composite (e.g. multiple column) primary key,
a tuple or dictionary should be passed.
For a single-column primary key, the scalar calling form is typically
the most expedient. If the primary key of a row is the value "5",
the call looks like::
my_object = query.get(5)
The tuple form contains primary key values typically in
the order in which they correspond to the mapped
:class:`_schema.Table`
object's primary key columns, or if the
:paramref:`_orm.Mapper.primary_key` configuration parameter were used, in
the order used for that parameter. For example, if the primary key
of a row is represented by the integer
digits "5, 10" the call would look like::
my_object = query.get((5, 10))
The dictionary form should include as keys the mapped attribute names
corresponding to each element of the primary key. If the mapped class
has the attributes ``id``, ``version_id`` as the attributes which
store the object's primary key value, the call would look like::
my_object = query.get({"id": 5, "version_id": 10})
.. versionadded:: 1.3 the :meth:`_query.Query.get`
method now optionally
accepts a dictionary of attribute names to values in order to
indicate a primary key identifier.
:return: The object instance, or ``None``.
"""
return self._get_impl(ident, loading.load_on_pk_identity)
def _identity_lookup(
self,
mapper,
primary_key_identity,
identity_token=None,
passive=attributes.PASSIVE_OFF,
lazy_loaded_from=None,
):
"""Locate an object in the identity map.
Given a primary key identity, constructs an identity key and then
looks in the session's identity map. If present, the object may
be run through unexpiration rules (e.g. load unloaded attributes,
check if was deleted).
For performance reasons, while the :class:`_query.Query` must be
instantiated, it may be instantiated with no entities, and the
mapper is passed::
obj = session.query()._identity_lookup(inspect(SomeClass), (1, ))
:param mapper: mapper in use
:param primary_key_identity: the primary key we are searching for, as
a tuple.
:param identity_token: identity token that should be used to create
the identity key. Used as is, however overriding subclasses can
repurpose this in order to interpret the value in a special way,
such as if None then look among multiple target tokens.
:param passive: passive load flag passed to
:func:`.loading.get_from_identity`, which impacts the behavior if
the object is found; the object may be validated and/or unexpired
if the flag allows for SQL to be emitted.
:param lazy_loaded_from: an :class:`.InstanceState` that is
specifically asking for this identity as a related identity. Used
for sharding schemes where there is a correspondence between an object
and a related object being lazy-loaded (or otherwise
relationship-loaded).
.. versionadded:: 1.2.9
:return: None if the object is not found in the identity map, *or*
if the object was unexpired and found to have been deleted.
If passive flags disallow SQL and the object is expired, returns
PASSIVE_NO_RESULT. In all other cases the instance is returned.
.. versionadded:: 1.2.7
"""
key = mapper.identity_key_from_primary_key(
primary_key_identity, identity_token=identity_token
)
return loading.get_from_identity(self.session, mapper, key, passive)
def _get_impl(self, primary_key_identity, db_load_fn, identity_token=None):
# convert composite types to individual args
if hasattr(primary_key_identity, "__composite_values__"):
primary_key_identity = primary_key_identity.__composite_values__()
mapper = self._only_full_mapper_zero("get")
is_dict = isinstance(primary_key_identity, dict)
if not is_dict:
primary_key_identity = util.to_list(
primary_key_identity, default=(None,)
)
if len(primary_key_identity) != len(mapper.primary_key):
raise sa_exc.InvalidRequestError(
"Incorrect number of values in identifier to formulate "
"primary key for query.get(); primary key columns are %s"
% ",".join("'%s'" % c for c in mapper.primary_key)
)
if is_dict:
try:
primary_key_identity = list(
primary_key_identity[prop.key]
for prop in mapper._identity_key_props
)
except KeyError as err:
util.raise_(
sa_exc.InvalidRequestError(
"Incorrect names of values in identifier to formulate "
"primary key for query.get(); primary key attribute "
"names are %s"
% ",".join(
"'%s'" % prop.key
for prop in mapper._identity_key_props
)
),
replace_context=err,
)
if (
not self._populate_existing
and not mapper.always_refresh
and self._for_update_arg is None
):
instance = self._identity_lookup(
mapper, primary_key_identity, identity_token=identity_token
)
if instance is not None:
self._get_existing_condition()
# reject the call if the id is in the identity map but the
# class doesn't match.
if not issubclass(instance.__class__, mapper.class_):
return None
return instance
elif instance is attributes.PASSIVE_CLASS_MISMATCH:
return None
return db_load_fn(self, primary_key_identity)
@_generative()
def correlate(self, *args):
"""Return a :class:`_query.Query`
construct which will correlate the given
FROM clauses to that of an enclosing :class:`_query.Query` or
:func:`_expression.select`.
The method here accepts mapped classes, :func:`.aliased` constructs,
and :func:`.mapper` constructs as arguments, which are resolved into
expression constructs, in addition to appropriate expression
constructs.
The correlation arguments are ultimately passed to
:meth:`_expression.Select.correlate`
after coercion to expression constructs.
The correlation arguments take effect in such cases
as when :meth:`_query.Query.from_self` is used, or when
a subquery as returned by :meth:`_query.Query.subquery` is
embedded in another :func:`_expression.select` construct.
"""
for s in args:
if s is None:
self._correlate = self._correlate.union([None])
else:
self._correlate = self._correlate.union(
sql_util.surface_selectables(_interpret_as_from(s))
)
@_generative()
def autoflush(self, setting):
"""Return a Query with a specific 'autoflush' setting.
Note that a Session with autoflush=False will
not autoflush, even if this flag is set to True at the
Query level. Therefore this flag is usually used only
to disable autoflush for a specific Query.
"""
self._autoflush = setting
@_generative()
def populate_existing(self):
"""Return a :class:`_query.Query`
that will expire and refresh all instances
as they are loaded, or reused from the current :class:`.Session`.
:meth:`.populate_existing` does not improve behavior when
the ORM is used normally - the :class:`.Session` object's usual
behavior of maintaining a transaction and expiring all attributes
after rollback or commit handles object state automatically.
This method is not intended for general use.
"""
self._populate_existing = True
@_generative()
def _with_invoke_all_eagers(self, value):
"""Set the 'invoke all eagers' flag which causes joined- and
subquery loaders to traverse into already-loaded related objects
and collections.
Default is that of :attr:`_query.Query._invoke_all_eagers`.
"""
self._invoke_all_eagers = value
def with_parent(self, instance, property=None, from_entity=None): # noqa
"""Add filtering criterion that relates the given instance
to a child object or collection, using its attribute state
as well as an established :func:`_orm.relationship()`
configuration.
The method uses the :func:`.with_parent` function to generate
the clause, the result of which is passed to
:meth:`_query.Query.filter`.
Parameters are the same as :func:`.with_parent`, with the exception
that the given property can be None, in which case a search is
performed against this :class:`_query.Query` object's target mapper.
:param instance:
An instance which has some :func:`_orm.relationship`.
:param property:
String property name, or class-bound attribute, which indicates
what relationship from the instance should be used to reconcile the
parent/child relationship.
:param from_entity:
Entity in which to consider as the left side. This defaults to the
"zero" entity of the :class:`_query.Query` itself.
"""
if from_entity:
entity_zero = inspect(from_entity)
else:
entity_zero = self._entity_zero()
if property is None:
mapper = object_mapper(instance)
for prop in mapper.iterate_properties:
if (
isinstance(prop, properties.RelationshipProperty)
and prop.mapper is entity_zero.mapper
):
property = prop # noqa
break
else:
raise sa_exc.InvalidRequestError(
"Could not locate a property which relates instances "
"of class '%s' to instances of class '%s'"
% (
entity_zero.mapper.class_.__name__,
instance.__class__.__name__,
)
)
return self.filter(with_parent(instance, property, entity_zero.entity))
@_generative()
def add_entity(self, entity, alias=None):
"""add a mapped entity to the list of result columns
to be returned."""
if alias is not None:
entity = aliased(entity, alias)
self._entities = list(self._entities)
m = _MapperEntity(self, entity)
self._set_entity_selectables([m])
@_generative()
def with_session(self, session):
"""Return a :class:`_query.Query` that will use the given
:class:`.Session`.
While the :class:`_query.Query`
object is normally instantiated using the
:meth:`.Session.query` method, it is legal to build the
:class:`_query.Query`
directly without necessarily using a :class:`.Session`. Such a
:class:`_query.Query` object, or any :class:`_query.Query`
already associated
with a different :class:`.Session`, can produce a new
:class:`_query.Query`
object associated with a target session using this method::
from sqlalchemy.orm import Query
query = Query([MyClass]).filter(MyClass.id == 5)
result = query.with_session(my_session).one()
"""
self.session = session
def from_self(self, *entities):
r"""return a Query that selects from this Query's
SELECT statement.
:meth:`_query.Query.from_self` essentially turns the SELECT statement
into a SELECT of itself. Given a query such as::
q = session.query(User).filter(User.name.like('e%'))
Given the :meth:`_query.Query.from_self` version::
q = session.query(User).filter(User.name.like('e%')).from_self()
This query renders as:
.. sourcecode:: sql
SELECT anon_1.user_id AS anon_1_user_id,
anon_1.user_name AS anon_1_user_name
FROM (SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE "user".name LIKE :name_1) AS anon_1
There are lots of cases where :meth:`_query.Query.from_self`
may be useful.
A simple one is where, as above, we may want to apply a row LIMIT to
the set of user objects we query against, and then apply additional
joins against that row-limited set::
q = session.query(User).filter(User.name.like('e%')).\
limit(5).from_self().\
join(User.addresses).filter(Address.email.like('q%'))
The above query joins to the ``Address`` entity but only against the
first five results of the ``User`` query:
.. sourcecode:: sql
SELECT anon_1.user_id AS anon_1_user_id,
anon_1.user_name AS anon_1_user_name
FROM (SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE "user".name LIKE :name_1
LIMIT :param_1) AS anon_1
JOIN address ON anon_1.user_id = address.user_id
WHERE address.email LIKE :email_1
**Automatic Aliasing**
Another key behavior of :meth:`_query.Query.from_self`
is that it applies
**automatic aliasing** to the entities inside the subquery, when
they are referenced on the outside. Above, if we continue to
refer to the ``User`` entity without any additional aliasing applied
        to it, those references will be in terms of the subquery::
q = session.query(User).filter(User.name.like('e%')).\
limit(5).from_self().\
join(User.addresses).filter(Address.email.like('q%')).\
order_by(User.name)
The ORDER BY against ``User.name`` is aliased to be in terms of the
inner subquery:
.. sourcecode:: sql
SELECT anon_1.user_id AS anon_1_user_id,
anon_1.user_name AS anon_1_user_name
FROM (SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE "user".name LIKE :name_1
LIMIT :param_1) AS anon_1
JOIN address ON anon_1.user_id = address.user_id
WHERE address.email LIKE :email_1 ORDER BY anon_1.user_name
The automatic aliasing feature only works in a **limited** way,
for simple filters and orderings. More ambitious constructions
such as referring to the entity in joins should prefer to use
explicit subquery objects, typically making use of the
:meth:`_query.Query.subquery`
method to produce an explicit subquery object.
Always test the structure of queries by viewing the SQL to ensure
a particular structure does what's expected!
**Changing the Entities**
:meth:`_query.Query.from_self`
also includes the ability to modify what
columns are being queried. In our example, we want ``User.id``
to be queried by the inner query, so that we can join to the
        ``Address`` entity on the outside, but we only want the outer
query to return the ``Address.email`` column::
q = session.query(User).filter(User.name.like('e%')).\
limit(5).from_self(Address.email).\
join(User.addresses).filter(Address.email.like('q%'))
yielding:
.. sourcecode:: sql
SELECT address.email AS address_email
FROM (SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE "user".name LIKE :name_1
LIMIT :param_1) AS anon_1
JOIN address ON anon_1.user_id = address.user_id
WHERE address.email LIKE :email_1
**Looking out for Inner / Outer Columns**
Keep in mind that when referring to columns that originate from
inside the subquery, we need to ensure they are present in the
columns clause of the subquery itself; this is an ordinary aspect of
SQL. For example, if we wanted to load from a joined entity inside
the subquery using :func:`.contains_eager`, we need to add those
columns. Below illustrates a join of ``Address`` to ``User``,
then a subquery, and then we'd like :func:`.contains_eager` to access
the ``User`` columns::
q = session.query(Address).join(Address.user).\
filter(User.name.like('e%'))
q = q.add_entity(User).from_self().\
options(contains_eager(Address.user))
We use :meth:`_query.Query.add_entity` above **before** we call
:meth:`_query.Query.from_self`
so that the ``User`` columns are present
in the inner subquery, so that they are available to the
:func:`.contains_eager` modifier we are using on the outside,
producing:
.. sourcecode:: sql
SELECT anon_1.address_id AS anon_1_address_id,
anon_1.address_email AS anon_1_address_email,
anon_1.address_user_id AS anon_1_address_user_id,
anon_1.user_id AS anon_1_user_id,
anon_1.user_name AS anon_1_user_name
FROM (
SELECT address.id AS address_id,
address.email AS address_email,
address.user_id AS address_user_id,
"user".id AS user_id,
"user".name AS user_name
FROM address JOIN "user" ON "user".id = address.user_id
WHERE "user".name LIKE :name_1) AS anon_1
If we didn't call ``add_entity(User)``, but still asked
:func:`.contains_eager` to load the ``User`` entity, it would be
forced to add the table on the outside without the correct
        join criteria - note the ``anon_1, "user"`` phrase at
the end:
.. sourcecode:: sql
-- incorrect query
SELECT anon_1.address_id AS anon_1_address_id,
anon_1.address_email AS anon_1_address_email,
anon_1.address_user_id AS anon_1_address_user_id,
"user".id AS user_id,
"user".name AS user_name
FROM (
SELECT address.id AS address_id,
address.email AS address_email,
address.user_id AS address_user_id
FROM address JOIN "user" ON "user".id = address.user_id
WHERE "user".name LIKE :name_1) AS anon_1, "user"
:param \*entities: optional list of entities which will replace
those being selected.
"""
fromclause = (
self.with_labels()
.enable_eagerloads(False)
.statement.correlate(None)
)
q = self._from_selectable(fromclause)
q._enable_single_crit = False
q._select_from_entity = self._entity_zero()
if entities:
q._set_entities(entities)
return q
@_generative()
def _set_enable_single_crit(self, val):
self._enable_single_crit = val
@_generative()
def _from_selectable(self, fromclause):
for attr in (
"_statement",
"_criterion",
"_order_by",
"_group_by",
"_limit",
"_offset",
"_joinpath",
"_joinpoint",
"_distinct",
"_having",
"_prefixes",
"_suffixes",
):
self.__dict__.pop(attr, None)
self._set_select_from([fromclause], True)
# this enables clause adaptation for non-ORM
# expressions.
self._orm_only_from_obj_alias = False
old_entities = self._entities
self._entities = []
for e in old_entities:
e.adapt_to_selectable(self, self._from_obj[0])
def values(self, *columns):
"""Return an iterator yielding result tuples corresponding
        to the given list of columns.
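        E.g., a minimal sketch, assuming a mapped ``User`` class::
            for name, in session.query(User).values(User.name):
                print(name)
        """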
if not columns:
return iter(())
q = self._clone()
q._set_entities(columns, entity_wrapper=_ColumnEntity)
if not q._yield_per:
q._yield_per = 10
return iter(q)
_values = values
def value(self, column):
"""Return a scalar result corresponding to the given
        column expression.
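        E.g., a minimal sketch, assuming a mapped ``User`` class::
            name = session.query(User).filter(User.id == 5).value(User.name)
        """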
try:
return next(self.values(column))[0]
except StopIteration:
return None
@_generative()
def with_entities(self, *entities):
r"""Return a new :class:`_query.Query`
replacing the SELECT list with the
given entities.
e.g.::
# Users, filtered on some arbitrary criterion
# and then ordered by related email address
q = session.query(User).\
join(User.address).\
filter(User.name.like('%ed%')).\
order_by(Address.email)
# given *only* User.id==5, Address.email, and 'q', what
# would the *next* User in the result be ?
subq = q.with_entities(Address.email).\
order_by(None).\
filter(User.id==5).\
subquery()
q = q.join((subq, subq.c.email < Address.email)).\
limit(1)
"""
self._set_entities(entities)
@_generative()
def add_columns(self, *column):
"""Add one or more column expressions to the list
        of result columns to be returned.
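        E.g., a minimal sketch, assuming a mapped ``User`` class::
            q = session.query(User).add_columns(User.id, User.name)
        """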
self._entities = list(self._entities)
l = len(self._entities)
for c in column:
_ColumnEntity(self, c)
# _ColumnEntity may add many entities if the
# given arg is a FROM clause
self._set_entity_selectables(self._entities[l:])
@util.pending_deprecation(
"0.7",
":meth:`.add_column` is superseded " "by :meth:`.add_columns`",
False,
)
def add_column(self, column):
"""Add a column expression to the list of result columns to be
returned.
Pending deprecation: :meth:`.add_column` will be superseded by
:meth:`.add_columns`.
"""
return self.add_columns(column)
def options(self, *args):
"""Return a new :class:`_query.Query` object,
applying the given list of
mapper options.
Most supplied options regard changing how column- and
relationship-mapped attributes are loaded.
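        E.g., a minimal sketch, assuming a mapped ``User`` class with an
        ``addresses`` relationship::
            from sqlalchemy.orm import joinedload
            q = session.query(User).options(joinedload(User.addresses))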
.. seealso::
:ref:`deferred_options`
:ref:`relationship_loader_options`
"""
return self._options(False, *args)
def _conditional_options(self, *args):
return self._options(True, *args)
@_generative()
def _options(self, conditional, *args):
# most MapperOptions write to the '_attributes' dictionary,
# so copy that as well
self._attributes = self._attributes.copy()
if "_unbound_load_dedupes" not in self._attributes:
self._attributes["_unbound_load_dedupes"] = set()
opts = tuple(util.flatten_iterator(args))
self._with_options = self._with_options + opts
if conditional:
for opt in opts:
opt.process_query_conditionally(self)
else:
for opt in opts:
opt.process_query(self)
def with_transformation(self, fn):
"""Return a new :class:`_query.Query` object transformed by
the given function.
E.g.::
def filter_something(criterion):
def transform(q):
return q.filter(criterion)
return transform
q = q.with_transformation(filter_something(x==5))
This allows ad-hoc recipes to be created for :class:`_query.Query`
objects. See the example at :ref:`hybrid_transformers`.
"""
return fn(self)
@_generative()
def with_hint(self, selectable, text, dialect_name="*"):
"""Add an indexing or other executional context
hint for the given entity or selectable to
this :class:`_query.Query`.
Functionality is passed straight through to
:meth:`~sqlalchemy.sql.expression.Select.with_hint`,
with the addition that ``selectable`` can be a
:class:`_schema.Table`, :class:`_expression.Alias`,
or ORM entity / mapped class
/etc.
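        E.g., a minimal sketch; the Oracle-style hint text and the
        ``MyClass`` entity are assumed here for illustration::
            q = session.query(MyClass).with_hint(
                MyClass, "index(%(name)s ix_mytable)", 'oracle')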
.. seealso::
:meth:`_query.Query.with_statement_hint`
:meth:`.Query.prefix_with` - generic SELECT prefixing which also
can suit some database-specific HINT syntaxes such as MySQL
optimizer hints
"""
if selectable is not None:
selectable = inspect(selectable).selectable
self._with_hints += ((selectable, text, dialect_name),)
def with_statement_hint(self, text, dialect_name="*"):
"""add a statement hint to this :class:`_expression.Select`.
This method is similar to :meth:`_expression.Select.with_hint`
except that
it does not require an individual table, and instead applies to the
statement as a whole.
This feature calls down into
:meth:`_expression.Select.with_statement_hint`.
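        E.g., a minimal sketch; the hint text is dialect-specific and is
        assumed here for illustration::
            q = session.query(MyClass).with_statement_hint(
                "/*+ FIRST_ROWS(10) */", 'oracle')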
.. versionadded:: 1.0.0
.. seealso::
:meth:`_query.Query.with_hint`
"""
return self.with_hint(None, text, dialect_name)
def get_execution_options(self):
""" Get the non-SQL options which will take effect during execution.
.. versionadded:: 1.3
.. seealso::
:meth:`_query.Query.execution_options`
"""
return self._execution_options
@_generative()
def execution_options(self, **kwargs):
""" Set non-SQL options which take effect during execution.
The options are the same as those accepted by
:meth:`_engine.Connection.execution_options`.
Note that the ``stream_results`` execution option is enabled
automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()`
method is used.
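        E.g., a minimal sketch::
            q = session.query(User).execution_options(stream_results=True)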
.. seealso::
:meth:`_query.Query.get_execution_options`
"""
self._execution_options = self._execution_options.union(kwargs)
@_generative()
@util.deprecated(
"0.9",
"The :meth:`_query.Query.with_lockmode` "
"method is deprecated and will "
"be removed in a future release. Please refer to "
":meth:`_query.Query.with_for_update`. ",
)
def with_lockmode(self, mode):
"""Return a new :class:`_query.Query`
object with the specified "locking mode",
which essentially refers to the ``FOR UPDATE`` clause.
:param mode: a string representing the desired locking mode.
Valid values are:
* ``None`` - translates to no lockmode
* ``'update'`` - translates to ``FOR UPDATE``
(standard SQL, supported by most dialects)
* ``'update_nowait'`` - translates to ``FOR UPDATE NOWAIT``
(supported by Oracle, PostgreSQL 8.1 upwards)
* ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL),
and ``FOR SHARE`` (for PostgreSQL)
.. seealso::
:meth:`_query.Query.with_for_update` - improved API for
specifying the ``FOR UPDATE`` clause.
"""
self._for_update_arg = LockmodeArg.parse_legacy_query(mode)
@_generative()
def with_for_update(
self,
read=False,
nowait=False,
of=None,
skip_locked=False,
key_share=False,
):
"""return a new :class:`_query.Query`
with the specified options for the
``FOR UPDATE`` clause.
The behavior of this method is identical to that of
:meth:`_expression.SelectBase.with_for_update`.
When called with no arguments,
the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause
appended. When additional arguments are specified, backend-specific
options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE``
can take effect.
E.g.::
q = sess.query(User).with_for_update(nowait=True, of=User)
The above query on a PostgreSQL backend will render like::
SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT
.. versionadded:: 0.9.0 :meth:`_query.Query.with_for_update`
supersedes
the :meth:`_query.Query.with_lockmode` method.
.. seealso::
:meth:`_expression.GenerativeSelect.with_for_update`
- Core level method with
full argument and behavioral description.
"""
self._for_update_arg = LockmodeArg(
read=read,
nowait=nowait,
of=of,
skip_locked=skip_locked,
key_share=key_share,
)
@_generative()
def params(self, *args, **kwargs):
r"""add values for bind parameters which may have been
specified in filter().
parameters may be specified using \**kwargs, or optionally a single
dictionary as the first positional argument. The reason for both is
        that \**kwargs is convenient; however, some parameter dictionaries
        contain unicode keys, in which case \**kwargs cannot be used.
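        E.g., a minimal sketch, assuming ``text`` imported from
        ``sqlalchemy``::
            q = session.query(User).filter(text("id=:value")).params(value=5)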
"""
if len(args) == 1:
kwargs.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary."
)
self._params = self._params.copy()
self._params.update(kwargs)
@_generative(_no_statement_condition, _no_limit_offset)
def filter(self, *criterion):
r"""apply the given filtering criterion to a copy
of this :class:`_query.Query`, using SQL expressions.
e.g.::
session.query(MyClass).filter(MyClass.name == 'some name')
        Multiple criteria may be specified, separated by commas; the effect
is that they will be joined together using the :func:`.and_`
function::
session.query(MyClass).\
filter(MyClass.name == 'some name', MyClass.id > 5)
The criterion is any SQL expression object applicable to the
WHERE clause of a select. String expressions are coerced
into SQL expression constructs via the :func:`_expression.text`
construct.
.. seealso::
:meth:`_query.Query.filter_by` - filter on keyword expressions.
"""
for criterion in list(criterion):
criterion = expression._expression_literal_as_text(criterion)
criterion = self._adapt_clause(criterion, True, True)
if self._criterion is not None:
self._criterion = self._criterion & criterion
else:
self._criterion = criterion
def filter_by(self, **kwargs):
r"""apply the given filtering criterion to a copy
of this :class:`_query.Query`, using keyword expressions.
e.g.::
session.query(MyClass).filter_by(name = 'some name')
        Multiple criteria may be specified, separated by commas; the effect
is that they will be joined together using the :func:`.and_`
function::
session.query(MyClass).\
filter_by(name = 'some name', id = 5)
The keyword expressions are extracted from the primary
entity of the query, or the last entity that was the
target of a call to :meth:`_query.Query.join`.
.. seealso::
:meth:`_query.Query.filter` - filter on SQL expressions.
"""
zero = self._joinpoint_zero()
if zero is None:
raise sa_exc.InvalidRequestError(
"Can't use filter_by when the first entity '%s' of a query "
"is not a mapped class. Please use the filter method instead, "
"or change the order of the entities in the query"
% self._query_entity_zero()
)
clauses = [
_entity_descriptor(zero, key) == value
for key, value in kwargs.items()
]
return self.filter(*clauses)
@_generative(_no_statement_condition, _no_limit_offset)
def order_by(self, *criterion):
"""apply one or more ORDER BY criterion to the query and return
the newly resulting ``Query``
All existing ORDER BY settings can be suppressed by
passing ``None`` - this will suppress any ordering configured
on the :func:`.mapper` object using the deprecated
:paramref:`.mapper.order_by` parameter.
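        E.g., a minimal sketch::
            q = session.query(User).order_by(User.name.desc(), User.id)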
"""
if len(criterion) == 1:
if criterion[0] is False:
if "_order_by" in self.__dict__:
self._order_by = False
return
if criterion[0] is None:
self._order_by = None
return
criterion = self._adapt_col_list(criterion)
if self._order_by is False or self._order_by is None:
self._order_by = criterion
else:
self._order_by = self._order_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def group_by(self, *criterion):
"""apply one or more GROUP BY criterion to the query and return
the newly resulting :class:`_query.Query`
All existing GROUP BY settings can be suppressed by
passing ``None`` - this will suppress any GROUP BY configured
on mappers as well.
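        E.g., a minimal sketch, assuming ``func`` imported from
        ``sqlalchemy``::
            q = session.query(User.name, func.count(User.id)).group_by(User.name)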
.. versionadded:: 1.1 GROUP BY can be cancelled by passing None,
in the same way as ORDER BY.
"""
if len(criterion) == 1:
if criterion[0] is None:
self._group_by = False
return
criterion = list(chain(*[_orm_columns(c) for c in criterion]))
criterion = self._adapt_col_list(criterion)
if self._group_by is False:
self._group_by = criterion
else:
self._group_by = self._group_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def having(self, criterion):
r"""apply a HAVING criterion to the query and return the
newly resulting :class:`_query.Query`.
:meth:`_query.Query.having` is used in conjunction with
:meth:`_query.Query.group_by`.
HAVING criterion makes it possible to use filters on aggregate
        functions like COUNT, SUM, AVG, MAX, and MIN, e.g.::
q = session.query(User.id).\
join(User.addresses).\
group_by(User.id).\
having(func.count(Address.id) > 2)
"""
criterion = expression._expression_literal_as_text(criterion)
if criterion is not None and not isinstance(
criterion, sql.ClauseElement
):
raise sa_exc.ArgumentError(
"having() argument must be of type "
"sqlalchemy.sql.ClauseElement or string"
)
criterion = self._adapt_clause(criterion, True, True)
if self._having is not None:
self._having = self._having & criterion
else:
self._having = criterion
def _set_op(self, expr_fn, *q):
return self._from_selectable(
expr_fn(*([self] + list(q)))
)._set_enable_single_crit(False)
def union(self, *q):
"""Produce a UNION of this Query against one or more queries.
e.g.::
q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
q3 = q1.union(q2)
The method accepts multiple Query objects so as to control
the level of nesting. A series of ``union()`` calls such as::
x.union(y).union(z).all()
will nest on each ``union()``, and produces::
SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION
SELECT * FROM y) UNION SELECT * FROM Z)
Whereas::
x.union(y, z).all()
produces::
SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION
SELECT * FROM Z)
Note that many database backends do not allow ORDER BY to
be rendered on a query called within UNION, EXCEPT, etc.
To disable all ORDER BY clauses including those configured
on mappers, issue ``query.order_by(None)`` - the resulting
:class:`_query.Query` object will not render ORDER BY within
its SELECT statement.
"""
return self._set_op(expression.union, *q)
def union_all(self, *q):
"""Produce a UNION ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.union_all, *q)
def intersect(self, *q):
"""Produce an INTERSECT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.intersect, *q)
def intersect_all(self, *q):
"""Produce an INTERSECT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.intersect_all, *q)
def except_(self, *q):
"""Produce an EXCEPT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.except_, *q)
def except_all(self, *q):
"""Produce an EXCEPT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.except_all, *q)
def join(self, *props, **kwargs):
r"""Create a SQL JOIN against this :class:`_query.Query`
object's criterion
and apply generatively, returning the newly resulting
:class:`_query.Query`.
**Simple Relationship Joins**
Consider a mapping between two classes ``User`` and ``Address``,
with a relationship ``User.addresses`` representing a collection
of ``Address`` objects associated with each ``User``. The most
common usage of :meth:`_query.Query.join`
is to create a JOIN along this
relationship, using the ``User.addresses`` attribute as an indicator
for how this should occur::
q = session.query(User).join(User.addresses)
Where above, the call to :meth:`_query.Query.join` along
``User.addresses`` will result in SQL approximately equivalent to::
            SELECT user.id, user.name
FROM user JOIN address ON user.id = address.user_id
In the above example we refer to ``User.addresses`` as passed to
:meth:`_query.Query.join` as the "on clause", that is, it indicates
how the "ON" portion of the JOIN should be constructed.
To construct a chain of joins, multiple :meth:`_query.Query.join`
calls may be used. The relationship-bound attribute implies both
the left and right side of the join at once::
q = session.query(User).\
join(User.orders).\
join(Order.items).\
join(Item.keywords)
.. note:: as seen in the above example, **the order in which each
call to the join() method occurs is important**. Query would not,
for example, know how to join correctly if we were to specify
``User``, then ``Item``, then ``Order``, in our chain of joins; in
such a case, depending on the arguments passed, it may raise an
error that it doesn't know how to join, or it may produce invalid
SQL in which case the database will raise an error. In correct
practice, the
:meth:`_query.Query.join` method is invoked in such a way that lines
up with how we would want the JOIN clauses in SQL to be
rendered, and each call should represent a clear link from what
precedes it.
**Joins to a Target Entity or Selectable**
A second form of :meth:`_query.Query.join` allows any mapped entity or
core selectable construct as a target. In this usage,
:meth:`_query.Query.join` will attempt to create a JOIN along the
natural foreign key relationship between two entities::
q = session.query(User).join(Address)
In the above calling form, :meth:`_query.Query.join` is called upon to
create the "on clause" automatically for us. This calling form will
ultimately raise an error if either there are no foreign keys between
the two entities, or if there are multiple foreign key linkages between
the target entity and the entity or entities already present on the
left side such that creating a join requires more information. Note
that when indicating a join to a target without any ON clause, ORM
configured relationships are not taken into account.
**Joins to a Target with an ON Clause**
The third calling form allows both the target entity as well
        as the ON clause to be passed explicitly. An example that includes
a SQL expression as the ON clause is as follows::
q = session.query(User).join(Address, User.id==Address.user_id)
The above form may also use a relationship-bound attribute as the
ON clause as well::
q = session.query(User).join(Address, User.addresses)
The above syntax can be useful for the case where we wish
to join to an alias of a particular target entity. If we wanted
to join to ``Address`` twice, it could be achieved using two
aliases set up using the :func:`~sqlalchemy.orm.aliased` function::
a1 = aliased(Address)
a2 = aliased(Address)
q = session.query(User).\
join(a1, User.addresses).\
join(a2, User.addresses).\
filter(a1.email_address=='ed@foo.com').\
filter(a2.email_address=='ed@bar.com')
The relationship-bound calling form can also specify a target entity
using the :meth:`_orm.PropComparator.of_type` method; a query
equivalent to the one above would be::
a1 = aliased(Address)
a2 = aliased(Address)
q = session.query(User).\
join(User.addresses.of_type(a1)).\
join(User.addresses.of_type(a2)).\
filter(a1.email_address == 'ed@foo.com').\
filter(a2.email_address == 'ed@bar.com')
**Joining to Tables and Subqueries**
The target of a join may also be any table or SELECT statement,
which may be related to a target entity or not. Use the
appropriate ``.subquery()`` method in order to make a subquery
out of a query::
subq = session.query(Address).\
filter(Address.email_address == 'ed@foo.com').\
subquery()
q = session.query(User).join(
subq, User.id == subq.c.user_id
)
Joining to a subquery in terms of a specific relationship and/or
target entity may be achieved by linking the subquery to the
entity using :func:`_orm.aliased`::
subq = session.query(Address).\
filter(Address.email_address == 'ed@foo.com').\
subquery()
address_subq = aliased(Address, subq)
q = session.query(User).join(
User.addresses.of_type(address_subq)
)
**Controlling what to Join From**
In cases where the left side of the current state of
:class:`_query.Query` is not in line with what we want to join from,
the :meth:`_query.Query.select_from` method may be used::
q = session.query(Address).select_from(User).\
join(User.addresses).\
filter(User.name == 'ed')
Which will produce SQL similar to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
**Legacy Features of Query.join()**
The :meth:`_query.Query.join` method currently supports several
usage patterns and arguments that are considered to be legacy
as of SQLAlchemy 1.3. A deprecation path will follow
in the 1.4 series for the following features:
* Joining on relationship names rather than attributes::
session.query(User).join("addresses")
**Why it's legacy**: the string name does not provide enough context
for :meth:`_query.Query.join` to always know what is desired,
notably in that there is no indication of what the left side
of the join should be. This gives rise to flags like
``from_joinpoint`` as well as the ability to place several
join clauses in a single :meth:`_query.Query.join` call
which don't solve the problem fully while also
adding new calling styles that are unnecessary and expensive to
accommodate internally.
**Modern calling pattern**: Use the actual relationship,
e.g. ``User.addresses`` in the above case::
session.query(User).join(User.addresses)
* Automatic aliasing with the ``aliased=True`` flag::
session.query(Node).join(Node.children, aliased=True).\
filter(Node.name == 'some name')
**Why it's legacy**: the automatic aliasing feature of
:class:`_query.Query` is intensely complicated, both in its internal
implementation as well as in its observed behavior, and is almost
never used. It is difficult to know upon inspection where and when
its aliasing of a target entity, ``Node`` in the above case, will be
applied and when it won't, and additionally the feature has to use
very elaborate heuristics to achieve this implicit behavior.
**Modern calling pattern**: Use the :func:`_orm.aliased` construct
explicitly::
from sqlalchemy.orm import aliased
n1 = aliased(Node)
session.query(Node).join(Node.children.of_type(n1)).\
filter(n1.name == 'some name')
* Multiple joins in one call::
session.query(User).join("orders", "items")
session.query(User).join(User.orders, Order.items)
session.query(User).join(
(Order, User.orders),
(Item, Item.order_id == Order.id)
)
# ... and several more forms actually
**Why it's legacy**: being able to chain multiple ON clauses in one
call to :meth:`_query.Query.join` is yet another attempt to solve
the problem of being able to specify what entity to join from,
and is the source of a large variety of potential calling patterns
that are internally expensive and complicated to parse and
accommodate.
**Modern calling pattern**: Use relationship-bound attributes
or SQL-oriented ON clauses within separate calls, so that
each call to :meth:`_query.Query.join` knows what the left
side should be::
session.query(User).join(User.orders).join(
Item, Item.order_id == Order.id)
:param \*props: Incoming arguments for :meth:`_query.Query.join`,
the props collection in modern use should be considered to be a one
or two argument form, either as a single "target" entity or ORM
attribute-bound relationship, or as a target entity plus an "on
clause" which may be a SQL expression or ORM attribute-bound
relationship.
:param isouter=False: If True, the join used will be a left outer join,
just as if the :meth:`_query.Query.outerjoin` method were called.
:param full=False: render FULL OUTER JOIN; implies ``isouter``.
.. versionadded:: 1.1
:param from_joinpoint=False: When using ``aliased=True``, a setting
of True here will cause the join to be from the most recent
joined target, rather than starting back from the original
FROM clauses of the query.
.. note:: This flag is considered legacy.
:param aliased=False: If True, indicate that the JOIN target should be
anonymously aliased. Subsequent calls to :meth:`_query.Query.filter`
and similar will adapt the incoming criterion to the target
alias, until :meth:`_query.Query.reset_joinpoint` is called.
.. note:: This flag is considered legacy.
.. seealso::
:ref:`ormtutorial_joins` in the ORM tutorial.
:ref:`inheritance_toplevel` for details on how
:meth:`_query.Query.join` is used for inheritance relationships.
:func:`_orm.join` - a standalone ORM-level join function,
used internally by :meth:`_query.Query.join`, which in previous
SQLAlchemy versions was the primary ORM-level joining interface.
"""
aliased, from_joinpoint, isouter, full = (
kwargs.pop("aliased", False),
kwargs.pop("from_joinpoint", False),
kwargs.pop("isouter", False),
kwargs.pop("full", False),
)
if kwargs:
raise TypeError(
"unknown arguments: %s" % ", ".join(sorted(kwargs))
)
return self._join(
props,
outerjoin=isouter,
full=full,
create_aliases=aliased,
from_joinpoint=from_joinpoint,
)
def outerjoin(self, *props, **kwargs):
"""Create a left outer join against this ``Query`` object's criterion
and apply generatively, returning the newly resulting ``Query``.
Usage is the same as the ``join()`` method.
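        E.g., a minimal sketch, assuming a ``User.addresses``
        relationship::
            q = session.query(User).outerjoin(User.addresses)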
"""
aliased, from_joinpoint, full = (
kwargs.pop("aliased", False),
kwargs.pop("from_joinpoint", False),
kwargs.pop("full", False),
)
if kwargs:
raise TypeError(
"unknown arguments: %s" % ", ".join(sorted(kwargs))
)
return self._join(
props,
outerjoin=True,
full=full,
create_aliases=aliased,
from_joinpoint=from_joinpoint,
)
def _update_joinpoint(self, jp):
self._joinpoint = jp
# copy backwards to the root of the _joinpath
# dict, so that no existing dict in the path is mutated
while "prev" in jp:
f, prev = jp["prev"]
prev = prev.copy()
prev[f] = jp.copy()
jp["prev"] = (f, prev)
jp = prev
self._joinpath = jp
@_generative(_no_statement_condition, _no_limit_offset)
def _join(self, keys, outerjoin, full, create_aliases, from_joinpoint):
"""consumes arguments from join() or outerjoin(), places them into a
consistent format with which to form the actual JOIN constructs.
"""
if not from_joinpoint:
self._reset_joinpoint()
if (
len(keys) == 2
and isinstance(
keys[0], (expression.FromClause, type, AliasedClass)
)
and isinstance(
keys[1],
(str, expression.ClauseElement, interfaces.PropComparator),
)
):
# detect 2-arg form of join and
# convert to a tuple.
keys = (keys,)
# Query.join() accepts a list of join paths all at once.
# step one is to iterate through these paths and determine the
# intent of each path individually. as we encounter a path token,
# we add a new ORMJoin construct to the self._from_obj tuple,
# either by adding a new element to it, or by replacing an existing
# element with a new ORMJoin.
keylist = util.to_list(keys)
for idx, arg1 in enumerate(keylist):
if isinstance(arg1, tuple):
# "tuple" form of join, multiple
# tuples are accepted as well. The simpler
# "2-arg" form is preferred.
arg1, arg2 = arg1
else:
arg2 = None
# determine onclause/right_entity. there
# is a little bit of legacy behavior still at work here
# which means they might be in either order.
if isinstance(
arg1, (interfaces.PropComparator, util.string_types)
):
right, onclause = arg2, arg1
else:
right, onclause = arg1, arg2
if onclause is None:
r_info = inspect(right)
if not r_info.is_selectable and not hasattr(r_info, "mapper"):
raise sa_exc.ArgumentError(
"Expected mapped entity or "
"selectable/table as join target"
)
if isinstance(onclause, interfaces.PropComparator):
of_type = getattr(onclause, "_of_type", None)
else:
of_type = None
if isinstance(onclause, util.string_types):
# string given, e.g. query(Foo).join("bar").
# we look to the left entity or what we last joined
# towards
onclause = _entity_descriptor(self._joinpoint_zero(), onclause)
# check for q.join(Class.propname, from_joinpoint=True)
# and Class corresponds at the mapper level to the current
# joinpoint. this match intentionally looks for a non-aliased
# class-bound descriptor as the onclause and if it matches the
# current joinpoint at the mapper level, it's used. This
# is a very old use case that is intended to make it easier
# to work with the aliased=True flag, which is also something
# that probably shouldn't exist on join() due to its high
# complexity/usefulness ratio
elif from_joinpoint and isinstance(
onclause, interfaces.PropComparator
):
jp0 = self._joinpoint_zero()
info = inspect(jp0)
if getattr(info, "mapper", None) is onclause._parententity:
onclause = _entity_descriptor(jp0, onclause.key)
if isinstance(onclause, interfaces.PropComparator):
# descriptor/property given (or determined); this tells
# us explicitly what the expected "left" side of the join is.
if right is None:
if of_type:
right = of_type
else:
right = onclause.property.entity
left = onclause._parententity
alias = self._polymorphic_adapters.get(left, None)
# could be None or could be ColumnAdapter also
if isinstance(alias, ORMAdapter) and alias.mapper.isa(left):
left = alias.aliased_class
onclause = getattr(left, onclause.key)
prop = onclause.property
if not isinstance(onclause, attributes.QueryableAttribute):
onclause = prop
if not create_aliases:
# check for this path already present.
# don't render in that case.
edge = (left, right, prop.key)
if edge in self._joinpoint:
# The child's prev reference might be stale --
# it could point to a parent older than the
# current joinpoint. If this is the case,
# then we need to update it and then fix the
# tree's spine with _update_joinpoint. Copy
# and then mutate the child, which might be
# shared by a different query object.
jp = self._joinpoint[edge].copy()
jp["prev"] = (edge, self._joinpoint)
self._update_joinpoint(jp)
# warn only on the last element of the list
if idx == len(keylist) - 1:
util.warn(
"Pathed join target %s has already "
"been joined to; skipping" % prop
)
continue
else:
# no descriptor/property given; we will need to figure out
# what the effective "left" side is
prop = left = None
# figure out the final "left" and "right" sides and create an
# ORMJoin to add to our _from_obj tuple
self._join_left_to_right(
left, right, onclause, prop, create_aliases, outerjoin, full
)
def _join_left_to_right(
self, left, right, onclause, prop, create_aliases, outerjoin, full
):
"""given raw "left", "right", "onclause" parameters consumed from
a particular key within _join(), add a real ORMJoin object to
our _from_obj list (or augment an existing one)
"""
self._polymorphic_adapters = self._polymorphic_adapters.copy()
if left is None:
# left not given (e.g. no relationship object/name specified)
# figure out the best "left" side based on our existing froms /
# entities
assert prop is None
(
left,
replace_from_obj_index,
use_entity_index,
) = self._join_determine_implicit_left_side(left, right, onclause)
else:
# left is given via a relationship/name. Determine where in our
# "froms" list it should be spliced/appended as well as what
# existing entity it corresponds to.
assert prop is not None
(
replace_from_obj_index,
use_entity_index,
) = self._join_place_explicit_left_side(left)
if left is right and not create_aliases:
raise sa_exc.InvalidRequestError(
"Can't construct a join from %s to %s, they "
"are the same entity" % (left, right)
)
# the right side as given often needs to be adapted. additionally
# a lot of things can be wrong with it. handle all that and
# get back the new effective "right" side
r_info, right, onclause = self._join_check_and_adapt_right_side(
left, right, onclause, prop, create_aliases
)
if replace_from_obj_index is not None:
# splice into an existing element in the
# self._from_obj list
left_clause = self._from_obj[replace_from_obj_index]
self._from_obj = (
self._from_obj[:replace_from_obj_index]
+ (
orm_join(
left_clause,
right,
onclause,
isouter=outerjoin,
full=full,
),
)
+ self._from_obj[replace_from_obj_index + 1 :]
)
else:
# add a new element to the self._from_obj list
if use_entity_index is not None:
# why doesn't this work as .entity_zero_or_selectable?
left_clause = self._entities[use_entity_index].selectable
else:
left_clause = left
self._from_obj = self._from_obj + (
orm_join(
left_clause, right, onclause, isouter=outerjoin, full=full
),
)
def _join_determine_implicit_left_side(self, left, right, onclause):
"""When join conditions don't express the left side explicitly,
determine if an existing FROM or entity in this query
can serve as the left hand side.
"""
# when we are here, it means join() was called without an ORM-
# specific way of telling us what the "left" side is, e.g.:
#
# join(RightEntity)
#
# or
#
# join(RightEntity, RightEntity.foo == LeftEntity.bar)
#
r_info = inspect(right)
replace_from_obj_index = use_entity_index = None
if self._from_obj:
# we have a list of FROMs already. So by definition this
# join has to connect to one of those FROMs.
indexes = sql_util.find_left_clause_to_join_from(
self._from_obj, r_info.selectable, onclause
)
if len(indexes) == 1:
replace_from_obj_index = indexes[0]
left = self._from_obj[replace_from_obj_index]
elif len(indexes) > 1:
raise sa_exc.InvalidRequestError(
"Can't determine which FROM clause to join "
"from, there are multiple FROMS which can "
"join to this entity. Please use the .select_from() "
"method to establish an explicit left side, as well as "
                    "providing an explicit ON clause if not present already to "
"help resolve the ambiguity."
)
else:
raise sa_exc.InvalidRequestError(
"Don't know how to join to %r. "
"Please use the .select_from() "
"method to establish an explicit left side, as well as "
                    "providing an explicit ON clause if not present already to "
"help resolve the ambiguity." % (right,)
)
elif self._entities:
# we have no explicit FROMs, so the implicit left has to
# come from our list of entities.
potential = {}
for entity_index, ent in enumerate(self._entities):
entity = ent.entity_zero_or_selectable
if entity is None:
continue
ent_info = inspect(entity)
if ent_info is r_info: # left and right are the same, skip
continue
# by using a dictionary with the selectables as keys this
# de-duplicates those selectables as occurs when the query is
# against a series of columns from the same selectable
if isinstance(ent, _MapperEntity):
potential[ent.selectable] = (entity_index, entity)
else:
potential[ent_info.selectable] = (None, entity)
all_clauses = list(potential.keys())
indexes = sql_util.find_left_clause_to_join_from(
all_clauses, r_info.selectable, onclause
)
if len(indexes) == 1:
use_entity_index, left = potential[all_clauses[indexes[0]]]
elif len(indexes) > 1:
raise sa_exc.InvalidRequestError(
"Can't determine which FROM clause to join "
"from, there are multiple FROMS which can "
"join to this entity. Please use the .select_from() "
"method to establish an explicit left side, as well as "
                    "providing an explicit ON clause if not present already to "
"help resolve the ambiguity."
)
else:
raise sa_exc.InvalidRequestError(
"Don't know how to join to %r. "
"Please use the .select_from() "
"method to establish an explicit left side, as well as "
                    "providing an explicit ON clause if not present already to "
"help resolve the ambiguity." % (right,)
)
else:
raise sa_exc.InvalidRequestError(
"No entities to join from; please use "
"select_from() to establish the left "
"entity/selectable of this join"
)
return left, replace_from_obj_index, use_entity_index
def _join_place_explicit_left_side(self, left):
"""When join conditions express a left side explicitly, determine
where in our existing list of FROM clauses we should join towards,
or if we need to make a new join, and if so is it from one of our
existing entities.
"""
# when we are here, it means join() was called with an indicator
# as to an exact left side, which means a path to a
# RelationshipProperty was given, e.g.:
#
# join(RightEntity, LeftEntity.right)
#
# or
#
# join(LeftEntity.right)
#
# as well as string forms:
#
# join(RightEntity, "right")
#
# etc.
#
replace_from_obj_index = use_entity_index = None
l_info = inspect(left)
if self._from_obj:
indexes = sql_util.find_left_clause_that_matches_given(
self._from_obj, l_info.selectable
)
if len(indexes) > 1:
raise sa_exc.InvalidRequestError(
"Can't identify which entity in which to assign the "
"left side of this join. Please use a more specific "
"ON clause."
)
# have an index, means the left side is already present in
# an existing FROM in the self._from_obj tuple
if indexes:
replace_from_obj_index = indexes[0]
# no index, means we need to add a new element to the
# self._from_obj tuple
# no from element present, so we will have to add to the
# self._from_obj tuple. Determine if this left side matches up
# with existing mapper entities, in which case we want to apply the
# aliasing / adaptation rules present on that entity if any
if (
replace_from_obj_index is None
and self._entities
and hasattr(l_info, "mapper")
):
for idx, ent in enumerate(self._entities):
# TODO: should we be checking for multiple mapper entities
# matching?
if isinstance(ent, _MapperEntity) and ent.corresponds_to(left):
use_entity_index = idx
break
return replace_from_obj_index, use_entity_index
def _join_check_and_adapt_right_side(
self, left, right, onclause, prop, create_aliases
):
"""transform the "right" side of the join as well as the onclause
according to polymorphic mapping translations, aliasing on the query
or on the join, special cases where the right and left side have
overlapping tables.
"""
l_info = inspect(left)
r_info = inspect(right)
overlap = False
if not create_aliases:
right_mapper = getattr(r_info, "mapper", None)
# if the target is a joined inheritance mapping,
# be more liberal about auto-aliasing.
if right_mapper and (
right_mapper.with_polymorphic
or isinstance(right_mapper.persist_selectable, expression.Join)
):
for from_obj in self._from_obj or [l_info.selectable]:
if sql_util.selectables_overlap(
l_info.selectable, from_obj
) and sql_util.selectables_overlap(
from_obj, r_info.selectable
):
overlap = True
break
if (
overlap or not create_aliases
) and l_info.selectable is r_info.selectable:
raise sa_exc.InvalidRequestError(
"Can't join table/selectable '%s' to itself"
% l_info.selectable
)
right_mapper, right_selectable, right_is_aliased = (
getattr(r_info, "mapper", None),
r_info.selectable,
getattr(r_info, "is_aliased_class", False),
)
if (
right_mapper
and prop
and not right_mapper.common_parent(prop.mapper)
):
raise sa_exc.InvalidRequestError(
"Join target %s does not correspond to "
"the right side of join condition %s" % (right, onclause)
)
# _join_entities is used as a hint for single-table inheritance
# purposes at the moment
if hasattr(r_info, "mapper"):
self._join_entities += (r_info,)
need_adapter = False
# test for joining to an unmapped selectable as the target
if r_info.is_clause_element:
if prop:
right_mapper = prop.mapper
if right_selectable._is_lateral:
# orm_only is disabled to suit the case where we have to
# adapt an explicit correlate(Entity) - the select() loses
# the ORM-ness in this case right now, ideally it would not
right = self._adapt_clause(right, True, False)
elif prop:
# joining to selectable with a mapper property given
# as the ON clause
if not right_selectable.is_derived_from(
right_mapper.persist_selectable
):
raise sa_exc.InvalidRequestError(
"Selectable '%s' is not derived from '%s'"
% (
right_selectable.description,
right_mapper.persist_selectable.description,
)
)
# if the destination selectable is a plain select(),
# turn it into an alias().
if isinstance(right_selectable, expression.SelectBase):
right_selectable = right_selectable.alias()
need_adapter = True
# make the right hand side target into an ORM entity
right = aliased(right_mapper, right_selectable)
elif create_aliases:
# it *could* work, but it doesn't right now and I'd rather
# get rid of aliased=True completely
raise sa_exc.InvalidRequestError(
"The aliased=True parameter on query.join() only works "
"with an ORM entity, not a plain selectable, as the "
"target."
)
aliased_entity = (
right_mapper
and not right_is_aliased
and (
right_mapper.with_polymorphic
and isinstance(
right_mapper._with_polymorphic_selectable, expression.Alias
)
or overlap
# test for overlap:
# orm/inheritance/relationships.py
# SelfReferentialM2MTest
)
)
if not need_adapter and (create_aliases or aliased_entity):
right = aliased(right, flat=True)
need_adapter = True
if need_adapter:
assert right_mapper
# if an alias() of the right side was generated,
# apply an adapter to all subsequent filter() calls
# until reset_joinpoint() is called.
adapter = ORMAdapter(
right, equivalents=right_mapper._equivalent_columns
)
# current adapter takes highest precedence
self._filter_aliases = (adapter,) + self._filter_aliases
# if an alias() on the right side was generated,
            # which is intended to wrap the right side in a subquery,
# ensure that columns retrieved from this target in the result
# set are also adapted.
if not create_aliases:
self._mapper_loads_polymorphically_with(right_mapper, adapter)
# if the onclause is a ClauseElement, adapt it with any
# adapters that are in place right now
if isinstance(onclause, expression.ClauseElement):
onclause = self._adapt_clause(onclause, True, True)
# if joining on a MapperProperty path,
# track the path to prevent redundant joins
if not create_aliases and prop:
self._update_joinpoint(
{
"_joinpoint_entity": right,
"prev": ((left, right, prop.key), self._joinpoint),
}
)
else:
self._joinpoint = {"_joinpoint_entity": right}
return right, inspect(right), onclause
def _reset_joinpoint(self):
self._joinpoint = self._joinpath
self._filter_aliases = ()
@_generative(_no_statement_condition)
def reset_joinpoint(self):
"""Return a new :class:`_query.Query`, where the "join point" has
been reset back to the base FROM entities of the query.
This method is usually used in conjunction with the
``aliased=True`` feature of the :meth:`_query.Query.join`
method. See the example in :meth:`_query.Query.join` for how
this is used.
"""
self._reset_joinpoint()
@_generative(_no_clauseelement_condition)
def select_from(self, *from_obj):
r"""Set the FROM clause of this :class:`_query.Query` explicitly.
:meth:`_query.Query.select_from` is often used in conjunction with
:meth:`_query.Query.join` in order to control which entity is selected
from on the "left" side of the join.
The entity or selectable object here effectively replaces the
"left edge" of any calls to :meth:`_query.Query.join`, when no
joinpoint is otherwise established - usually, the default "join
point" is the leftmost entity in the :class:`_query.Query` object's
list of entities to be selected.
A typical example::
q = session.query(Address).select_from(User).\
join(User.addresses).\
filter(User.name == 'ed')
Which produces SQL equivalent to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
:param \*from_obj: collection of one or more entities to apply
to the FROM clause. Entities can be mapped classes,
:class:`.AliasedClass` objects, :class:`_orm.Mapper` objects
as well as core :class:`_expression.FromClause`
elements like subqueries.
.. versionchanged:: 0.9
This method no longer applies the given FROM object
to be the selectable from which matching entities
select from; the :meth:`.select_entity_from` method
now accomplishes this. See that method for a description
of this behavior.
.. seealso::
:meth:`_query.Query.join`
:meth:`_query.Query.select_entity_from`
"""
self._set_select_from(from_obj, False)
@_generative(_no_clauseelement_condition)
def select_entity_from(self, from_obj):
r"""Set the FROM clause of this :class:`_query.Query` to a
core selectable, applying it as a replacement FROM clause
for corresponding mapped entities.
The :meth:`_query.Query.select_entity_from`
method supplies an alternative
approach to the use case of applying an :func:`.aliased` construct
explicitly throughout a query. Instead of referring to the
:func:`.aliased` construct explicitly,
:meth:`_query.Query.select_entity_from` automatically *adapts* all
occurrences of the entity to the target selectable.
Given a case for :func:`.aliased` such as selecting ``User``
objects from a SELECT statement::
select_stmt = select([User]).where(User.id == 7)
user_alias = aliased(User, select_stmt)
q = session.query(user_alias).\
filter(user_alias.name == 'ed')
Above, we apply the ``user_alias`` object explicitly throughout the
query. When it's not feasible for ``user_alias`` to be referenced
explicitly in many places, :meth:`_query.Query.select_entity_from`
may be
used at the start of the query to adapt the existing ``User`` entity::
q = session.query(User).\
select_entity_from(select_stmt).\
filter(User.name == 'ed')
Above, the generated SQL will show that the ``User`` entity is
adapted to our statement, even in the case of the WHERE clause:
.. sourcecode:: sql
SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
FROM (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE anon_1.name = :name_1
The :meth:`_query.Query.select_entity_from` method is similar to the
:meth:`_query.Query.select_from` method,
in that it sets the FROM clause
of the query. The difference is that it additionally applies
adaptation to the other parts of the query that refer to the
primary entity. If above we had used :meth:`_query.Query.select_from`
instead, the SQL generated would have been:
.. sourcecode:: sql
-- uses plain select_from(), not select_entity_from()
SELECT "user".id AS user_id, "user".name AS user_name
FROM "user", (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE "user".name = :name_1
To supply textual SQL to the :meth:`_query.Query.select_entity_from`
method,
we can make use of the :func:`_expression.text` construct. However,
the
:func:`_expression.text`
construct needs to be aligned with the columns of our
entity, which is achieved by making use of the
:meth:`_expression.TextClause.columns` method::
text_stmt = text("select id, name from user").columns(
User.id, User.name)
q = session.query(User).select_entity_from(text_stmt)
:meth:`_query.Query.select_entity_from` itself accepts an
:func:`.aliased`
object, so that the special options of :func:`.aliased` such as
:paramref:`.aliased.adapt_on_names` may be used within the
scope of the :meth:`_query.Query.select_entity_from`
method's adaptation
services. Suppose
a view ``user_view`` also returns rows from ``user``. If
we reflect this view into a :class:`_schema.Table`, this view has no
relationship to the :class:`_schema.Table` to which we are mapped,
however
we can use name matching to select from it::
user_view = Table('user_view', metadata,
autoload_with=engine)
user_view_alias = aliased(
User, user_view, adapt_on_names=True)
q = session.query(User).\
select_entity_from(user_view_alias).\
order_by(User.name)
.. versionchanged:: 1.1.7 The :meth:`_query.Query.select_entity_from`
method now accepts an :func:`.aliased` object as an alternative
to a :class:`_expression.FromClause` object.
:param from_obj: a :class:`_expression.FromClause`
object that will replace
the FROM clause of this :class:`_query.Query`.
It also may be an instance
of :func:`.aliased`.
.. seealso::
:meth:`_query.Query.select_from`
"""
self._set_select_from([from_obj], True)
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = util.decode_slice(item)
if (
isinstance(stop, int)
and isinstance(start, int)
and stop - start <= 0
):
return []
# perhaps we should execute a count() here so that we
# can still use LIMIT/OFFSET ?
elif (isinstance(start, int) and start < 0) or (
isinstance(stop, int) and stop < 0
):
return list(self)[item]
res = self.slice(start, stop)
if step is not None:
return list(res)[None : None : item.step]
else:
return list(res)
else:
if item == -1:
return list(self)[-1]
else:
return list(self[item : item + 1])[0]
@_generative(_no_statement_condition)
def slice(self, start, stop):
"""Computes the "slice" of the :class:`_query.Query` represented by
the given indices and returns the resulting :class:`_query.Query`.
The start and stop indices behave like the argument to Python's
built-in :func:`range` function. This method provides an
alternative to using ``LIMIT``/``OFFSET`` to get a slice of the
query.
For example, ::
session.query(User).order_by(User.id).slice(1, 3)
renders as
.. sourcecode:: sql
SELECT users.id AS users_id,
users.name AS users_name
FROM users ORDER BY users.id
LIMIT ? OFFSET ?
(2, 1)
.. seealso::
:meth:`_query.Query.limit`
:meth:`_query.Query.offset`
"""
if start is not None and stop is not None:
self._offset = self._offset if self._offset is not None else 0
if start != 0:
self._offset += start
self._limit = stop - start
elif start is None and stop is not None:
self._limit = stop
elif start is not None and stop is None:
self._offset = self._offset if self._offset is not None else 0
if start != 0:
self._offset += start
if isinstance(self._offset, int) and self._offset == 0:
self._offset = None
@_generative(_no_statement_condition)
def limit(self, limit):
"""Apply a ``LIMIT`` to the query and return the newly resulting
``Query``.
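        E.g., a minimal sketch::
            q = session.query(User).order_by(User.id).limit(10)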
"""
self._limit = limit
@_generative(_no_statement_condition)
def offset(self, offset):
"""Apply an ``OFFSET`` to the query and return the newly resulting
``Query``.
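        E.g., a minimal sketch::
            q = session.query(User).order_by(User.id).offset(5)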
"""
self._offset = offset
@_generative(_no_statement_condition)
def distinct(self, *expr):
r"""Apply a ``DISTINCT`` to the query and return the newly resulting
``Query``.
.. note::
The :meth:`.distinct` call includes logic that will automatically
add columns from the ORDER BY of the query to the columns
clause of the SELECT statement, to satisfy the common need
of the database backend that ORDER BY columns be part of the
SELECT list when DISTINCT is used. These columns *are not*
added to the list of columns actually fetched by the
            :class:`_query.Query`, however, so they do not affect results.
The columns are passed through when using the
:attr:`_query.Query.statement` accessor, however.
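        E.g., a minimal sketch::
            q = session.query(User.name).distinct()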
:param \*expr: optional column expressions. When present,
the PostgreSQL dialect will render a ``DISTINCT ON (<expressions>)``
construct.
"""
if not expr:
self._distinct = True
else:
expr = self._adapt_col_list(expr)
if isinstance(self._distinct, list):
self._distinct += expr
else:
self._distinct = expr
@_generative()
def prefix_with(self, *prefixes):
r"""Apply the prefixes to the query and return the newly resulting
``Query``.
:param \*prefixes: optional prefixes, typically strings,
        not using any commas. In particular, this is useful for MySQL keywords
and optimizer hints:
e.g.::
query = sess.query(User.name).\
prefix_with('HIGH_PRIORITY').\
prefix_with('SQL_SMALL_RESULT', 'ALL').\
prefix_with('/*+ BKA(user) */')
Would render::
SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL /*+ BKA(user) */
users.name AS users_name FROM users
.. seealso::
:meth:`_expression.HasPrefixes.prefix_with`
"""
if self._prefixes:
self._prefixes += prefixes
else:
self._prefixes = prefixes
@_generative()
def suffix_with(self, *suffixes):
r"""Apply the suffix to the query and return the newly resulting
``Query``.
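        E.g., a minimal sketch; the suffix text here is assumed for
        illustration::
            q = session.query(User.name).suffix_with("/* trailing hint */")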
:param \*suffixes: optional suffixes, typically strings,
not using any commas.
.. versionadded:: 1.0.0
.. seealso::
:meth:`_query.Query.prefix_with`
:meth:`_expression.HasSuffixes.suffix_with`
"""
if self._suffixes:
self._suffixes += suffixes
else:
self._suffixes = suffixes
def all(self):
"""Return the results represented by this :class:`_query.Query`
as a list.
This results in an execution of the underlying SQL statement.
.. warning:: The :class:`_query.Query` object,
when asked to return either
a sequence or iterator that consists of full ORM-mapped entities,
will **deduplicate entries based on primary key**. See the FAQ for
more details.
.. seealso::
:ref:`faq_query_deduplicating`
"""
return list(self)
@_generative(_no_clauseelement_condition)
def from_statement(self, statement):
"""Execute the given SELECT statement and return results.
This method bypasses all internal statement compilation, and the
statement is executed without modification.
The statement is typically either a :func:`_expression.text`
or :func:`_expression.select` construct, and should return the set
of columns
appropriate to the entity class represented by this
:class:`_query.Query`.
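        E.g., a minimal sketch, assuming ``text`` imported from
        ``sqlalchemy``::
            q = session.query(User).from_statement(
                text("SELECT * FROM users WHERE users.name=:name")
            ).params(name='ed')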
.. seealso::
:ref:`orm_tutorial_literal_sql` - usage examples in the
ORM tutorial
"""
statement = expression._expression_literal_as_text(statement)
if not isinstance(
statement, (expression.TextClause, expression.SelectBase)
):
raise sa_exc.ArgumentError(
"from_statement accepts text(), select(), "
"and union() objects only."
)
self._statement = statement
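# Illustrative sketch (assumes a mapped ``User`` class): a textual
# statement can be linked to entity columns via text().columns() and
# then handed to from_statement():
#
#   from sqlalchemy import text
#   stmt = text("SELECT id, name FROM users WHERE name = :name").columns(
#       User.id, User.name
#   )
#   users = session.query(User).from_statement(stmt).params(name="ed").all()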
def first(self):
"""Return the first result of this ``Query`` or
None if the result doesn't contain any row.
first() applies a limit of one within the generated SQL, so that
only one primary entity row is generated on the server side
(note this may consist of multiple result rows if join-loaded
collections are present).
Calling :meth:`_query.Query.first`
results in an execution of the underlying
query.
.. seealso::
:meth:`_query.Query.one`
:meth:`_query.Query.one_or_none`
"""
if self._statement is not None:
ret = list(self)[0:1]
else:
ret = list(self[0:1])
if len(ret) > 0:
return ret[0]
else:
return None
def one_or_none(self):
"""Return at most one result or raise an exception.
Returns ``None`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that returns only scalar values
as opposed to full identity-mapped entities.
Calling :meth:`_query.Query.one_or_none`
results in an execution of the
underlying query.
.. versionadded:: 1.0.9
Added :meth:`_query.Query.one_or_none`
.. seealso::
:meth:`_query.Query.first`
:meth:`_query.Query.one`
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
return None
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one_or_none()"
)
def one(self):
"""Return exactly one result or raise an exception.
Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that returns only scalar values
as opposed to full identity-mapped entities.
Calling :meth:`.one` results in an execution of the underlying query.
.. seealso::
:meth:`_query.Query.first`
:meth:`_query.Query.one_or_none`
"""
try:
ret = self.one_or_none()
except orm_exc.MultipleResultsFound as err:
util.raise_(
orm_exc.MultipleResultsFound(
"Multiple rows were found for one()"
),
replace_context=err,
)
else:
if ret is None:
raise orm_exc.NoResultFound("No row was found for one()")
return ret
def scalar(self):
"""Return the first element of the first result or None
if no rows present. If multiple rows are returned,
raises MultipleResultsFound.
>>> session.query(Item).scalar()
<Item>
>>> session.query(Item.id).scalar()
1
>>> session.query(Item.id).filter(Item.id < 0).scalar()
None
>>> session.query(Item.id, Item.name).scalar()
1
>>> session.query(func.count(Parent.id)).scalar()
20
This results in an execution of the underlying query.
"""
try:
ret = self.one()
if not isinstance(ret, tuple):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
def __iter__(self):
context = self._compile_context()
context.statement.use_labels = True
if self._autoflush and not self._populate_existing:
self.session._autoflush()
return self._execute_and_instances(context)
def __str__(self):
context = self._compile_context()
try:
bind = (
self._get_bind_args(context, self.session.get_bind)
if self.session
else None
)
except sa_exc.UnboundExecutionError:
bind = None
return str(context.statement.compile(bind))
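# Illustrative sketch: because __str__ compiles against the Session's
# bind when one is present, a query can be inspected without executing
# it (assumes a mapped ``User`` class):
#
#   print(session.query(User).filter(User.name == "ed"))
#   # SELECT users.id AS users_id, users.name AS users_name
#   # FROM users WHERE users.name = :name_1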
def _connection_from_session(self, **kw):
conn = self.session.connection(**kw)
if self._execution_options:
conn = conn.execution_options(**self._execution_options)
return conn
def _execute_and_instances(self, querycontext):
conn = self._get_bind_args(
querycontext, self._connection_from_session, close_with_result=True
)
result = conn.execute(querycontext.statement, self._params)
return loading.instances(querycontext.query, result, querycontext)
def _execute_crud(self, stmt, mapper):
conn = self._connection_from_session(
mapper=mapper, clause=stmt, close_with_result=True
)
return conn.execute(stmt, self._params)
def _get_bind_args(self, querycontext, fn, **kw):
return fn(
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
)
@property
def column_descriptions(self):
"""Return metadata about the columns which would be
returned by this :class:`_query.Query`.
Format is a list of dictionaries::
user_alias = aliased(User, name='user2')
q = sess.query(User, User.id, user_alias)
# this expression:
q.column_descriptions
# would return:
[
{
'name':'User',
'type':User,
'aliased':False,
'expr':User,
'entity': User
},
{
'name':'id',
'type':Integer(),
'aliased':False,
'expr':User.id,
'entity': User
},
{
'name':'user2',
'type':User,
'aliased':True,
'expr':user_alias,
'entity': user_alias
}
]
"""
return [
{
"name": ent._label_name,
"type": ent.type,
"aliased": getattr(insp_ent, "is_aliased_class", False),
"expr": ent.expr,
"entity": getattr(insp_ent, "entity", None)
if ent.entity_zero is not None
and not insp_ent.is_clause_element
else None,
}
for ent, insp_ent in [
(
_ent,
(
inspect(_ent.entity_zero)
if _ent.entity_zero is not None
else None
),
)
for _ent in self._entities
]
]
def instances(self, cursor, __context=None):
"""Given a ResultProxy cursor as returned by connection.execute(),
return an ORM result as an iterator.
e.g.::
result = engine.execute("select * from users")
for u in session.query(User).instances(result):
print(u)
"""
context = __context
if context is None:
context = QueryContext(self)
return loading.instances(self, cursor, context)
def merge_result(self, iterator, load=True):
"""Merge a result into this :class:`_query.Query` object's Session.
Given an iterator returned by a :class:`_query.Query`
of the same structure
as this one, return an identical iterator of results, with all mapped
instances merged into the session using :meth:`.Session.merge`. This
is an optimized method which will merge all mapped instances,
preserving the structure of the result rows and unmapped columns with
less method overhead than that of calling :meth:`.Session.merge`
explicitly for each value.
The structure of the results is determined based on the column list of
this :class:`_query.Query` - if these do not correspond,
unchecked errors
will occur.
The 'load' argument is the same as that of :meth:`.Session.merge`.
For an example of how :meth:`_query.Query.merge_result` is used, see
the source code for the example :ref:`examples_caching`, where
:meth:`_query.Query.merge_result` is used to efficiently restore state
from a cache back into a target :class:`.Session`.
"""
return loading.merge_result(self, iterator, load)
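# Illustrative sketch (assumes ``cached_rows`` holds results previously
# produced by a query of the same structure, e.g. pulled from an
# external cache):
#
#   q = session.query(User).filter(User.id < 10)
#   merged = list(q.merge_result(cached_rows, load=False))
#   # each mapped instance in ``merged`` is now present in the Session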
@property
def _select_args(self):
return {
"limit": self._limit,
"offset": self._offset,
"distinct": self._distinct,
"prefixes": self._prefixes,
"suffixes": self._suffixes,
"group_by": self._group_by or None,
"having": self._having,
}
@property
def _should_nest_selectable(self):
kwargs = self._select_args
return (
kwargs.get("limit") is not None
or kwargs.get("offset") is not None
or kwargs.get("distinct", False)
or kwargs.get("group_by", False)
)
def exists(self):
"""A convenience method that turns a query into an EXISTS subquery
of the form EXISTS (SELECT 1 FROM ... WHERE ...).
e.g.::
q = session.query(User).filter(User.name == 'fred')
session.query(q.exists())
Producing SQL similar to::
SELECT EXISTS (
SELECT 1 FROM users WHERE users.name = :name_1
) AS anon_1
The EXISTS construct is usually used in the WHERE clause::
session.query(User.id).filter(q.exists()).scalar()
Note that some databases such as SQL Server don't allow an
EXISTS expression to be present in the columns clause of a
SELECT. To select a simple boolean value based on the exists
as a WHERE, use :func:`.literal`::
from sqlalchemy import literal
session.query(literal(True)).filter(q.exists()).scalar()
"""
# .add_columns() for the case that we are a query().select_from(X),
# so that ".statement" can be produced (#2995) but also without
# omitting the FROM clause from a query(X) (#2818);
# .with_only_columns() after we have a core select() so that
# we get just "SELECT 1" without any entities.
return sql.exists(
self.enable_eagerloads(False)
.add_columns(sql.literal_column("1"))
.with_labels()
.statement.with_only_columns([1])
)
def count(self):
r"""Return a count of rows this the SQL formed by this :class:`Query`
would return.
This generates the SQL for this Query as follows::
SELECT count(1) AS count_1 FROM (
SELECT <rest of query follows...>
) AS anon_1
The above SQL returns a single row, which is the aggregate value
of the count function; the :meth:`_query.Query.count`
method then returns
that single integer value.
.. warning::
It is important to note that the value returned by
count() is **not the same as the number of ORM objects that this
Query would return from a method such as the .all() method**.
The :class:`_query.Query` object,
when asked to return full entities,
will **deduplicate entries based on primary key**, meaning if the
same primary key value would appear in the results more than once,
only one object of that primary key would be present. This does
not apply to a query that is against individual columns.
.. seealso::
:ref:`faq_query_deduplicating`
:ref:`orm_tutorial_query_returning`
For fine grained control over specific columns to count, to skip the
usage of a subquery or otherwise control of the FROM clause, or to use
other aggregate functions, use :attr:`~sqlalchemy.sql.expression.func`
expressions in conjunction with :meth:`~.Session.query`, i.e.::
from sqlalchemy import func
# count User records, without
# using a subquery.
session.query(func.count(User.id))
# return count of user "id" grouped
# by "name"
session.query(func.count(User.id)).\
group_by(User.name)
from sqlalchemy import distinct
# count distinct "name" values
session.query(func.count(distinct(User.name)))
"""
col = sql.func.count(sql.literal_column("*"))
return self.from_self(col).scalar()
def delete(self, synchronize_session="evaluate"):
r"""Perform a bulk delete query.
Deletes rows matched by this query from the database.
E.g.::
sess.query(User).filter(User.age == 25).\
delete(synchronize_session=False)
sess.query(User).filter(User.age == 25).\
delete(synchronize_session='evaluate')
.. warning:: The :meth:`_query.Query.delete`
method is a "bulk" operation,
which bypasses ORM unit-of-work automation in favor of greater
performance. **Please read all caveats and warnings below.**
:param synchronize_session: chooses the strategy for the removal of
matched objects from the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, objects may still remain in
the session which were in fact deleted which can lead to confusing
results if they are accessed via get() or already loaded
collections.
``'fetch'`` - performs a select query before the delete to find
objects that are matched by the delete query and need to be
removed from the session. Matched objects are removed from the
session.
``'evaluate'`` - Evaluate the query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an error is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
.. warning:: **Additional Caveats for bulk query deletes**
* This method does **not work for joined
inheritance mappings**, since the **multiple table
deletes are not supported by SQL** as well as that the
**join condition of an inheritance mapper is not
automatically rendered**. Care must be taken in any
multiple-table delete to first accommodate via some other means
how the related table will be deleted, as well as to
explicitly include the joining
condition between those tables, even in mappings where
this is normally automatic. E.g. if a class ``Engineer``
subclasses ``Employee``, a DELETE against the ``Employee``
table would look like::
session.query(Engineer).\
filter(Engineer.id == Employee.id).\
filter(Employee.name == 'dilbert').\
delete()
However the above SQL will not delete from the Engineer table,
unless an ON DELETE CASCADE rule is established in the database
to handle it.
Short story, **do not use this method for joined inheritance
mappings unless you have taken the additional steps to make
this feasible**.
* The polymorphic identity WHERE criteria is **not** included
for single- or
joined- table updates - this must be added **manually** even
for single table inheritance.
* The method does **not** offer in-Python cascading of
relationships - it is assumed that ON DELETE CASCADE/SET
NULL/etc. is configured for any foreign key references
which require it, otherwise the database may emit an
integrity violation if foreign key references are being
enforced.
After the DELETE, dependent objects in the
:class:`.Session` which were impacted by an ON DELETE
may not contain the current state, or may have been
deleted. This issue is resolved once the
:class:`.Session` is expired, which normally occurs upon
:meth:`.Session.commit` or can be forced by using
:meth:`.Session.expire_all`. Accessing an expired
object whose row has been deleted will invoke a SELECT
to locate the row; when the row is not found, an
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is
raised.
* The ``'fetch'`` strategy results in an additional
SELECT statement emitted and will significantly reduce
performance.
* The ``'evaluate'`` strategy performs a scan of
all matching objects within the :class:`.Session`; if the
contents of the :class:`.Session` are expired, such as
via a preceding :meth:`.Session.commit` call, **this will
result in SELECT queries emitted for every matching object**.
* The :meth:`.MapperEvents.before_delete` and
:meth:`.MapperEvents.after_delete`
events **are not invoked** from this method. Instead, the
:meth:`.SessionEvents.after_bulk_delete` method is provided to
act upon a mass DELETE of entity rows.
.. seealso::
:meth:`_query.Query.update`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
delete_op = persistence.BulkDelete.factory(self, synchronize_session)
delete_op.exec_()
return delete_op.rowcount
def update(self, values, synchronize_session="evaluate", update_args=None):
r"""Perform a bulk update query.
Updates rows matched by this query in the database.
E.g.::
sess.query(User).filter(User.age == 25).\
update({User.age: User.age - 10}, synchronize_session=False)
sess.query(User).filter(User.age == 25).\
update({"age": User.age - 10}, synchronize_session='evaluate')
.. warning:: The :meth:`_query.Query.update`
method is a "bulk" operation,
which bypasses ORM unit-of-work automation in favor of greater
performance. **Please read all caveats and warnings below.**
:param values: a dictionary with attributes names, or alternatively
mapped attributes or SQL expressions, as keys, and literal
values or sql expressions as values. If :ref:`parameter-ordered
mode <updates_order_parameters>` is desired, the values can be
passed as a list of 2-tuples;
this requires that the
:paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
flag is passed to the :paramref:`.Query.update.update_args` dictionary
as well.
.. versionchanged:: 1.0.0 - string names in the values dictionary
are now resolved against the mapped entity; previously, these
strings were passed as literal column names with no mapper-level
translation.
:param synchronize_session: chooses the strategy to update the
attributes on objects in the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, updated objects may still
remain in the session with stale values on their attributes, which
can lead to confusing results.
``'fetch'`` - performs a select query before the update to find
objects that are matched by the update query. The updated
attributes are expired on matched objects.
``'evaluate'`` - Evaluate the Query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an exception is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:param update_args: Optional dictionary, if present will be passed
to the underlying :func:`_expression.update`
construct as the ``**kw`` for
the object. May be used to pass dialect-specific arguments such
as ``mysql_limit``, as well as other special arguments such as
:paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`.
.. versionadded:: 1.0.0
:return: the count of rows matched as returned by the database's
"row count" feature.
.. warning:: **Additional Caveats for bulk query updates**
* The method does **not** offer in-Python cascading of
relationships - it is assumed that ON UPDATE CASCADE is
configured for any foreign key references which require
it, otherwise the database may emit an integrity
violation if foreign key references are being enforced.
After the UPDATE, dependent objects in the
:class:`.Session` which were impacted by an ON UPDATE
CASCADE may not contain the current state; this issue is
resolved once the :class:`.Session` is expired, which
normally occurs upon :meth:`.Session.commit` or can be
forced by using :meth:`.Session.expire_all`.
* The ``'fetch'`` strategy results in an additional
SELECT statement emitted and will significantly reduce
performance.
* The ``'evaluate'`` strategy performs a scan of
all matching objects within the :class:`.Session`; if the
contents of the :class:`.Session` are expired, such as
via a preceding :meth:`.Session.commit` call, **this will
result in SELECT queries emitted for every matching object**.
* The method supports multiple table updates, as detailed
in :ref:`multi_table_updates`, and this behavior does
extend to support updates of joined-inheritance and
other multiple table mappings. However, the **join
condition of an inheritance mapper is not
automatically rendered**. Care must be taken in any
multiple-table update to explicitly include the joining
condition between those tables, even in mappings where
this is normally automatic. E.g. if a class ``Engineer``
subclasses ``Employee``, an UPDATE of the ``Engineer``
local table using criteria against the ``Employee``
local table might look like::
session.query(Engineer).\
filter(Engineer.id == Employee.id).\
filter(Employee.name == 'dilbert').\
update({"engineer_type": "programmer"})
* The polymorphic identity WHERE criteria is **not** included
for single- or
joined- table updates - this must be added **manually**, even
for single table inheritance.
* The :meth:`.MapperEvents.before_update` and
:meth:`.MapperEvents.after_update`
events **are not invoked from this method**. Instead, the
:meth:`.SessionEvents.after_bulk_update` method is provided to
act upon a mass UPDATE of entity rows.
.. seealso::
:meth:`_query.Query.delete`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
update_args = update_args or {}
update_op = persistence.BulkUpdate.factory(
self, synchronize_session, values, update_args
)
update_op.exec_()
return update_op.rowcount
def _compile_context(self, labels=True):
if self.dispatch.before_compile:
for fn in self.dispatch.before_compile:
new_query = fn(self)
if new_query is not None and new_query is not self:
self = new_query
if not fn._bake_ok:
self._bake_ok = False
context = QueryContext(self)
if context.statement is not None:
return context
context.labels = labels
context._for_update_arg = self._for_update_arg
for entity in self._entities:
entity.setup_context(self, context)
for rec in context.create_eager_joins:
strategy = rec[0]
strategy(context, *rec[1:])
if context.from_clause:
# "load from explicit FROMs" mode,
# i.e. when select_from() or join() is used
context.froms = list(context.from_clause)
# else "load from discrete FROMs" mode,
# i.e. when each _MappedEntity has its own FROM
if self._enable_single_crit:
self._adjust_for_single_inheritance(context)
if not context.primary_columns:
if self._only_load_props:
raise sa_exc.InvalidRequestError(
"No column-based properties specified for "
"refresh operation. Use session.expire() "
"to reload collections and related items."
)
else:
raise sa_exc.InvalidRequestError(
"Query contains no columns with which to " "SELECT from."
)
if context.multi_row_eager_loaders and self._should_nest_selectable:
context.statement = self._compound_eager_statement(context)
else:
context.statement = self._simple_statement(context)
return context
def _compound_eager_statement(self, context):
# for eager joins present and LIMIT/OFFSET/DISTINCT,
# wrap the query inside a select,
# then append eager joins onto that
if context.order_by:
order_by_col_expr = sql_util.expand_column_list_from_order_by(
context.primary_columns, context.order_by
)
else:
context.order_by = None
order_by_col_expr = []
inner = sql.select(
context.primary_columns + order_by_col_expr,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
# TODO: this order_by is only needed if
# LIMIT/OFFSET is present in self._select_args,
# else the application on the outside is enough
order_by=context.order_by,
**self._select_args
)
# put FOR UPDATE on the inner query, where MySQL will honor it,
# as well as if it has an OF so PostgreSQL can use it.
inner._for_update_arg = context._for_update_arg
for hint in self._with_hints:
inner = inner.with_hint(*hint)
if self._correlate:
inner = inner.correlate(*self._correlate)
inner = inner.alias()
equivs = self.__all_equivs()
context.adapter = sql_util.ColumnAdapter(inner, equivs)
statement = sql.select(
[inner] + context.secondary_columns, use_labels=context.labels
)
# Oracle however does not allow FOR UPDATE on the subquery,
# and the Oracle dialect ignores it, plus for PostgreSQL, MySQL
# we expect that all elements of the row are locked, so also put it
# on the outside (except in the case of PG when OF is used)
if (
context._for_update_arg is not None
and context._for_update_arg.of is None
):
statement._for_update_arg = context._for_update_arg
from_clause = inner
for eager_join in context.eager_joins.values():
# EagerLoader places a 'stop_on' attribute on the join,
# giving us a marker as to where the "splice point" of
# the join should be
from_clause = sql_util.splice_joins(
from_clause, eager_join, eager_join.stop_on
)
statement.append_from(from_clause)
if context.order_by:
statement.append_order_by(
*context.adapter.copy_and_process(context.order_by)
)
statement.append_order_by(*context.eager_order_by)
return statement
def _simple_statement(self, context):
if not context.order_by:
context.order_by = None
if self._distinct is True and context.order_by:
context.primary_columns += (
sql_util.expand_column_list_from_order_by
)(context.primary_columns, context.order_by)
context.froms += tuple(context.eager_joins.values())
statement = sql.select(
context.primary_columns + context.secondary_columns,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
order_by=context.order_by,
**self._select_args
)
statement._for_update_arg = context._for_update_arg
for hint in self._with_hints:
statement = statement.with_hint(*hint)
if self._correlate:
statement = statement.correlate(*self._correlate)
if context.eager_order_by:
statement.append_order_by(*context.eager_order_by)
return statement
def _adjust_for_single_inheritance(self, context):
"""Apply single-table-inheritance filtering.
For all distinct single-table-inheritance mappers represented in
the columns clause of this query, as well as the "select from entity",
add criterion to the WHERE
clause of the given QueryContext such that only the appropriate
subtypes are selected from the total results.
"""
search = set(self._mapper_adapter_map.values())
if (
self._select_from_entity
and self._select_from_entity not in self._mapper_adapter_map
):
insp = inspect(self._select_from_entity)
if insp.is_aliased_class:
adapter = insp._adapter
else:
adapter = None
search = search.union([(self._select_from_entity, adapter)])
for (ext_info, adapter) in search:
if ext_info in self._join_entities:
continue
single_crit = ext_info.mapper._single_table_criterion
if single_crit is not None:
if adapter:
single_crit = adapter.traverse(single_crit)
single_crit = self._adapt_clause(single_crit, False, False)
context.whereclause = sql.and_(
sql.True_._ifnone(context.whereclause), single_crit
)
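# Illustrative sketch of the criterion appended above: for a
# single-table mapping ``Manager(Employee)`` discriminated on a ``type``
# column, query(Manager) would emit SQL along the lines of:
#
#   SELECT employee.id, ... FROM employee
#   WHERE employee.type IN ('manager')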
class LockmodeArg(ForUpdateArg):
@classmethod
def parse_legacy_query(cls, mode):
if mode in (None, False):
return None
if mode == "read":
read = True
nowait = False
elif mode == "update":
read = nowait = False
elif mode == "update_nowait":
nowait = True
read = False
else:
raise sa_exc.ArgumentError(
"Unknown with_lockmode argument: %r" % mode
)
return LockmodeArg(read=read, nowait=nowait)
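# Illustrative mapping of the legacy modes parsed above to the locking
# variants rendered from the resulting ForUpdateArg (PostgreSQL shown;
# exact syntax is dialect-specific):
#
#   "read"          -> read=True,  nowait=False  (FOR SHARE)
#   "update"        -> read=False, nowait=False  (FOR UPDATE)
#   "update_nowait" -> read=False, nowait=True   (FOR UPDATE NOWAIT)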
class _QueryEntity(object):
"""represent an entity column returned within a Query result."""
def __new__(cls, *args, **kwargs):
if cls is _QueryEntity:
entity = args[1]
if not isinstance(entity, util.string_types) and _is_mapped_class(
entity
):
cls = _MapperEntity
elif isinstance(entity, Bundle):
cls = _BundleEntity
else:
cls = _ColumnEntity
return object.__new__(cls)
def _clone(self):
q = self.__class__.__new__(self.__class__)
q.__dict__ = self.__dict__.copy()
return q
class _MapperEntity(_QueryEntity):
"""mapper/class/AliasedClass entity"""
def __init__(self, query, entity):
if not query._primary_entity:
query._primary_entity = self
query._entities.append(self)
query._has_mapper_entities = True
self.entities = [entity]
self.expr = entity
supports_single_entity = True
use_id_for_hash = True
def setup_entity(self, ext_info, aliased_adapter):
self.mapper = ext_info.mapper
self.aliased_adapter = aliased_adapter
self.selectable = ext_info.selectable
self.is_aliased_class = ext_info.is_aliased_class
self._with_polymorphic = ext_info.with_polymorphic_mappers
self._polymorphic_discriminator = ext_info.polymorphic_on
self.entity_zero = ext_info
if ext_info.is_aliased_class:
self._label_name = self.entity_zero.name
else:
self._label_name = self.mapper.class_.__name__
self.path = self.entity_zero._path_registry
def set_with_polymorphic(
self, query, cls_or_mappers, selectable, polymorphic_on
):
"""Receive an update from a call to query.with_polymorphic().
Note the newer style of using a freestanding with_polymorphic()
construct doesn't make use of this method.
"""
if self.is_aliased_class:
# TODO: invalidrequest ?
raise NotImplementedError(
"Can't use with_polymorphic() against " "an Aliased object"
)
if cls_or_mappers is None:
query._reset_polymorphic_adapter(self.mapper)
return
mappers, from_obj = self.mapper._with_polymorphic_args(
cls_or_mappers, selectable
)
self._with_polymorphic = mappers
self._polymorphic_discriminator = polymorphic_on
self.selectable = from_obj
query._mapper_loads_polymorphically_with(
self.mapper,
sql_util.ColumnAdapter(from_obj, self.mapper._equivalent_columns),
)
@property
def type(self):
return self.mapper.class_
@property
def entity_zero_or_selectable(self):
return self.entity_zero
def corresponds_to(self, entity):
return _entity_corresponds_to(self.entity_zero, entity)
def adapt_to_selectable(self, query, sel):
query._entities.append(self)
def _get_entity_clauses(self, query, context):
adapter = None
if not self.is_aliased_class:
if query._polymorphic_adapters:
adapter = query._polymorphic_adapters.get(self.mapper, None)
else:
adapter = self.aliased_adapter
if adapter:
if query._from_obj_alias:
ret = adapter.wrap(query._from_obj_alias)
else:
ret = adapter
else:
ret = query._from_obj_alias
return ret
def row_processor(self, query, context, result):
adapter = self._get_entity_clauses(query, context)
if context.adapter and adapter:
adapter = adapter.wrap(context.adapter)
elif not adapter:
adapter = context.adapter
# polymorphic mappers which have concrete tables in
# their hierarchy usually
# require row aliasing unconditionally.
if not adapter and self.mapper._requires_row_aliasing:
adapter = sql_util.ColumnAdapter(
self.selectable, self.mapper._equivalent_columns
)
if query._primary_entity is self:
only_load_props = query._only_load_props
refresh_state = context.refresh_state
else:
only_load_props = refresh_state = None
_instance = loading._instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
only_load_props=only_load_props,
refresh_state=refresh_state,
polymorphic_discriminator=self._polymorphic_discriminator,
)
return _instance, self._label_name
def setup_context(self, query, context):
adapter = self._get_entity_clauses(query, context)
# if self._adapted_selectable is None:
context.froms += (self.selectable,)
if context.order_by is False and self.mapper.order_by:
context.order_by = self.mapper.order_by
# apply adaptation to the mapper's order_by if needed.
if adapter:
context.order_by = adapter.adapt_list(
util.to_list(context.order_by)
)
loading._setup_entity_query(
context,
self.mapper,
self,
self.path,
adapter,
context.primary_columns,
with_polymorphic=self._with_polymorphic,
only_load_props=query._only_load_props,
polymorphic_discriminator=self._polymorphic_discriminator,
)
def __str__(self):
return str(self.mapper)
@inspection._self_inspects
class Bundle(InspectionAttr):
"""A grouping of SQL expressions that are returned by a
:class:`_query.Query`
under one namespace.
The :class:`.Bundle` essentially allows nesting of the tuple-based
results returned by a column-oriented :class:`_query.Query` object.
It also
is extensible via simple subclassing, where the primary capability
to override is that of how the set of expressions should be returned,
allowing post-processing as well as custom return types, without
involving ORM identity-mapped classes.
.. versionadded:: 0.9.0
.. seealso::
:ref:`bundles`
"""
single_entity = False
"""If True, queries for a single Bundle will be returned as a single
entity, rather than an element within a keyed tuple."""
is_clause_element = False
is_mapper = False
is_aliased_class = False
def __init__(self, name, *exprs, **kw):
r"""Construct a new :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
for row in session.query(bn).filter(
bn.c.x == 5).filter(bn.c.y == 4):
print(row.mybundle.x, row.mybundle.y)
:param name: name of the bundle.
:param \*exprs: columns or SQL expressions comprising the bundle.
:param single_entity=False: if True, rows for this :class:`.Bundle`
can be returned as a "single entity" outside of any enclosing tuple
in the same manner as a mapped entity.
"""
self.name = self._label = name
self.exprs = exprs
self.c = self.columns = ColumnCollection()
self.columns.update(
(getattr(col, "key", col._label), col) for col in exprs
)
self.single_entity = kw.pop("single_entity", self.single_entity)
columns = None
"""A namespace of SQL expressions referred to by this :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
q = sess.query(bn).filter(bn.c.x == 5)
Nesting of bundles is also supported::
b1 = Bundle("b1",
Bundle('b2', MyClass.a, MyClass.b),
Bundle('b3', MyClass.x, MyClass.y)
)
q = sess.query(b1).filter(
b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)
.. seealso::
:attr:`.Bundle.c`
"""
c = None
"""An alias for :attr:`.Bundle.columns`."""
def _clone(self):
cloned = self.__class__.__new__(self.__class__)
cloned.__dict__.update(self.__dict__)
return cloned
def __clause_element__(self):
return expression.ClauseList(group=False, *self.exprs)
@property
def clauses(self):
return self.__clause_element__().clauses
def label(self, name):
"""Provide a copy of this :class:`.Bundle` passing a new label."""
cloned = self._clone()
cloned.name = name
return cloned
def create_row_processor(self, query, procs, labels):
"""Produce the "row processing" function for this :class:`.Bundle`.
May be overridden by subclasses.
.. seealso::
:ref:`bundles` - includes an example of subclassing.
"""
keyed_tuple = util.lightweight_named_tuple("result", labels)
def proc(row):
return keyed_tuple([proc(row) for proc in procs])
return proc
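# Illustrative sketch of overriding the hook above so that a Bundle
# returns plain dictionaries rather than named tuples (a hypothetical
# subclass, not part of this module):
#
#   class DictBundle(Bundle):
#       def create_row_processor(self, query, procs, labels):
#           def proc(row):
#               return dict(zip(labels, (p(row) for p in procs)))
#           return proc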
class _BundleEntity(_QueryEntity):
use_id_for_hash = False
def __init__(self, query, bundle, setup_entities=True):
query._entities.append(self)
self.bundle = self.expr = bundle
self.type = type(bundle)
self._label_name = bundle.name
self._entities = []
if setup_entities:
for expr in bundle.exprs:
if isinstance(expr, Bundle):
_BundleEntity(self, expr)
else:
_ColumnEntity(self, expr)
self.supports_single_entity = self.bundle.single_entity
@property
def mapper(self):
ezero = self.entity_zero
if ezero is not None:
return ezero.mapper
else:
return None
@property
def entities(self):
entities = []
for ent in self._entities:
entities.extend(ent.entities)
return entities
@property
def entity_zero(self):
for ent in self._entities:
ezero = ent.entity_zero
if ezero is not None:
return ezero
else:
return None
def corresponds_to(self, entity):
# TODO: we might be able to implement this but for now
# we are working around it
return False
@property
def entity_zero_or_selectable(self):
for ent in self._entities:
ezero = ent.entity_zero_or_selectable
if ezero is not None:
return ezero
else:
return None
def adapt_to_selectable(self, query, sel):
c = _BundleEntity(query, self.bundle, setup_entities=False)
# c._label_name = self._label_name
# c.entity_zero = self.entity_zero
# c.entities = self.entities
for ent in self._entities:
ent.adapt_to_selectable(c, sel)
def setup_entity(self, ext_info, aliased_adapter):
for ent in self._entities:
ent.setup_entity(ext_info, aliased_adapter)
def setup_context(self, query, context):
for ent in self._entities:
ent.setup_context(query, context)
def row_processor(self, query, context, result):
procs, labels = zip(
*[
ent.row_processor(query, context, result)
for ent in self._entities
]
)
proc = self.bundle.create_row_processor(query, procs, labels)
return proc, self._label_name
class _ColumnEntity(_QueryEntity):
"""Column/expression based entity."""
def __init__(self, query, column, namespace=None):
self.expr = column
self.namespace = namespace
search_entities = True
check_column = False
if isinstance(column, util.string_types):
util.warn_deprecated(
"Plain string expression passed to Query() should be "
"explicitly declared using literal_column(); "
"automatic coercion of this value will be removed in "
"SQLAlchemy 1.4"
)
column = sql.literal_column(column)
self._label_name = column.name
search_entities = False
check_column = True
_entity = None
elif isinstance(
column, (attributes.QueryableAttribute, interfaces.PropComparator)
):
_entity = getattr(column, "_parententity", None)
if _entity is not None:
search_entities = False
self._label_name = column.key
column = column._query_clause_element()
check_column = True
if isinstance(column, Bundle):
_BundleEntity(query, column)
return
if not isinstance(column, sql.ColumnElement):
if hasattr(column, "_select_iterable"):
# break out an object like Table into
# individual columns
for c in column._select_iterable:
if c is column:
break
_ColumnEntity(query, c, namespace=column)
else:
return
raise sa_exc.InvalidRequestError(
"SQL expression, column, or mapped entity "
"expected - got '%r'" % (column,)
)
elif not check_column:
self._label_name = getattr(column, "key", None)
search_entities = True
self.type = type_ = column.type
self.use_id_for_hash = not type_.hashable
# If the Column is unnamed, give it a
# label() so that mutable column expressions
# can be located in the result even
# if the expression's identity has been changed
# due to adaption.
if not column._label and not getattr(column, "is_literal", False):
column = column.label(self._label_name)
query._entities.append(self)
self.column = column
self.froms = set()
# look for ORM entities represented within the
# given expression. Try to count only entities
# for columns whose FROM object is in the actual list
# of FROMs for the overall expression - this helps
# subqueries which were built from ORM constructs from
# leaking out their entities into the main select construct
self.actual_froms = list(column._from_objects)
actual_froms = set(self.actual_froms)
if not search_entities:
self.entity_zero = _entity
if _entity:
self.entities = [_entity]
self.mapper = _entity.mapper
else:
self.entities = []
self.mapper = None
self._from_entities = set(self.entities)
else:
all_elements = [
elem
for elem in sql_util.surface_column_elements(
column, include_scalar_selects=False
)
if "parententity" in elem._annotations
]
self.entities = util.unique_list(
[
elem._annotations["parententity"]
for elem in all_elements
if "parententity" in elem._annotations
]
)
self._from_entities = set(
[
elem._annotations["parententity"]
for elem in all_elements
if "parententity" in elem._annotations
and actual_froms.intersection(elem._from_objects)
]
)
if self.entities:
self.entity_zero = self.entities[0]
self.mapper = self.entity_zero.mapper
elif self.namespace is not None:
self.entity_zero = self.namespace
self.mapper = None
else:
self.entity_zero = None
self.mapper = None
supports_single_entity = False
@property
def entity_zero_or_selectable(self):
if self.entity_zero is not None:
return self.entity_zero
elif self.actual_froms:
return self.actual_froms[0]
else:
return None
def adapt_to_selectable(self, query, sel):
c = _ColumnEntity(query, sel.corresponding_column(self.column))
c._label_name = self._label_name
c.entity_zero = self.entity_zero
c.entities = self.entities
def setup_entity(self, ext_info, aliased_adapter):
if "selectable" not in self.__dict__:
self.selectable = ext_info.selectable
if set(self.actual_froms).intersection(
ext_info.selectable._from_objects
):
self.froms.add(ext_info.selectable)
def corresponds_to(self, entity):
if self.entity_zero is None:
return False
elif _is_aliased_class(entity):
# TODO: polymorphic subclasses ?
return entity is self.entity_zero
else:
return not _is_aliased_class(
self.entity_zero
) and entity.common_parent(self.entity_zero)
def row_processor(self, query, context, result):
if ("fetch_column", self) in context.attributes:
column = context.attributes[("fetch_column", self)]
else:
column = query._adapt_clause(self.column, False, True)
if column._annotations:
# annotated columns perform more slowly in compiler and
# result due to the __eq__() method, so use deannotated
column = column._deannotate()
if context.adapter:
column = context.adapter.columns[column]
getter = result._getter(column)
return getter, self._label_name
def setup_context(self, query, context):
column = query._adapt_clause(self.column, False, True)
if column._annotations:
# annotated columns perform more slowly in compiler and
# result due to the __eq__() method, so use deannotated
column = column._deannotate()
context.froms += tuple(self.froms)
context.primary_columns.append(column)
context.attributes[("fetch_column", self)] = column
def __str__(self):
return str(self.column)
class QueryContext(object):
__slots__ = (
"multi_row_eager_loaders",
"adapter",
"froms",
"for_update",
"query",
"session",
"autoflush",
"populate_existing",
"invoke_all_eagers",
"version_check",
"refresh_state",
"primary_columns",
"secondary_columns",
"eager_order_by",
"eager_joins",
"create_eager_joins",
"propagate_options",
"attributes",
"statement",
"from_clause",
"whereclause",
"order_by",
"labels",
"_for_update_arg",
"runid",
"partials",
"post_load_paths",
"identity_token",
)
def __init__(self, query):
if query._statement is not None:
if (
isinstance(query._statement, expression.SelectBase)
and not query._statement._textual
and not query._statement.use_labels
):
self.statement = query._statement.apply_labels()
else:
self.statement = query._statement
else:
self.statement = None
self.from_clause = query._from_obj
self.whereclause = query._criterion
self.order_by = query._order_by
self.multi_row_eager_loaders = False
self.adapter = None
self.froms = ()
self.for_update = None
self.query = query
self.session = query.session
self.autoflush = query._autoflush
self.populate_existing = query._populate_existing
self.invoke_all_eagers = query._invoke_all_eagers
self.version_check = query._version_check
self.refresh_state = query._refresh_state
self.primary_columns = []
self.secondary_columns = []
self.eager_order_by = []
self.eager_joins = {}
self.create_eager_joins = []
self.propagate_options = set(
o for o in query._with_options if o.propagate_to_loaders
)
self.attributes = query._attributes.copy()
if self.refresh_state is not None:
self.identity_token = query._refresh_identity_token
else:
self.identity_token = None
class AliasOption(interfaces.MapperOption):
def __init__(self, alias):
r"""Return a :class:`.MapperOption` that will indicate to the
:class:`_query.Query`
that the main table has been aliased.
This is a seldom-used option to suit the
very rare case that :func:`.contains_eager`
is being used in conjunction with a user-defined SELECT
statement that aliases the parent table. E.g.::
# define an aliased UNION called 'ulist'
ulist = users.select(users.c.user_id==7).\
union(users.select(users.c.user_id>7)).\
alias('ulist')
# add on an eager load of "addresses"
statement = ulist.outerjoin(addresses).\
select().apply_labels()
# create query, indicating "ulist" will be an
# alias for the main table, "addresses"
# property should be eager loaded
query = session.query(User).options(
contains_alias(ulist),
contains_eager(User.addresses))
# then get results via the statement
results = query.from_statement(statement).all()
:param alias: is the string name of an alias, or a
:class:`_expression.Alias` object representing
the alias.
"""
self.alias = alias
def process_query(self, query):
if isinstance(self.alias, util.string_types):
alias = query._mapper_zero().persist_selectable.alias(self.alias)
else:
alias = self.alias
query._from_obj_alias = sql_util.ColumnAdapter(alias)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/attributes.py
|
# orm/attributes.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines instrumentation for class attributes and their interaction
with instances.
This module is usually not directly visible to user applications, but
defines a large part of the ORM's interactivity.
"""
import operator
from . import collections
from . import exc as orm_exc
from . import interfaces
from .base import ATTR_EMPTY
from .base import ATTR_WAS_SET
from .base import CALLABLES_OK
from .base import INIT_OK
from .base import instance_dict
from .base import instance_state
from .base import instance_str
from .base import LOAD_AGAINST_COMMITTED
from .base import manager_of_class
from .base import NEVER_SET
from .base import NO_AUTOFLUSH
from .base import NO_CHANGE # noqa
from .base import NO_RAISE
from .base import NO_VALUE
from .base import NON_PERSISTENT_OK # noqa
from .base import PASSIVE_CLASS_MISMATCH # noqa
from .base import PASSIVE_NO_FETCH
from .base import PASSIVE_NO_FETCH_RELATED # noqa
from .base import PASSIVE_NO_INITIALIZE
from .base import PASSIVE_NO_RESULT
from .base import PASSIVE_OFF
from .base import PASSIVE_ONLY_PERSISTENT
from .base import PASSIVE_RETURN_NEVER_SET
from .base import RELATED_OBJECT_OK # noqa
from .base import SQL_OK # noqa
from .base import state_str
from .. import event
from .. import inspection
from .. import util
@inspection._self_inspects
class QueryableAttribute(
interfaces._MappedAttribute,
interfaces.InspectionAttr,
interfaces.PropComparator,
):
"""Base class for :term:`descriptor` objects that intercept
attribute events on behalf of a :class:`.MapperProperty`
object. The actual :class:`.MapperProperty` is accessible
via the :attr:`.QueryableAttribute.property`
attribute.
.. seealso::
:class:`.InstrumentedAttribute`
:class:`.MapperProperty`
:attr:`_orm.Mapper.all_orm_descriptors`
:attr:`_orm.Mapper.attrs`
"""
is_attribute = True
def __init__(
self,
class_,
key,
impl=None,
comparator=None,
parententity=None,
of_type=None,
):
self.class_ = class_
self.key = key
self.impl = impl
self.comparator = comparator
self._parententity = parententity
self._of_type = of_type
manager = manager_of_class(class_)
# manager is None in the case of AliasedClass
if manager:
# propagate existing event listeners from
# immediate superclass
for base in manager._bases:
if key in base:
self.dispatch._update(base[key].dispatch)
if base[key].dispatch._active_history:
self.dispatch._active_history = True
@util.memoized_property
def _supports_population(self):
return self.impl.supports_population
@property
def _impl_uses_objects(self):
return self.impl.uses_objects
def get_history(self, instance, passive=PASSIVE_OFF):
return self.impl.get_history(
instance_state(instance), instance_dict(instance), passive
)
def __selectable__(self):
# TODO: conditionally attach this method based on clause_element ?
return self
@util.memoized_property
def info(self):
"""Return the 'info' dictionary for the underlying SQL element.
The behavior here is as follows:
* If the attribute is a column-mapped property, i.e.
:class:`.ColumnProperty`, which is mapped directly
to a schema-level :class:`_schema.Column` object, this attribute
will return the :attr:`.SchemaItem.info` dictionary associated
with the core-level :class:`_schema.Column` object.
* If the attribute is a :class:`.ColumnProperty` but is mapped to
any other kind of SQL expression other than a
:class:`_schema.Column`,
the attribute will refer to the :attr:`.MapperProperty.info`
dictionary associated directly with the :class:`.ColumnProperty`,
assuming the SQL expression itself does not have its own ``.info``
attribute (which should be the case, unless a user-defined SQL
construct has defined one).
* If the attribute refers to any other kind of
:class:`.MapperProperty`, including :class:`.RelationshipProperty`,
the attribute will refer to the :attr:`.MapperProperty.info`
dictionary associated with that :class:`.MapperProperty`.
* To access the :attr:`.MapperProperty.info` dictionary of the
:class:`.MapperProperty` unconditionally, including for a
:class:`.ColumnProperty` that's associated directly with a
:class:`_schema.Column`, the attribute can be referred to using
:attr:`.QueryableAttribute.property` attribute, as
``MyClass.someattribute.property.info``.
.. seealso::
:attr:`.SchemaItem.info`
:attr:`.MapperProperty.info`
"""
return self.comparator.info
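# Illustrative sketch of the resolution rules above (assumes ``User`` is
# mapped to a Table whose ``name`` Column was created with
# info={"sensitive": True}):
#
#   User.name.info            # -> {"sensitive": True}, the Column's dict
#   User.name.property.info   # -> the ColumnProperty's own info dict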
@util.memoized_property
def parent(self):
"""Return an inspection instance representing the parent.
This will be either an instance of :class:`_orm.Mapper`
or :class:`.AliasedInsp`, depending upon the nature
of the parent entity which this attribute is associated
with.
"""
return inspection.inspect(self._parententity)
@property
def expression(self):
return self.comparator.__clause_element__()
def __clause_element__(self):
return self.comparator.__clause_element__()
def _query_clause_element(self):
"""like __clause_element__(), but called specifically
by :class:`_query.Query` to allow special behavior."""
return self.comparator._query_clause_element()
def _bulk_update_tuples(self, value):
"""Return setter tuples for a bulk UPDATE."""
return self.comparator._bulk_update_tuples(value)
def adapt_to_entity(self, adapt_to_entity):
assert not self._of_type
return self.__class__(
adapt_to_entity.entity,
self.key,
impl=self.impl,
comparator=self.comparator.adapt_to_entity(adapt_to_entity),
parententity=adapt_to_entity,
)
def of_type(self, cls):
return QueryableAttribute(
self.class_,
self.key,
self.impl,
self.comparator.of_type(cls),
self._parententity,
of_type=cls,
)
def label(self, name):
return self._query_clause_element().label(name)
def operate(self, op, *other, **kwargs):
return op(self.comparator, *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
return op(other, self.comparator, **kwargs)
def hasparent(self, state, optimistic=False):
return self.impl.hasparent(state, optimistic=optimistic) is not False
def __getattr__(self, key):
try:
return getattr(self.comparator, key)
except AttributeError as err:
util.raise_(
AttributeError(
"Neither %r object nor %r object associated with %s "
"has an attribute %r"
% (
type(self).__name__,
type(self.comparator).__name__,
self,
key,
)
),
replace_context=err,
)
def __str__(self):
return "%s.%s" % (self.class_.__name__, self.key)
@util.memoized_property
def property(self):
"""Return the :class:`.MapperProperty` associated with this
:class:`.QueryableAttribute`.
Return values here will commonly be instances of
:class:`.ColumnProperty` or :class:`.RelationshipProperty`.
"""
return self.comparator.property
class InstrumentedAttribute(QueryableAttribute):
"""Class bound instrumented attribute which adds basic
:term:`descriptor` methods.
See :class:`.QueryableAttribute` for a description of most features.
"""
def __set__(self, instance, value):
self.impl.set(
instance_state(instance), instance_dict(instance), value, None
)
def __delete__(self, instance):
self.impl.delete(instance_state(instance), instance_dict(instance))
def __get__(self, instance, owner):
if instance is None:
return self
dict_ = instance_dict(instance)
if self._supports_population and self.key in dict_:
return dict_[self.key]
else:
return self.impl.get(instance_state(instance), dict_)
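# Illustrative sketch of the descriptor routing above (assumes a mapped
# ``User`` class with instance ``u``):
#
#   User.name   # instance is None -> the InstrumentedAttribute itself
#   u.name      # returns dict_["name"] when loaded, else impl.get(),
#               # which may trigger a lazy load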
def create_proxied_attribute(descriptor):
"""Create an QueryableAttribute / user descriptor hybrid.
Returns a new QueryableAttribute type that delegates descriptor
behavior and getattr() to the given descriptor.
"""
# TODO: can move this to descriptor_props if the need for this
# function is removed from ext/hybrid.py
class Proxy(QueryableAttribute):
"""Presents the :class:`.QueryableAttribute` interface as a
proxy on top of a Python descriptor / :class:`.PropComparator`
combination.
"""
def __init__(
self,
class_,
key,
descriptor,
comparator,
adapt_to_entity=None,
doc=None,
original_property=None,
):
self.class_ = class_
self.key = key
self.descriptor = descriptor
self.original_property = original_property
self._comparator = comparator
self._adapt_to_entity = adapt_to_entity
self.__doc__ = doc
_is_internal_proxy = True
@property
def _impl_uses_objects(self):
return (
self.original_property is not None
and getattr(self.class_, self.key).impl.uses_objects
)
@property
def property(self):
return self.comparator.property
@util.memoized_property
def comparator(self):
if util.callable(self._comparator):
self._comparator = self._comparator()
if self._adapt_to_entity:
self._comparator = self._comparator.adapt_to_entity(
self._adapt_to_entity
)
return self._comparator
def adapt_to_entity(self, adapt_to_entity):
return self.__class__(
adapt_to_entity.entity,
self.key,
self.descriptor,
self._comparator,
adapt_to_entity,
)
def __get__(self, instance, owner):
retval = self.descriptor.__get__(instance, owner)
# detect if this is a plain Python @property, which just returns
# itself for class level access. If so, then return us.
# Otherwise, return the object returned by the descriptor.
if retval is self.descriptor and instance is None:
return self
else:
return retval
def __str__(self):
return "%s.%s" % (self.class_.__name__, self.key)
def __getattr__(self, attribute):
"""Delegate __getattr__ to the original descriptor and/or
comparator."""
try:
return getattr(descriptor, attribute)
except AttributeError as err:
if attribute == "comparator":
util.raise_(
AttributeError("comparator"), replace_context=err
)
try:
# comparator itself might be unreachable
comparator = self.comparator
except AttributeError as err2:
util.raise_(
AttributeError(
"Neither %r object nor unconfigured comparator "
"object associated with %s has an attribute %r"
% (type(descriptor).__name__, self, attribute)
),
replace_context=err2,
)
else:
try:
return getattr(comparator, attribute)
except AttributeError as err3:
util.raise_(
AttributeError(
"Neither %r object nor %r object "
"associated with %s has an attribute %r"
% (
type(descriptor).__name__,
type(comparator).__name__,
self,
attribute,
)
),
replace_context=err3,
)
Proxy.__name__ = type(descriptor).__name__ + "Proxy"
util.monkeypatch_proxied_specials(
Proxy, type(descriptor), name="descriptor", from_instance=descriptor
)
return Proxy
OP_REMOVE = util.symbol("REMOVE")
OP_APPEND = util.symbol("APPEND")
OP_REPLACE = util.symbol("REPLACE")
OP_BULK_REPLACE = util.symbol("BULK_REPLACE")
OP_MODIFIED = util.symbol("MODIFIED")
class Event(object):
"""A token propagated throughout the course of a chain of attribute
events.
Serves as an indicator of the source of the event and also provides
a means of controlling propagation across a chain of attribute
operations.
The :class:`.Event` object is sent as the ``initiator`` argument
when dealing with events such as :meth:`.AttributeEvents.append`,
:meth:`.AttributeEvents.set`,
and :meth:`.AttributeEvents.remove`.
The :class:`.Event` object is currently interpreted by the backref
event handlers, and is used to control the propagation of operations
across two mutually-dependent attributes.
.. versionadded:: 0.9.0
:attribute impl: The :class:`.AttributeImpl` which is the current event
initiator.
:attribute op: The symbol :attr:`.OP_APPEND`, :attr:`.OP_REMOVE`,
:attr:`.OP_REPLACE`, or :attr:`.OP_BULK_REPLACE`, indicating the
source operation.
"""
__slots__ = "impl", "op", "parent_token"
def __init__(self, attribute_impl, op):
self.impl = attribute_impl
self.op = op
self.parent_token = self.impl.parent_token
def __eq__(self, other):
return (
isinstance(other, Event)
and other.impl is self.impl
and other.op == self.op
)
@property
def key(self):
return self.impl.key
def hasparent(self, state):
return self.impl.hasparent(state)
class AttributeImpl(object):
"""internal implementation for instrumented attributes."""
def __init__(
self,
class_,
key,
callable_,
dispatch,
trackparent=False,
extension=None,
compare_function=None,
active_history=False,
parent_token=None,
expire_missing=True,
send_modified_events=True,
accepts_scalar_loader=None,
**kwargs
):
r"""Construct an AttributeImpl.
:param \class_: associated class
:param key: string name of the attribute
:param \callable_:
optional function which generates a callable based on a parent
instance, which produces the "default" values for a scalar or
collection attribute when it's first accessed, if not present
already.
:param trackparent:
if True, attempt to track if an instance has a parent attached
to it via this attribute.
:param extension:
a single or list of AttributeExtension object(s) which will
receive set/delete/append/remove/etc. events.
.. deprecated:: 1.3
The :paramref:`.AttributeImpl.extension` parameter is deprecated
and will be removed in a future release, corresponding to the
"extension" parameter on the :class:`.MapperProprty` classes
like :func:`.column_property` and :func:`_orm.relationship` The
events system is now used.
:param compare_function:
a function that compares two values which are normally
assignable to this attribute.
:param active_history:
indicates that get_history() should always return the "old" value,
even if it means executing a lazy callable upon attribute change.
:param parent_token:
Usually references the MapperProperty, used as a key for
the hasparent() function to identify an "owning" attribute.
Allows multiple AttributeImpls to all match a single
owner attribute.
:param expire_missing:
if False, don't add an "expiry" callable to this attribute
during state.expire_attributes(None), if no value is present
for this key.
:param send_modified_events:
if False, the InstanceState._modified_event method will have no
effect; this means the attribute will never show up as changed in a
history entry.
"""
self.class_ = class_
self.key = key
self.callable_ = callable_
self.dispatch = dispatch
self.trackparent = trackparent
self.parent_token = parent_token or self
self.send_modified_events = send_modified_events
if compare_function is None:
self.is_equal = operator.eq
else:
self.is_equal = compare_function
if accepts_scalar_loader is not None:
self.accepts_scalar_loader = accepts_scalar_loader
else:
self.accepts_scalar_loader = self.default_accepts_scalar_loader
# TODO: pass in the manager here
# instead of doing a lookup
attr = manager_of_class(class_)[key]
for ext in util.to_list(extension or []):
ext._adapt_listener(attr, ext)
if active_history:
self.dispatch._active_history = True
self.expire_missing = expire_missing
self._modified_token = Event(self, OP_MODIFIED)
__slots__ = (
"class_",
"key",
"callable_",
"dispatch",
"trackparent",
"parent_token",
"send_modified_events",
"is_equal",
"expire_missing",
"_modified_token",
"accepts_scalar_loader",
)
def __str__(self):
return "%s.%s" % (self.class_.__name__, self.key)
def _get_active_history(self):
"""Backwards compat for impl.active_history"""
return self.dispatch._active_history
def _set_active_history(self, value):
self.dispatch._active_history = value
active_history = property(_get_active_history, _set_active_history)
def hasparent(self, state, optimistic=False):
"""Return the boolean value of a `hasparent` flag attached to
the given state.
The `optimistic` flag determines what the default return value
should be if no `hasparent` flag can be located.
As this function is used to determine if an instance is an
*orphan*, instances that were loaded from storage should be
assumed to not be orphans, until a True/False value for this
flag is set.
An instance attribute that is loaded by a callable function
will also not have a `hasparent` flag.
"""
msg = "This AttributeImpl is not configured to track parents."
assert self.trackparent, msg
return (
state.parents.get(id(self.parent_token), optimistic) is not False
)
def sethasparent(self, state, parent_state, value):
"""Set a boolean flag on the given item corresponding to
whether or not it is attached to a parent object via the
attribute represented by this ``InstrumentedAttribute``.
"""
msg = "This AttributeImpl is not configured to track parents."
assert self.trackparent, msg
id_ = id(self.parent_token)
if value:
state.parents[id_] = parent_state
else:
if id_ in state.parents:
last_parent = state.parents[id_]
if (
last_parent is not False
and last_parent.key != parent_state.key
):
if last_parent.obj() is None:
raise orm_exc.StaleDataError(
"Removing state %s from parent "
"state %s along attribute '%s', "
"but the parent record "
"has gone stale, can't be sure this "
"is the most recent parent."
% (
state_str(state),
state_str(parent_state),
self.key,
)
)
return
state.parents[id_] = False
def get_history(self, state, dict_, passive=PASSIVE_OFF):
raise NotImplementedError()
def get_all_pending(self, state, dict_, passive=PASSIVE_NO_INITIALIZE):
"""Return a list of tuples of (state, obj)
for all objects in this attribute's current state
+ history.
Only applies to object-based attributes.
This is an inlining of existing functionality
which roughly corresponds to:
get_state_history(
state,
key,
passive=PASSIVE_NO_INITIALIZE).sum()
"""
raise NotImplementedError()
def initialize(self, state, dict_):
"""Initialize the given state's attribute with an empty value."""
value = None
for fn in self.dispatch.init_scalar:
ret = fn(state, value, dict_)
if ret is not ATTR_EMPTY:
value = ret
return value
def get(self, state, dict_, passive=PASSIVE_OFF):
"""Retrieve a value from the given object.
If a callable is assembled on this object's attribute, and
passive is False, the callable will be executed and the
resulting value will be set as the new value for this attribute.
"""
if self.key in dict_:
return dict_[self.key]
else:
# if history present, don't load
key = self.key
if (
key not in state.committed_state
or state.committed_state[key] is NEVER_SET
):
if not passive & CALLABLES_OK:
return PASSIVE_NO_RESULT
if key in state.expired_attributes:
value = state._load_expired(state, passive)
elif key in state.callables:
callable_ = state.callables[key]
value = callable_(state, passive)
elif self.callable_:
value = self.callable_(state, passive)
else:
value = ATTR_EMPTY
if value is PASSIVE_NO_RESULT or value is NEVER_SET:
return value
elif value is ATTR_WAS_SET:
try:
return dict_[key]
except KeyError as err:
# TODO: no test coverage here.
util.raise_(
KeyError(
"Deferred loader for attribute "
"%r failed to populate "
"correctly" % key
),
replace_context=err,
)
elif value is not ATTR_EMPTY:
return self.set_committed_value(state, dict_, value)
if not passive & INIT_OK:
return NEVER_SET
else:
# Return a new, empty value
return self.initialize(state, dict_)
def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
self.set(state, dict_, value, initiator, passive=passive)
def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
self.set(
state, dict_, None, initiator, passive=passive, check_old=value
)
def pop(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
self.set(
state,
dict_,
None,
initiator,
passive=passive,
check_old=value,
pop=True,
)
def set(
self,
state,
dict_,
value,
initiator,
passive=PASSIVE_OFF,
check_old=None,
pop=False,
):
raise NotImplementedError()
def get_committed_value(self, state, dict_, passive=PASSIVE_OFF):
"""return the unchanged value of this attribute"""
if self.key in state.committed_state:
value = state.committed_state[self.key]
if value in (NO_VALUE, NEVER_SET):
return None
else:
return value
else:
return self.get(state, dict_, passive=passive)
def set_committed_value(self, state, dict_, value):
"""set an attribute value on the given instance and 'commit' it."""
dict_[self.key] = value
state._commit(dict_, [self.key])
return value
class ScalarAttributeImpl(AttributeImpl):
"""represents a scalar value-holding InstrumentedAttribute."""
default_accepts_scalar_loader = True
uses_objects = False
supports_population = True
collection = False
dynamic = False
__slots__ = "_replace_token", "_append_token", "_remove_token"
def __init__(self, *arg, **kw):
super(ScalarAttributeImpl, self).__init__(*arg, **kw)
self._replace_token = self._append_token = Event(self, OP_REPLACE)
self._remove_token = Event(self, OP_REMOVE)
def delete(self, state, dict_):
if self.dispatch._active_history:
old = self.get(state, dict_, PASSIVE_RETURN_NEVER_SET)
else:
old = dict_.get(self.key, NO_VALUE)
if self.dispatch.remove:
self.fire_remove_event(state, dict_, old, self._remove_token)
state._modified_event(dict_, self, old)
existing = dict_.pop(self.key, NO_VALUE)
if (
existing is NO_VALUE
and old is NO_VALUE
and not state.expired
and self.key not in state.expired_attributes
):
raise AttributeError("%s object does not have a value" % self)
def get_history(self, state, dict_, passive=PASSIVE_OFF):
if self.key in dict_:
return History.from_scalar_attribute(self, state, dict_[self.key])
else:
if passive & INIT_OK:
passive ^= INIT_OK
current = self.get(state, dict_, passive=passive)
if current is PASSIVE_NO_RESULT:
return HISTORY_BLANK
else:
return History.from_scalar_attribute(self, state, current)
def set(
self,
state,
dict_,
value,
initiator,
passive=PASSIVE_OFF,
check_old=None,
pop=False,
):
if self.dispatch._active_history:
old = self.get(state, dict_, PASSIVE_RETURN_NEVER_SET)
else:
old = dict_.get(self.key, NO_VALUE)
if self.dispatch.set:
value = self.fire_replace_event(
state, dict_, value, old, initiator
)
state._modified_event(dict_, self, old)
dict_[self.key] = value
def fire_replace_event(self, state, dict_, value, previous, initiator):
for fn in self.dispatch.set:
value = fn(
state, value, previous, initiator or self._replace_token
)
return value
def fire_remove_event(self, state, dict_, value, initiator):
for fn in self.dispatch.remove:
fn(state, value, initiator or self._remove_token)
@property
def type(self):
        return self.property.columns[0].type
class ScalarObjectAttributeImpl(ScalarAttributeImpl):
"""represents a scalar-holding InstrumentedAttribute,
where the target object is also instrumented.
Adds events to delete/set operations.
"""
default_accepts_scalar_loader = False
uses_objects = True
supports_population = True
collection = False
__slots__ = ()
def delete(self, state, dict_):
if self.dispatch._active_history:
old = self.get(
state,
dict_,
passive=PASSIVE_ONLY_PERSISTENT
| NO_AUTOFLUSH
| LOAD_AGAINST_COMMITTED,
)
else:
old = self.get(
state,
dict_,
passive=PASSIVE_NO_FETCH ^ INIT_OK
| LOAD_AGAINST_COMMITTED
| NO_RAISE,
)
self.fire_remove_event(state, dict_, old, self._remove_token)
existing = dict_.pop(self.key, NO_VALUE)
# if the attribute is expired, we currently have no way to tell
# that an object-attribute was expired vs. not loaded. So
# for this test, we look to see if the object has a DB identity.
if (
existing is NO_VALUE
and old is not PASSIVE_NO_RESULT
and state.key is None
):
raise AttributeError("%s object does not have a value" % self)
def get_history(self, state, dict_, passive=PASSIVE_OFF):
if self.key in dict_:
return History.from_object_attribute(self, state, dict_[self.key])
else:
if passive & INIT_OK:
passive ^= INIT_OK
current = self.get(state, dict_, passive=passive)
if current is PASSIVE_NO_RESULT:
return HISTORY_BLANK
else:
return History.from_object_attribute(self, state, current)
def get_all_pending(self, state, dict_, passive=PASSIVE_NO_INITIALIZE):
if self.key in dict_:
current = dict_[self.key]
elif passive & CALLABLES_OK:
current = self.get(state, dict_, passive=passive)
else:
return []
# can't use __hash__(), can't use __eq__() here
if (
current is not None
and current is not PASSIVE_NO_RESULT
and current is not NEVER_SET
):
ret = [(instance_state(current), current)]
else:
ret = [(None, None)]
if self.key in state.committed_state:
original = state.committed_state[self.key]
if (
original is not None
and original is not PASSIVE_NO_RESULT
and original is not NEVER_SET
and original is not current
):
ret.append((instance_state(original), original))
return ret
def set(
self,
state,
dict_,
value,
initiator,
passive=PASSIVE_OFF,
check_old=None,
pop=False,
):
"""Set a value on the given InstanceState.
"""
if self.dispatch._active_history:
old = self.get(
state,
dict_,
passive=PASSIVE_ONLY_PERSISTENT
| NO_AUTOFLUSH
| LOAD_AGAINST_COMMITTED,
)
else:
old = self.get(
state,
dict_,
passive=PASSIVE_NO_FETCH ^ INIT_OK
| LOAD_AGAINST_COMMITTED
| NO_RAISE,
)
if (
check_old is not None
and old is not PASSIVE_NO_RESULT
and check_old is not old
):
if pop:
return
else:
raise ValueError(
"Object %s not associated with %s on attribute '%s'"
% (instance_str(check_old), state_str(state), self.key)
)
value = self.fire_replace_event(state, dict_, value, old, initiator)
dict_[self.key] = value
def fire_remove_event(self, state, dict_, value, initiator):
if self.trackparent and value is not None:
self.sethasparent(instance_state(value), state, False)
for fn in self.dispatch.remove:
fn(state, value, initiator or self._remove_token)
state._modified_event(dict_, self, value)
def fire_replace_event(self, state, dict_, value, previous, initiator):
if self.trackparent:
if previous is not value and previous not in (
None,
PASSIVE_NO_RESULT,
NEVER_SET,
):
self.sethasparent(instance_state(previous), state, False)
for fn in self.dispatch.set:
value = fn(
state, value, previous, initiator or self._replace_token
)
state._modified_event(dict_, self, previous)
if self.trackparent:
if value is not None:
self.sethasparent(instance_state(value), state, True)
return value
class CollectionAttributeImpl(AttributeImpl):
"""A collection-holding attribute that instruments changes in membership.
Only handles collections of instrumented objects.
InstrumentedCollectionAttribute holds an arbitrary, user-specified
container object (defaulting to a list) and brokers access to the
CollectionAdapter, a "view" onto that object that presents consistent bag
semantics to the orm layer independent of the user data implementation.
"""
default_accepts_scalar_loader = False
uses_objects = True
supports_population = True
collection = True
dynamic = False
__slots__ = (
"copy",
"collection_factory",
"_append_token",
"_remove_token",
"_bulk_replace_token",
"_duck_typed_as",
)
def __init__(
self,
class_,
key,
callable_,
dispatch,
typecallable=None,
trackparent=False,
extension=None,
copy_function=None,
compare_function=None,
**kwargs
):
super(CollectionAttributeImpl, self).__init__(
class_,
key,
callable_,
dispatch,
trackparent=trackparent,
extension=extension,
compare_function=compare_function,
**kwargs
)
if copy_function is None:
copy_function = self.__copy
self.copy = copy_function
self.collection_factory = typecallable
self._append_token = Event(self, OP_APPEND)
self._remove_token = Event(self, OP_REMOVE)
self._bulk_replace_token = Event(self, OP_BULK_REPLACE)
self._duck_typed_as = util.duck_type_collection(
self.collection_factory()
)
if getattr(self.collection_factory, "_sa_linker", None):
@event.listens_for(self, "init_collection")
def link(target, collection, collection_adapter):
collection._sa_linker(collection_adapter)
@event.listens_for(self, "dispose_collection")
def unlink(target, collection, collection_adapter):
collection._sa_linker(None)
def __copy(self, item):
return [y for y in collections.collection_adapter(item)]
def get_history(self, state, dict_, passive=PASSIVE_OFF):
current = self.get(state, dict_, passive=passive)
if current is PASSIVE_NO_RESULT:
return HISTORY_BLANK
else:
return History.from_collection(self, state, current)
def get_all_pending(self, state, dict_, passive=PASSIVE_NO_INITIALIZE):
# NOTE: passive is ignored here at the moment
if self.key not in dict_:
return []
current = dict_[self.key]
current = getattr(current, "_sa_adapter")
if self.key in state.committed_state:
original = state.committed_state[self.key]
if original not in (NO_VALUE, NEVER_SET):
current_states = [
((c is not None) and instance_state(c) or None, c)
for c in current
]
original_states = [
((c is not None) and instance_state(c) or None, c)
for c in original
]
current_set = dict(current_states)
original_set = dict(original_states)
return (
[
(s, o)
for s, o in current_states
if s not in original_set
]
+ [(s, o) for s, o in current_states if s in original_set]
+ [
(s, o)
for s, o in original_states
if s not in current_set
]
)
return [(instance_state(o), o) for o in current]
def fire_append_event(self, state, dict_, value, initiator):
for fn in self.dispatch.append:
value = fn(state, value, initiator or self._append_token)
state._modified_event(dict_, self, NEVER_SET, True)
if self.trackparent and value is not None:
self.sethasparent(instance_state(value), state, True)
return value
def fire_pre_remove_event(self, state, dict_, initiator):
"""A special event used for pop() operations.
The "remove" event needs to have the item to be removed passed to
it, which in the case of pop from a set, we don't have a way to access
the item before the operation. the event is used for all pop()
operations (even though set.pop is the one where it is really needed).
"""
state._modified_event(dict_, self, NEVER_SET, True)
def fire_remove_event(self, state, dict_, value, initiator):
if self.trackparent and value is not None:
self.sethasparent(instance_state(value), state, False)
for fn in self.dispatch.remove:
fn(state, value, initiator or self._remove_token)
state._modified_event(dict_, self, NEVER_SET, True)
def delete(self, state, dict_):
if self.key not in dict_:
return
state._modified_event(dict_, self, NEVER_SET, True)
collection = self.get_collection(state, state.dict)
collection.clear_with_event()
# key is always present because we checked above. e.g.
# del is a no-op if collection not present.
del dict_[self.key]
def initialize(self, state, dict_):
"""Initialize this attribute with an empty collection."""
_, user_data = self._initialize_collection(state)
dict_[self.key] = user_data
return user_data
def _initialize_collection(self, state):
adapter, collection = state.manager.initialize_collection(
self.key, state, self.collection_factory
)
self.dispatch.init_collection(state, collection, adapter)
return adapter, collection
def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
collection = self.get_collection(state, dict_, passive=passive)
if collection is PASSIVE_NO_RESULT:
value = self.fire_append_event(state, dict_, value, initiator)
assert (
self.key not in dict_
), "Collection was loaded during event handling."
state._get_pending_mutation(self.key).append(value)
else:
collection.append_with_event(value, initiator)
def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
collection = self.get_collection(state, state.dict, passive=passive)
if collection is PASSIVE_NO_RESULT:
self.fire_remove_event(state, dict_, value, initiator)
assert (
self.key not in dict_
), "Collection was loaded during event handling."
state._get_pending_mutation(self.key).remove(value)
else:
collection.remove_with_event(value, initiator)
def pop(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
try:
# TODO: better solution here would be to add
# a "popper" role to collections.py to complement
# "remover".
self.remove(state, dict_, value, initiator, passive=passive)
except (ValueError, KeyError, IndexError):
pass
def set(
self,
state,
dict_,
value,
initiator=None,
passive=PASSIVE_OFF,
pop=False,
_adapt=True,
):
iterable = orig_iterable = value
# pulling a new collection first so that an adaptation exception does
# not trigger a lazy load of the old collection.
new_collection, user_data = self._initialize_collection(state)
if _adapt:
if new_collection._converter is not None:
iterable = new_collection._converter(iterable)
else:
setting_type = util.duck_type_collection(iterable)
receiving_type = self._duck_typed_as
if setting_type is not receiving_type:
                    given = (
                        "None"
                        if iterable is None
                        else iterable.__class__.__name__
                    )
wanted = self._duck_typed_as.__name__
raise TypeError(
"Incompatible collection type: %s is not %s-like"
% (given, wanted)
)
# If the object is an adapted collection, return the (iterable)
# adapter.
if hasattr(iterable, "_sa_iterator"):
iterable = iterable._sa_iterator()
elif setting_type is dict:
if util.py3k:
iterable = iterable.values()
else:
iterable = getattr(
iterable, "itervalues", iterable.values
)()
else:
iterable = iter(iterable)
new_values = list(iterable)
evt = self._bulk_replace_token
self.dispatch.bulk_replace(state, new_values, evt)
old = self.get(state, dict_, passive=PASSIVE_ONLY_PERSISTENT)
if old is PASSIVE_NO_RESULT:
old = self.initialize(state, dict_)
elif old is orig_iterable:
# ignore re-assignment of the current collection, as happens
# implicitly with in-place operators (foo.collection |= other)
return
# place a copy of "old" in state.committed_state
state._modified_event(dict_, self, old, True)
old_collection = old._sa_adapter
dict_[self.key] = user_data
collections.bulk_replace(
new_values, old_collection, new_collection, initiator=evt
)
del old._sa_adapter
self.dispatch.dispose_collection(state, old, old_collection)
def _invalidate_collection(self, collection):
adapter = getattr(collection, "_sa_adapter")
adapter.invalidated = True
def set_committed_value(self, state, dict_, value):
"""Set an attribute value on the given instance and 'commit' it."""
collection, user_data = self._initialize_collection(state)
if value:
collection.append_multiple_without_event(value)
state.dict[self.key] = user_data
state._commit(dict_, [self.key])
if self.key in state._pending_mutations:
# pending items exist. issue a modified event,
# add/remove new items.
state._modified_event(dict_, self, user_data, True)
pending = state._pending_mutations.pop(self.key)
added = pending.added_items
removed = pending.deleted_items
for item in added:
collection.append_without_event(item)
for item in removed:
collection.remove_without_event(item)
return user_data
def get_collection(
self, state, dict_, user_data=None, passive=PASSIVE_OFF
):
"""Retrieve the CollectionAdapter associated with the given state.
Creates a new CollectionAdapter if one does not exist.
"""
if user_data is None:
user_data = self.get(state, dict_, passive=passive)
if user_data is PASSIVE_NO_RESULT:
return user_data
return getattr(user_data, "_sa_adapter")
def backref_listeners(attribute, key, uselist):
"""Apply listeners to synchronize a two-way relationship."""
# use easily recognizable names for stack traces.
# in the sections marked "tokens to test for a recursive loop",
# this is somewhat brittle and very performance-sensitive logic
# that is specific to how we might arrive at each event. a marker
# that can target us directly to arguments being invoked against
# the impl might be simpler, but could interfere with other systems.
parent_token = attribute.impl.parent_token
parent_impl = attribute.impl
def _acceptable_key_err(child_state, initiator, child_impl):
raise ValueError(
"Bidirectional attribute conflict detected: "
'Passing object %s to attribute "%s" '
'triggers a modify event on attribute "%s" '
'via the backref "%s".'
% (
state_str(child_state),
initiator.parent_token,
child_impl.parent_token,
attribute.impl.parent_token,
)
)
def emit_backref_from_scalar_set_event(state, child, oldchild, initiator):
if oldchild is child:
return child
if (
oldchild is not None
and oldchild is not PASSIVE_NO_RESULT
and oldchild is not NEVER_SET
):
# With lazy=None, there's no guarantee that the full collection is
# present when updating via a backref.
old_state, old_dict = (
instance_state(oldchild),
instance_dict(oldchild),
)
impl = old_state.manager[key].impl
# tokens to test for a recursive loop.
if not impl.collection and not impl.dynamic:
check_recursive_token = impl._replace_token
else:
check_recursive_token = impl._remove_token
if initiator is not check_recursive_token:
impl.pop(
old_state,
old_dict,
state.obj(),
parent_impl._append_token,
passive=PASSIVE_NO_FETCH,
)
if child is not None:
child_state, child_dict = (
instance_state(child),
instance_dict(child),
)
child_impl = child_state.manager[key].impl
if (
initiator.parent_token is not parent_token
and initiator.parent_token is not child_impl.parent_token
):
_acceptable_key_err(state, initiator, child_impl)
# tokens to test for a recursive loop.
check_append_token = child_impl._append_token
check_bulk_replace_token = (
child_impl._bulk_replace_token
if child_impl.collection
else None
)
if (
initiator is not check_append_token
and initiator is not check_bulk_replace_token
):
child_impl.append(
child_state,
child_dict,
state.obj(),
initiator,
passive=PASSIVE_NO_FETCH,
)
return child
def emit_backref_from_collection_append_event(state, child, initiator):
if child is None:
return
child_state, child_dict = instance_state(child), instance_dict(child)
child_impl = child_state.manager[key].impl
if (
initiator.parent_token is not parent_token
and initiator.parent_token is not child_impl.parent_token
):
_acceptable_key_err(state, initiator, child_impl)
# tokens to test for a recursive loop.
check_append_token = child_impl._append_token
check_bulk_replace_token = (
child_impl._bulk_replace_token if child_impl.collection else None
)
if (
initiator is not check_append_token
and initiator is not check_bulk_replace_token
):
child_impl.append(
child_state,
child_dict,
state.obj(),
initiator,
passive=PASSIVE_NO_FETCH,
)
return child
def emit_backref_from_collection_remove_event(state, child, initiator):
if (
child is not None
and child is not PASSIVE_NO_RESULT
and child is not NEVER_SET
):
child_state, child_dict = (
instance_state(child),
instance_dict(child),
)
child_impl = child_state.manager[key].impl
# tokens to test for a recursive loop.
if not child_impl.collection and not child_impl.dynamic:
check_remove_token = child_impl._remove_token
check_replace_token = child_impl._replace_token
check_for_dupes_on_remove = uselist and not parent_impl.dynamic
else:
check_remove_token = child_impl._remove_token
check_replace_token = (
child_impl._bulk_replace_token
if child_impl.collection
else None
)
check_for_dupes_on_remove = False
if (
initiator is not check_remove_token
and initiator is not check_replace_token
):
if not check_for_dupes_on_remove or not util.has_dupes(
# when this event is called, the item is usually
# present in the list, except for a pop() operation.
state.dict[parent_impl.key],
child,
):
child_impl.pop(
child_state,
child_dict,
state.obj(),
initiator,
passive=PASSIVE_NO_FETCH,
)
if uselist:
event.listen(
attribute,
"append",
emit_backref_from_collection_append_event,
retval=True,
raw=True,
)
else:
event.listen(
attribute,
"set",
emit_backref_from_scalar_set_event,
retval=True,
raw=True,
)
# TODO: need coverage in test/orm/ of remove event
event.listen(
attribute,
"remove",
emit_backref_from_collection_remove_event,
retval=True,
raw=True,
)
_NO_HISTORY = util.symbol("NO_HISTORY")
_NO_STATE_SYMBOLS = frozenset(
[id(PASSIVE_NO_RESULT), id(NO_VALUE), id(NEVER_SET)]
)
History = util.namedtuple("History", ["added", "unchanged", "deleted"])
class History(History):
"""A 3-tuple of added, unchanged and deleted values,
representing the changes which have occurred on an instrumented
attribute.
The easiest way to get a :class:`.History` object for a particular
attribute on an object is to use the :func:`_sa.inspect` function::
from sqlalchemy import inspect
hist = inspect(myobject).attrs.myattribute.history
Each tuple member is an iterable sequence:
* ``added`` - the collection of items added to the attribute (the first
tuple element).
* ``unchanged`` - the collection of items that have not changed on the
attribute (the second tuple element).
* ``deleted`` - the collection of items that have been removed from the
attribute (the third tuple element).
"""
def __bool__(self):
return self != HISTORY_BLANK
__nonzero__ = __bool__
def empty(self):
"""Return True if this :class:`.History` has no changes
and no existing, unchanged state.
"""
return not bool((self.added or self.deleted) or self.unchanged)
def sum(self):
"""Return a collection of added + unchanged + deleted."""
return (
(self.added or []) + (self.unchanged or []) + (self.deleted or [])
)
def non_deleted(self):
"""Return a collection of added + unchanged."""
return (self.added or []) + (self.unchanged or [])
def non_added(self):
"""Return a collection of unchanged + deleted."""
return (self.unchanged or []) + (self.deleted or [])
def has_changes(self):
"""Return True if this :class:`.History` has changes."""
return bool(self.added or self.deleted)
def as_state(self):
return History(
[
(c is not None) and instance_state(c) or None
for c in self.added
],
[
(c is not None) and instance_state(c) or None
for c in self.unchanged
],
[
(c is not None) and instance_state(c) or None
for c in self.deleted
],
)
@classmethod
def from_scalar_attribute(cls, attribute, state, current):
original = state.committed_state.get(attribute.key, _NO_HISTORY)
if original is _NO_HISTORY:
if current is NEVER_SET:
return cls((), (), ())
else:
return cls((), [current], ())
# don't let ClauseElement expressions here trip things up
elif attribute.is_equal(current, original) is True:
return cls((), [current], ())
else:
# current convention on native scalars is to not
# include information
# about missing previous value in "deleted", but
# we do include None, which helps in some primary
# key situations
if id(original) in _NO_STATE_SYMBOLS:
deleted = ()
# indicate a "del" operation occurred when we don't have
# the previous value as: ([None], (), ())
if id(current) in _NO_STATE_SYMBOLS:
current = None
else:
deleted = [original]
if current is NEVER_SET:
return cls((), (), deleted)
else:
return cls([current], (), deleted)
@classmethod
def from_object_attribute(cls, attribute, state, current):
original = state.committed_state.get(attribute.key, _NO_HISTORY)
if original is _NO_HISTORY:
if current is NO_VALUE or current is NEVER_SET:
return cls((), (), ())
else:
return cls((), [current], ())
elif current is original and current is not NEVER_SET:
return cls((), [current], ())
else:
# current convention on related objects is to not
# include information
# about missing previous value in "deleted", and
# to also not include None - the dependency.py rules
# ignore the None in any case.
if id(original) in _NO_STATE_SYMBOLS or original is None:
deleted = ()
# indicate a "del" operation occurred when we don't have
# the previous value as: ([None], (), ())
if id(current) in _NO_STATE_SYMBOLS:
current = None
else:
deleted = [original]
if current is NO_VALUE or current is NEVER_SET:
return cls((), (), deleted)
else:
return cls([current], (), deleted)
@classmethod
def from_collection(cls, attribute, state, current):
original = state.committed_state.get(attribute.key, _NO_HISTORY)
if current is NO_VALUE or current is NEVER_SET:
return cls((), (), ())
current = getattr(current, "_sa_adapter")
if original in (NO_VALUE, NEVER_SET):
return cls(list(current), (), ())
elif original is _NO_HISTORY:
return cls((), list(current), ())
else:
current_states = [
((c is not None) and instance_state(c) or None, c)
for c in current
]
original_states = [
((c is not None) and instance_state(c) or None, c)
for c in original
]
current_set = dict(current_states)
original_set = dict(original_states)
return cls(
[o for s, o in current_states if s not in original_set],
[o for s, o in current_states if s in original_set],
[o for s, o in original_states if s not in current_set],
)
HISTORY_BLANK = History(None, None, None)
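# A minimal sketch of History semantics for a scalar attribute, assuming a
# hypothetical mapped class ``User`` whose persisted ``name`` is "old name":
#
#     from sqlalchemy import inspect
#
#     user = session.query(User).first()
#     user.name = "new name"
#
#     hist = inspect(user).attrs.name.history
#     # the pending value is "added", the committed value "deleted"
#     assert hist.added == ["new name"]
#     assert hist.deleted == ["old name"]
#     assert hist.unchanged == ()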
def get_history(obj, key, passive=PASSIVE_OFF):
"""Return a :class:`.History` record for the given object
and attribute key.
This is the **pre-flush** history for a given attribute, which is
reset each time the :class:`.Session` flushes changes to the
current database transaction.
.. note::
Prefer to use the :attr:`.AttributeState.history` and
:meth:`.AttributeState.load_history` accessors to retrieve the
:class:`.History` for instance attributes.
:param obj: an object whose class is instrumented by the
attributes package.
:param key: string attribute name.
:param passive: indicates loading behavior for the attribute
if the value is not already present. This is a
bitflag attribute, which defaults to the symbol
:attr:`.PASSIVE_OFF` indicating all necessary SQL
should be emitted.
.. seealso::
:attr:`.AttributeState.history`
:meth:`.AttributeState.load_history` - retrieve history
using loader callables if the value is not locally present.
"""
if passive is True:
util.warn_deprecated(
"Passing True for 'passive' is deprecated. "
"Use attributes.PASSIVE_NO_INITIALIZE"
)
passive = PASSIVE_NO_INITIALIZE
elif passive is False:
util.warn_deprecated(
"Passing False for 'passive' is "
"deprecated. Use attributes.PASSIVE_OFF"
)
passive = PASSIVE_OFF
return get_state_history(instance_state(obj), key, passive)
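# A minimal usage sketch for get_history(), assuming ``user`` is an
# instance of a hypothetical mapped class with an "addresses" collection:
#
#     from sqlalchemy.orm.attributes import get_history
#
#     hist = get_history(user, "addresses")
#     if hist.has_changes():
#         print("pending additions:", hist.added)
#         print("pending removals:", hist.deleted)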
def get_state_history(state, key, passive=PASSIVE_OFF):
return state.get_history(key, passive)
def has_parent(cls, obj, key, optimistic=False):
"""TODO"""
manager = manager_of_class(cls)
state = instance_state(obj)
return manager.has_parent(state, key, optimistic)
def register_attribute(class_, key, **kw):
comparator = kw.pop("comparator", None)
parententity = kw.pop("parententity", None)
doc = kw.pop("doc", None)
desc = register_descriptor(class_, key, comparator, parententity, doc=doc)
register_attribute_impl(class_, key, **kw)
return desc
def register_attribute_impl(
class_,
key,
uselist=False,
callable_=None,
useobject=False,
impl_class=None,
backref=None,
**kw
):
manager = manager_of_class(class_)
if uselist:
factory = kw.pop("typecallable", None)
typecallable = manager.instrument_collection_class(
key, factory or list
)
else:
typecallable = kw.pop("typecallable", None)
dispatch = manager[key].dispatch
if impl_class:
impl = impl_class(class_, key, typecallable, dispatch, **kw)
elif uselist:
impl = CollectionAttributeImpl(
class_, key, callable_, dispatch, typecallable=typecallable, **kw
)
elif useobject:
impl = ScalarObjectAttributeImpl(
class_, key, callable_, dispatch, **kw
)
else:
impl = ScalarAttributeImpl(class_, key, callable_, dispatch, **kw)
manager[key].impl = impl
if backref:
backref_listeners(manager[key], backref, uselist)
manager.post_configure_attribute(key)
return manager[key]
def register_descriptor(
class_, key, comparator=None, parententity=None, doc=None
):
manager = manager_of_class(class_)
descriptor = InstrumentedAttribute(
class_, key, comparator=comparator, parententity=parententity
)
descriptor.__doc__ = doc
manager.instrument_attribute(key, descriptor)
return descriptor
def unregister_attribute(class_, key):
manager_of_class(class_).uninstrument_attribute(key)
def init_collection(obj, key):
"""Initialize a collection attribute and return the collection adapter.
This function is used to provide direct access to collection internals
for a previously unloaded attribute. e.g.::
collection_adapter = init_collection(someobject, 'elements')
for elem in values:
collection_adapter.append_without_event(elem)
For an easier way to do the above, see
:func:`~sqlalchemy.orm.attributes.set_committed_value`.
:param obj: a mapped object
:param key: string attribute name where the collection is located.
"""
state = instance_state(obj)
dict_ = state.dict
return init_state_collection(state, dict_, key)
def init_state_collection(state, dict_, key):
"""Initialize a collection attribute and return the collection adapter."""
attr = state.manager[key].impl
user_data = attr.initialize(state, dict_)
return attr.get_collection(state, dict_, user_data)
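# Sketch rounding out the init_collection() docstring example above;
# ``someobject`` is a hypothetical mapped instance with an "elements"
# relationship and ``values`` a list of related instances:
#
#     from sqlalchemy.orm.attributes import init_collection
#
#     adapter = init_collection(someobject, "elements")
#     for elem in values:
#         adapter.append_without_event(elem)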
def set_committed_value(instance, key, value):
"""Set the value of an attribute with no history events.
Cancels any previous history present. The value should be
a scalar value for scalar-holding attributes, or
an iterable for any collection-holding attribute.
This is the same underlying method used when a lazy loader
fires off and loads additional data from the database.
In particular, this method can be used by application code
which has loaded additional attributes or collections through
separate queries, which can then be attached to an instance
as though it were part of its original loaded state.
"""
state, dict_ = instance_state(instance), instance_dict(instance)
state.manager[key].impl.set_committed_value(state, dict_, value)
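# Usage sketch for set_committed_value(): attach separately-queried rows as
# though they were part of the original loaded state; ``user`` and
# ``Address`` are hypothetical:
#
#     from sqlalchemy.orm.attributes import set_committed_value
#
#     addresses = session.query(Address).filter_by(user_id=user.id).all()
#     set_committed_value(user, "addresses", addresses)
#     # no history is recorded; a subsequent flush emits no UPDATE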
def set_attribute(instance, key, value, initiator=None):
"""Set the value of an attribute, firing history events.
This function may be used regardless of instrumentation
applied directly to the class, i.e. no descriptors are required.
Custom attribute management schemes will need to make usage
of this method to establish attribute state as understood
by SQLAlchemy.
:param instance: the object that will be modified
:param key: string name of the attribute
:param value: value to assign
:param initiator: an instance of :class:`.Event` that would have
been propagated from a previous event listener. This argument
is used when the :func:`.set_attribute` function is being used within
an existing event listening function where an :class:`.Event` object
is being supplied; the object may be used to track the origin of the
chain of events.
.. versionadded:: 1.2.3
"""
state, dict_ = instance_state(instance), instance_dict(instance)
state.manager[key].impl.set(state, dict_, value, initiator)
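# Usage sketch for set_attribute(), assuming ``obj`` is an instance whose
# class is instrumented (names hypothetical):
#
#     from sqlalchemy.orm.attributes import set_attribute
#
#     set_attribute(obj, "name", "some value")
#     # has the same history-firing effect as ``obj.name = "some value"``,
#     # but works even when no descriptor is installed on the class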
def get_attribute(instance, key):
"""Get the value of an attribute, firing any callables required.
This function may be used regardless of instrumentation
applied directly to the class, i.e. no descriptors are required.
Custom attribute management schemes will need to make usage
    of this method to access attribute state as understood
by SQLAlchemy.
"""
state, dict_ = instance_state(instance), instance_dict(instance)
return state.manager[key].impl.get(state, dict_)
def del_attribute(instance, key):
"""Delete the value of an attribute, firing history events.
This function may be used regardless of instrumentation
applied directly to the class, i.e. no descriptors are required.
Custom attribute management schemes will need to make usage
of this method to establish attribute state as understood
by SQLAlchemy.
"""
state, dict_ = instance_state(instance), instance_dict(instance)
state.manager[key].impl.delete(state, dict_)
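# Companion sketches for get_attribute() and del_attribute(), same
# assumptions as above:
#
#     from sqlalchemy.orm.attributes import del_attribute, get_attribute
#
#     value = get_attribute(obj, "name")  # fires loader callables if needed
#     del_attribute(obj, "name")          # fires history events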
def flag_modified(instance, key):
"""Mark an attribute on an instance as 'modified'.
This sets the 'modified' flag on the instance and
establishes an unconditional change event for the given attribute.
The attribute must have a value present, else an
:class:`.InvalidRequestError` is raised.
To mark an object "dirty" without referring to any specific attribute
so that it is considered within a flush, use the
:func:`.attributes.flag_dirty` call.
.. seealso::
:func:`.attributes.flag_dirty`
"""
state, dict_ = instance_state(instance), instance_dict(instance)
impl = state.manager[key].impl
impl.dispatch.modified(state, impl._modified_token)
state._modified_event(dict_, impl, NO_VALUE, is_userland=True)
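# Usage sketch for flag_modified(): force a change event for an in-place
# mutation SQLAlchemy cannot detect, e.g. inside a JSON value; ``obj`` is
# hypothetical:
#
#     from sqlalchemy.orm.attributes import flag_modified
#
#     obj.data["key"] = "new value"  # in-place change, not tracked
#     flag_modified(obj, "data")     # attribute now included in the flush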
def flag_dirty(instance):
"""Mark an instance as 'dirty' without any specific attribute mentioned.
This is a special operation that will allow the object to travel through
the flush process for interception by events such as
:meth:`.SessionEvents.before_flush`. Note that no SQL will be emitted in
the flush process for an object that has no changes, even if marked dirty
via this method. However, a :meth:`.SessionEvents.before_flush` handler
will be able to see the object in the :attr:`.Session.dirty` collection and
may establish changes on it, which will then be included in the SQL
emitted.
.. versionadded:: 1.2
.. seealso::
:func:`.attributes.flag_modified`
"""
state, dict_ = instance_state(instance), instance_dict(instance)
state._modified_event(dict_, None, NO_VALUE, is_userland=True)
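# Usage sketch for flag_dirty(): route an otherwise-unchanged object
# through the flush so a before_flush handler can act on it; ``obj`` is
# hypothetical:
#
#     from sqlalchemy.orm.attributes import flag_dirty
#
#     flag_dirty(obj)
#     assert obj in session.dirty
#     # no SQL is emitted unless a handler establishes actual changes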
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/strategies.py
|
# orm/strategies.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""sqlalchemy.orm.interfaces.LoaderStrategy
implementations, and related MapperOptions."""
from __future__ import absolute_import
import collections
import itertools
from . import attributes
from . import exc as orm_exc
from . import interfaces
from . import loading
from . import properties
from . import query
from . import unitofwork
from . import util as orm_util
from .base import _DEFER_FOR_STATE
from .base import _SET_DEFERRED_EXPIRED
from .interfaces import LoaderStrategy
from .interfaces import StrategizedProperty
from .session import _state_session
from .state import InstanceState
from .util import _none_set
from .util import aliased
from .. import event
from .. import exc as sa_exc
from .. import inspect
from .. import log
from .. import sql
from .. import util
from ..sql import util as sql_util
from ..sql import visitors
def _register_attribute(
prop,
mapper,
useobject,
compare_function=None,
typecallable=None,
callable_=None,
proxy_property=None,
active_history=False,
impl_class=None,
**kw
):
attribute_ext = list(util.to_list(prop.extension, default=[]))
listen_hooks = []
uselist = useobject and prop.uselist
if useobject and prop.single_parent:
listen_hooks.append(single_parent_validator)
if prop.key in prop.parent.validators:
fn, opts = prop.parent.validators[prop.key]
listen_hooks.append(
lambda desc, prop: orm_util._validator_events(
desc, prop.key, fn, **opts
)
)
if useobject:
listen_hooks.append(unitofwork.track_cascade_events)
# need to assemble backref listeners
# after the singleparentvalidator, mapper validator
if useobject:
backref = prop.back_populates
if backref and prop._effective_sync_backref:
listen_hooks.append(
lambda desc, prop: attributes.backref_listeners(
desc, backref, uselist
)
)
# a single MapperProperty is shared down a class inheritance
# hierarchy, so we set up attribute instrumentation and backref event
# for each mapper down the hierarchy.
# typically, "mapper" is the same as prop.parent, due to the way
# the configure_mappers() process runs, however this is not strongly
# enforced, and in the case of a second configure_mappers() run the
# mapper here might not be prop.parent; also, a subclass mapper may
# be called here before a superclass mapper. That is, can't depend
# on mappers not already being set up so we have to check each one.
for m in mapper.self_and_descendants:
if prop is m._props.get(
prop.key
) and not m.class_manager._attr_has_impl(prop.key):
desc = attributes.register_attribute_impl(
m.class_,
prop.key,
parent_token=prop,
uselist=uselist,
compare_function=compare_function,
useobject=useobject,
extension=attribute_ext,
trackparent=useobject
and (
prop.single_parent
or prop.direction is interfaces.ONETOMANY
),
typecallable=typecallable,
callable_=callable_,
active_history=active_history,
impl_class=impl_class,
send_modified_events=not useobject or not prop.viewonly,
doc=prop.doc,
**kw
)
for hook in listen_hooks:
hook(desc, prop)
@properties.ColumnProperty.strategy_for(instrument=False, deferred=False)
class UninstrumentedColumnLoader(LoaderStrategy):
"""Represent a non-instrumented MapperProperty.
The polymorphic_on argument of mapper() often results in this,
if the argument is against the with_polymorphic selectable.
"""
__slots__ = ("columns",)
def __init__(self, parent, strategy_key):
super(UninstrumentedColumnLoader, self).__init__(parent, strategy_key)
self.columns = self.parent_property.columns
def setup_query(
self,
context,
query_entity,
path,
loadopt,
adapter,
column_collection=None,
**kwargs
):
for c in self.columns:
if adapter:
c = adapter.columns[c]
column_collection.append(c)
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
pass
@log.class_logger
@properties.ColumnProperty.strategy_for(instrument=True, deferred=False)
class ColumnLoader(LoaderStrategy):
"""Provide loading behavior for a :class:`.ColumnProperty`."""
__slots__ = "columns", "is_composite"
def __init__(self, parent, strategy_key):
super(ColumnLoader, self).__init__(parent, strategy_key)
self.columns = self.parent_property.columns
self.is_composite = hasattr(self.parent_property, "composite_class")
def setup_query(
self,
context,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
**kwargs
):
for c in self.columns:
if adapter:
c = adapter.columns[c]
column_collection.append(c)
fetch = self.columns[0]
if adapter:
fetch = adapter.columns[fetch]
memoized_populators[self.parent_property] = fetch
def init_class_attribute(self, mapper):
self.is_class_level = True
coltype = self.columns[0].type
# TODO: check all columns ? check for foreign key as well?
active_history = (
self.parent_property.active_history
or self.columns[0].primary_key
or mapper.version_id_col in set(self.columns)
)
_register_attribute(
self.parent_property,
mapper,
useobject=False,
compare_function=coltype.compare_values,
active_history=active_history,
)
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
# look through list of columns represented here
# to see which, if any, is present in the row.
for col in self.columns:
if adapter:
col = adapter.columns[col]
getter = result._getter(col, False)
if getter:
populators["quick"].append((self.key, getter))
break
else:
populators["expire"].append((self.key, True))
@log.class_logger
@properties.ColumnProperty.strategy_for(query_expression=True)
class ExpressionColumnLoader(ColumnLoader):
def __init__(self, parent, strategy_key):
super(ExpressionColumnLoader, self).__init__(parent, strategy_key)
null = sql.null()
self._have_default_expression = any(
not c.compare(null) for c in self.parent_property.columns
)
def setup_query(
self,
context,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
**kwargs
):
columns = None
if loadopt and "expression" in loadopt.local_opts:
columns = [loadopt.local_opts["expression"]]
elif self._have_default_expression:
columns = self.parent_property.columns
if columns is None:
return
for c in columns:
if adapter:
c = adapter.columns[c]
column_collection.append(c)
fetch = columns[0]
if adapter:
fetch = adapter.columns[fetch]
memoized_populators[self.parent_property] = fetch
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
# look through list of columns represented here
# to see which, if any, is present in the row.
if loadopt and "expression" in loadopt.local_opts:
columns = [loadopt.local_opts["expression"]]
for col in columns:
if adapter:
col = adapter.columns[col]
getter = result._getter(col, False)
if getter:
populators["quick"].append((self.key, getter))
break
else:
populators["expire"].append((self.key, True))
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self.parent_property,
mapper,
useobject=False,
compare_function=self.columns[0].type.compare_values,
accepts_scalar_loader=False,
)
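# ExpressionColumnLoader serves the query_expression() construct; a sketch
# of supplying the ad-hoc expression at query time (names hypothetical):
#
#     from sqlalchemy.orm import query_expression, with_expression
#
#     class A(Base):
#         __tablename__ = "a"
#         id = Column(Integer, primary_key=True)
#         expr = query_expression()
#
#     a1 = (
#         session.query(A)
#         .options(with_expression(A.expr, A.id * 2))
#         .first()
#     )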
@log.class_logger
@properties.ColumnProperty.strategy_for(deferred=True, instrument=True)
@properties.ColumnProperty.strategy_for(do_nothing=True)
class DeferredColumnLoader(LoaderStrategy):
"""Provide loading behavior for a deferred :class:`.ColumnProperty`."""
__slots__ = "columns", "group"
def __init__(self, parent, strategy_key):
super(DeferredColumnLoader, self).__init__(parent, strategy_key)
if hasattr(self.parent_property, "composite_class"):
raise NotImplementedError(
"Deferred loading for composite " "types not implemented yet"
)
self.columns = self.parent_property.columns
self.group = self.parent_property.group
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
# this path currently does not check the result
# for the column; this is because in most cases we are
# working just with the setup_query() directive which does
# not support this, and the behavior here should be consistent.
if not self.is_class_level:
set_deferred_for_local_state = (
self.parent_property._deferred_column_loader
)
populators["new"].append((self.key, set_deferred_for_local_state))
else:
populators["expire"].append((self.key, False))
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self.parent_property,
mapper,
useobject=False,
compare_function=self.columns[0].type.compare_values,
callable_=self._load_for_state,
expire_missing=False,
)
def setup_query(
self,
context,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
only_load_props=None,
**kw
):
if (
(
loadopt
and "undefer_pks" in loadopt.local_opts
and set(self.columns).intersection(
self.parent._should_undefer_in_wildcard
)
)
or (
loadopt
and self.group
and loadopt.local_opts.get(
"undefer_group_%s" % self.group, False
)
)
or (only_load_props and self.key in only_load_props)
):
self.parent_property._get_strategy(
(("deferred", False), ("instrument", True))
).setup_query(
context,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
**kw
)
elif self.is_class_level:
memoized_populators[self.parent_property] = _SET_DEFERRED_EXPIRED
else:
memoized_populators[self.parent_property] = _DEFER_FOR_STATE
def _load_for_state(self, state, passive):
if not state.key:
return attributes.ATTR_EMPTY
if not passive & attributes.SQL_OK:
return attributes.PASSIVE_NO_RESULT
localparent = state.manager.mapper
if self.group:
toload = [
p.key
for p in localparent.iterate_properties
if isinstance(p, StrategizedProperty)
and isinstance(p.strategy, DeferredColumnLoader)
and p.group == self.group
]
else:
toload = [self.key]
# narrow the keys down to just those which have no history
group = [k for k in toload if k in state.unmodified]
session = _state_session(state)
if session is None:
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session; "
"deferred load operation of attribute '%s' cannot proceed"
% (orm_util.state_str(state), self.key)
)
query = session.query(localparent)
if (
loading.load_on_ident(
query, state.key, only_load_props=group, refresh_state=state
)
is None
):
raise orm_exc.ObjectDeletedError(state)
return attributes.ATTR_WAS_SET
class LoadDeferredColumns(object):
"""serializable loader object used by DeferredColumnLoader"""
def __init__(self, key):
self.key = key
def __call__(self, state, passive=attributes.PASSIVE_OFF):
key = self.key
localparent = state.manager.mapper
prop = localparent._props[key]
strategy = prop._strategies[DeferredColumnLoader]
return strategy._load_for_state(state, passive)
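# Sketch of the mapping and option forms served by DeferredColumnLoader
# (names hypothetical):
#
#     from sqlalchemy import Column, Integer, Text
#     from sqlalchemy.orm import deferred, undefer
#
#     class Book(Base):
#         __tablename__ = "book"
#         id = Column(Integer, primary_key=True)
#         summary = deferred(Column(Text))
#
#     book = session.query(Book).options(undefer("summary")).first()
#     book.summary  # already present; without undefer(), access emits SQL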
class AbstractRelationshipLoader(LoaderStrategy):
"""LoaderStratgies which deal with related objects."""
__slots__ = "mapper", "target", "uselist", "entity"
def __init__(self, parent, strategy_key):
super(AbstractRelationshipLoader, self).__init__(parent, strategy_key)
self.mapper = self.parent_property.mapper
self.entity = self.parent_property.entity
self.target = self.parent_property.target
self.uselist = self.parent_property.uselist
@log.class_logger
@properties.RelationshipProperty.strategy_for(do_nothing=True)
class DoNothingLoader(LoaderStrategy):
"""Relationship loader that makes no change to the object's state.
Compared to NoLoader, this loader does not initialize the
collection/attribute to empty/none; the usual default LazyLoader will
take effect.
"""
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="noload")
@properties.RelationshipProperty.strategy_for(lazy=None)
class NoLoader(AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.RelationshipProperty`
with "lazy=None".
"""
__slots__ = ()
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self.parent_property,
mapper,
useobject=True,
typecallable=self.parent_property.collection_class,
)
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
def invoke_no_load(state, dict_, row):
if self.uselist:
state.manager.get_impl(self.key).initialize(state, dict_)
else:
dict_[self.key] = None
populators["new"].append((self.key, invoke_no_load))
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy=True)
@properties.RelationshipProperty.strategy_for(lazy="select")
@properties.RelationshipProperty.strategy_for(lazy="raise")
@properties.RelationshipProperty.strategy_for(lazy="raise_on_sql")
@properties.RelationshipProperty.strategy_for(lazy="baked_select")
class LazyLoader(AbstractRelationshipLoader, util.MemoizedSlots):
"""Provide loading behavior for a :class:`.RelationshipProperty`
with "lazy=True", that is loads when first accessed.
"""
__slots__ = (
"_lazywhere",
"_rev_lazywhere",
"use_get",
"is_aliased_class",
"_bind_to_col",
"_equated_columns",
"_rev_bind_to_col",
"_rev_equated_columns",
"_simple_lazy_clause",
"_raise_always",
"_raise_on_sql",
"_bakery",
)
def __init__(self, parent, strategy_key):
super(LazyLoader, self).__init__(parent, strategy_key)
self._raise_always = self.strategy_opts["lazy"] == "raise"
self._raise_on_sql = self.strategy_opts["lazy"] == "raise_on_sql"
self.is_aliased_class = inspect(self.entity).is_aliased_class
join_condition = self.parent_property._join_condition
(
self._lazywhere,
self._bind_to_col,
self._equated_columns,
) = join_condition.create_lazy_clause()
(
self._rev_lazywhere,
self._rev_bind_to_col,
self._rev_equated_columns,
) = join_condition.create_lazy_clause(reverse_direction=True)
self.logger.info("%s lazy loading clause %s", self, self._lazywhere)
# determine if our "lazywhere" clause is the same as the mapper's
# get() clause. then we can just use mapper.get()
#
# TODO: the "not self.uselist" can be taken out entirely; a m2o
# load that populates for a list (very unusual, but is possible with
# the API) can still set for "None" and the attribute system will
# populate as an empty list.
self.use_get = (
not self.is_aliased_class
and not self.uselist
and self.entity._get_clause[0].compare(
self._lazywhere,
use_proxies=True,
equivalents=self.mapper._equivalent_columns,
)
)
if self.use_get:
for col in list(self._equated_columns):
if col in self.mapper._equivalent_columns:
for c in self.mapper._equivalent_columns[col]:
self._equated_columns[c] = self._equated_columns[col]
self.logger.info(
"%s will use query.get() to " "optimize instance loads", self
)
def init_class_attribute(self, mapper):
self.is_class_level = True
active_history = (
self.parent_property.active_history
or self.parent_property.direction is not interfaces.MANYTOONE
or not self.use_get
)
# MANYTOONE currently only needs the
# "old" value for delete-orphan
# cascades. the required _SingleParentValidator
# will enable active_history
# in that case. otherwise we don't need the
# "old" value during backref operations.
_register_attribute(
self.parent_property,
mapper,
useobject=True,
callable_=self._load_for_state,
typecallable=self.parent_property.collection_class,
active_history=active_history,
)
def _memoized_attr__simple_lazy_clause(self):
criterion, bind_to_col = (self._lazywhere, self._bind_to_col)
params = []
def visit_bindparam(bindparam):
bindparam.unique = False
visitors.traverse(criterion, {}, {"bindparam": visit_bindparam})
def visit_bindparam(bindparam):
if bindparam._identifying_key in bind_to_col:
params.append(
(
bindparam.key,
bind_to_col[bindparam._identifying_key],
None,
)
)
elif bindparam.callable is None:
params.append((bindparam.key, None, bindparam.value))
criterion = visitors.cloned_traverse(
criterion, {}, {"bindparam": visit_bindparam}
)
return criterion, params
def _generate_lazy_clause(self, state, passive):
criterion, param_keys = self._simple_lazy_clause
if state is None:
return sql_util.adapt_criterion_to_null(
criterion, [key for key, ident, value in param_keys]
)
mapper = self.parent_property.parent
o = state.obj() # strong ref
dict_ = attributes.instance_dict(o)
if passive & attributes.INIT_OK:
passive ^= attributes.INIT_OK
params = {}
for key, ident, value in param_keys:
if ident is not None:
if passive and passive & attributes.LOAD_AGAINST_COMMITTED:
value = mapper._get_committed_state_attr_by_column(
state, dict_, ident, passive
)
else:
value = mapper._get_state_attr_by_column(
state, dict_, ident, passive
)
params[key] = value
return criterion, params
def _invoke_raise_load(self, state, passive, lazy):
raise sa_exc.InvalidRequestError(
"'%s' is not available due to lazy='%s'" % (self, lazy)
)
def _load_for_state(self, state, passive):
if not state.key and (
(
not self.parent_property.load_on_pending
and not state._load_pending
)
or not state.session_id
):
return attributes.ATTR_EMPTY
pending = not state.key
primary_key_identity = None
if (not passive & attributes.SQL_OK and not self.use_get) or (
not passive & attributes.NON_PERSISTENT_OK and pending
):
return attributes.PASSIVE_NO_RESULT
if (
# we were given lazy="raise"
self._raise_always
# the no_raise history-related flag was not passed
and not passive & attributes.NO_RAISE
and (
# if we are use_get and related_object_ok is disabled,
# which means we are at most looking in the identity map
# for history purposes or otherwise returning
# PASSIVE_NO_RESULT, don't raise. This is also a
# history-related flag
not self.use_get
or passive & attributes.RELATED_OBJECT_OK
)
):
self._invoke_raise_load(state, passive, "raise")
session = _state_session(state)
if not session:
if passive & attributes.NO_RAISE:
return attributes.PASSIVE_NO_RESULT
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session; "
"lazy load operation of attribute '%s' cannot proceed"
% (orm_util.state_str(state), self.key)
)
# if we have a simple primary key load, check the
# identity map without generating a Query at all
if self.use_get:
primary_key_identity = self._get_ident_for_use_get(
session, state, passive
)
if attributes.PASSIVE_NO_RESULT in primary_key_identity:
return attributes.PASSIVE_NO_RESULT
elif attributes.NEVER_SET in primary_key_identity:
return attributes.NEVER_SET
if _none_set.issuperset(primary_key_identity):
return None
# look for this identity in the identity map. Delegate to the
# Query class in use, as it may have special rules for how it
# does this, including how it decides what the correct
# identity_token would be for this identity.
instance = session.query()._identity_lookup(
self.entity,
primary_key_identity,
passive=passive,
lazy_loaded_from=state,
)
if instance is not None:
if instance is attributes.PASSIVE_CLASS_MISMATCH:
return None
else:
return instance
elif (
not passive & attributes.SQL_OK
or not passive & attributes.RELATED_OBJECT_OK
):
return attributes.PASSIVE_NO_RESULT
return self._emit_lazyload(
session, state, primary_key_identity, passive
)
def _get_ident_for_use_get(self, session, state, passive):
instance_mapper = state.manager.mapper
if passive & attributes.LOAD_AGAINST_COMMITTED:
get_attr = instance_mapper._get_committed_state_attr_by_column
else:
get_attr = instance_mapper._get_state_attr_by_column
dict_ = state.dict
return [
get_attr(state, dict_, self._equated_columns[pk], passive=passive)
for pk in self.mapper.primary_key
]
@util.dependencies("sqlalchemy.ext.baked")
def _memoized_attr__bakery(self, baked):
return baked.bakery(size=50)
@util.dependencies("sqlalchemy.orm.strategy_options")
def _emit_lazyload(
self, strategy_options, session, state, primary_key_identity, passive
):
# emit lazy load now using BakedQuery, to cut way down on the overhead
# of generating queries.
# there are two big things we are trying to guard against here:
#
# 1. two different lazy loads that need to have a different result,
# being cached on the same key. The results between two lazy loads
# can be different due to the options passed to the query, which
# take effect for descendant objects. Therefore we have to make
# sure paths and load options generate good cache keys, and if they
# don't, we don't cache.
# 2. a lazy load that gets cached on a key that includes some
# "throwaway" object, like a per-query AliasedClass, meaning
# the cache key will never be seen again and the cache itself
# will fill up. (the cache is an LRU cache, so while we won't
# run out of memory, it will perform terribly when it's full. A
# warning is emitted if this occurs.) We must prevent the
# generation of a cache key that is including a throwaway object
# in the key.
# note that "lazy='select'" and "lazy=True" make two separate
# lazy loaders. Currently the LRU cache is local to the LazyLoader,
# however add ourselves to the initial cache key just to future
# proof in case it moves
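        # illustrative sketch, not part of the library: an option that
        # carries a throwaway alias, e.g.
        #
        #     some_alias = aliased(Child)
        #     session.query(Parent).options(
        #         lazyload(Parent.child.of_type(some_alias))
        #     )
        #
        # (Parent / Child being hypothetical mapped classes) produces a
        # path element with no stable cache key, so the BakedQuery below
        # spoils itself rather than caching on it.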
q = self._bakery(lambda session: session.query(self.entity), self)
q.add_criteria(
lambda q: q._adapt_all_clauses()._with_invoke_all_eagers(False),
self.parent_property,
)
if not self.parent_property.bake_queries:
q.spoil(full=True)
if self.parent_property.secondary is not None:
q.add_criteria(
lambda q: q.select_from(
self.mapper, self.parent_property.secondary
)
)
pending = not state.key
# don't autoflush on pending
if pending or passive & attributes.NO_AUTOFLUSH:
q.add_criteria(lambda q: q.autoflush(False))
if state.load_options:
# here, if any of the options cannot return a cache key,
# the BakedQuery "spoils" and caching will not occur. a path
# that features Cls.attribute.of_type(some_alias) will cancel
# caching, for example, since "some_alias" is user-defined and
# is usually a throwaway object.
effective_path = state.load_path[self.parent_property]
q._add_lazyload_options(state.load_options, effective_path)
if self.use_get:
if self._raise_on_sql:
self._invoke_raise_load(state, passive, "raise_on_sql")
return (
q(session)
.with_post_criteria(lambda q: q._set_lazyload_from(state))
._load_on_pk_identity(
session.query(self.mapper), primary_key_identity
)
)
if self.parent_property.order_by:
q.add_criteria(
lambda q: q.order_by(
*util.to_list(self.parent_property.order_by)
)
)
for rev in self.parent_property._reverse_property:
# reverse props that are MANYTOONE are loading *this*
# object from get(), so don't need to eager out to those.
if (
rev.direction is interfaces.MANYTOONE
and rev._use_get
and not isinstance(rev.strategy, LazyLoader)
):
q.add_criteria(
lambda q: q.options(
strategy_options.Load.for_existing_path(
q._current_path[rev.parent]
).lazyload(rev.key)
)
)
lazy_clause, params = self._generate_lazy_clause(state, passive)
if pending:
if util.has_intersection(orm_util._none_set, params.values()):
return None
elif util.has_intersection(orm_util._never_set, params.values()):
return None
if self._raise_on_sql:
self._invoke_raise_load(state, passive, "raise_on_sql")
q.add_criteria(lambda q: q.filter(lazy_clause))
# set parameters in the query such that we don't overwrite
# parameters that are already set within it
def set_default_params(q):
params.update(q._params)
q._params = params
return q
result = (
q(session)
.with_post_criteria(lambda q: q._set_lazyload_from(state))
.with_post_criteria(set_default_params)
.all()
)
if self.uselist:
return result
else:
            num_rows = len(result)
            if num_rows:
                if num_rows > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for lazily-loaded attribute '%s' "
% self.parent_property
)
return result[0]
else:
return None
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
key = self.key
if not self.is_class_level:
# we are not the primary manager for this attribute
# on this class - set up a
# per-instance lazyloader, which will override the
# class-level behavior.
# this currently only happens when using a
# "lazyload" option on a "no load"
# attribute - "eager" attributes always have a
# class-level lazyloader installed.
set_lazy_callable = (
InstanceState._instance_level_callable_processor
)(mapper.class_manager, LoadLazyAttribute(key, self), key)
populators["new"].append((self.key, set_lazy_callable))
elif context.populate_existing or mapper.always_refresh:
def reset_for_lazy_callable(state, dict_, row):
# we are the primary manager for this attribute on
# this class - reset its
# per-instance attribute state, so that the class-level
# lazy loader is
# executed when next referenced on this instance.
# this is needed in
# populate_existing() types of scenarios to reset
# any existing state.
state._reset(dict_, key)
populators["new"].append((self.key, reset_for_lazy_callable))
class LoadLazyAttribute(object):
"""serializable loader object used by LazyLoader"""
def __init__(self, key, initiating_strategy):
self.key = key
self.strategy_key = initiating_strategy.strategy_key
def __call__(self, state, passive=attributes.PASSIVE_OFF):
key = self.key
instance_mapper = state.manager.mapper
prop = instance_mapper._props[key]
strategy = prop._strategies[self.strategy_key]
return strategy._load_for_state(state, passive)
@properties.RelationshipProperty.strategy_for(lazy="immediate")
class ImmediateLoader(AbstractRelationshipLoader):
__slots__ = ()
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
def setup_query(
self,
context,
entity,
path,
loadopt,
adapter,
column_collection=None,
parentmapper=None,
**kwargs
):
pass
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
def load_immediate(state, dict_, row):
state.get_impl(self.key).get(state, dict_)
populators["delayed"].append((self.key, load_immediate))
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="subquery")
class SubqueryLoader(AbstractRelationshipLoader):
__slots__ = ("join_depth",)
def __init__(self, parent, strategy_key):
super(SubqueryLoader, self).__init__(parent, strategy_key)
self.join_depth = self.parent_property.join_depth
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
def setup_query(
self,
context,
entity,
path,
loadopt,
adapter,
column_collection=None,
parentmapper=None,
**kwargs
):
if not context.query._enable_eagerloads:
return
elif context.query._yield_per:
context.query._no_yield_per("subquery")
path = path[self.parent_property]
# build up a path indicating the path from the leftmost
# entity to the thing we're subquery loading.
with_poly_entity = path.get(
context.attributes, "path_with_polymorphic", None
)
if with_poly_entity is not None:
effective_entity = with_poly_entity
else:
effective_entity = self.entity
subq_path = context.attributes.get(
("subquery_path", None), orm_util.PathRegistry.root
)
subq_path = subq_path + path
# if not via query option, check for
# a cycle
if not path.contains(context.attributes, "loader"):
if self.join_depth:
if (
(
context.query._current_path.length
if context.query._current_path
else 0
)
+ path.length
) / 2 > self.join_depth:
return
elif subq_path.contains_mapper(self.mapper):
return
(
leftmost_mapper,
leftmost_attr,
leftmost_relationship,
) = self._get_leftmost(subq_path)
orig_query = context.attributes.get(
("orig_query", SubqueryLoader), context.query
)
# generate a new Query from the original, then
# produce a subquery from it.
left_alias = self._generate_from_original_query(
orig_query,
leftmost_mapper,
leftmost_attr,
leftmost_relationship,
entity.entity_zero,
)
# generate another Query that will join the
# left alias to the target relationships.
# basically doing a longhand
# "from_self()". (from_self() itself not quite industrial
# strength enough for all contingencies...but very close)
q = orig_query.session.query(effective_entity)
q._attributes = {
("orig_query", SubqueryLoader): orig_query,
("subquery_path", None): subq_path,
}
q = q._set_enable_single_crit(False)
to_join, local_attr, parent_alias = self._prep_for_joins(
left_alias, subq_path
)
q = q.add_columns(*local_attr)
q = self._apply_joins(
q, to_join, left_alias, parent_alias, effective_entity
)
q = self._setup_options(q, subq_path, orig_query, effective_entity)
q = self._setup_outermost_orderby(q)
# add new query to attributes to be picked up
# by create_row_processor
path.set(context.attributes, "subquery", q)
def _get_leftmost(self, subq_path):
subq_path = subq_path.path
subq_mapper = orm_util._class_to_mapper(subq_path[0])
# determine attributes of the leftmost mapper
if (
self.parent.isa(subq_mapper)
and self.parent_property is subq_path[1]
):
leftmost_mapper, leftmost_prop = self.parent, self.parent_property
else:
leftmost_mapper, leftmost_prop = subq_mapper, subq_path[1]
leftmost_cols = leftmost_prop.local_columns
leftmost_attr = [
getattr(
subq_path[0].entity, leftmost_mapper._columntoproperty[c].key
)
for c in leftmost_cols
]
return leftmost_mapper, leftmost_attr, leftmost_prop
def _generate_from_original_query(
self,
orig_query,
leftmost_mapper,
leftmost_attr,
leftmost_relationship,
orig_entity,
):
# reformat the original query
# to look only for significant columns
q = orig_query._clone().correlate(None)
# set the query's "FROM" list explicitly to what the
# FROM list would be in any case, as we will be limiting
# the columns in the SELECT list which may no longer include
# all entities mentioned in things like WHERE, JOIN, etc.
if not q._from_obj:
q._set_select_from(
list(
set(
[
ent["entity"]
for ent in orig_query.column_descriptions
if ent["entity"] is not None
]
)
),
False,
)
# select from the identity columns of the outer (specifically, these
# are the 'local_cols' of the property). This will remove
# other columns from the query that might suggest the right entity
# which is why we do _set_select_from above.
target_cols = q._adapt_col_list(leftmost_attr)
q._set_entities(target_cols)
distinct_target_key = leftmost_relationship.distinct_target_key
if distinct_target_key is True:
q._distinct = True
elif distinct_target_key is None:
# if target_cols refer to a non-primary key or only
# part of a composite primary key, set the q as distinct
for t in set(c.table for c in target_cols):
if not set(target_cols).issuperset(t.primary_key):
q._distinct = True
break
if q._order_by is False:
q._order_by = leftmost_mapper.order_by
# don't need ORDER BY if no limit/offset
if q._limit is None and q._offset is None:
q._order_by = None
# the original query now becomes a subquery
# which we'll join onto.
embed_q = q.with_labels().subquery()
left_alias = orm_util.AliasedClass(
leftmost_mapper, embed_q, use_mapper_path=True
)
return left_alias
def _prep_for_joins(self, left_alias, subq_path):
# figure out what's being joined. a.k.a. the fun part
to_join = []
pairs = list(subq_path.pairs())
for i, (mapper, prop) in enumerate(pairs):
if i > 0:
# look at the previous mapper in the chain -
# if it is as or more specific than this prop's
# mapper, use that instead.
# note we have an assumption here that
# the non-first element is always going to be a mapper,
# not an AliasedClass
prev_mapper = pairs[i - 1][1].mapper
to_append = prev_mapper if prev_mapper.isa(mapper) else mapper
else:
to_append = mapper
to_join.append((to_append, prop.key))
# determine the immediate parent class we are joining from,
# which needs to be aliased.
if len(to_join) < 2:
# in the case of a one level eager load, this is the
# leftmost "left_alias".
parent_alias = left_alias
else:
info = inspect(to_join[-1][0])
if info.is_aliased_class:
parent_alias = info.entity
else:
# alias a plain mapper as we may be
# joining multiple times
parent_alias = orm_util.AliasedClass(
info.entity, use_mapper_path=True
)
local_cols = self.parent_property.local_columns
local_attr = [
getattr(parent_alias, self.parent._columntoproperty[c].key)
for c in local_cols
]
return to_join, local_attr, parent_alias
def _apply_joins(
self, q, to_join, left_alias, parent_alias, effective_entity
):
ltj = len(to_join)
if ltj == 1:
to_join = [
getattr(left_alias, to_join[0][1]).of_type(effective_entity)
]
elif ltj == 2:
to_join = [
getattr(left_alias, to_join[0][1]).of_type(parent_alias),
getattr(parent_alias, to_join[-1][1]).of_type(
effective_entity
),
]
elif ltj > 2:
middle = [
(
orm_util.AliasedClass(item[0])
if not inspect(item[0]).is_aliased_class
else item[0].entity,
item[1],
)
for item in to_join[1:-1]
]
inner = []
while middle:
item = middle.pop(0)
attr = getattr(item[0], item[1])
if middle:
attr = attr.of_type(middle[0][0])
else:
attr = attr.of_type(parent_alias)
inner.append(attr)
to_join = (
[getattr(left_alias, to_join[0][1]).of_type(inner[0].parent)]
+ inner
+ [
getattr(parent_alias, to_join[-1][1]).of_type(
effective_entity
)
]
)
for attr in to_join:
q = q.join(attr, from_joinpoint=True)
return q
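    # illustrative sketch (hypothetical mappers A and B): for a
    # two-level path A.bs -> B.cs, _apply_joins receives
    #
    #     to_join = [(A_mapper, "bs"), (B_mapper, "cs")]
    #
    # and the ltj == 2 branch emits approximately
    #
    #     q.join(left_alias.bs.of_type(parent_alias))
    #      .join(parent_alias.cs.of_type(effective_entity))
    #
    # aliasing the middle entity so repeated traversals of the same
    # mapper do not collide.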
def _setup_options(self, q, subq_path, orig_query, effective_entity):
# propagate loader options etc. to the new query.
# these will fire relative to subq_path.
q = q._with_current_path(subq_path)
q = q._conditional_options(*orig_query._with_options)
if orig_query._populate_existing:
q._populate_existing = orig_query._populate_existing
return q
def _setup_outermost_orderby(self, q):
if self.parent_property.order_by:
# if there's an ORDER BY, alias it the same
# way joinedloader does, but we have to pull out
# the "eagerjoin" from the query.
# this really only picks up the "secondary" table
# right now.
eagerjoin = q._from_obj[0]
eager_order_by = eagerjoin._target_adapter.copy_and_process(
util.to_list(self.parent_property.order_by)
)
q = q.order_by(*eager_order_by)
return q
class _SubqCollections(object):
"""Given a :class:`_query.Query` used to emit the "subquery load",
provide a load interface that executes the query at the
first moment a value is needed.
"""
_data = None
def __init__(self, subq):
self.subq = subq
def get(self, key, default):
if self._data is None:
self._load()
return self._data.get(key, default)
def _load(self):
self._data = collections.defaultdict(list)
for k, v in itertools.groupby(self.subq, lambda x: x[1:]):
self._data[k].extend(vv[0] for vv in v)
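        # illustrative sketch (hypothetical rows): the subquery yields
        # tuples of (related_entity, *parent_key_cols), which _load
        # collapses as
        #
        #     rows = [(c1, 1), (c2, 1), (c3, 2)]
        #     -> {(1,): [c1, c2], (2,): [c3]}
        #
        # since each group extend()s onto a defaultdict entry, the rows
        # do not strictly need to arrive sorted by key.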
def loader(self, state, dict_, row):
if self._data is None:
self._load()
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." % self
)
path = path[self.parent_property]
subq = path.get(context.attributes, "subquery")
if subq is None:
return
assert subq.session is context.session, (
"Subquery session doesn't refer to that of "
"our context. Are there broken context caching "
"schemes being used?"
)
local_cols = self.parent_property.local_columns
# cache the loaded collections in the context
# so that inheriting mappers don't re-load when they
# call upon create_row_processor again
collections = path.get(context.attributes, "collections")
if collections is None:
collections = self._SubqCollections(subq)
path.set(context.attributes, "collections", collections)
if adapter:
local_cols = [adapter.columns[c] for c in local_cols]
if self.uselist:
self._create_collection_loader(
context, collections, local_cols, populators
)
else:
self._create_scalar_loader(
context, collections, local_cols, populators
)
def _create_collection_loader(
self, context, collections, local_cols, populators
):
def load_collection_from_subq(state, dict_, row):
collection = collections.get(
tuple([row[col] for col in local_cols]), ()
)
state.get_impl(self.key).set_committed_value(
state, dict_, collection
)
def load_collection_from_subq_existing_row(state, dict_, row):
if self.key not in dict_:
load_collection_from_subq(state, dict_, row)
populators["new"].append((self.key, load_collection_from_subq))
populators["existing"].append(
(self.key, load_collection_from_subq_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append((self.key, collections.loader))
def _create_scalar_loader(
self, context, collections, local_cols, populators
):
def load_scalar_from_subq(state, dict_, row):
collection = collections.get(
tuple([row[col] for col in local_cols]), (None,)
)
if len(collection) > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded attribute '%s' " % self
)
scalar = collection[0]
state.get_impl(self.key).set_committed_value(state, dict_, scalar)
def load_scalar_from_subq_existing_row(state, dict_, row):
if self.key not in dict_:
load_scalar_from_subq(state, dict_, row)
populators["new"].append((self.key, load_scalar_from_subq))
populators["existing"].append(
(self.key, load_scalar_from_subq_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append((self.key, collections.loader))
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="joined")
@properties.RelationshipProperty.strategy_for(lazy=False)
class JoinedLoader(AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.RelationshipProperty`
using joined eager loading.
"""
__slots__ = "join_depth", "_aliased_class_pool"
def __init__(self, parent, strategy_key):
super(JoinedLoader, self).__init__(parent, strategy_key)
self.join_depth = self.parent_property.join_depth
self._aliased_class_pool = []
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
def setup_query(
self,
context,
query_entity,
path,
loadopt,
adapter,
column_collection=None,
parentmapper=None,
chained_from_outerjoin=False,
**kwargs
):
"""Add a left outer join to the statement that's being constructed."""
if not context.query._enable_eagerloads:
return
elif context.query._yield_per and self.uselist:
context.query._no_yield_per("joined collection")
path = path[self.parent_property]
with_polymorphic = None
user_defined_adapter = (
self._init_user_defined_eager_proc(loadopt, context)
if loadopt
else False
)
if user_defined_adapter is not False:
(
clauses,
adapter,
add_to_collection,
) = self._setup_query_on_user_defined_adapter(
context, query_entity, path, adapter, user_defined_adapter
)
else:
# if not via query option, check for
# a cycle
if not path.contains(context.attributes, "loader"):
if self.join_depth:
if path.length / 2 > self.join_depth:
return
elif path.contains_mapper(self.mapper):
return
(
clauses,
adapter,
add_to_collection,
chained_from_outerjoin,
) = self._generate_row_adapter(
context,
query_entity,
path,
loadopt,
adapter,
column_collection,
parentmapper,
chained_from_outerjoin,
)
with_poly_entity = path.get(
context.attributes, "path_with_polymorphic", None
)
if with_poly_entity is not None:
with_polymorphic = inspect(
with_poly_entity
).with_polymorphic_mappers
else:
with_polymorphic = None
path = path[self.entity]
loading._setup_entity_query(
context,
self.mapper,
query_entity,
path,
clauses,
add_to_collection,
with_polymorphic=with_polymorphic,
parentmapper=self.mapper,
chained_from_outerjoin=chained_from_outerjoin,
)
if with_poly_entity is not None and None in set(
context.secondary_columns
):
raise sa_exc.InvalidRequestError(
"Detected unaliased columns when generating joined "
"load. Make sure to use aliased=True or flat=True "
"when using joined loading with with_polymorphic()."
)
def _init_user_defined_eager_proc(self, loadopt, context):
# check if the opt applies at all
if "eager_from_alias" not in loadopt.local_opts:
# nope
return False
path = loadopt.path.parent
# the option applies. check if the "user_defined_eager_row_processor"
# has been built up.
adapter = path.get(
context.attributes, "user_defined_eager_row_processor", False
)
if adapter is not False:
# just return it
return adapter
# otherwise figure it out.
alias = loadopt.local_opts["eager_from_alias"]
root_mapper, prop = path[-2:]
if alias is not None:
if isinstance(alias, str):
alias = prop.target.alias(alias)
adapter = sql_util.ColumnAdapter(
alias, equivalents=prop.mapper._equivalent_columns
)
else:
if path.contains(context.attributes, "path_with_polymorphic"):
with_poly_entity = path.get(
context.attributes, "path_with_polymorphic"
)
adapter = orm_util.ORMAdapter(
with_poly_entity,
equivalents=prop.mapper._equivalent_columns,
)
else:
adapter = context.query._polymorphic_adapters.get(
prop.mapper, None
)
path.set(
context.attributes, "user_defined_eager_row_processor", adapter
)
return adapter
def _setup_query_on_user_defined_adapter(
self, context, entity, path, adapter, user_defined_adapter
):
# apply some more wrapping to the "user defined adapter"
# if we are setting up the query for SQL render.
adapter = entity._get_entity_clauses(context.query, context)
if adapter and user_defined_adapter:
user_defined_adapter = user_defined_adapter.wrap(adapter)
path.set(
context.attributes,
"user_defined_eager_row_processor",
user_defined_adapter,
)
elif adapter:
user_defined_adapter = adapter
path.set(
context.attributes,
"user_defined_eager_row_processor",
user_defined_adapter,
)
add_to_collection = context.primary_columns
return user_defined_adapter, adapter, add_to_collection
def _gen_pooled_aliased_class(self, context):
# keep a local pool of AliasedClass objects that get re-used.
# we need one unique AliasedClass per query per appearance of our
# entity in the query.
if inspect(self.entity).is_aliased_class:
alt_selectable = inspect(self.entity).selectable
else:
alt_selectable = None
key = ("joinedloader_ac", self)
if key not in context.attributes:
context.attributes[key] = idx = 0
else:
context.attributes[key] = idx = context.attributes[key] + 1
if idx >= len(self._aliased_class_pool):
to_adapt = orm_util.AliasedClass(
self.mapper,
alias=alt_selectable.alias(flat=True)
if alt_selectable is not None
else None,
flat=True,
use_mapper_path=True,
)
# load up the .columns collection on the Alias() before
# the object becomes shared among threads. this prevents
# races for column identities.
inspect(to_adapt).selectable.c
self._aliased_class_pool.append(to_adapt)
return self._aliased_class_pool[idx]
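    # illustrative note: if the same relationship appears twice in one
    # query (e.g. two joinedload() paths reaching this entity), idx runs
    # 0, 1 within that query and the pool grows to two AliasedClass
    # objects, which later queries re-use in the same order.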
def _generate_row_adapter(
self,
context,
entity,
path,
loadopt,
adapter,
column_collection,
parentmapper,
chained_from_outerjoin,
):
with_poly_entity = path.get(
context.attributes, "path_with_polymorphic", None
)
if with_poly_entity:
to_adapt = with_poly_entity
else:
to_adapt = self._gen_pooled_aliased_class(context)
clauses = inspect(to_adapt)._memo(
("joinedloader_ormadapter", self),
orm_util.ORMAdapter,
to_adapt,
equivalents=self.mapper._equivalent_columns,
adapt_required=True,
allow_label_resolve=False,
anonymize_labels=True,
)
assert clauses.aliased_class is not None
if self.parent_property.uselist:
context.multi_row_eager_loaders = True
innerjoin = (
loadopt.local_opts.get("innerjoin", self.parent_property.innerjoin)
if loadopt is not None
else self.parent_property.innerjoin
)
if not innerjoin:
# if this is an outer join, all non-nested eager joins from
# this path must also be outer joins
chained_from_outerjoin = True
context.create_eager_joins.append(
(
self._create_eager_join,
entity,
path,
adapter,
parentmapper,
clauses,
innerjoin,
chained_from_outerjoin,
)
)
add_to_collection = context.secondary_columns
path.set(context.attributes, "eager_row_processor", clauses)
return clauses, adapter, add_to_collection, chained_from_outerjoin
def _create_eager_join(
self,
context,
query_entity,
path,
adapter,
parentmapper,
clauses,
innerjoin,
chained_from_outerjoin,
):
if parentmapper is None:
localparent = query_entity.mapper
else:
localparent = parentmapper
# whether or not the Query will wrap the selectable in a subquery,
# and then attach eager load joins to that (i.e., in the case of
# LIMIT/OFFSET etc.)
should_nest_selectable = (
context.multi_row_eager_loaders
and context.query._should_nest_selectable
)
query_entity_key = None
if (
query_entity not in context.eager_joins
and not should_nest_selectable
and context.from_clause
):
indexes = sql_util.find_left_clause_that_matches_given(
context.from_clause, query_entity.selectable
)
if len(indexes) > 1:
# for the eager load case, I can't reproduce this right
# now. For query.join() I can.
raise sa_exc.InvalidRequestError(
"Can't identify which query entity in which to joined "
"eager load from. Please use an exact match when "
"specifying the join path."
)
if indexes:
clause = context.from_clause[indexes[0]]
# join to an existing FROM clause on the query.
# key it to its list index in the eager_joins dict.
# Query._compile_context will adapt as needed and
# append to the FROM clause of the select().
query_entity_key, default_towrap = indexes[0], clause
if query_entity_key is None:
query_entity_key, default_towrap = (
query_entity,
query_entity.selectable,
)
towrap = context.eager_joins.setdefault(
query_entity_key, default_towrap
)
if adapter:
if getattr(adapter, "aliased_class", None):
# joining from an adapted entity. The adapted entity
# might be a "with_polymorphic", so resolve that to our
# specific mapper's entity before looking for our attribute
# name on it.
efm = inspect(adapter.aliased_class)._entity_for_mapper(
localparent
if localparent.isa(self.parent)
else self.parent
)
# look for our attribute on the adapted entity, else fall back
# to our straight property
onclause = getattr(efm.entity, self.key, self.parent_property)
else:
onclause = getattr(
orm_util.AliasedClass(
self.parent, adapter.selectable, use_mapper_path=True
),
self.key,
self.parent_property,
)
else:
onclause = self.parent_property
assert clauses.aliased_class is not None
attach_on_outside = (
not chained_from_outerjoin
or not innerjoin
or innerjoin == "unnested"
or query_entity.entity_zero.represents_outer_join
)
if attach_on_outside:
# this is the "classic" eager join case.
eagerjoin = orm_util._ORMJoin(
towrap,
clauses.aliased_class,
onclause,
isouter=not innerjoin
or query_entity.entity_zero.represents_outer_join
or (chained_from_outerjoin and isinstance(towrap, sql.Join)),
_left_memo=self.parent,
_right_memo=self.mapper,
)
else:
# all other cases are innerjoin=='nested' approach
eagerjoin = self._splice_nested_inner_join(
path, towrap, clauses, onclause
)
context.eager_joins[query_entity_key] = eagerjoin
# send a hint to the Query as to where it may "splice" this join
eagerjoin.stop_on = query_entity.selectable
if not parentmapper:
# for parentclause that is the non-eager end of the join,
# ensure all the parent cols in the primaryjoin are actually
# in the
# columns clause (i.e. are not deferred), so that aliasing applied
# by the Query propagates those columns outward.
# This has the effect
# of "undefering" those columns.
for col in sql_util._find_columns(
self.parent_property.primaryjoin
):
if localparent.persist_selectable.c.contains_column(col):
if adapter:
col = adapter.columns[col]
context.primary_columns.append(col)
if self.parent_property.order_by:
context.eager_order_by += (
eagerjoin._target_adapter.copy_and_process
)(util.to_list(self.parent_property.order_by))
def _splice_nested_inner_join(
self, path, join_obj, clauses, onclause, splicing=False
):
if splicing is False:
# first call is always handed a join object
# from the outside
assert isinstance(join_obj, orm_util._ORMJoin)
elif isinstance(join_obj, sql.selectable.FromGrouping):
return self._splice_nested_inner_join(
path, join_obj.element, clauses, onclause, splicing
)
elif not isinstance(join_obj, orm_util._ORMJoin):
if path[-2] is splicing:
return orm_util._ORMJoin(
join_obj,
clauses.aliased_class,
onclause,
isouter=False,
_left_memo=splicing,
_right_memo=path[-1].mapper,
)
else:
# only here if splicing == True
return None
target_join = self._splice_nested_inner_join(
path, join_obj.right, clauses, onclause, join_obj._right_memo
)
if target_join is None:
right_splice = False
target_join = self._splice_nested_inner_join(
path, join_obj.left, clauses, onclause, join_obj._left_memo
)
if target_join is None:
# should only return None when recursively called,
# e.g. splicing==True
assert (
splicing is not False
), "assertion failed attempting to produce joined eager loads"
return None
else:
right_splice = True
if right_splice:
# for a right splice, attempt to flatten out
# a JOIN b JOIN c JOIN .. to avoid needless
# parenthesis nesting
if not join_obj.isouter and not target_join.isouter:
eagerjoin = join_obj._splice_into_center(target_join)
else:
eagerjoin = orm_util._ORMJoin(
join_obj.left,
target_join,
join_obj.onclause,
isouter=join_obj.isouter,
_left_memo=join_obj._left_memo,
)
else:
eagerjoin = orm_util._ORMJoin(
target_join,
join_obj.right,
join_obj.onclause,
isouter=join_obj.isouter,
_right_memo=join_obj._right_memo,
)
eagerjoin._target_adapter = target_join._target_adapter
return eagerjoin
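    # illustrative sketch (hypothetical entities): given an existing
    # eager join chain
    #
    #     a LEFT OUTER JOIN b
    #
    # and a nested innerjoin load of b -> c, the splice produces
    #
    #     a LEFT OUTER JOIN (b JOIN c)
    #
    # rather than appending another outer join; the recursion above
    # walks the join tree right side first, matching path[-2] against
    # each join's memoized mapper to locate the splice point.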
def _create_eager_adapter(self, context, result, adapter, path, loadopt):
user_defined_adapter = (
self._init_user_defined_eager_proc(loadopt, context)
if loadopt
else False
)
if user_defined_adapter is not False:
decorator = user_defined_adapter
# user defined eagerloads are part of the "primary"
# portion of the load.
# the adapters applied to the Query should be honored.
if context.adapter and decorator:
decorator = decorator.wrap(context.adapter)
elif context.adapter:
decorator = context.adapter
else:
decorator = path.get(context.attributes, "eager_row_processor")
if decorator is None:
return False
if self.mapper._result_has_identity_key(result, decorator):
return decorator
else:
# no identity key - don't return a row
# processor, will cause a degrade to lazy
return False
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." % self
)
our_path = path[self.parent_property]
eager_adapter = self._create_eager_adapter(
context, result, adapter, our_path, loadopt
)
if eager_adapter is not False:
key = self.key
_instance = loading._instance_processor(
self.mapper,
context,
result,
our_path[self.entity],
eager_adapter,
)
if not self.uselist:
self._create_scalar_loader(context, key, _instance, populators)
else:
self._create_collection_loader(
context, key, _instance, populators
)
else:
self.parent_property._get_strategy(
(("lazy", "select"),)
).create_row_processor(
context, path, loadopt, mapper, result, adapter, populators
)
def _create_collection_loader(self, context, key, _instance, populators):
def load_collection_from_joined_new_row(state, dict_, row):
collection = attributes.init_state_collection(state, dict_, key)
result_list = util.UniqueAppender(
collection, "append_without_event"
)
context.attributes[(state, key)] = result_list
inst = _instance(row)
if inst is not None:
result_list.append(inst)
def load_collection_from_joined_existing_row(state, dict_, row):
if (state, key) in context.attributes:
result_list = context.attributes[(state, key)]
else:
# appender_key can be absent from context.attributes
# with isnew=False when self-referential eager loading
# is used; the same instance may be present in two
# distinct sets of result columns
collection = attributes.init_state_collection(
state, dict_, key
)
result_list = util.UniqueAppender(
collection, "append_without_event"
)
context.attributes[(state, key)] = result_list
inst = _instance(row)
if inst is not None:
result_list.append(inst)
def load_collection_from_joined_exec(state, dict_, row):
_instance(row)
populators["new"].append(
(self.key, load_collection_from_joined_new_row)
)
populators["existing"].append(
(self.key, load_collection_from_joined_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append(
(self.key, load_collection_from_joined_exec)
)
def _create_scalar_loader(self, context, key, _instance, populators):
def load_scalar_from_joined_new_row(state, dict_, row):
# set a scalar object instance directly on the parent
# object, bypassing InstrumentedAttribute event handlers.
dict_[key] = _instance(row)
def load_scalar_from_joined_existing_row(state, dict_, row):
# call _instance on the row, even though the object has
# been created, so that we further descend into properties
existing = _instance(row)
# conflicting value already loaded, this shouldn't happen
if key in dict_:
if existing is not dict_[key]:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded attribute '%s' "
% self
)
else:
# this case is when one row has multiple loads of the
# same entity (e.g. via aliasing), one has an attribute
# that the other doesn't.
dict_[key] = existing
def load_scalar_from_joined_exec(state, dict_, row):
_instance(row)
populators["new"].append((self.key, load_scalar_from_joined_new_row))
populators["existing"].append(
(self.key, load_scalar_from_joined_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append(
(self.key, load_scalar_from_joined_exec)
)
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="selectin")
class SelectInLoader(AbstractRelationshipLoader, util.MemoizedSlots):
__slots__ = (
"join_depth",
"omit_join",
"_parent_alias",
"_query_info",
"_fallback_query_info",
"_bakery",
)
query_info = collections.namedtuple(
"queryinfo",
[
"load_only_child",
"load_with_join",
"in_expr",
"pk_cols",
"zero_idx",
"child_lookup_cols",
],
)
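    # illustrative summary, not part of the library: the two "omit join"
    # modes captured by query_info are, roughly,
    #
    #     many-to-one:  load_only_child=True; the IN expression is
    #         against the primary key of the related class, e.g.
    #         SELECT ... FROM related WHERE related.id IN (...)
    #     one-to-many:  load_only_child=False; the IN expression is
    #         against the foreign key columns on the related class that
    #         point to the parent
    #
    # with load_with_join=True as the fallback that joins through an
    # aliased parent when neither shortcut applies.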
_chunksize = 500
def __init__(self, parent, strategy_key):
super(SelectInLoader, self).__init__(parent, strategy_key)
self.join_depth = self.parent_property.join_depth
is_m2o = self.parent_property.direction is interfaces.MANYTOONE
if self.parent_property.omit_join is not None:
self.omit_join = self.parent_property.omit_join
else:
lazyloader = self.parent_property._get_strategy(
(("lazy", "select"),)
)
if is_m2o:
self.omit_join = lazyloader.use_get
else:
self.omit_join = self.parent._get_clause[0].compare(
lazyloader._rev_lazywhere,
use_proxies=True,
equivalents=self.parent._equivalent_columns,
)
if self.omit_join:
if is_m2o:
self._query_info = self._init_for_omit_join_m2o()
self._fallback_query_info = self._init_for_join()
else:
self._query_info = self._init_for_omit_join()
else:
self._query_info = self._init_for_join()
def _init_for_omit_join(self):
pk_to_fk = dict(
self.parent_property._join_condition.local_remote_pairs
)
pk_to_fk.update(
(equiv, pk_to_fk[k])
for k in list(pk_to_fk)
for equiv in self.parent._equivalent_columns.get(k, ())
)
pk_cols = fk_cols = [
pk_to_fk[col] for col in self.parent.primary_key if col in pk_to_fk
]
if len(fk_cols) > 1:
in_expr = sql.tuple_(*fk_cols)
zero_idx = False
else:
in_expr = fk_cols[0]
zero_idx = True
return self.query_info(False, False, in_expr, pk_cols, zero_idx, None)
def _init_for_omit_join_m2o(self):
pk_cols = self.mapper.primary_key
if len(pk_cols) > 1:
in_expr = sql.tuple_(*pk_cols)
zero_idx = False
else:
in_expr = pk_cols[0]
zero_idx = True
lazyloader = self.parent_property._get_strategy((("lazy", "select"),))
lookup_cols = [lazyloader._equated_columns[pk] for pk in pk_cols]
return self.query_info(
True, False, in_expr, pk_cols, zero_idx, lookup_cols
)
def _init_for_join(self):
self._parent_alias = aliased(self.parent.class_)
pa_insp = inspect(self._parent_alias)
pk_cols = [
pa_insp._adapt_element(col) for col in self.parent.primary_key
]
if len(pk_cols) > 1:
in_expr = sql.tuple_(*pk_cols)
zero_idx = False
else:
in_expr = pk_cols[0]
zero_idx = True
return self.query_info(False, True, in_expr, pk_cols, zero_idx, None)
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
@util.dependencies("sqlalchemy.ext.baked")
def _memoized_attr__bakery(self, baked):
return baked.bakery(size=50)
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." % self
)
selectin_path = (
context.query._current_path or orm_util.PathRegistry.root
) + path
if not orm_util._entity_isa(path[-1], self.parent):
return
if loading.PostLoad.path_exists(
context, selectin_path, self.parent_property
):
return
path_w_prop = path[self.parent_property]
selectin_path_w_prop = selectin_path[self.parent_property]
# build up a path indicating the path from the leftmost
# entity to the thing we're subquery loading.
with_poly_entity = path_w_prop.get(
context.attributes, "path_with_polymorphic", None
)
if with_poly_entity is not None:
effective_entity = with_poly_entity
else:
effective_entity = self.entity
if not path_w_prop.contains(context.attributes, "loader"):
if self.join_depth:
if selectin_path_w_prop.length / 2 > self.join_depth:
return
elif selectin_path_w_prop.contains_mapper(self.mapper):
return
loading.PostLoad.callable_for_path(
context,
selectin_path,
self.parent,
self.parent_property,
self._load_for_path,
effective_entity,
)
@util.dependencies("sqlalchemy.ext.baked")
def _load_for_path(
self, baked, context, path, states, load_only, effective_entity
):
if load_only and self.key not in load_only:
return
query_info = self._query_info
if query_info.load_only_child:
our_states = collections.defaultdict(list)
none_states = []
mapper = self.parent
for state, overwrite in states:
state_dict = state.dict
related_ident = tuple(
mapper._get_state_attr_by_column(
state,
state_dict,
lk,
passive=attributes.PASSIVE_NO_FETCH,
)
for lk in query_info.child_lookup_cols
)
# if the loaded parent objects do not have the foreign key
# to the related item loaded, then degrade into the joined
# version of selectinload
if attributes.PASSIVE_NO_RESULT in related_ident:
query_info = self._fallback_query_info
break
# organize states into lists keyed to particular foreign
# key values.
if None not in related_ident:
our_states[related_ident].append(
(state, state_dict, overwrite)
)
else:
# For FK values that have None, add them to a
# separate collection that will be populated separately
none_states.append((state, state_dict, overwrite))
# note the above conditional may have changed query_info
if not query_info.load_only_child:
our_states = [
(state.key[1], state, state.dict, overwrite)
for state, overwrite in states
]
pk_cols = query_info.pk_cols
in_expr = query_info.in_expr
if not query_info.load_with_join:
# in "omit join" mode, the primary key column and the
# "in" expression are in terms of the related entity. So
# if the related entity is polymorphic or otherwise aliased,
# we need to adapt our "pk_cols" and "in_expr" to that
# entity. in non-"omit join" mode, these are against the
            # parent entity and do not need adaptation.
insp = inspect(effective_entity)
if insp.is_aliased_class:
pk_cols = [insp._adapt_element(col) for col in pk_cols]
in_expr = insp._adapt_element(in_expr)
q = self._bakery(
lambda session: session.query(
query.Bundle("pk", *pk_cols), effective_entity
),
self,
)
if not query_info.load_with_join:
            # the Bundle we have in the "omit_join" case is against raw,
            # non-
# annotated columns, so to ensure the Query knows its primary
# entity, we add it explicitly. If we made the Bundle against
# annotated columns, we hit a performance issue in this specific
# case, which is detailed in issue #4347.
q.add_criteria(lambda q: q.select_from(effective_entity))
else:
# in the non-omit_join case, the Bundle is against the annotated/
# mapped column of the parent entity, but the #4347 issue does not
# occur in this case.
pa = self._parent_alias
q.add_criteria(
lambda q: q.select_from(pa).join(
getattr(pa, self.parent_property.key).of_type(
effective_entity
)
)
)
        # the same expanding IN criteria applies whether we are loading
        # by child identity or by parent identity
        q.add_criteria(
            lambda q: q.filter(
                in_expr.in_(sql.bindparam("primary_keys", expanding=True))
            )
        )
orig_query = context.query
q._add_lazyload_options(
orig_query._with_options, path[self.parent_property]
)
if orig_query._populate_existing:
q.add_criteria(lambda q: q.populate_existing())
if self.parent_property.order_by:
if not query_info.load_with_join:
eager_order_by = self.parent_property.order_by
if insp.is_aliased_class:
eager_order_by = [
insp._adapt_element(elem) for elem in eager_order_by
]
q.add_criteria(lambda q: q.order_by(*eager_order_by))
else:
def _setup_outermost_orderby(q):
# imitate the same method that subquery eager loading uses,
# looking for the adapted "secondary" table
eagerjoin = q._from_obj[0]
return q.order_by(
*eagerjoin._target_adapter.copy_and_process(
util.to_list(self.parent_property.order_by)
)
)
q.add_criteria(_setup_outermost_orderby)
if query_info.load_only_child:
self._load_via_child(
our_states, none_states, query_info, q, context
)
else:
self._load_via_parent(our_states, query_info, q, context)
def _load_via_child(self, our_states, none_states, query_info, q, context):
uselist = self.uselist
# this sort is really for the benefit of the unit tests
our_keys = sorted(our_states)
while our_keys:
chunk = our_keys[0 : self._chunksize]
our_keys = our_keys[self._chunksize :]
data = {
k: v
for k, v in q(context.session).params(
primary_keys=[
key[0] if query_info.zero_idx else key for key in chunk
]
)
}
for key in chunk:
# for a real foreign key and no concurrent changes to the
# DB while running this method, "key" is always present in
# data. However, for primaryjoins without real foreign keys
# a non-None primaryjoin condition may still refer to no
# related object.
related_obj = data.get(key, None)
for state, dict_, overwrite in our_states[key]:
if not overwrite and self.key in dict_:
continue
state.get_impl(self.key).set_committed_value(
state,
dict_,
related_obj if not uselist else [related_obj],
)
# populate none states with empty value / collection
for state, dict_, overwrite in none_states:
if not overwrite and self.key in dict_:
continue
# note it's OK if this is a uselist=True attribute, the empty
# collection will be populated
state.get_impl(self.key).set_committed_value(state, dict_, None)
def _load_via_parent(self, our_states, query_info, q, context):
uselist = self.uselist
_empty_result = () if uselist else None
while our_states:
chunk = our_states[0 : self._chunksize]
our_states = our_states[self._chunksize :]
primary_keys = [
key[0] if query_info.zero_idx else key
for key, state, state_dict, overwrite in chunk
]
data = collections.defaultdict(list)
for k, v in itertools.groupby(
q(context.session).params(primary_keys=primary_keys),
lambda x: x[0],
):
data[k].extend(vv[1] for vv in v)
for key, state, state_dict, overwrite in chunk:
if not overwrite and self.key in state_dict:
continue
collection = data.get(key, _empty_result)
if not uselist and collection:
if len(collection) > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded "
"attribute '%s' " % self
)
state.get_impl(self.key).set_committed_value(
state, state_dict, collection[0]
)
else:
# note that empty tuple set on uselist=False sets the
# value to None
state.get_impl(self.key).set_committed_value(
state, state_dict, collection
)
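    # illustrative sketch of the chunking above (hypothetical sizes):
    # with 1200 parent states and _chunksize = 500, _load_via_parent
    # emits three IN-based statements, e.g.
    #
    #     SELECT ... WHERE parent_id IN (<500 keys>)
    #     SELECT ... WHERE parent_id IN (<500 keys>)
    #     SELECT ... WHERE parent_id IN (<200 keys>)
    #
    # keeping each expanding bindparam at a size databases handle well.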
def single_parent_validator(desc, prop):
def _do_check(state, value, oldvalue, initiator):
if value is not None and initiator.key == prop.key:
hasparent = initiator.hasparent(attributes.instance_state(value))
if hasparent and oldvalue is not value:
raise sa_exc.InvalidRequestError(
"Instance %s is already associated with an instance "
"of %s via its %s attribute, and is only allowed a "
"single parent."
% (orm_util.instance_str(value), state.class_, prop),
code="bbf1",
)
return value
def append(state, value, initiator):
return _do_check(state, value, None, initiator)
def set_(state, value, oldvalue, initiator):
return _do_check(state, value, oldvalue, initiator)
event.listen(
desc, "append", append, raw=True, retval=True, active_history=True
)
event.listen(desc, "set", set_, raw=True, retval=True, active_history=True)
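# illustrative sketch (hypothetical mapping): single_parent_validator is
# attached when a relationship specifies single_parent=True, e.g.
#
#     children = relationship(
#         Child, cascade="all, delete-orphan", single_parent=True
#     )
#
# after which re-parenting a Child that already belongs to another
# parent raises the InvalidRequestError above (error code bbf1).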
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/strategy_options.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
"""
from . import util as orm_util
from .attributes import QueryableAttribute
from .base import _class_to_mapper
from .base import _is_aliased_class
from .base import _is_mapped_class
from .base import InspectionAttr
from .interfaces import MapperOption
from .interfaces import PropComparator
from .path_registry import _DEFAULT_TOKEN
from .path_registry import _WILDCARD_TOKEN
from .path_registry import PathRegistry
from .path_registry import TokenRegistry
from .util import _orm_full_deannotate
from .. import exc as sa_exc
from .. import inspect
from .. import util
from ..sql import expression as sql_expr
from ..sql.base import _generative
from ..sql.base import Generative
class Load(Generative, MapperOption):
"""Represents loader options which modify the state of a
:class:`_query.Query` in order to affect how various mapped attributes are
loaded.
The :class:`_orm.Load` object is in most cases used implicitly behind the
scenes when one makes use of a query option like :func:`_orm.joinedload`,
:func:`.defer`, or similar. However, the :class:`_orm.Load` object
can also be used directly, and in some cases can be useful.
To use :class:`_orm.Load` directly, instantiate it with the target mapped
class as the argument. This style of usage is
useful when dealing with a :class:`_query.Query`
that has multiple entities::
myopt = Load(MyClass).joinedload("widgets")
The above ``myopt`` can now be used with :meth:`_query.Query.options`,
where it
will only take effect for the ``MyClass`` entity::
session.query(MyClass, MyOtherClass).options(myopt)
One case where :class:`_orm.Load`
is useful as public API is when specifying
"wildcard" options that only take effect for a certain class::
session.query(Order).options(Load(Order).lazyload('*'))
Above, all relationships on ``Order`` will be lazy-loaded, but other
attributes on those descendant objects will load using their normal
loader strategy.
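    The per-entity wildcard may also be combined with attribute-specific
    options; for example, given a hypothetical ``Order.items``
    relationship::

        session.query(Order).options(
            Load(Order).lazyload('*'),
            Load(Order).joinedload('items')
        )

    Above, everything on ``Order`` lazy loads except ``items``, which is
    joined eager loaded.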
.. seealso::
:ref:`deferred_options`
:ref:`deferred_loading_w_multiple`
:ref:`relationship_loader_options`
"""
def __init__(self, entity):
insp = inspect(entity)
self.path = insp._path_registry
# note that this .context is shared among all descendant
# Load objects
self.context = util.OrderedDict()
self.local_opts = {}
self.is_class_strategy = False
@classmethod
def for_existing_path(cls, path):
load = cls.__new__(cls)
load.path = path
load.context = {}
load.local_opts = {}
load._of_type = None
return load
def _generate_cache_key(self, path):
if path.path[0].is_aliased_class:
return False
serialized = []
for (key, loader_path), obj in self.context.items():
if key != "loader":
continue
for local_elem, obj_elem in zip(self.path.path, loader_path):
if local_elem is not obj_elem:
break
else:
endpoint = obj._of_type or obj.path.path[-1]
chopped = self._chop_path(loader_path, path)
if (
# means loader_path and path are unrelated,
# this does not need to be part of a cache key
chopped
is None
) or (
# means no additional path with loader_path + path
# and the endpoint isn't using of_type so isn't modified
# into an alias or other unsafe entity
not chopped
and not obj._of_type
):
continue
serialized_path = []
for token in chopped:
if isinstance(token, util.string_types):
serialized_path.append(token)
elif token.is_aliased_class:
return False
elif token.is_property:
serialized_path.append(token.key)
else:
assert token.is_mapper
serialized_path.append(token.class_)
if not serialized_path or endpoint != serialized_path[-1]:
if endpoint.is_mapper:
serialized_path.append(endpoint.class_)
elif endpoint.is_aliased_class:
return False
serialized.append(
(
tuple(serialized_path)
+ (obj.strategy or ())
+ (
tuple(
[
(key, obj.local_opts[key])
for key in sorted(obj.local_opts)
]
)
if obj.local_opts
else ()
)
)
)
if not serialized:
return None
else:
return tuple(serialized)
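    # illustrative sketch (hypothetical classes): for
    # Load(Parent).joinedload("children"), the cache key produced above
    # resembles
    #
    #     (("children", Child, ("lazy", "joined")),)
    #
    # i.e. attribute keys and endpoint classes plus the sorted strategy
    # tuple; any aliased class in the path returns False, disabling
    # caching for the whole option.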
def _generate(self):
cloned = super(Load, self)._generate()
cloned.local_opts = {}
return cloned
is_opts_only = False
is_class_strategy = False
strategy = None
propagate_to_loaders = False
_of_type = None
def process_query(self, query):
self._process(query, True)
def process_query_conditionally(self, query):
self._process(query, False)
def _process(self, query, raiseerr):
current_path = query._current_path
if current_path:
for (token, start_path), loader in self.context.items():
chopped_start_path = self._chop_path(start_path, current_path)
if chopped_start_path is not None:
query._attributes[(token, chopped_start_path)] = loader
else:
query._attributes.update(self.context)
def _generate_path(
self, path, attr, for_strategy, wildcard_key, raiseerr=True
):
existing_of_type = self._of_type
self._of_type = None
if raiseerr and not path.has_entity:
if isinstance(path, TokenRegistry):
raise sa_exc.ArgumentError(
"Wildcard token cannot be followed by another entity"
)
else:
raise sa_exc.ArgumentError(
"Mapped attribute '%s' does not "
"refer to a mapped entity" % (path.prop,)
)
if isinstance(attr, util.string_types):
default_token = attr.endswith(_DEFAULT_TOKEN)
if attr.endswith(_WILDCARD_TOKEN) or default_token:
if default_token:
self.propagate_to_loaders = False
if wildcard_key:
attr = "%s:%s" % (wildcard_key, attr)
# TODO: AliasedInsp inside the path for of_type is not
# working for a with_polymorphic entity because the
# relationship loaders don't render the with_poly into the
# path. See #4469 which will try to improve this
if existing_of_type and not existing_of_type.is_aliased_class:
path = path.parent[existing_of_type]
path = path.token(attr)
self.path = path
return path
if existing_of_type:
ent = inspect(existing_of_type)
else:
ent = path.entity
try:
# use getattr on the class to work around
# synonyms, hybrids, etc.
attr = getattr(ent.class_, attr)
except AttributeError as err:
if raiseerr:
util.raise_(
sa_exc.ArgumentError(
'Can\'t find property named "%s" on '
"%s in this Query." % (attr, ent)
),
replace_context=err,
)
else:
return None
else:
attr = found_property = attr.property
path = path[attr]
elif _is_mapped_class(attr):
# TODO: this does not appear to be a valid codepath. "attr"
# would never be a mapper. This block is present in 1.2
# as well however does not seem to be accessed in any tests.
if not orm_util._entity_corresponds_to_use_path_impl(
attr.parent, path[-1]
):
if raiseerr:
raise sa_exc.ArgumentError(
"Attribute '%s' does not "
"link from element '%s'" % (attr, path.entity)
)
else:
return None
else:
prop = found_property = attr.property
if not orm_util._entity_corresponds_to_use_path_impl(
attr.parent, path[-1]
):
if raiseerr:
raise sa_exc.ArgumentError(
'Attribute "%s" does not '
'link from element "%s".%s'
% (
attr,
path.entity,
(
" Did you mean to use "
"%s.of_type(%s)?"
% (path[-2], attr.class_.__name__)
if len(path) > 1
and path.entity.is_mapper
and attr.parent.is_aliased_class
else ""
),
)
)
else:
return None
if getattr(attr, "_of_type", None):
ac = attr._of_type
ext_info = of_type_info = inspect(ac)
existing = path.entity_path[prop].get(
self.context, "path_with_polymorphic"
)
if not ext_info.is_aliased_class:
ac = orm_util.with_polymorphic(
ext_info.mapper.base_mapper,
ext_info.mapper,
aliased=True,
_use_mapper_path=True,
_existing_alias=inspect(existing)
if existing is not None
else None,
)
ext_info = inspect(ac)
path.entity_path[prop].set(
self.context, "path_with_polymorphic", ac
)
path = path[prop][ext_info]
self._of_type = of_type_info
else:
path = path[prop]
if for_strategy is not None:
found_property._get_strategy(for_strategy)
if path.has_entity:
path = path.entity_path
self.path = path
return path
def __str__(self):
return "Load(strategy=%r)" % (self.strategy,)
def _coerce_strat(self, strategy):
if strategy is not None:
strategy = tuple(sorted(strategy.items()))
return strategy
def _apply_to_parent(self, parent, applied, bound):
raise NotImplementedError(
"Only 'unbound' loader options may be used with the "
"Load.options() method"
)
@_generative
def options(self, *opts):
r"""Apply a series of options as sub-options to this
:class:`_orm.Load`
object.
E.g.::
query = session.query(Author)
query = query.options(
joinedload(Author.book).options(
load_only("summary", "excerpt"),
joinedload(Book.citations).options(
joinedload(Citation.author)
)
)
)
:param \*opts: A series of loader option objects (ultimately
:class:`_orm.Load` objects) which should be applied to the path
specified by this :class:`_orm.Load` object.
.. versionadded:: 1.3.6
.. seealso::
:func:`.defaultload`
:ref:`relationship_loader_options`
:ref:`deferred_loading_w_multiple`
"""
apply_cache = {}
bound = not isinstance(self, _UnboundLoad)
if bound:
raise NotImplementedError(
"The options() method is currently only supported "
"for 'unbound' loader options"
)
for opt in opts:
opt._apply_to_parent(self, apply_cache, bound)
@_generative
def set_relationship_strategy(
self, attr, strategy, propagate_to_loaders=True
):
strategy = self._coerce_strat(strategy)
self.propagate_to_loaders = propagate_to_loaders
cloned = self._clone_for_bind_strategy(attr, strategy, "relationship")
self.path = cloned.path
self._of_type = cloned._of_type
cloned.is_class_strategy = self.is_class_strategy = False
self.propagate_to_loaders = cloned.propagate_to_loaders
@_generative
def set_column_strategy(self, attrs, strategy, opts=None, opts_only=False):
strategy = self._coerce_strat(strategy)
self.is_class_strategy = False
for attr in attrs:
cloned = self._clone_for_bind_strategy(
attr, strategy, "column", opts_only=opts_only, opts=opts
)
cloned.propagate_to_loaders = True
@_generative
def set_generic_strategy(self, attrs, strategy):
strategy = self._coerce_strat(strategy)
for attr in attrs:
cloned = self._clone_for_bind_strategy(attr, strategy, None)
cloned.propagate_to_loaders = True
@_generative
def set_class_strategy(self, strategy, opts):
strategy = self._coerce_strat(strategy)
cloned = self._clone_for_bind_strategy(None, strategy, None)
cloned.is_class_strategy = True
cloned.propagate_to_loaders = True
cloned.local_opts.update(opts)
def _clone_for_bind_strategy(
self, attr, strategy, wildcard_key, opts_only=False, opts=None
):
"""Create an anonymous clone of the Load/_UnboundLoad that is suitable
to be placed in the context / _to_bind collection of this Load
object. The clone will then lose references to context/_to_bind
in order to not create reference cycles.
"""
cloned = self._generate()
cloned._generate_path(self.path, attr, strategy, wildcard_key)
cloned.strategy = strategy
cloned.local_opts = self.local_opts
if opts:
cloned.local_opts.update(opts)
if opts_only:
cloned.is_opts_only = True
if strategy or cloned.is_opts_only:
cloned._set_path_strategy()
return cloned
def _set_for_path(self, context, path, replace=True, merge_opts=False):
if merge_opts or not replace:
existing = path.get(self.context, "loader")
if existing:
if merge_opts:
existing.local_opts.update(self.local_opts)
else:
path.set(context, "loader", self)
else:
existing = path.get(self.context, "loader")
path.set(context, "loader", self)
if existing and existing.is_opts_only:
self.local_opts.update(existing.local_opts)
def _set_path_strategy(self):
if not self.is_class_strategy and self.path.has_entity:
effective_path = self.path.parent
else:
effective_path = self.path
if effective_path.is_token:
for path in effective_path.generate_for_superclasses():
self._set_for_path(
self.context,
path,
replace=True,
merge_opts=self.is_opts_only,
)
else:
self._set_for_path(
self.context,
effective_path,
replace=True,
merge_opts=self.is_opts_only,
)
# remove cycles; _set_path_strategy is always invoked on an
# anonymous clone of the Load / UnboundLoad object since #5056
self.context = None
def __getstate__(self):
d = self.__dict__.copy()
if d["context"] is not None:
d["context"] = PathRegistry.serialize_context_dict(
d["context"], ("loader",)
)
d["path"] = self.path.serialize()
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.path = PathRegistry.deserialize(self.path)
if self.context is not None:
self.context = PathRegistry.deserialize_context_dict(self.context)
def _chop_path(self, to_chop, path):
i = -1
for i, (c_token, p_token) in enumerate(zip(to_chop, path.path)):
if isinstance(c_token, util.string_types):
# TODO: this is approximated from the _UnboundLoad
# version and probably has issues, not fully covered.
if i == 0 and c_token.endswith(":" + _DEFAULT_TOKEN):
return to_chop
elif (
c_token != "relationship:%s" % (_WILDCARD_TOKEN,)
and c_token != p_token.key
):
return None
if c_token is p_token:
continue
elif (
isinstance(c_token, InspectionAttr)
and c_token.is_mapper
and p_token.is_mapper
and c_token.isa(p_token)
):
continue
else:
return None
return to_chop[i + 1 :]
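    # illustrative sketch (hypothetical User/Address mapping, with class
    # and attribute names standing in for mapper/property objects):
    #
    #     to_chop = (User, "addresses", Address, "email_address")
    #     path    = (User, "addresses")
    #
    # _chop_path returns (Address, "email_address"), the remainder of
    # the loader path past the query's current path; None means the
    # paths diverge and the option does not apply at this position.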
class _UnboundLoad(Load):
"""Represent a loader option that isn't tied to a root entity.
The loader option will produce an entity-linked :class:`_orm.Load`
object when it is passed :meth:`_query.Query.options`.
This provides compatibility with the traditional system
of freestanding options, e.g. ``joinedload('x.y.z')``.
"""
def __init__(self):
self.path = ()
self._to_bind = []
self.local_opts = {}
_is_chain_link = False
def _generate_cache_key(self, path):
serialized = ()
for val in self._to_bind:
for local_elem, val_elem in zip(self.path, val.path):
if local_elem is not val_elem:
break
else:
opt = val._bind_loader([path.path[0]], None, None, False)
if opt:
c_key = opt._generate_cache_key(path)
if c_key is False:
return False
elif c_key:
serialized += c_key
if not serialized:
return None
else:
return serialized
def _set_path_strategy(self):
self._to_bind.append(self)
# remove cycles; _set_path_strategy is always invoked on an
# anonymous clone of the Load / UnboundLoad object since #5056
self._to_bind = None
def _apply_to_parent(self, parent, applied, bound, to_bind=None):
if self in applied:
return applied[self]
if to_bind is None:
to_bind = self._to_bind
cloned = self._generate()
applied[self] = cloned
cloned.strategy = self.strategy
if self.path:
attr = self.path[-1]
if isinstance(attr, util.string_types) and attr.endswith(
_DEFAULT_TOKEN
):
attr = attr.split(":")[0] + ":" + _WILDCARD_TOKEN
cloned._generate_path(
parent.path + self.path[0:-1], attr, self.strategy, None
)
# these assertions can go away once the "sub options" API is
# mature
assert cloned.propagate_to_loaders == self.propagate_to_loaders
assert cloned.is_class_strategy == self.is_class_strategy
assert cloned.is_opts_only == self.is_opts_only
new_to_bind = {
elem._apply_to_parent(parent, applied, bound, to_bind)
for elem in to_bind
}
cloned._to_bind = parent._to_bind
cloned._to_bind.extend(new_to_bind)
cloned.local_opts.update(self.local_opts)
return cloned
def _generate_path(self, path, attr, for_strategy, wildcard_key):
if (
wildcard_key
and isinstance(attr, util.string_types)
and attr in (_WILDCARD_TOKEN, _DEFAULT_TOKEN)
):
if attr == _DEFAULT_TOKEN:
self.propagate_to_loaders = False
attr = "%s:%s" % (wildcard_key, attr)
if path and _is_mapped_class(path[-1]) and not self.is_class_strategy:
path = path[0:-1]
if attr:
path = path + (attr,)
self.path = path
return path
def __getstate__(self):
d = self.__dict__.copy()
d["path"] = self._serialize_path(self.path, filter_aliased_class=True)
return d
def __setstate__(self, state):
ret = []
for key in state["path"]:
if isinstance(key, tuple):
if len(key) == 2:
# support legacy
cls, propkey = key
of_type = None
else:
cls, propkey, of_type = key
prop = getattr(cls, propkey)
if of_type:
prop = prop.of_type(of_type)
ret.append(prop)
else:
ret.append(key)
state["path"] = tuple(ret)
self.__dict__ = state
def _process(self, query, raiseerr):
dedupes = query._attributes["_unbound_load_dedupes"]
for val in self._to_bind:
if val not in dedupes:
dedupes.add(val)
val._bind_loader(
[ent.entity_zero for ent in query._mapper_entities],
query._current_path,
query._attributes,
raiseerr,
)
@classmethod
def _from_keys(cls, meth, keys, chained, kw):
opt = _UnboundLoad()
def _split_key(key):
if isinstance(key, util.string_types):
# coerce fooload('*') into "default loader strategy"
if key == _WILDCARD_TOKEN:
return (_DEFAULT_TOKEN,)
# coerce fooload(".*") into "wildcard on default entity"
elif key.startswith("." + _WILDCARD_TOKEN):
key = key[1:]
return key.split(".")
else:
return (key,)
all_tokens = [token for key in keys for token in _split_key(key)]
for token in all_tokens[0:-1]:
# set _is_chain_link first so that clones of the
# object also inherit this flag
opt._is_chain_link = True
if chained:
opt = meth(opt, token, **kw)
else:
opt = opt.defaultload(token)
opt = meth(opt, all_tokens[-1], **kw)
opt._is_chain_link = False
return opt
def _chop_path(self, to_chop, path):
i = -1
for i, (c_token, (p_entity, p_prop)) in enumerate(
zip(to_chop, path.pairs())
):
if isinstance(c_token, util.string_types):
if i == 0 and c_token.endswith(":" + _DEFAULT_TOKEN):
return to_chop
elif (
c_token != "relationship:%s" % (_WILDCARD_TOKEN,)
and c_token != p_prop.key
):
return None
elif isinstance(c_token, PropComparator):
if c_token.property is not p_prop or (
c_token._parententity is not p_entity
and (
not c_token._parententity.is_mapper
or not c_token._parententity.isa(p_entity)
)
):
return None
else:
i += 1
return to_chop[i:]
def _serialize_path(self, path, filter_aliased_class=False):
ret = []
for token in path:
if isinstance(token, QueryableAttribute):
if (
filter_aliased_class
and token._of_type
and inspect(token._of_type).is_aliased_class
):
ret.append((token._parentmapper.class_, token.key, None))
else:
ret.append(
(token._parentmapper.class_, token.key, token._of_type)
)
elif isinstance(token, PropComparator):
ret.append((token._parentmapper.class_, token.key, None))
else:
ret.append(token)
return ret
def _bind_loader(self, entities, current_path, context, raiseerr):
"""Convert from an _UnboundLoad() object into a Load() object.
The _UnboundLoad() uses an informal "path" and does not necessarily
refer to a lead entity as it may use string tokens. The Load()
on the other hand refers to a complete path. This method reconciles from a
given Query into a Load.
Example::
query = session.query(User).options(
joinedload("orders").joinedload("items"))
The above options will be an _UnboundLoad object along the lines
of (note this is not the exact API of _UnboundLoad)::
_UnboundLoad(
_to_bind=[
_UnboundLoad(["orders"], {"lazy": "joined"}),
_UnboundLoad(["orders", "items"], {"lazy": "joined"}),
]
)
After this method, we get something more like this (again this is
not exact API)::
Load(
User,
(User, User.orders.property))
Load(
User,
(User, User.orders.property, Order, Order.items.property))
"""
start_path = self.path
if self.is_class_strategy and current_path:
start_path += (entities[0],)
# _current_path implies we're in a
# secondary load with an existing path
if current_path:
start_path = self._chop_path(start_path, current_path)
if not start_path:
return None
# look at the first token and try to locate within the Query
# what entity we are referring towards.
token = start_path[0]
if isinstance(token, util.string_types):
entity = self._find_entity_basestring(entities, token, raiseerr)
elif isinstance(token, PropComparator):
prop = token.property
entity = self._find_entity_prop_comparator(
entities, prop, token._parententity, raiseerr
)
elif self.is_class_strategy and _is_mapped_class(token):
entity = inspect(token)
if entity not in entities:
entity = None
else:
raise sa_exc.ArgumentError(
"mapper option expects " "string key or list of attributes"
)
if not entity:
return
path_element = entity
# transfer our entity-less state into a Load() object
# with a real entity path. Start with the lead entity
# we just located, then go through the rest of our path
# tokens and populate into the Load().
loader = Load(path_element)
if context is not None:
loader.context = context
else:
context = loader.context
loader.strategy = self.strategy
loader.is_opts_only = self.is_opts_only
loader.is_class_strategy = self.is_class_strategy
path = loader.path
if not loader.is_class_strategy:
for idx, token in enumerate(start_path):
if not loader._generate_path(
loader.path,
token,
self.strategy if idx == len(start_path) - 1 else None,
None,
raiseerr,
):
return
loader.local_opts.update(self.local_opts)
if not loader.is_class_strategy and loader.path.has_entity:
effective_path = loader.path.parent
else:
effective_path = loader.path
# prioritize "first class" options over those
# that were "links in the chain", e.g. "x" and "y" in
# someload("x.y.z") versus someload("x") / someload("x.y")
if effective_path.is_token:
for path in effective_path.generate_for_superclasses():
loader._set_for_path(
context,
path,
replace=not self._is_chain_link,
merge_opts=self.is_opts_only,
)
else:
loader._set_for_path(
context,
effective_path,
replace=not self._is_chain_link,
merge_opts=self.is_opts_only,
)
return loader
def _find_entity_prop_comparator(self, entities, prop, mapper, raiseerr):
if _is_aliased_class(mapper):
searchfor = mapper
else:
searchfor = _class_to_mapper(mapper)
for ent in entities:
if orm_util._entity_corresponds_to(ent, searchfor):
return ent
else:
if raiseerr:
if not list(entities):
raise sa_exc.ArgumentError(
"Query has only expression-based entities, "
'which do not apply to %s "%s"'
% (util.clsname_as_plain_name(type(prop)), prop)
)
else:
raise sa_exc.ArgumentError(
'Mapped attribute "%s" does not apply to any of the '
"root entities in this query, e.g. %s. Please "
"specify the full path "
"from one of the root entities to the target "
"attribute. "
% (prop, ", ".join(str(x) for x in entities))
)
else:
return None
def _find_entity_basestring(self, entities, token, raiseerr):
if token.endswith(":" + _WILDCARD_TOKEN):
if len(list(entities)) != 1:
if raiseerr:
raise sa_exc.ArgumentError(
"Can't apply wildcard ('*') or load_only() "
"loader option to multiple entities %s. Specify "
"loader options for each entity individually, such "
"as %s."
% (
", ".join(str(ent) for ent in entities),
", ".join(
"Load(%s).some_option('*')" % ent
for ent in entities
),
)
)
elif token.endswith(_DEFAULT_TOKEN):
raiseerr = False
for ent in entities:
# return only the first _MapperEntity when searching
# based on string prop name. Ideally object
# attributes are used to specify more exactly.
return ent
else:
if raiseerr:
raise sa_exc.ArgumentError(
"Query has only expression-based entities - "
'can\'t find property named "%s".' % (token,)
)
else:
return None
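# --- illustrative sketch, not part of the vendored library -----------------
# How the string-token form handled by _UnboundLoad above reaches user code:
# joinedload() (defined below) builds an _UnboundLoad chain, and the chain is
# resolved into entity-bound Load objects via _bind_loader() when the Query
# runs.  "orders"/"items" are hypothetical relationship names; this helper is
# never called by the module.
def _example_unbound_tokens(session, User):  # noqa
    return session.query(User).options(
        joinedload("orders").joinedload("items")
    )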
class loader_option(object):
def __init__(self):
pass
def __call__(self, fn):
self.name = name = fn.__name__
self.fn = fn
if hasattr(Load, name):
raise TypeError("Load class already has a %s method." % (name))
setattr(Load, name, fn)
return self
def _add_unbound_fn(self, fn):
self._unbound_fn = fn
fn_doc = self.fn.__doc__
self.fn.__doc__ = """Produce a new :class:`_orm.Load` object with the
:func:`_orm.%(name)s` option applied.
See :func:`_orm.%(name)s` for usage examples.
""" % {
"name": self.name
}
fn.__doc__ = fn_doc
return self
def _add_unbound_all_fn(self, fn):
fn.__doc__ = """Produce a standalone "all" option for
:func:`_orm.%(name)s`.
.. deprecated:: 0.9
The :func:`_orm.%(name)s_all` function is deprecated, and will be removed
in a future release. Please use method chaining with
:func:`_orm.%(name)s` instead, as in::
session.query(MyClass).options(
%(name)s("someattribute").%(name)s("anotherattribute")
)
""" % {
"name": self.name
}
fn = util.deprecated(
"0.9",
"The :func:`.%(name)s_all` function is deprecated, and will be "
"removed in a future release. Please use method chaining with "
":func:`.%(name)s` instead" % {"name": self.name},
add_deprecation_to_docstring=False,
)(fn)
self._unbound_all_fn = fn
return self
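# --- illustrative note, not part of the vendored library -------------------
# Schematic of the wiring performed by the loader_option decorator above for
# each of the definitions that follow:
#
#     @loader_option()
#     def some_option(loadopt, attr):     # installed as Load.some_option
#         ...
#
#     @some_option._add_unbound_fn
#     def some_option(*keys):             # becomes the standalone function
#         ...                             # exported as sqlalchemy.orm.some_option
#
# "some_option" is a placeholder name, not an actual option.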
@loader_option()
def contains_eager(loadopt, attr, alias=None):
r"""Indicate that the given attribute should be eagerly loaded from
columns stated manually in the query.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
The option is used in conjunction with an explicit join that loads
the desired rows, i.e.::
sess.query(Order).\
join(Order.user).\
options(contains_eager(Order.user))
The above query would join from the ``Order`` entity to its related
``User`` entity, and the returned ``Order`` objects would have the
``Order.user`` attribute pre-populated.
When making use of aliases with :func:`.contains_eager`, the path
should be specified using :meth:`.PropComparator.of_type`::
user_alias = aliased(User)
sess.query(Order).\
join((user_alias, Order.user)).\
options(contains_eager(Order.user.of_type(user_alias)))
:meth:`.PropComparator.of_type` is also used to indicate a join
against specific subclasses of an inheriting mapper, or
of a :func:`.with_polymorphic` construct::
# employees of a particular subtype
sess.query(Company).\
outerjoin(Company.employees.of_type(Manager)).\
options(
contains_eager(
Company.employees.of_type(Manager),
)
)
# employees of multiple subtypes
wp = with_polymorphic(Employee, [Manager, Engineer])
sess.query(Company).\
outerjoin(Company.employees.of_type(wp)).\
options(
contains_eager(
Company.employees.of_type(wp),
)
)
The :paramref:`.contains_eager.alias` parameter is used for a similar
purpose; however, the :meth:`.PropComparator.of_type` approach should work
in all cases and is more effective and explicit.
.. seealso::
:ref:`loading_toplevel`
:ref:`contains_eager`
"""
if alias is not None:
if not isinstance(alias, str):
info = inspect(alias)
alias = info.selectable
elif getattr(attr, "_of_type", None):
ot = inspect(attr._of_type)
alias = ot.selectable
cloned = loadopt.set_relationship_strategy(
attr, {"lazy": "joined"}, propagate_to_loaders=False
)
cloned.local_opts["eager_from_alias"] = alias
return cloned
@contains_eager._add_unbound_fn
def contains_eager(*keys, **kw):
return _UnboundLoad()._from_keys(
_UnboundLoad.contains_eager, keys, True, kw
)
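# --- illustrative sketch, not part of the vendored library -----------------
# A usage sketch for contains_eager() against an aliased entity, mirroring
# the docstring above.  Order/User are hypothetical mapped classes and the
# helper is never called by the module.
def _example_contains_eager(session, Order, User):  # noqa
    from sqlalchemy.orm import aliased

    user_alias = aliased(User)
    # the explicit join supplies the rows; contains_eager() routes the
    # aliased columns into Order.user instead of emitting a separate load
    return (
        session.query(Order)
        .join(user_alias, Order.user)
        .options(contains_eager(Order.user.of_type(user_alias)))
    )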
@loader_option()
def load_only(loadopt, *attrs):
"""Indicate that for a particular entity, only the given list
of column-based attribute names should be loaded; all others will be
deferred.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
Example - given a class ``User``, load only the ``name`` and ``fullname``
attributes::
session.query(User).options(load_only("name", "fullname"))
Example - given a relationship ``User.addresses -> Address``, specify
subquery loading for the ``User.addresses`` collection, but on each
``Address`` object load only the ``email_address`` attribute::
session.query(User).options(
subqueryload("addresses").load_only("email_address")
)
For a :class:`_query.Query` that has multiple entities,
the lead entity can be
specifically referred to using the :class:`_orm.Load` constructor::
session.query(User, Address).join(User.addresses).options(
Load(User).load_only("name", "fullname"),
Load(Address).load_only("email_address")
)
.. versionadded:: 0.9.0
"""
cloned = loadopt.set_column_strategy(
attrs, {"deferred": False, "instrument": True}
)
cloned.set_column_strategy(
"*", {"deferred": True, "instrument": True}, {"undefer_pks": True}
)
return cloned
@load_only._add_unbound_fn
def load_only(*attrs):
return _UnboundLoad().load_only(*attrs)
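# --- illustrative sketch, not part of the vendored library -----------------
# A usage sketch for load_only(); User is a hypothetical mapped class.
def _example_load_only(session, User):  # noqa
    # only "name" and "fullname" are loaded up front; other column
    # attributes (except primary keys) are deferred until first access
    return session.query(User).options(load_only("name", "fullname"))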
@loader_option()
def joinedload(loadopt, attr, innerjoin=None):
"""Indicate that the given attribute should be loaded using joined
eager loading.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
examples::
# joined-load the "orders" collection on "User"
query(User).options(joinedload(User.orders))
# joined-load Order.items and then Item.keywords
query(Order).options(
joinedload(Order.items).joinedload(Item.keywords))
# lazily load Order.items, but when Items are loaded,
# joined-load the keywords collection
query(Order).options(
lazyload(Order.items).joinedload(Item.keywords))
:param innerjoin: if ``True``, indicates that the joined eager load should
use an inner join instead of the default of left outer join::
query(Order).options(joinedload(Order.user, innerjoin=True))
In order to chain multiple eager joins together where some may be
OUTER and others INNER, right-nested joins are used to link them::
query(A).options(
joinedload(A.bs, innerjoin=False).
joinedload(B.cs, innerjoin=True)
)
The above query, linking A.bs via "outer" join and B.cs via "inner" join
would render the joins as "a LEFT OUTER JOIN (b JOIN c)". When using
older versions of SQLite (< 3.7.16), this form of JOIN is translated to
use full subqueries as this syntax is otherwise not directly supported.
The ``innerjoin`` flag can also be stated with the term ``"unnested"``.
This indicates that an INNER JOIN should be used, *unless* the join
is linked to a LEFT OUTER JOIN to the left, in which case it
will render as LEFT OUTER JOIN. For example, supposing ``A.bs``
is an outerjoin::
query(A).options(
joinedload(A.bs).
joinedload(B.cs, innerjoin="unnested")
)
The above join will render as "a LEFT OUTER JOIN b LEFT OUTER JOIN c",
rather than as "a LEFT OUTER JOIN (b JOIN c)".
.. note:: The "unnested" flag does **not** affect the JOIN rendered
from a many-to-many association table, e.g. a table configured
as :paramref:`_orm.relationship.secondary`, to the target table; for
correctness of results, these joins are always INNER and are
therefore right-nested if linked to an OUTER join.
.. versionchanged:: 1.0.0 ``innerjoin=True`` now implies
``innerjoin="nested"``, whereas in 0.9 it implied
``innerjoin="unnested"``. In order to achieve the pre-1.0 "unnested"
inner join behavior, use the value ``innerjoin="unnested"``.
See :ref:`migration_3008`.
.. note::
The joins produced by :func:`_orm.joinedload` are **anonymously
aliased**. The criteria by which the join proceeds cannot be
modified, nor can the :class:`_query.Query`
refer to these joins in any way,
including ordering. See :ref:`zen_of_eager_loading` for further
detail.
To produce a specific SQL JOIN which is explicitly available, use
:meth:`_query.Query.join`.
To combine explicit JOINs with eager loading
of collections, use :func:`_orm.contains_eager`; see
:ref:`contains_eager`.
.. seealso::
:ref:`loading_toplevel`
:ref:`joined_eager_loading`
"""
loader = loadopt.set_relationship_strategy(attr, {"lazy": "joined"})
if innerjoin is not None:
loader.local_opts["innerjoin"] = innerjoin
return loader
@joinedload._add_unbound_fn
def joinedload(*keys, **kw):
return _UnboundLoad._from_keys(_UnboundLoad.joinedload, keys, False, kw)
@joinedload._add_unbound_all_fn
def joinedload_all(*keys, **kw):
return _UnboundLoad._from_keys(_UnboundLoad.joinedload, keys, True, kw)
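# --- illustrative sketch, not part of the vendored library -----------------
# A usage sketch for joinedload() chaining with mixed join types, per the
# docstring above; A and B are hypothetical mapped classes.
def _example_joinedload_chain(session, A, B):  # noqa
    # A.bs via LEFT OUTER JOIN, with B.cs right-nested as an INNER JOIN,
    # rendering roughly "a LEFT OUTER JOIN (b JOIN c)"
    return session.query(A).options(
        joinedload(A.bs, innerjoin=False).joinedload(B.cs, innerjoin=True)
    )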
@loader_option()
def subqueryload(loadopt, attr):
"""Indicate that the given attribute should be loaded using
subquery eager loading.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
examples::
# subquery-load the "orders" collection on "User"
query(User).options(subqueryload(User.orders))
# subquery-load Order.items and then Item.keywords
query(Order).options(
subqueryload(Order.items).subqueryload(Item.keywords))
# lazily load Order.items, but when Items are loaded,
# subquery-load the keywords collection
query(Order).options(
lazyload(Order.items).subqueryload(Item.keywords))
.. seealso::
:ref:`loading_toplevel`
:ref:`subquery_eager_loading`
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "subquery"})
@subqueryload._add_unbound_fn
def subqueryload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.subqueryload, keys, False, {})
@subqueryload._add_unbound_all_fn
def subqueryload_all(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.subqueryload, keys, True, {})
@loader_option()
def selectinload(loadopt, attr):
"""Indicate that the given attribute should be loaded using
SELECT IN eager loading.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
examples::
# selectin-load the "orders" collection on "User"
query(User).options(selectinload(User.orders))
# selectin-load Order.items and then Item.keywords
query(Order).options(
selectinload(Order.items).selectinload(Item.keywords))
# lazily load Order.items, but when Items are loaded,
# selectin-load the keywords collection
query(Order).options(
lazyload(Order.items).selectinload(Item.keywords))
.. versionadded:: 1.2
.. seealso::
:ref:`loading_toplevel`
:ref:`selectin_eager_loading`
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "selectin"})
@selectinload._add_unbound_fn
def selectinload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.selectinload, keys, False, {})
@selectinload._add_unbound_all_fn
def selectinload_all(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.selectinload, keys, True, {})
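# --- illustrative sketch, not part of the vendored library -----------------
# A usage sketch for selectinload(); User is a hypothetical mapped class.
# Compared to subqueryload() above, the second query uses a simple
# "WHERE pk IN (...)" against the parent primary keys.
def _example_selectinload(session, User):  # noqa
    return session.query(User).options(selectinload(User.orders))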
@loader_option()
def lazyload(loadopt, attr):
"""Indicate that the given attribute should be loaded using "lazy"
loading.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
.. seealso::
:ref:`loading_toplevel`
:ref:`lazy_loading`
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "select"})
@lazyload._add_unbound_fn
def lazyload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.lazyload, keys, False, {})
@lazyload._add_unbound_all_fn
def lazyload_all(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.lazyload, keys, True, {})
@loader_option()
def immediateload(loadopt, attr):
"""Indicate that the given attribute should be loaded using
an immediate load with a per-attribute SELECT statement.
The :func:`.immediateload` option is superseded in general
by the :func:`.selectinload` option, which performs the same task
more efficiently by emitting a SELECT for all loaded objects.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
.. seealso::
:ref:`loading_toplevel`
:ref:`selectin_eager_loading`
"""
loader = loadopt.set_relationship_strategy(attr, {"lazy": "immediate"})
return loader
@immediateload._add_unbound_fn
def immediateload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.immediateload, keys, False, {})
@loader_option()
def noload(loadopt, attr):
"""Indicate that the given relationship attribute should remain unloaded.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
:func:`_orm.noload` applies to :func:`_orm.relationship` attributes; for
column-based attributes, see :func:`_orm.defer`.
.. seealso::
:ref:`loading_toplevel`
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "noload"})
@noload._add_unbound_fn
def noload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.noload, keys, False, {})
@loader_option()
def raiseload(loadopt, attr, sql_only=False):
"""Indicate that the given relationship attribute should disallow lazy loads.
A relationship attribute configured with :func:`_orm.raiseload` will
raise an :exc:`~sqlalchemy.exc.InvalidRequestError` upon access. The
typical way this is useful is when an application is attempting to ensure
that all relationship attributes that are accessed in a particular context
would have been already loaded via eager loading. Instead of having
to read through SQL logs to ensure lazy loads aren't occurring, this
strategy will cause them to raise immediately.
:param sql_only: if True, raise only if the lazy load would emit SQL,
but not if it is only checking the identity map, or determining that
the related value should just be None due to missing keys. When False,
the strategy will raise for all varieties of lazyload.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
:func:`_orm.raiseload` applies to :func:`_orm.relationship`
attributes only.
.. versionadded:: 1.1
.. seealso::
:ref:`loading_toplevel`
:ref:`prevent_lazy_with_raiseload`
"""
return loadopt.set_relationship_strategy(
attr, {"lazy": "raise_on_sql" if sql_only else "raise"}
)
@raiseload._add_unbound_fn
def raiseload(*keys, **kw):
return _UnboundLoad._from_keys(_UnboundLoad.raiseload, keys, False, kw)
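# --- illustrative sketch, not part of the vendored library -----------------
# A usage sketch for raiseload(); User is a hypothetical mapped class.
def _example_raiseload(session, User):  # noqa
    # any lazy load of User.addresses raises InvalidRequestError; with
    # sql_only=True, only loads that would actually emit SQL raise
    return session.query(User).options(
        raiseload(User.addresses, sql_only=True)
    )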
@loader_option()
def defaultload(loadopt, attr):
"""Indicate an attribute should load using its default loader style.
This method is used to link to other loader options further into
a chain of attributes without altering the loader style of the links
along the chain. For example, to set joined eager loading for an
element of an element::
session.query(MyClass).options(
defaultload(MyClass.someattribute).
joinedload(MyOtherClass.someotherattribute)
)
:func:`.defaultload` is also useful for setting column-level options
on a related class, namely that of :func:`.defer` and :func:`.undefer`::
session.query(MyClass).options(
defaultload(MyClass.someattribute).
defer("some_column").
undefer("some_other_column")
)
.. seealso::
:meth:`_orm.Load.options` - allows for complex hierarchical
loader option structures with less verbosity than with individual
:func:`.defaultload` directives.
:ref:`relationship_loader_options`
:ref:`deferred_loading_w_multiple`
"""
return loadopt.set_relationship_strategy(attr, None)
@defaultload._add_unbound_fn
def defaultload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.defaultload, keys, False, {})
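# --- illustrative sketch, not part of the vendored library -----------------
# A usage sketch for defaultload() as a pure chaining link; MyClass and the
# attribute/column names are hypothetical.
def _example_defaultload(session, MyClass):  # noqa
    # leave MyClass.someattribute on its configured loader style, but
    # defer a column on the related class
    return session.query(MyClass).options(
        defaultload("someattribute").defer("some_column")
    )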
@loader_option()
def defer(loadopt, key):
r"""Indicate that the given column-oriented attribute should be deferred,
e.g. not loaded until accessed.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
e.g.::
from sqlalchemy.orm import defer
session.query(MyClass).options(
defer("attribute_one"),
defer("attribute_two"))
session.query(MyClass).options(
defer(MyClass.attribute_one),
defer(MyClass.attribute_two))
To specify a deferred load of an attribute on a related class,
the path can be specified one token at a time, specifying the loading
style for each link along the chain. To leave the loading style
for a link unchanged, use :func:`_orm.defaultload`::
session.query(MyClass).options(defaultload("someattr").defer("some_column"))
A :class:`_orm.Load` object that is present on a certain path can have
:meth:`_orm.Load.defer` called multiple times,
each will operate on the same
parent entity::
session.query(MyClass).options(
defaultload("someattr").
defer("some_column").
defer("some_other_column").
defer("another_column")
)
:param key: Attribute to be deferred.
:param \*addl_attrs: This option supports the old 0.8 style
of specifying a path as a series of attributes, which is now superseded
by the method-chained style.
.. deprecated:: 0.9 The \*addl_attrs on :func:`_orm.defer` is
deprecated and will be removed in a future release. Please
use method chaining in conjunction with defaultload() to
indicate a path.
.. seealso::
:ref:`deferred`
:func:`_orm.undefer`
"""
return loadopt.set_column_strategy(
(key,), {"deferred": True, "instrument": True}
)
@defer._add_unbound_fn
def defer(key, *addl_attrs):
if addl_attrs:
util.warn_deprecated(
"The *addl_attrs on orm.defer is deprecated. Please use "
"method chaining in conjunction with defaultload() to "
"indicate a path."
)
return _UnboundLoad._from_keys(
_UnboundLoad.defer, (key,) + addl_attrs, False, {}
)
@loader_option()
def undefer(loadopt, key):
r"""Indicate that the given column-oriented attribute should be undeferred,
e.g. specified within the SELECT statement of the entity as a whole.
The column being undeferred is typically set up on the mapping as a
:func:`.deferred` attribute.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
Examples::
# undefer two columns
session.query(MyClass).options(undefer("col1"), undefer("col2"))
# undefer all columns specific to a single class using Load + *
session.query(MyClass, MyOtherClass).options(
Load(MyClass).undefer("*"))
# undefer a column on a related object
session.query(MyClass).options(
defaultload(MyClass.items).undefer('text'))
:param key: Attribute to be undeferred.
:param \*addl_attrs: This option supports the old 0.8 style
of specifying a path as a series of attributes, which is now superseded
by the method-chained style.
.. deprecated:: 0.9 The \*addl_attrs on :func:`_orm.undefer` is
deprecated and will be removed in a future release. Please
use method chaining in conjunction with defaultload() to
indicate a path.
.. seealso::
:ref:`deferred`
:func:`_orm.defer`
:func:`_orm.undefer_group`
"""
return loadopt.set_column_strategy(
(key,), {"deferred": False, "instrument": True}
)
@undefer._add_unbound_fn
def undefer(key, *addl_attrs):
if addl_attrs:
util.warn_deprecated(
"The *addl_attrs on orm.undefer is deprecated. Please use "
"method chaining in conjunction with defaultload() to "
"indicate a path."
)
return _UnboundLoad._from_keys(
_UnboundLoad.undefer, (key,) + addl_attrs, False, {}
)
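# --- illustrative sketch, not part of the vendored library -----------------
# A combined usage sketch for defer() and undefer(); MyClass and the column
# names are hypothetical.
def _example_defer_undefer(session, MyClass):  # noqa
    # skip a large column for this query, while forcing a column that was
    # mapped with deferred() to load up front
    return session.query(MyClass).options(
        defer("big_blob"), undefer("summary")
    )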
@loader_option()
def undefer_group(loadopt, name):
"""Indicate that columns within the given deferred group name should be
undeferred.
The columns being undeferred are set up on the mapping as
:func:`.deferred` attributes and include a "group" name.
E.g::
session.query(MyClass).options(undefer_group("large_attrs"))
To undefer a group of attributes on a related entity, the path can be
spelled out using relationship loader options, such as
:func:`_orm.defaultload`::
session.query(MyClass).options(
defaultload("someattr").undefer_group("large_attrs"))
.. versionchanged:: 0.9.0 :func:`_orm.undefer_group` is now specific to a
particular entity load path.
.. seealso::
:ref:`deferred`
:func:`_orm.defer`
:func:`_orm.undefer`
"""
return loadopt.set_column_strategy(
"*", None, {"undefer_group_%s" % name: True}, opts_only=True
)
@undefer_group._add_unbound_fn
def undefer_group(name):
return _UnboundLoad().undefer_group(name)
@loader_option()
def with_expression(loadopt, key, expression):
r"""Apply an ad-hoc SQL expression to a "deferred expression" attribute.
This option is used in conjunction with the :func:`_orm.query_expression`
mapper-level construct that indicates an attribute which should be the
target of an ad-hoc SQL expression.
E.g.::
sess.query(SomeClass).options(
with_expression(SomeClass.x_y_expr, SomeClass.x + SomeClass.y)
)
.. versionadded:: 1.2
:param key: Attribute to be undeferred.
:param expr: SQL expression to be applied to the attribute.
.. seealso::
:ref:`mapper_querytime_expression`
"""
expression = sql_expr._labeled(_orm_full_deannotate(expression))
return loadopt.set_column_strategy(
(key,), {"query_expression": True}, opts={"expression": expression}
)
@with_expression._add_unbound_fn
def with_expression(key, expression):
return _UnboundLoad._from_keys(
_UnboundLoad.with_expression, (key,), False, {"expression": expression}
)
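# --- illustrative sketch, not part of the vendored library -----------------
# A usage sketch for with_expression(), pairing with a query_expression()
# mapped attribute; SomeClass and its attributes are hypothetical.
def _example_with_expression(session, SomeClass):  # noqa
    # populate the ad-hoc expression attribute for this query only
    return session.query(SomeClass).options(
        with_expression(SomeClass.x_y_expr, SomeClass.x + SomeClass.y)
    )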
@loader_option()
def selectin_polymorphic(loadopt, classes):
"""Indicate an eager load should take place for all attributes
specific to a subclass.
This uses an additional SELECT with IN against all matched primary
key values, and is the per-query analogue to the ``"selectin"``
setting on the :paramref:`.mapper.polymorphic_load` parameter.
.. versionadded:: 1.2
.. seealso::
:ref:`polymorphic_selectin`
"""
loadopt.set_class_strategy(
{"selectinload_polymorphic": True},
opts={
"entities": tuple(
sorted((inspect(cls) for cls in classes), key=id)
)
},
)
return loadopt
@selectin_polymorphic._add_unbound_fn
def selectin_polymorphic(base_cls, classes):
ul = _UnboundLoad()
ul.is_class_strategy = True
ul.path = (inspect(base_cls),)
ul.selectin_polymorphic(classes)
return ul
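# --- illustrative sketch, not part of the vendored library -----------------
# A usage sketch for selectin_polymorphic(); Employee/Manager/Engineer are
# hypothetical classes in an inheritance hierarchy.
def _example_selectin_polymorphic(session, Employee, Manager, Engineer):  # noqa
    # subclass-specific attributes load via one additional SELECT..IN per
    # subclass, instead of per-row lazy loads
    return session.query(Employee).options(
        selectin_polymorphic(Employee, [Manager, Engineer])
    )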
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/properties.py
|
# orm/properties.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""MapperProperty implementations.
This is a private module which defines the behavior of individual ORM-
mapped attributes.
"""
from __future__ import absolute_import
from . import attributes
from .interfaces import PropComparator
from .interfaces import StrategizedProperty
from .util import _orm_full_deannotate
from .. import log
from .. import util
from ..sql import expression
__all__ = ["ColumnProperty"]
@log.class_logger
class ColumnProperty(StrategizedProperty):
"""Describes an object attribute that corresponds to a table column.
Public constructor is the :func:`_orm.column_property` function.
"""
strategy_wildcard_key = "column"
__slots__ = (
"_orig_columns",
"columns",
"group",
"deferred",
"instrument",
"comparator_factory",
"descriptor",
"extension",
"active_history",
"expire_on_flush",
"info",
"doc",
"strategy_key",
"_creation_order",
"_is_polymorphic_discriminator",
"_mapped_by_synonym",
"_deferred_column_loader",
)
@util.deprecated_params(
extension=(
"0.7",
":class:`.AttributeExtension` is deprecated in favor of the "
":class:`.AttributeEvents` listener interface. The "
":paramref:`.column_property.extension` parameter will be "
"removed in a future release.",
)
)
def __init__(self, *columns, **kwargs):
r"""Provide a column-level property for use with a mapping.
Column-based properties can normally be applied to the mapper's
``properties`` dictionary using the :class:`_schema.Column`
element directly.
Use this function when the given column is not directly present within
the mapper's selectable; examples include SQL expressions, functions,
and scalar SELECT queries.
The :func:`_orm.column_property` function returns an instance of
:class:`.ColumnProperty`.
Columns that aren't present in the mapper's selectable won't be
persisted by the mapper and are effectively "read-only" attributes.
:param \*cols:
list of Column objects to be mapped.
:param active_history=False:
When ``True``, indicates that the "previous" value for a
scalar attribute should be loaded when replaced, if not
already loaded. Normally, history tracking logic for
simple non-primary-key scalar values only needs to be
aware of the "new" value in order to perform a flush. This
flag is available for applications that make use of
:func:`.attributes.get_history` or :meth:`.Session.is_modified`
which also need to know
the "previous" value of the attribute.
:param comparator_factory: a class which extends
:class:`.ColumnProperty.Comparator` which provides custom SQL
clause generation for comparison operations.
:param group:
a group name for this property when marked as deferred.
:param deferred:
when True, the column property is "deferred", meaning that
it does not load immediately, and is instead loaded when the
attribute is first accessed on an instance. See also
:func:`~sqlalchemy.orm.deferred`.
:param doc:
optional string that will be applied as the doc on the
class-bound descriptor.
:param expire_on_flush=True:
Disable expiry on flush. A column_property() which refers
to a SQL expression (and not a single table-bound column)
is considered to be a "read only" property; populating it
has no effect on the state of data, and it can only return
database state. For this reason a column_property()'s value
is expired whenever the parent object is involved in a
flush, that is, has any kind of "dirty" state within a flush.
Setting this parameter to ``False`` will have the effect of
leaving any existing value present after the flush proceeds.
Note, however, that the :class:`.Session` with default expiration
settings still expires
all attributes after a :meth:`.Session.commit` call.
:param info: Optional data dictionary which will be populated into the
:attr:`.MapperProperty.info` attribute of this object.
:param extension:
an :class:`.AttributeExtension` instance, or list of extensions,
which will be prepended to the list of attribute listeners for the
resulting descriptor placed on the class.
.. seealso::
:ref:`column_property_options` - to map columns while including
mapping options
:ref:`mapper_column_property_sql_expressions` - to map SQL
expressions
"""
super(ColumnProperty, self).__init__()
self._orig_columns = [expression._labeled(c) for c in columns]
self.columns = [
expression._labeled(_orm_full_deannotate(c)) for c in columns
]
self.group = kwargs.pop("group", None)
self.deferred = kwargs.pop("deferred", False)
self.instrument = kwargs.pop("_instrument", True)
self.comparator_factory = kwargs.pop(
"comparator_factory", self.__class__.Comparator
)
self.descriptor = kwargs.pop("descriptor", None)
self.extension = kwargs.pop("extension", None)
self.active_history = kwargs.pop("active_history", False)
self.expire_on_flush = kwargs.pop("expire_on_flush", True)
if "info" in kwargs:
self.info = kwargs.pop("info")
if "doc" in kwargs:
self.doc = kwargs.pop("doc")
else:
for col in reversed(self.columns):
doc = getattr(col, "doc", None)
if doc is not None:
self.doc = doc
break
else:
self.doc = None
if kwargs:
raise TypeError(
"%s received unexpected keyword argument(s): %s"
% (self.__class__.__name__, ", ".join(sorted(kwargs.keys())))
)
util.set_creation_order(self)
self.strategy_key = (
("deferred", self.deferred),
("instrument", self.instrument),
)
@util.dependencies("sqlalchemy.orm.state", "sqlalchemy.orm.strategies")
def _memoized_attr__deferred_column_loader(self, state, strategies):
return state.InstanceState._instance_level_callable_processor(
self.parent.class_manager,
strategies.LoadDeferredColumns(self.key),
self.key,
)
def __clause_element__(self):
"""Allow the ColumnProperty to work in expression before it is turned
into an instrumented attribute.
"""
return self.expression
@property
def expression(self):
"""Return the primary column or expression for this ColumnProperty.
E.g.::
class File(Base):
# ...
name = Column(String(64))
extension = Column(String(8))
filename = column_property(name + '.' + extension)
path = column_property('C:/' + filename.expression)
.. seealso::
:ref:`mapper_column_property_sql_expressions_composed`
"""
return self.columns[0]
def instrument_class(self, mapper):
if not self.instrument:
return
attributes.register_descriptor(
mapper.class_,
self.key,
comparator=self.comparator_factory(self, mapper),
parententity=mapper,
doc=self.doc,
)
def do_init(self):
super(ColumnProperty, self).do_init()
if len(self.columns) > 1 and set(self.parent.primary_key).issuperset(
self.columns
):
util.warn(
(
"On mapper %s, primary key column '%s' is being combined "
"with distinct primary key column '%s' in attribute '%s'. "
"Use explicit properties to give each column its own "
"mapped attribute name."
)
% (self.parent, self.columns[1], self.columns[0], self.key)
)
def copy(self):
return ColumnProperty(
deferred=self.deferred,
group=self.group,
active_history=self.active_history,
*self.columns
)
def _getcommitted(
self, state, dict_, column, passive=attributes.PASSIVE_OFF
):
return state.get_impl(self.key).get_committed_value(
state, dict_, passive=passive
)
def merge(
self,
session,
source_state,
source_dict,
dest_state,
dest_dict,
load,
_recursive,
_resolve_conflict_map,
):
if not self.instrument:
return
elif self.key in source_dict:
value = source_dict[self.key]
if not load:
dest_dict[self.key] = value
else:
impl = dest_state.get_impl(self.key)
impl.set(dest_state, dest_dict, value, None)
elif dest_state.has_identity and self.key not in dest_dict:
dest_state._expire_attributes(
dest_dict, [self.key], no_loader=True
)
class Comparator(util.MemoizedSlots, PropComparator):
"""Produce boolean, comparison, and other operators for
:class:`.ColumnProperty` attributes.
See the documentation for :class:`.PropComparator` for a brief
overview.
.. seealso::
:class:`.PropComparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
__slots__ = "__clause_element__", "info", "expressions"
def _memoized_method___clause_element__(self):
if self.adapter:
return self.adapter(self.prop.columns[0])
else:
# no adapter, so we aren't aliased
# assert self._parententity is self._parentmapper
return self.prop.columns[0]._annotate(
{
"parententity": self._parententity,
"parentmapper": self._parententity,
}
)
def _memoized_attr_info(self):
"""The .info dictionary for this attribute."""
ce = self.__clause_element__()
try:
return ce.info
except AttributeError:
return self.prop.info
def _memoized_attr_expressions(self):
"""The full sequence of columns referenced by this
attribute, adjusted for any aliasing in progress.
.. versionadded:: 1.3.17
"""
if self.adapter:
return [self.adapter(col) for col in self.prop.columns]
else:
# no adapter, so we aren't aliased
# assert self._parententity is self._parentmapper
return [
col._annotate(
{
"parententity": self._parententity,
"parentmapper": self._parententity,
"orm_key": self.prop.key,
}
)
for col in self.prop.columns
]
def _fallback_getattr(self, key):
"""proxy attribute access down to the mapped column.
this allows user-defined comparison methods to be accessed.
"""
return getattr(self.__clause_element__(), key)
def operate(self, op, *other, **kwargs):
return op(self.__clause_element__(), *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
col = self.__clause_element__()
return op(col._bind_param(op, other), col, **kwargs)
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
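# --- illustrative sketch, not part of the vendored library -----------------
# A self-contained usage sketch of ColumnProperty via the public
# column_property() helper; the declarative classes here are hypothetical
# and this helper is never called by the module.
def _example_column_property():  # noqa
    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import column_property

    Base = declarative_base()

    class File(Base):
        __tablename__ = "file"
        id = Column(Integer, primary_key=True)
        name = Column(String(64))
        extension = Column(String(8))
        # a SQL expression mapped as a read-only, non-persisted attribute
        filename = column_property(name + "." + extension)

    return File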
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/util.py
|
# orm/util.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
import types
import weakref
from . import attributes # noqa
from .base import _class_to_mapper # noqa
from .base import _never_set # noqa
from .base import _none_set # noqa
from .base import attribute_str # noqa
from .base import class_mapper # noqa
from .base import InspectionAttr # noqa
from .base import instance_str # noqa
from .base import object_mapper # noqa
from .base import object_state # noqa
from .base import state_attribute_str # noqa
from .base import state_class_str # noqa
from .base import state_str # noqa
from .interfaces import MapperProperty # noqa
from .interfaces import PropComparator # noqa
from .path_registry import PathRegistry # noqa
from .. import event
from .. import exc as sa_exc
from .. import inspection
from .. import sql
from .. import util
from ..sql import expression
from ..sql import util as sql_util
all_cascades = frozenset(
(
"delete",
"delete-orphan",
"all",
"merge",
"expunge",
"save-update",
"refresh-expire",
"none",
)
)
class CascadeOptions(frozenset):
"""Keeps track of the options sent to relationship().cascade"""
_add_w_all_cascades = all_cascades.difference(
["all", "none", "delete-orphan"]
)
_allowed_cascades = all_cascades
_viewonly_cascades = ["expunge", "all", "none", "refresh-expire"]
__slots__ = (
"save_update",
"delete",
"refresh_expire",
"merge",
"expunge",
"delete_orphan",
)
def __new__(cls, value_list):
if isinstance(value_list, util.string_types) or value_list is None:
return cls.from_string(value_list)
values = set(value_list)
if values.difference(cls._allowed_cascades):
raise sa_exc.ArgumentError(
"Invalid cascade option(s): %s"
% ", ".join(
[
repr(x)
for x in sorted(
values.difference(cls._allowed_cascades)
)
]
)
)
if "all" in values:
values.update(cls._add_w_all_cascades)
if "none" in values:
values.clear()
values.discard("all")
self = frozenset.__new__(CascadeOptions, values)
self.save_update = "save-update" in values
self.delete = "delete" in values
self.refresh_expire = "refresh-expire" in values
self.merge = "merge" in values
self.expunge = "expunge" in values
self.delete_orphan = "delete-orphan" in values
if self.delete_orphan and not self.delete:
util.warn(
"The 'delete-orphan' cascade " "option requires 'delete'."
)
return self
def __repr__(self):
return "CascadeOptions(%r)" % (",".join([x for x in sorted(self)]))
@classmethod
def from_string(cls, arg):
values = [c for c in re.split(r"\s*,\s*", arg or "") if c]
return cls(values)
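# --- illustrative sketch, not part of the vendored library -----------------
# A small sketch of the CascadeOptions expansion implemented above; never
# called by the module.
def _example_cascade_options():  # noqa
    opts = CascadeOptions("all, delete-orphan")
    # "all" expands to save-update, merge, refresh-expire, expunge, delete
    assert opts.save_update and opts.delete and opts.delete_orphan
    return opts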
def _validator_events(desc, key, validator, include_removes, include_backrefs):
"""Runs a validation method on an attribute value to be set or
appended.
"""
if not include_backrefs:
def detect_is_backref(state, initiator):
impl = state.manager[key].impl
return initiator.impl is not impl
if include_removes:
def append(state, value, initiator):
if initiator.op is not attributes.OP_BULK_REPLACE and (
include_backrefs or not detect_is_backref(state, initiator)
):
return validator(state.obj(), key, value, False)
else:
return value
def bulk_set(state, values, initiator):
if include_backrefs or not detect_is_backref(state, initiator):
obj = state.obj()
values[:] = [
validator(obj, key, value, False) for value in values
]
def set_(state, value, oldvalue, initiator):
if include_backrefs or not detect_is_backref(state, initiator):
return validator(state.obj(), key, value, False)
else:
return value
def remove(state, value, initiator):
if include_backrefs or not detect_is_backref(state, initiator):
validator(state.obj(), key, value, True)
else:
def append(state, value, initiator):
if initiator.op is not attributes.OP_BULK_REPLACE and (
include_backrefs or not detect_is_backref(state, initiator)
):
return validator(state.obj(), key, value)
else:
return value
def bulk_set(state, values, initiator):
if include_backrefs or not detect_is_backref(state, initiator):
obj = state.obj()
values[:] = [validator(obj, key, value) for value in values]
def set_(state, value, oldvalue, initiator):
if include_backrefs or not detect_is_backref(state, initiator):
return validator(state.obj(), key, value)
else:
return value
event.listen(desc, "append", append, raw=True, retval=True)
event.listen(desc, "bulk_replace", bulk_set, raw=True)
event.listen(desc, "set", set_, raw=True, retval=True)
if include_removes:
event.listen(desc, "remove", remove, raw=True, retval=True)
def polymorphic_union(
table_map, typecolname, aliasname="p_union", cast_nulls=True
):
"""Create a ``UNION`` statement used by a polymorphic mapper.
See :ref:`concrete_inheritance` for an example of how
this is used.
:param table_map: mapping of polymorphic identities to
:class:`_schema.Table` objects.
:param typecolname: string name of a "discriminator" column, which will be
derived from the query, producing the polymorphic identity for
each row. If ``None``, no polymorphic discriminator is generated.
:param aliasname: name of the :func:`~sqlalchemy.sql.expression.alias()`
construct generated.
:param cast_nulls: if True, non-existent columns, which are represented
as labeled NULLs, will be passed into CAST. This is a legacy behavior
that is problematic on some backends such as Oracle - in which case it
can be set to False.
"""
colnames = util.OrderedSet()
colnamemaps = {}
types = {}
for key in table_map:
table = table_map[key]
# mysql doesn't like selecting from a select;
# make it an alias of the select
if isinstance(table, sql.Select):
table = table.alias()
table_map[key] = table
m = {}
for c in table.c:
colnames.add(c.key)
m[c.key] = c
types[c.key] = c.type
colnamemaps[table] = m
def col(name, table):
try:
return colnamemaps[table][name]
except KeyError:
if cast_nulls:
return sql.cast(sql.null(), types[name]).label(name)
else:
return sql.type_coerce(sql.null(), types[name]).label(name)
result = []
for type_, table in table_map.items():
if typecolname is not None:
result.append(
sql.select(
[col(name, table) for name in colnames]
+ [
sql.literal_column(
sql_util._quote_ddl_expr(type_)
).label(typecolname)
],
from_obj=[table],
)
)
else:
result.append(
sql.select(
[col(name, table) for name in colnames], from_obj=[table]
)
)
return sql.union_all(*result).alias(aliasname)
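# --- illustrative sketch, not part of the vendored library -----------------
# A usage sketch for polymorphic_union() with concrete-inheritance tables;
# "engineers" and "managers" are hypothetical Table objects that share the
# common columns of the hierarchy.
def _example_polymorphic_union(engineers, managers):  # noqa
    # UNION the two tables, labeling each row's origin in a "type" column
    return polymorphic_union(
        {"engineer": engineers, "manager": managers}, "type", "pjoin"
    )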
def identity_key(*args, **kwargs):
"""Generate "identity key" tuples, as are used as keys in the
:attr:`.Session.identity_map` dictionary.
This function has several call styles:
* ``identity_key(class, ident, identity_token=token)``
This form receives a mapped class and a primary key scalar or
tuple as an argument.
E.g.::
>>> identity_key(MyClass, (1, 2))
(<class '__main__.MyClass'>, (1, 2), None)
:param class: mapped class (must be a positional argument)
:param ident: primary key, may be a scalar or tuple argument.
:param identity_token: optional identity token
.. versionadded:: 1.2 added identity_token
* ``identity_key(instance=instance)``
This form will produce the identity key for a given instance. The
instance need not be persistent, only that its primary key attributes
are populated (else the key will contain ``None`` for those missing
values).
E.g.::
>>> instance = MyClass(1, 2)
>>> identity_key(instance=instance)
(<class '__main__.MyClass'>, (1, 2), None)
In this form, the given instance is ultimately run through
:meth:`_orm.Mapper.identity_key_from_instance`, which will have the
effect of performing a database check for the corresponding row
if the object is expired.
:param instance: object instance (must be given as a keyword arg)
* ``identity_key(class, row=row, identity_token=token)``
This form is similar to the class/tuple form, except is passed a
database result row as a :class:`.RowProxy` object.
E.g.::
>>> row = engine.execute("select * from table where a=1 and b=2").\
first()
>>> identity_key(MyClass, row=row)
(<class '__main__.MyClass'>, (1, 2), None)
:param class: mapped class (must be a positional argument)
:param row: :class:`.RowProxy` row returned by a
:class:`_engine.ResultProxy`
(must be given as a keyword arg)
:param identity_token: optional identity token
.. versionadded:: 1.2 added identity_token
"""
if args:
row = None
largs = len(args)
if largs == 1:
class_ = args[0]
try:
row = kwargs.pop("row")
except KeyError:
ident = kwargs.pop("ident")
elif largs in (2, 3):
class_, ident = args
else:
raise sa_exc.ArgumentError(
"expected up to three positional arguments, " "got %s" % largs
)
identity_token = kwargs.pop("identity_token", None)
if kwargs:
raise sa_exc.ArgumentError(
"unknown keyword arguments: %s" % ", ".join(kwargs)
)
mapper = class_mapper(class_)
if row is None:
return mapper.identity_key_from_primary_key(
util.to_list(ident), identity_token=identity_token
)
else:
return mapper.identity_key_from_row(
row, identity_token=identity_token
)
else:
instance = kwargs.pop("instance")
if kwargs:
raise sa_exc.ArgumentError(
"unknown keyword arguments: %s" % ", ".join(kwargs.keys)
)
mapper = object_mapper(instance)
return mapper.identity_key_from_instance(instance)
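# --- illustrative sketch, not part of the vendored library -----------------
# The identity_key() call styles documented above; MyClass is a hypothetical
# mapped class with a composite primary key, some_instance an instance of it.
def _example_identity_key(MyClass, some_instance):  # noqa
    key_from_pk = identity_key(MyClass, (1, 2))
    key_from_obj = identity_key(instance=some_instance)
    return key_from_pk, key_from_obj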
class ORMAdapter(sql_util.ColumnAdapter):
"""ColumnAdapter subclass which excludes adaptation of entities from
non-matching mappers.
"""
def __init__(
self,
entity,
equivalents=None,
adapt_required=False,
allow_label_resolve=True,
anonymize_labels=False,
):
info = inspection.inspect(entity)
self.mapper = info.mapper
selectable = info.selectable
is_aliased_class = info.is_aliased_class
if is_aliased_class:
self.aliased_class = entity
else:
self.aliased_class = None
sql_util.ColumnAdapter.__init__(
self,
selectable,
equivalents,
adapt_required=adapt_required,
allow_label_resolve=allow_label_resolve,
anonymize_labels=anonymize_labels,
include_fn=self._include_fn,
)
def _include_fn(self, elem):
entity = elem._annotations.get("parentmapper", None)
return not entity or entity.isa(self.mapper)
class AliasedClass(object):
r"""Represents an "aliased" form of a mapped class for usage with Query.
The ORM equivalent of a :func:`~sqlalchemy.sql.expression.alias`
construct, this object mimics the mapped class using a
``__getattr__`` scheme and maintains a reference to a
real :class:`~sqlalchemy.sql.expression.Alias` object.
A primary purpose of :class:`.AliasedClass` is to serve as an alternate
within a SQL statement generated by the ORM, such that an existing
mapped entity can be used in multiple contexts. A simple example::
# find all pairs of users with the same name
user_alias = aliased(User)
session.query(User, user_alias).\
join((user_alias, User.id > user_alias.id)).\
filter(User.name == user_alias.name)
:class:`.AliasedClass` is also capable of mapping an existing mapped
class to an entirely new selectable, provided this selectable is column-
compatible with the existing mapped selectable, and it can also be
configured in a mapping as the target of a :func:`_orm.relationship`.
See the links below for examples.
The :class:`.AliasedClass` object is constructed typically using the
:func:`_orm.aliased` function. It also is produced with additional
configuration when using the :func:`_orm.with_polymorphic` function.
The resulting object is an instance of :class:`.AliasedClass`.
This object implements an attribute scheme which produces the
same attribute and method interface as the original mapped
class, allowing :class:`.AliasedClass` to be compatible
with any attribute technique which works on the original class,
including hybrid attributes (see :ref:`hybrids_toplevel`).
The :class:`.AliasedClass` can be inspected for its underlying
:class:`_orm.Mapper`, aliased selectable, and other information
using :func:`_sa.inspect`::
from sqlalchemy import inspect
my_alias = aliased(MyClass)
insp = inspect(my_alias)
The resulting inspection object is an instance of :class:`.AliasedInsp`.
.. seealso::
:func:`.aliased`
:func:`.with_polymorphic`
:ref:`relationship_aliased_class`
:ref:`relationship_to_window_function`
"""
def __init__(
self,
cls,
alias=None,
name=None,
flat=False,
adapt_on_names=False,
# TODO: None for default here?
with_polymorphic_mappers=(),
with_polymorphic_discriminator=None,
base_alias=None,
use_mapper_path=False,
represents_outer_join=False,
):
mapper = _class_to_mapper(cls)
if alias is None:
alias = mapper._with_polymorphic_selectable.alias(
name=name, flat=flat
)
self._aliased_insp = AliasedInsp(
self,
mapper,
alias,
name,
with_polymorphic_mappers
if with_polymorphic_mappers
else mapper.with_polymorphic_mappers,
with_polymorphic_discriminator
if with_polymorphic_discriminator is not None
else mapper.polymorphic_on,
base_alias,
use_mapper_path,
adapt_on_names,
represents_outer_join,
)
self.__name__ = "AliasedClass_%s" % mapper.class_.__name__
def __getattr__(self, key):
try:
_aliased_insp = self.__dict__["_aliased_insp"]
except KeyError:
raise AttributeError()
else:
target = _aliased_insp._target
# maintain all getattr mechanics
attr = getattr(target, key)
# attribute is a method, that will be invoked against a
# "self"; so just return a new method with the same function and
# new self
if hasattr(attr, "__call__") and hasattr(attr, "__self__"):
return types.MethodType(attr.__func__, self)
# attribute is a descriptor, that will be invoked against a
# "self"; so invoke the descriptor against this self
if hasattr(attr, "__get__"):
attr = attr.__get__(None, self)
# attributes within the QueryableAttribute system will want this
# to be invoked so the object can be adapted
if hasattr(attr, "adapt_to_entity"):
attr = attr.adapt_to_entity(_aliased_insp)
setattr(self, key, attr)
return attr
def __repr__(self):
return "<AliasedClass at 0x%x; %s>" % (
id(self),
self._aliased_insp._target.__name__,
)
def __str__(self):
return str(self._aliased_insp)
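# --- illustrative sketch, not part of the vendored library -----------------
# A usage sketch for AliasedClass via the aliased() helper defined later in
# this module; User is a hypothetical mapped class and this helper is never
# called by the module.
def _example_aliased_self_join(session, User):  # noqa
    # self-join: pairs of distinct users sharing the same name
    user_alias = aliased(User)
    return (
        session.query(User, user_alias)
        .join(user_alias, User.id > user_alias.id)
        .filter(User.name == user_alias.name)
    )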
class AliasedInsp(InspectionAttr):
"""Provide an inspection interface for an
:class:`.AliasedClass` object.
The :class:`.AliasedInsp` object is returned
given an :class:`.AliasedClass` using the
:func:`_sa.inspect` function::
from sqlalchemy import inspect
from sqlalchemy.orm import aliased
my_alias = aliased(MyMappedClass)
insp = inspect(my_alias)
Attributes on :class:`.AliasedInsp`
include:
* ``entity`` - the :class:`.AliasedClass` represented.
* ``mapper`` - the :class:`_orm.Mapper` mapping the underlying class.
* ``selectable`` - the :class:`_expression.Alias`
construct which ultimately
represents an aliased :class:`_schema.Table` or
:class:`_expression.Select`
construct.
* ``name`` - the name of the alias. Also is used as the attribute
name when returned in a result tuple from :class:`_query.Query`.
* ``with_polymorphic_mappers`` - collection of :class:`_orm.Mapper`
objects
indicating all those mappers expressed in the select construct
for the :class:`.AliasedClass`.
* ``polymorphic_on`` - an alternate column or SQL expression which
will be used as the "discriminator" for a polymorphic load.
.. seealso::
:ref:`inspection_toplevel`
"""
def __init__(
self,
entity,
mapper,
selectable,
name,
with_polymorphic_mappers,
polymorphic_on,
_base_alias,
_use_mapper_path,
adapt_on_names,
represents_outer_join,
):
self._weak_entity = weakref.ref(entity)
self.mapper = mapper
self.selectable = (
self.persist_selectable
) = self.local_table = selectable
self.name = name
self.polymorphic_on = polymorphic_on
self._base_alias = weakref.ref(_base_alias or self)
self._use_mapper_path = _use_mapper_path
self.represents_outer_join = represents_outer_join
if with_polymorphic_mappers:
self._is_with_polymorphic = True
self.with_polymorphic_mappers = with_polymorphic_mappers
self._with_polymorphic_entities = []
for poly in self.with_polymorphic_mappers:
if poly is not mapper:
ent = AliasedClass(
poly.class_,
selectable,
base_alias=self,
adapt_on_names=adapt_on_names,
use_mapper_path=_use_mapper_path,
)
setattr(self.entity, poly.class_.__name__, ent)
self._with_polymorphic_entities.append(ent._aliased_insp)
else:
self._is_with_polymorphic = False
self.with_polymorphic_mappers = [mapper]
self._adapter = sql_util.ColumnAdapter(
selectable,
equivalents=mapper._equivalent_columns,
adapt_on_names=adapt_on_names,
anonymize_labels=True,
)
self._adapt_on_names = adapt_on_names
self._target = mapper.class_
@property
def entity(self):
return self._weak_entity()
is_aliased_class = True
"always returns True"
@property
def class_(self):
"""Return the mapped class ultimately represented by this
:class:`.AliasedInsp`."""
return self.mapper.class_
@property
def _path_registry(self):
if self._use_mapper_path:
return self.mapper._path_registry
else:
return PathRegistry.per_mapper(self)
def __getstate__(self):
return {
"entity": self.entity,
"mapper": self.mapper,
"alias": self.selectable,
"name": self.name,
"adapt_on_names": self._adapt_on_names,
"with_polymorphic_mappers": self.with_polymorphic_mappers,
"with_polymorphic_discriminator": self.polymorphic_on,
"base_alias": self._base_alias(),
"use_mapper_path": self._use_mapper_path,
"represents_outer_join": self.represents_outer_join,
}
def __setstate__(self, state):
self.__init__(
state["entity"],
state["mapper"],
state["alias"],
state["name"],
state["with_polymorphic_mappers"],
state["with_polymorphic_discriminator"],
state["base_alias"],
state["use_mapper_path"],
state["adapt_on_names"],
state["represents_outer_join"],
)
def _adapt_element(self, elem):
return self._adapter.traverse(elem)._annotate(
{"parententity": self, "parentmapper": self.mapper}
)
def _entity_for_mapper(self, mapper):
self_poly = self.with_polymorphic_mappers
if mapper in self_poly:
if mapper is self.mapper:
return self
else:
return getattr(
self.entity, mapper.class_.__name__
)._aliased_insp
elif mapper.isa(self.mapper):
return self
else:
assert False, "mapper %s doesn't correspond to %s" % (mapper, self)
@util.memoized_property
def _get_clause(self):
onclause, replacemap = self.mapper._get_clause
return (
self._adapter.traverse(onclause),
{
self._adapter.traverse(col): param
for col, param in replacemap.items()
},
)
@util.memoized_property
def _memoized_values(self):
return {}
def _memo(self, key, callable_, *args, **kw):
if key in self._memoized_values:
return self._memoized_values[key]
else:
self._memoized_values[key] = value = callable_(*args, **kw)
return value
def __repr__(self):
if self.with_polymorphic_mappers:
with_poly = "(%s)" % ", ".join(
mp.class_.__name__ for mp in self.with_polymorphic_mappers
)
else:
with_poly = ""
return "<AliasedInsp at 0x%x; %s%s>" % (
id(self),
self.class_.__name__,
with_poly,
)
def __str__(self):
if self._is_with_polymorphic:
return "with_polymorphic(%s, [%s])" % (
self._target.__name__,
", ".join(
mp.class_.__name__
for mp in self.with_polymorphic_mappers
if mp is not self.mapper
),
)
else:
return "aliased(%s)" % (self._target.__name__,)
inspection._inspects(AliasedClass)(lambda target: target._aliased_insp)
inspection._inspects(AliasedInsp)(lambda target: target)
def aliased(element, alias=None, name=None, flat=False, adapt_on_names=False):
"""Produce an alias of the given element, usually an :class:`.AliasedClass`
instance.
E.g.::
my_alias = aliased(MyClass)
session.query(MyClass, my_alias).filter(MyClass.id > my_alias.id)
The :func:`.aliased` function is used to create an ad-hoc mapping
of a mapped class to a new selectable. By default, a selectable
is generated from the normally mapped selectable (typically a
:class:`_schema.Table`) using the :meth:`_expression.FromClause.alias`
method.
However, :func:`.aliased` can also be used to link the class to
a new :func:`_expression.select` statement. Also, the
:func:`.with_polymorphic`
function is a variant of :func:`.aliased` that is intended to specify
a so-called "polymorphic selectable", that corresponds to the union
of several joined-inheritance subclasses at once.
For convenience, the :func:`.aliased` function also accepts plain
:class:`_expression.FromClause` constructs, such as a
:class:`_schema.Table` or
:func:`_expression.select` construct. In those cases, the
:meth:`_expression.FromClause.alias`
method is called on the object and the new :class:`_expression.Alias`
object
returned. The returned :class:`_expression.Alias`
is not ORM-mapped in this case.
:param element: element to be aliased. Is normally a mapped class,
but for convenience can also be a :class:`_expression.FromClause` element.
:param alias: Optional selectable unit to map the element to. This is
usually used to link the object to a subquery, and should be an aliased
select construct as one would produce from the
:meth:`_query.Query.subquery` method or
the :meth:`_expression.Select.alias` methods of the
:func:`_expression.select` construct.
:param name: optional string name to use for the alias, if not specified
by the ``alias`` parameter. The name, among other things, forms the
attribute name that will be accessible via tuples returned by a
:class:`_query.Query` object.
:param flat: Boolean, will be passed through to the
:meth:`_expression.FromClause.alias` call so that aliases of
:class:`_expression.Join` objects
don't include an enclosing SELECT. This can lead to more efficient
queries in many circumstances. A JOIN against a nested JOIN will be
rewritten as a JOIN against an aliased SELECT subquery on backends that
don't support this syntax.
.. seealso:: :meth:`_expression.Join.alias`
:param adapt_on_names: if True, more liberal "matching" will be used when
mapping the mapped columns of the ORM entity to those of the
given selectable - a name-based match will be performed if the
given selectable doesn't otherwise have a column that corresponds
to one on the entity. The use case for this is when associating
an entity with some derived selectable such as one that uses
aggregate functions::
class UnitPrice(Base):
__tablename__ = 'unit_price'
...
unit_id = Column(Integer)
price = Column(Numeric)
aggregated_unit_price = Session.query(
func.sum(UnitPrice.price).label('price')
).group_by(UnitPrice.unit_id).subquery()
aggregated_unit_price = aliased(UnitPrice,
alias=aggregated_unit_price, adapt_on_names=True)
Above, functions on ``aggregated_unit_price`` which refer to
``.price`` will return the
``func.sum(UnitPrice.price).label('price')`` column, as it is
matched on the name "price". Ordinarily, the "price" function
wouldn't have any "column correspondence" to the actual
``UnitPrice.price`` column as it is not a proxy of the original.
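As a further illustration, a minimal sketch of linking an entity to a
subquery via the ``alias`` parameter (``User`` and ``Address`` are
hypothetical mapped classes)::

    address_subq = session.query(Address).\
        filter(Address.email_address == 'foo@bar.com').\
        subquery()

    address_alias = aliased(Address, address_subq)

    for user, address in session.query(User, address_alias).\
            join(address_alias, User.id == address_alias.user_id):
        print(user, address)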
"""
if isinstance(element, expression.FromClause):
if adapt_on_names:
raise sa_exc.ArgumentError(
"adapt_on_names only applies to ORM elements"
)
return element.alias(name, flat=flat)
else:
return AliasedClass(
element,
alias=alias,
flat=flat,
name=name,
adapt_on_names=adapt_on_names,
)
def with_polymorphic(
base,
classes,
selectable=False,
flat=False,
polymorphic_on=None,
aliased=False,
innerjoin=False,
_use_mapper_path=False,
_existing_alias=None,
):
"""Produce an :class:`.AliasedClass` construct which specifies
columns for descendant mappers of the given base.
Using this method will ensure that each descendant mapper's
tables are included in the FROM clause, and will allow filter()
criterion to be used against those tables. The resulting
instances will also have those columns already loaded so that
no "post fetch" of those columns will be required.
.. seealso::
:ref:`with_polymorphic` - full discussion of
:func:`_orm.with_polymorphic`.
:param base: Base class to be aliased.
:param classes: a single class or mapper, or list of
class/mappers, which inherit from the base class.
Alternatively, it may also be the string ``'*'``, in which case
all descending mapped classes will be added to the FROM clause.
:param aliased: when True, the selectable will be wrapped in an
alias, that is ``(SELECT * FROM <fromclauses>) AS anon_1``.
This can be important when using the with_polymorphic()
to create the target of a JOIN on a backend that does not
support parenthesized joins, such as SQLite and older
versions of MySQL. However if the
:paramref:`.with_polymorphic.selectable` parameter is in use
with an existing :class:`_expression.Alias` construct,
then you should not
set this flag.
:param flat: Boolean, will be passed through to the
:meth:`_expression.FromClause.alias` call so that aliases of
:class:`_expression.Join`
objects don't include an enclosing SELECT. This can lead to more
efficient queries in many circumstances. A JOIN against a nested JOIN
will be rewritten as a JOIN against an aliased SELECT subquery on
backends that don't support this syntax.
Setting ``flat`` to ``True`` implies the ``aliased`` flag is
also ``True``.
.. versionadded:: 0.9.0
.. seealso:: :meth:`_expression.Join.alias`
:param selectable: a table or select() statement that will
be used in place of the generated FROM clause. This argument is
required if any of the desired classes use concrete table
inheritance, since SQLAlchemy currently cannot generate UNIONs
among tables automatically. If used, the ``selectable`` argument
must represent the full set of tables and columns mapped by every
mapped class. Otherwise, the unaccounted mapped columns will
result in their table being appended directly to the FROM clause
which will usually lead to incorrect results.
:param polymorphic_on: a column to be used as the "discriminator"
column for the given selectable. If not given, the polymorphic_on
attribute of the base classes' mapper will be used, if any. This
is useful for mappings that don't have polymorphic loading
behavior by default.
:param innerjoin: if True, an INNER JOIN will be used. This should
only be specified if querying for one specific subtype only
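For illustration, a minimal sketch, assuming hypothetical
joined-inheritance classes ``Employee``, ``Engineer`` and ``Manager``::

    from sqlalchemy import or_

    emp_poly = with_polymorphic(Employee, [Engineer, Manager])

    session.query(emp_poly).filter(
        or_(
            emp_poly.Engineer.engineer_info == 'tps reports',
            emp_poly.Manager.manager_data == 'budgets',
        )
    )

The ``Engineer`` and ``Manager`` attributes on the returned
:class:`.AliasedClass` refer to the subclass entities within the
polymorphic selectable, so criteria against several subclasses can be
combined in one query.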
"""
primary_mapper = _class_to_mapper(base)
if _existing_alias:
assert _existing_alias.mapper is primary_mapper
classes = util.to_set(classes)
new_classes = set(
[mp.class_ for mp in _existing_alias.with_polymorphic_mappers]
)
if classes == new_classes:
return _existing_alias
else:
classes = classes.union(new_classes)
mappers, selectable = primary_mapper._with_polymorphic_args(
classes, selectable, innerjoin=innerjoin
)
if aliased or flat:
selectable = selectable.alias(flat=flat)
return AliasedClass(
base,
selectable,
with_polymorphic_mappers=mappers,
with_polymorphic_discriminator=polymorphic_on,
use_mapper_path=_use_mapper_path,
represents_outer_join=not innerjoin,
)
def _orm_annotate(element, exclude=None):
"""Deep copy the given ClauseElement, annotating each element with the
"_orm_adapt" flag.
Elements within the exclude collection will be cloned but not annotated.
"""
return sql_util._deep_annotate(element, {"_orm_adapt": True}, exclude)
def _orm_deannotate(element):
"""Remove annotations that link a column to a particular mapping.
Note this doesn't affect "remote" and "foreign" annotations
passed by the :func:`_orm.foreign` and :func:`_orm.remote`
annotators.
"""
return sql_util._deep_deannotate(
element, values=("_orm_adapt", "parententity")
)
def _orm_full_deannotate(element):
return sql_util._deep_deannotate(element)
class _ORMJoin(expression.Join):
"""Extend Join to support ORM constructs as input."""
__visit_name__ = expression.Join.__visit_name__
def __init__(
self,
left,
right,
onclause=None,
isouter=False,
full=False,
_left_memo=None,
_right_memo=None,
):
left_info = inspection.inspect(left)
left_orm_info = getattr(left, "_joined_from_info", left_info)
right_info = inspection.inspect(right)
adapt_to = right_info.selectable
self._joined_from_info = right_info
self._left_memo = _left_memo
self._right_memo = _right_memo
if isinstance(onclause, util.string_types):
onclause = getattr(left_orm_info.entity, onclause)
if isinstance(onclause, attributes.QueryableAttribute):
on_selectable = onclause.comparator._source_selectable()
prop = onclause.property
elif isinstance(onclause, MapperProperty):
prop = onclause
on_selectable = prop.parent.selectable
else:
prop = None
if prop:
if sql_util.clause_is_present(on_selectable, left_info.selectable):
adapt_from = on_selectable
else:
adapt_from = left_info.selectable
(
pj,
sj,
source,
dest,
secondary,
target_adapter,
) = prop._create_joins(
source_selectable=adapt_from,
dest_selectable=adapt_to,
source_polymorphic=True,
dest_polymorphic=True,
of_type_mapper=right_info.mapper,
alias_secondary=True,
)
if sj is not None:
if isouter:
# note this is an inner join from secondary->right
right = sql.join(secondary, right, sj)
onclause = pj
else:
left = sql.join(left, secondary, pj, isouter)
onclause = sj
else:
onclause = pj
self._target_adapter = target_adapter
expression.Join.__init__(self, left, right, onclause, isouter, full)
if (
not prop
and getattr(right_info, "mapper", None)
and right_info.mapper.single
):
# if single inheritance target and we are using a manual
# or implicit ON clause, augment it the same way we'd augment the
# WHERE.
single_crit = right_info.mapper._single_table_criterion
if single_crit is not None:
if right_info.is_aliased_class:
single_crit = right_info._adapter.traverse(single_crit)
self.onclause = self.onclause & single_crit
def _splice_into_center(self, other):
"""Splice a join into the center.
Given join(a, b) and join(b, c), return join(a, b).join(c)
"""
leftmost = other
while isinstance(leftmost, sql.Join):
leftmost = leftmost.left
assert self.right is leftmost
left = _ORMJoin(
self.left,
other.left,
self.onclause,
isouter=self.isouter,
_left_memo=self._left_memo,
_right_memo=other._left_memo,
)
return _ORMJoin(
left,
other.right,
other.onclause,
isouter=other.isouter,
_right_memo=other._right_memo,
)
def join(
self,
right,
onclause=None,
isouter=False,
full=False,
join_to_left=None,
):
return _ORMJoin(self, right, onclause, full=full, isouter=isouter)
def outerjoin(self, right, onclause=None, full=False, join_to_left=None):
return _ORMJoin(self, right, onclause, isouter=True, full=full)
def join(
left, right, onclause=None, isouter=False, full=False, join_to_left=None
):
r"""Produce an inner join between left and right clauses.
:func:`_orm.join` is an extension to the core join interface
provided by :func:`_expression.join()`, where the
left and right selectables may be not only core selectable
objects such as :class:`_schema.Table`, but also mapped classes or
:class:`.AliasedClass` instances. The "on" clause can
be a SQL expression, or an attribute or string name
referencing a configured :func:`_orm.relationship`.
:func:`_orm.join` is not commonly needed in modern usage,
as its functionality is encapsulated within that of the
:meth:`_query.Query.join` method, which features a
significant amount of automation beyond :func:`_orm.join`
by itself. Explicit usage of :func:`_orm.join`
with :class:`_query.Query` involves usage of the
:meth:`_query.Query.select_from` method, as in::
from sqlalchemy.orm import join
session.query(User).\
select_from(join(User, Address, User.addresses)).\
filter(Address.email_address=='foo@bar.com')
In modern SQLAlchemy the above join can be written more
succinctly as::
session.query(User).\
join(User.addresses).\
filter(Address.email_address=='foo@bar.com')
See :meth:`_query.Query.join` for information on modern usage
of ORM level joins.
.. deprecated:: 0.8
the ``join_to_left`` parameter is deprecated, and will be removed
in a future release. The parameter has no effect.
"""
return _ORMJoin(left, right, onclause, isouter, full)
def outerjoin(left, right, onclause=None, full=False, join_to_left=None):
"""Produce a left outer join between left and right clauses.
This is the "outer join" version of the :func:`_orm.join` function,
featuring the same behavior except that an OUTER JOIN is generated.
See that function's documentation for other usage details.
"""
return _ORMJoin(left, right, onclause, True, full)
def with_parent(instance, prop, from_entity=None):
"""Create filtering criterion that relates this query's primary entity
to the given related instance, using established
:func:`_orm.relationship()`
configuration.
The SQL rendered is the same as that rendered when a lazy loader
would fire off from the given parent on that attribute, meaning
that the appropriate state is taken from the parent object in
Python without the need to render joins to the parent table
in the rendered statement.
:param instance:
An instance which has some :func:`_orm.relationship`.
:param property:
String property name, or class-bound attribute, which indicates
what relationship from the instance should be used to reconcile the
parent/child relationship.
:param from_entity:
Entity in which to consider as the left side. This defaults to the
"zero" entity of the :class:`_query.Query` itself.
.. versionadded:: 1.2
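For illustration, a minimal sketch (``User`` and ``Address``, linked by
a ``User.addresses`` relationship, are hypothetical)::

    some_user = session.query(User).first()

    addresses = session.query(Address).filter(
        with_parent(some_user, User.addresses)).all()

No join to the parent table is rendered; the parent's identity is
taken from ``some_user`` directly.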
"""
if isinstance(prop, util.string_types):
mapper = object_mapper(instance)
prop = getattr(mapper.class_, prop).property
elif isinstance(prop, attributes.QueryableAttribute):
prop = prop.property
return prop._with_parent(instance, from_entity=from_entity)
def has_identity(object_):
"""Return True if the given object has a database
identity.
This typically corresponds to the object being
in either the persistent or detached state.
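For illustration, a minimal sketch (``SomeClass`` is a hypothetical
mapped class)::

    obj = SomeClass()
    assert not has_identity(obj)   # transient; no database identity

    session.add(obj)
    session.flush()
    assert has_identity(obj)       # persistent; row exists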
.. seealso::
:func:`.was_deleted`
"""
state = attributes.instance_state(object_)
return state.has_identity
def was_deleted(object_):
"""Return True if the given object was deleted
within a session flush.
This is regardless of whether or not the object is
persistent or detached.
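For illustration, a minimal sketch::

    session.delete(some_object)
    session.flush()

    assert was_deleted(some_object)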
.. seealso::
:attr:`.InstanceState.was_deleted`
"""
state = attributes.instance_state(object_)
return state.was_deleted
def _entity_corresponds_to(given, entity):
"""determine if 'given' corresponds to 'entity', in terms
of an entity passed to Query that would match the same entity
being referred to elsewhere in the query.
"""
if entity.is_aliased_class:
if given.is_aliased_class:
if entity._base_alias() is given._base_alias():
return True
return False
elif given.is_aliased_class:
if given._use_mapper_path:
return entity in given.with_polymorphic_mappers
else:
return entity is given
return entity.common_parent(given)
def _entity_corresponds_to_use_path_impl(given, entity):
"""determine if 'given' corresponds to 'entity', in terms
of a path of loader options where a mapped attribute is taken to
be a member of a parent entity.
e.g.::
someoption(A).someoption(A.b) # -> fn(A, A) -> True
someoption(A).someoption(C.d) # -> fn(A, C) -> False
a1 = aliased(A)
someoption(a1).someoption(A.b) # -> fn(a1, A) -> False
someoption(a1).someoption(a1.b) # -> fn(a1, a1) -> True
wp = with_polymorphic(A, [A1, A2])
someoption(wp).someoption(A1.foo) # -> fn(wp, A1) -> False
someoption(wp).someoption(wp.A1.foo) # -> fn(wp, wp.A1) -> True
"""
if given.is_aliased_class:
return (
entity.is_aliased_class
and not entity._use_mapper_path
and (given is entity or given in entity._with_polymorphic_entities)
)
elif not entity.is_aliased_class:
return given.common_parent(entity.mapper)
else:
return (
entity._use_mapper_path
and given in entity.with_polymorphic_mappers
)
def _entity_isa(given, mapper):
"""determine if 'given' "is a" mapper, in terms of the given
would load rows of type 'mapper'.
"""
if given.is_aliased_class:
return mapper in given.with_polymorphic_mappers or given.mapper.isa(
mapper
)
elif given.with_polymorphic_mappers:
return mapper in given.with_polymorphic_mappers
else:
return given.isa(mapper)
def randomize_unitofwork():
"""Use random-ordering sets within the unit of work in order
to detect unit of work sorting issues.
This is a utility function that can be used to help reproduce
inconsistent unit of work sorting issues. For example,
if two kinds of objects A and B are being inserted, and
B has a foreign key reference to A - the A must be inserted first.
However, if there is no relationship between A and B, the unit of work
won't know to perform this sorting, and an operation may or may not
fail, depending on how the ordering works out. Since Python sets
and dictionaries have non-deterministic ordering, such an issue may
occur on some runs and not on others, and in practice it tends to
have a great dependence on the state of the interpreter. This leads
to so-called "heisenbugs" where changing entirely irrelevant aspects
of the test program still cause the failure behavior to change.
By calling ``randomize_unitofwork()`` when a script first runs, the
ordering of a key series of sets within the unit of work implementation
are randomized, so that the script can be minimized down to the
fundamental mapping and operation that's failing, while still reproducing
the issue on at least some runs.
This utility is also available when running the test suite via the
``--reversetop`` flag.
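For illustration, a minimal sketch of a reproduction script::

    from sqlalchemy.orm.util import randomize_unitofwork

    # call as the script first runs, before mappings are used
    randomize_unitofwork()

    # ... define mappings and run the intermittently-failing flush ...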
"""
from sqlalchemy.orm import unitofwork, session, mapper, dependency
from sqlalchemy.util import topological
from sqlalchemy.testing.util import RandomSet
topological.set = (
unitofwork.set
) = session.set = mapper.set = dependency.set = RandomSet
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/events.py
# orm/events.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""ORM event interfaces.
"""
import weakref
from . import instrumentation
from . import interfaces
from . import mapperlib
from .attributes import QueryableAttribute
from .base import _mapper_or_none
from .query import Query
from .scoping import scoped_session
from .session import Session
from .session import sessionmaker
from .. import event
from .. import exc
from .. import util
from ..util.compat import inspect_getfullargspec
class InstrumentationEvents(event.Events):
"""Events related to class instrumentation events.
The listeners here support being established against
any new style class, that is any object that is a subclass
of 'type'. Events will then be fired off for events
against that class. If the "propagate=True" flag is passed
to event.listen(), the event will fire off for subclasses
of that class as well.
The Python ``type`` builtin is also accepted as a target,
which when used has the effect of events being emitted
for all classes.
Note the "propagate" flag here is defaulted to ``True``,
unlike the other class level events where it defaults
to ``False``. This means that new subclasses will also
be the subject of these events, when a listener
is established on a superclass.
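For illustration, a minimal sketch (``MyBase`` is a hypothetical base
class)::

    from sqlalchemy import event

    def on_attribute_instrument(cls, key, inst):
        print("instrumented %s.%s" % (cls.__name__, key))

    event.listen(MyBase, 'attribute_instrument', on_attribute_instrument)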
"""
_target_class_doc = "SomeBaseClass"
_dispatch_target = instrumentation.InstrumentationFactory
@classmethod
def _accept_with(cls, target):
if isinstance(target, type):
return _InstrumentationEventsHold(target)
else:
return None
@classmethod
def _listen(cls, event_key, propagate=True, **kw):
target, identifier, fn = (
event_key.dispatch_target,
event_key.identifier,
event_key._listen_fn,
)
def listen(target_cls, *arg):
listen_cls = target()
if propagate and issubclass(target_cls, listen_cls):
return fn(target_cls, *arg)
elif not propagate and target_cls is listen_cls:
return fn(target_cls, *arg)
def remove(ref):
key = event.registry._EventKey(
None,
identifier,
listen,
instrumentation._instrumentation_factory,
)
getattr(
instrumentation._instrumentation_factory.dispatch, identifier
).remove(key)
target = weakref.ref(target.class_, remove)
event_key.with_dispatch_target(
instrumentation._instrumentation_factory
).with_wrapper(listen).base_listen(**kw)
@classmethod
def _clear(cls):
super(InstrumentationEvents, cls)._clear()
instrumentation._instrumentation_factory.dispatch._clear()
def class_instrument(self, cls):
"""Called after the given class is instrumented.
To get at the :class:`.ClassManager`, use
:func:`.manager_of_class`.
"""
def class_uninstrument(self, cls):
"""Called before the given class is uninstrumented.
To get at the :class:`.ClassManager`, use
:func:`.manager_of_class`.
"""
def attribute_instrument(self, cls, key, inst):
"""Called when an attribute is instrumented."""
class _InstrumentationEventsHold(object):
"""temporary marker object used to transfer from _accept_with() to
_listen() on the InstrumentationEvents class.
"""
def __init__(self, class_):
self.class_ = class_
dispatch = event.dispatcher(InstrumentationEvents)
class InstanceEvents(event.Events):
"""Define events specific to object lifecycle.
e.g.::
from sqlalchemy import event
def my_load_listener(target, context):
print("on load!")
event.listen(SomeClass, 'load', my_load_listener)
Available targets include:
* mapped classes
* unmapped superclasses of mapped or to-be-mapped classes
(using the ``propagate=True`` flag)
* :class:`_orm.Mapper` objects
* the :class:`_orm.Mapper` class itself and the :func:`.mapper`
function indicate listening for all mappers.
Instance events are closely related to mapper events, but
are more specific to the instance and its instrumentation,
rather than its system of persistence.
When using :class:`.InstanceEvents`, several modifiers are
available to the :func:`.event.listen` function.
:param propagate=False: When True, the event listener should
be applied to all inheriting classes as well as the
class which is the target of this listener.
:param raw=False: When True, the "target" argument passed
to applicable event listener functions will be the
instance's :class:`.InstanceState` management
object, rather than the mapped instance itself.
:param restore_load_context=False: Applies to the
:meth:`.InstanceEvents.load` and :meth:`.InstanceEvents.refresh`
events. Restores the loader context of the object when the event
hook is complete, so that ongoing eager load operations continue
to target the object appropriately. A warning is emitted if the
object is moved to a new loader context from within one of these
events when this flag is not set.
.. versionadded:: 1.3.14
"""
_target_class_doc = "SomeClass"
_dispatch_target = instrumentation.ClassManager
@classmethod
def _new_classmanager_instance(cls, class_, classmanager):
_InstanceEventsHold.populate(class_, classmanager)
@classmethod
@util.dependencies("sqlalchemy.orm")
def _accept_with(cls, orm, target):
if isinstance(target, instrumentation.ClassManager):
return target
elif isinstance(target, mapperlib.Mapper):
return target.class_manager
elif target is orm.mapper:
return instrumentation.ClassManager
elif isinstance(target, type):
if issubclass(target, mapperlib.Mapper):
return instrumentation.ClassManager
else:
manager = instrumentation.manager_of_class(target)
if manager:
return manager
else:
return _InstanceEventsHold(target)
return None
@classmethod
def _listen(
cls,
event_key,
raw=False,
propagate=False,
restore_load_context=False,
**kw
):
target, fn = (event_key.dispatch_target, event_key._listen_fn)
if not raw or restore_load_context:
def wrap(state, *arg, **kw):
if not raw:
target = state.obj()
else:
target = state
if restore_load_context:
runid = state.runid
try:
return fn(target, *arg, **kw)
finally:
if restore_load_context:
state.runid = runid
event_key = event_key.with_wrapper(wrap)
event_key.base_listen(propagate=propagate, **kw)
if propagate:
for mgr in target.subclass_managers(True):
event_key.with_dispatch_target(mgr).base_listen(propagate=True)
@classmethod
def _clear(cls):
super(InstanceEvents, cls)._clear()
_InstanceEventsHold._clear()
def first_init(self, manager, cls):
"""Called when the first instance of a particular mapping is called.
This event is called when the ``__init__`` method of a class
is called the first time for that particular class. The event
invokes before ``__init__`` actually proceeds as well as before
the :meth:`.InstanceEvents.init` event is invoked.
"""
def init(self, target, args, kwargs):
"""Receive an instance when its constructor is called.
This method is only called during a userland construction of
an object, in conjunction with the object's constructor, e.g.
its ``__init__`` method. It is not called when an object is
loaded from the database; see the :meth:`.InstanceEvents.load`
event in order to intercept a database load.
The event is called before the actual ``__init__`` constructor
of the object is called. The ``kwargs`` dictionary may be
modified in-place in order to affect what is passed to
``__init__``.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param args: positional arguments passed to the ``__init__`` method.
This is passed as a tuple and is currently immutable.
:param kwargs: keyword arguments passed to the ``__init__`` method.
This structure *can* be altered in place.
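For illustration, a minimal sketch that supplies a default keyword
argument (``SomeClass`` and its ``status`` keyword are hypothetical)::

    from sqlalchemy import event

    @event.listens_for(SomeClass, 'init')
    def supply_status(target, args, kwargs):
        # kwargs may be modified in place before __init__ proceeds
        kwargs.setdefault('status', 'new')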
.. seealso::
:meth:`.InstanceEvents.init_failure`
:meth:`.InstanceEvents.load`
"""
def init_failure(self, target, args, kwargs):
"""Receive an instance when its constructor has been called,
and raised an exception.
This method is only called during a userland construction of
an object, in conjunction with the object's constructor, e.g.
its ``__init__`` method. It is not called when an object is loaded
from the database.
The event is invoked after an exception raised by the ``__init__``
method is caught. After the event
is invoked, the original exception is re-raised outwards, so that
the construction of the object still raises an exception. The
actual exception and stack trace raised should be present in
``sys.exc_info()``.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param args: positional arguments that were passed to the ``__init__``
method.
:param kwargs: keyword arguments that were passed to the ``__init__``
method.
.. seealso::
:meth:`.InstanceEvents.init`
:meth:`.InstanceEvents.load`
"""
def load(self, target, context):
"""Receive an object instance after it has been created via
``__new__``, and after initial attribute population has
occurred.
This typically occurs when the instance is created based on
incoming result rows, and is only called once for that
instance's lifetime.
.. warning::
During a result-row load, this event is invoked when the
first row received for this instance is processed. When using
eager loading with collection-oriented attributes, the additional
rows that are to be loaded / processed in order to load subsequent
collection items have not occurred yet. This has the effect
both that collections will not be fully loaded, as well as that
if an operation occurs within this event handler that emits
another database load operation for the object, the "loading
context" for the object can change and interfere with the
existing eager loaders still in progress.
Examples of what can cause the "loading context" to change within
the event handler include, but are not necessarily limited to:
* accessing deferred attributes that weren't part of the row,
will trigger an "undefer" operation and refresh the object
* accessing attributes on a joined-inheritance subclass that
weren't part of the row, will trigger a refresh operation.
As of SQLAlchemy 1.3.14, a warning is emitted when this occurs. The
:paramref:`.InstanceEvents.restore_load_context` option may be
used on the event to prevent this warning; this will ensure that
the existing loading context is maintained for the object after the
event is called::
@event.listens_for(
SomeClass, "load", restore_load_context=True)
def on_load(instance, context):
instance.some_unloaded_attribute
.. versionchanged:: 1.3.14 Added
:paramref:`.InstanceEvents.restore_load_context`
and :paramref:`.SessionEvents.restore_load_context` flags which
apply to "on load" events, which will ensure that the loading
context for an object is restored when the event hook is
complete; a warning is emitted if the load context of the object
changes without this flag being set.
The :meth:`.InstanceEvents.load` event is also available in a
class-method decorator format called :func:`_orm.reconstructor`.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param context: the :class:`.QueryContext` corresponding to the
current :class:`_query.Query` in progress. This argument may be
``None`` if the load does not correspond to a :class:`_query.Query`,
such as during :meth:`.Session.merge`.
.. seealso::
:meth:`.InstanceEvents.init`
:meth:`.InstanceEvents.refresh`
:meth:`.SessionEvents.loaded_as_persistent`
:ref:`mapping_constructors`
"""
def refresh(self, target, context, attrs):
"""Receive an object instance after one or more attributes have
been refreshed from a query.
Contrast this to the :meth:`.InstanceEvents.load` method, which
is invoked when the object is first loaded from a query.
.. note:: This event is invoked within the loader process before
eager loaders may have been completed, and the object's state may
not be complete. Additionally, invoking row-level refresh
operations on the object will place the object into a new loader
context, interfering with the existing load context. See the note
on :meth:`.InstanceEvents.load` for background on making use of the
:paramref:`.InstanceEvents.restore_load_context` parameter, in
order to resolve this scenario.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param context: the :class:`.QueryContext` corresponding to the
current :class:`_query.Query` in progress.
:param attrs: sequence of attribute names which
were populated, or None if all column-mapped, non-deferred
attributes were populated.
.. seealso::
:meth:`.InstanceEvents.load`
"""
def refresh_flush(self, target, flush_context, attrs):
"""Receive an object instance after one or more attributes that
contain a column-level default or onupdate handler have been refreshed
during persistence of the object's state.
This event is the same as :meth:`.InstanceEvents.refresh` except
it is invoked within the unit of work flush process, and includes
only non-primary-key columns that have column level default or
onupdate handlers, including Python callables as well as server side
defaults and triggers which may be fetched via the RETURNING clause.
.. note::
While the :meth:`.InstanceEvents.refresh_flush` event is triggered
for an object that was INSERTed as well as for an object that was
UPDATEd, the event is geared primarily towards the UPDATE process;
it is mostly an internal artifact that INSERT actions can also
trigger this event, and note that **primary key columns for an
INSERTed row are explicitly omitted** from this event. In order to
intercept the newly INSERTed state of an object, the
:meth:`.SessionEvents.pending_to_persistent` and
:meth:`.MapperEvents.after_insert` are better choices.
.. versionadded:: 1.0.5
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
:param attrs: sequence of attribute names which
were populated.
.. seealso::
:ref:`orm_server_defaults`
:ref:`metadata_defaults_toplevel`
"""
def expire(self, target, attrs):
"""Receive an object instance after its attributes or some subset
have been expired.
'attrs' is a sequence of attribute names. If None, the entire
state was expired.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param attrs: sequence of attribute
names which were expired, or None if all attributes were
expired.
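For illustration, a minimal sketch (``SomeClass`` is a hypothetical
mapped class)::

    from sqlalchemy import event

    @event.listens_for(SomeClass, 'expire')
    def on_expire(target, attrs):
        if attrs is None:
            print("fully expired: %r" % (target,))
        else:
            print("expired attributes: %s" % (attrs,))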
"""
def pickle(self, target, state_dict):
"""Receive an object instance when its associated state is
being pickled.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param state_dict: the dictionary returned by
:class:`.InstanceState.__getstate__`, containing the state
to be pickled.
"""
def unpickle(self, target, state_dict):
"""Receive an object instance after its associated state has
been unpickled.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param state_dict: the dictionary sent to
:class:`.InstanceState.__setstate__`, containing the state
dictionary which was pickled.
"""
class _EventsHold(event.RefCollection):
"""Hold onto listeners against unmapped, uninstrumented classes.
Establish _listen() for that class' mapper/instrumentation when
those objects are created for that class.
"""
def __init__(self, class_):
self.class_ = class_
@classmethod
def _clear(cls):
cls.all_holds.clear()
class HoldEvents(object):
_dispatch_target = None
@classmethod
def _listen(
cls, event_key, raw=False, propagate=False, retval=False, **kw
):
target = event_key.dispatch_target
if target.class_ in target.all_holds:
collection = target.all_holds[target.class_]
else:
collection = target.all_holds[target.class_] = {}
event.registry._stored_in_collection(event_key, target)
collection[event_key._key] = (event_key, raw, propagate, retval)
if propagate:
stack = list(target.class_.__subclasses__())
while stack:
subclass = stack.pop(0)
stack.extend(subclass.__subclasses__())
subject = target.resolve(subclass)
if subject is not None:
# we are already going through __subclasses__()
# so leave generic propagate flag False
event_key.with_dispatch_target(subject).listen(
raw=raw, propagate=False, retval=retval, **kw
)
def remove(self, event_key):
target = event_key.dispatch_target
if isinstance(target, _EventsHold):
collection = target.all_holds[target.class_]
del collection[event_key._key]
@classmethod
def populate(cls, class_, subject):
for subclass in class_.__mro__:
if subclass in cls.all_holds:
collection = cls.all_holds[subclass]
for event_key, raw, propagate, retval in collection.values():
if propagate or subclass is class_:
# since we can't be sure in what order different
# classes in a hierarchy are triggered with
# populate(), we rely upon _EventsHold for all event
# assignment, instead of using the generic propagate
# flag.
event_key.with_dispatch_target(subject).listen(
raw=raw, propagate=False, retval=retval
)
class _InstanceEventsHold(_EventsHold):
all_holds = weakref.WeakKeyDictionary()
def resolve(self, class_):
return instrumentation.manager_of_class(class_)
class HoldInstanceEvents(_EventsHold.HoldEvents, InstanceEvents):
pass
dispatch = event.dispatcher(HoldInstanceEvents)
class MapperEvents(event.Events):
"""Define events specific to mappings.
e.g.::
from sqlalchemy import event
def my_before_insert_listener(mapper, connection, target):
# execute a stored procedure upon INSERT,
# apply the value to the row to be inserted
target.calculated_value = connection.scalar(
"select my_special_function(%d)"
% target.special_number)
# associate the listener function with SomeClass,
# to execute during the "before_insert" hook
event.listen(
SomeClass, 'before_insert', my_before_insert_listener)
Available targets include:
* mapped classes
* unmapped superclasses of mapped or to-be-mapped classes
(using the ``propagate=True`` flag)
* :class:`_orm.Mapper` objects
* the :class:`_orm.Mapper` class itself and the :func:`.mapper`
function indicate listening for all mappers.
Mapper events provide hooks into critical sections of the
mapper, including those related to object instrumentation,
object loading, and object persistence. In particular, the
persistence methods :meth:`~.MapperEvents.before_insert`,
and :meth:`~.MapperEvents.before_update` are popular
places to augment the state being persisted - however, these
methods operate with several significant restrictions. The
user is encouraged to evaluate the
:meth:`.SessionEvents.before_flush` and
:meth:`.SessionEvents.after_flush` methods as more
flexible and user-friendly hooks in which to apply
additional database state during a flush.
When using :class:`.MapperEvents`, several modifiers are
available to the :func:`.event.listen` function.
:param propagate=False: When True, the event listener should
be applied to all inheriting mappers and/or the mappers of
inheriting classes, as well as any
mapper which is the target of this listener.
:param raw=False: When True, the "target" argument passed
to applicable event listener functions will be the
instance's :class:`.InstanceState` management
object, rather than the mapped instance itself.
:param retval=False: when True, the user-defined event function
must have a return value, the purpose of which is either to
control subsequent event propagation, or to otherwise alter
the operation in progress by the mapper. Possible return
values are:
* ``sqlalchemy.orm.interfaces.EXT_CONTINUE`` - continue event
processing normally.
* ``sqlalchemy.orm.interfaces.EXT_STOP`` - cancel all subsequent
event handlers in the chain.
* other values - the return value specified by specific listeners.
"""
_target_class_doc = "SomeClass"
_dispatch_target = mapperlib.Mapper
@classmethod
def _new_mapper_instance(cls, class_, mapper):
_MapperEventsHold.populate(class_, mapper)
@classmethod
@util.dependencies("sqlalchemy.orm")
def _accept_with(cls, orm, target):
if target is orm.mapper:
return mapperlib.Mapper
elif isinstance(target, type):
if issubclass(target, mapperlib.Mapper):
return target
else:
mapper = _mapper_or_none(target)
if mapper is not None:
return mapper
else:
return _MapperEventsHold(target)
else:
return target
@classmethod
def _listen(
cls, event_key, raw=False, retval=False, propagate=False, **kw
):
target, identifier, fn = (
event_key.dispatch_target,
event_key.identifier,
event_key._listen_fn,
)
if (
identifier in ("before_configured", "after_configured")
and target is not mapperlib.Mapper
):
util.warn(
"'before_configured' and 'after_configured' ORM events "
"only invoke with the mapper() function or Mapper class "
"as the target."
)
if not raw or not retval:
if not raw:
meth = getattr(cls, identifier)
try:
target_index = (
inspect_getfullargspec(meth)[0].index("target") - 1
)
except ValueError:
target_index = None
def wrap(*arg, **kw):
if not raw and target_index is not None:
arg = list(arg)
arg[target_index] = arg[target_index].obj()
if not retval:
fn(*arg, **kw)
return interfaces.EXT_CONTINUE
else:
return fn(*arg, **kw)
event_key = event_key.with_wrapper(wrap)
if propagate:
for mapper in target.self_and_descendants:
event_key.with_dispatch_target(mapper).base_listen(
propagate=True, **kw
)
else:
event_key.base_listen(**kw)
@classmethod
def _clear(cls):
super(MapperEvents, cls)._clear()
_MapperEventsHold._clear()
def instrument_class(self, mapper, class_):
r"""Receive a class when the mapper is first constructed,
before instrumentation is applied to the mapped class.
This event is the earliest phase of mapper construction.
Most attributes of the mapper are not yet initialized.
This listener can either be applied to the :class:`_orm.Mapper`
class overall, or to any un-mapped class which serves as a base
for classes that will be mapped (using the ``propagate=True`` flag)::
Base = declarative_base()
@event.listens_for(Base, "instrument_class", propagate=True)
def on_new_class(mapper, cls_):
" ... "
:param mapper: the :class:`_orm.Mapper` which is the target
of this event.
:param class\_: the mapped class.
"""
def before_mapper_configured(self, mapper, class_):
"""Called right before a specific mapper is to be configured.
This event is intended to allow a specific mapper to be skipped during
the configure step, by returning the :attr:`.orm.interfaces.EXT_SKIP`
symbol which indicates to the :func:`.configure_mappers` call that this
particular mapper (or hierarchy of mappers, if ``propagate=True`` is
used) should be skipped in the current configuration run. When one or
more mappers are skipped, the "new mappers" flag will remain set,
meaning the :func:`.configure_mappers` function will continue to be
called when mappers are used, to continue to try to configure all
available mappers.
In comparison to the other configure-level events,
:meth:`.MapperEvents.before_configured`,
:meth:`.MapperEvents.after_configured`, and
:meth:`.MapperEvents.mapper_configured`, the
:meth:`.MapperEvents.before_mapper_configured` event provides for a
meaningful return value when it is registered with the ``retval=True``
parameter.
.. versionadded:: 1.3
e.g.::
from sqlalchemy.orm import EXT_SKIP
Base = declarative_base()
DontConfigureBase = declarative_base()
@event.listens_for(
DontConfigureBase,
"before_mapper_configured", retval=True, propagate=True)
def dont_configure(mapper, cls):
return EXT_SKIP
.. seealso::
:meth:`.MapperEvents.before_configured`
:meth:`.MapperEvents.after_configured`
:meth:`.MapperEvents.mapper_configured`
"""
def mapper_configured(self, mapper, class_):
r"""Called when a specific mapper has completed its own configuration
within the scope of the :func:`.configure_mappers` call.
The :meth:`.MapperEvents.mapper_configured` event is invoked
for each mapper that is encountered when the
:func:`_orm.configure_mappers` function proceeds through the current
list of not-yet-configured mappers.
:func:`_orm.configure_mappers` is typically invoked
automatically as mappings are first used, as well as each time
new mappers have been made available and new mapper use is
detected.
When the event is called, the mapper should be in its final
state, but **not including backrefs** that may be invoked from
other mappers; they might still be pending within the
configuration operation. Bidirectional relationships that
are instead configured via the
:paramref:`.orm.relationship.back_populates` argument
*will* be fully available, since this style of relationship does not
rely upon other possibly-not-configured mappers to know that they
exist.
For an event that is guaranteed to have **all** mappers ready
to go including backrefs that are defined only on other
mappings, use the :meth:`.MapperEvents.after_configured`
event; this event invokes only after all known mappings have been
fully configured.
The :meth:`.MapperEvents.mapper_configured` event, unlike
:meth:`.MapperEvents.before_configured` or
:meth:`.MapperEvents.after_configured`,
is called for each mapper/class individually, and the mapper is
passed to the event itself. It also is called exactly once for
a particular mapper. The event is therefore useful for
configurational steps that benefit from being invoked just once
on a specific mapper basis, which don't require that "backref"
configurations are necessarily ready yet.
:param mapper: the :class:`_orm.Mapper` which is the target
of this event.
:param class\_: the mapped class.
.. seealso::
:meth:`.MapperEvents.before_configured`
:meth:`.MapperEvents.after_configured`
:meth:`.MapperEvents.before_mapper_configured`
"""
# TODO: need coverage for this event
def before_configured(self):
"""Called before a series of mappers have been configured.
The :meth:`.MapperEvents.before_configured` event is invoked
each time the :func:`_orm.configure_mappers` function is
invoked, before the function has done any of its work.
:func:`_orm.configure_mappers` is typically invoked
automatically as mappings are first used, as well as each time
new mappers have been made available and new mapper use is
detected.
This event can **only** be applied to the :class:`_orm.Mapper` class
or :func:`.mapper` function, and not to individual mappings or
mapped classes. It is only invoked for all mappings as a whole::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "before_configured")
def go():
# ...
Contrast this event to :meth:`.MapperEvents.after_configured`,
which is invoked after the series of mappers has been configured,
as well as :meth:`.MapperEvents.before_mapper_configured`
and :meth:`.MapperEvents.mapper_configured`, which are both invoked
on a per-mapper basis.
Theoretically this event is called once per
application, but is actually called any time new mappers
are to be affected by a :func:`_orm.configure_mappers`
call. If new mappings are constructed after existing ones have
already been used, this event will likely be called again. To ensure
that a particular event is only called once and no further, the
``once=True`` argument (new in 0.9.4) can be applied::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "before_configured", once=True)
def go():
# ...
.. versionadded:: 0.9.3
.. seealso::
:meth:`.MapperEvents.before_mapper_configured`
:meth:`.MapperEvents.mapper_configured`
:meth:`.MapperEvents.after_configured`
"""
def after_configured(self):
"""Called after a series of mappers have been configured.
The :meth:`.MapperEvents.after_configured` event is invoked
each time the :func:`_orm.configure_mappers` function is
invoked, after the function has completed its work.
:func:`_orm.configure_mappers` is typically invoked
automatically as mappings are first used, as well as each time
new mappers have been made available and new mapper use is
detected.
Contrast this event to the :meth:`.MapperEvents.mapper_configured`
event, which is called on a per-mapper basis while the configuration
operation proceeds; unlike that event, when this event is invoked,
all cross-configurations (e.g. backrefs) will also have been made
available for any mappers that were pending.
Also contrast to :meth:`.MapperEvents.before_configured`,
which is invoked before the series of mappers has been configured.
This event can **only** be applied to the :class:`_orm.Mapper` class
or :func:`.mapper` function, and not to individual mappings or
mapped classes. It is only invoked for all mappings as a whole::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "after_configured")
def go():
# ...
Theoretically this event is called once per
application, but is actually called any time new mappers
have been affected by a :func:`_orm.configure_mappers`
call. If new mappings are constructed after existing ones have
already been used, this event will likely be called again. To ensure
that a particular event is only called once and no further, the
``once=True`` argument (new in 0.9.4) can be applied::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "after_configured", once=True)
def go():
# ...
.. seealso::
:meth:`.MapperEvents.before_mapper_configured`
:meth:`.MapperEvents.mapper_configured`
:meth:`.MapperEvents.before_configured`
"""
def before_insert(self, mapper, connection, target):
"""Receive an object instance before an INSERT statement
is emitted corresponding to that instance.
This event is used to modify local, non-object related
attributes on the instance before an INSERT occurs, as well
as to emit additional SQL statements on the given
connection.
The event is often called for a batch of objects of the
same class before their INSERT statements are emitted at
once in a later step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`_engine.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`_orm.Mapper` which is the target
of this event.
:param connection: the :class:`_engine.Connection` being used to
emit INSERT statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
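For illustration, a minimal sketch that populates a timestamp before
the INSERT proceeds (``SomeClass`` and its ``created_at`` column are
hypothetical)::

    import datetime

    from sqlalchemy import event

    @event.listens_for(SomeClass, 'before_insert')
    def set_created_at(mapper, connection, target):
        # only simple, row-local attribute changes are safe here
        target.created_at = datetime.datetime.utcnow()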
.. seealso::
:ref:`session_persistence_events`
"""
def after_insert(self, mapper, connection, target):
"""Receive an object instance after an INSERT statement
is emitted corresponding to that instance.
This event is used to modify in-Python-only
state on the instance after an INSERT occurs, as well
as to emit additional SQL statements on the given
connection.
The event is often called for a batch of objects of the
same class after their INSERT statements have been
emitted at once in a previous step. In the extremely
rare case that this is not desirable, the
:func:`.mapper` can be configured with ``batch=False``,
which will cause batches of instances to be broken up
into individual (and more poorly performing)
event->persist->event steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`_engine.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`_orm.Mapper` which is the target
of this event.
:param connection: the :class:`_engine.Connection` being used to
emit INSERT statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def before_update(self, mapper, connection, target):
"""Receive an object instance before an UPDATE statement
is emitted corresponding to that instance.
This event is used to modify local, non-object related
attributes on the instance before an UPDATE occurs, as well
as to emit additional SQL statements on the given
connection.
This method is called for all instances that are
marked as "dirty", *even those which have no net changes
to their column-based attributes*. An object is marked
as dirty when any of its column-based attributes have a
"set attribute" operation called or when any of its
collections are modified. If, at update time, no
column-based attributes have any net changes, no UPDATE
statement will be issued. This means that an instance
being sent to :meth:`~.MapperEvents.before_update` is
*not* a guarantee that an UPDATE statement will be
issued, although you can affect the outcome here by
modifying attributes so that a net change in value does
exist.
To detect if the column-based attributes on the object have net
changes, and will therefore generate an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
The event is often called for a batch of objects of the
same class before their UPDATE statements are emitted at
once in a later step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`_engine.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`_orm.Mapper` which is the target
of this event.
:param connection: the :class:`_engine.Connection` being used to
emit UPDATE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
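For illustration, a minimal sketch (``SomeClass`` and its
``updated_at`` column are hypothetical)::

    import datetime

    from sqlalchemy import event
    from sqlalchemy.orm import object_session

    @event.listens_for(SomeClass, 'before_update')
    def set_updated_at(mapper, connection, target):
        # only proceed when column attributes have net changes, i.e.
        # when an UPDATE statement will actually be emitted
        if object_session(target).is_modified(
                target, include_collections=False):
            target.updated_at = datetime.datetime.utcnow()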
.. seealso::
:ref:`session_persistence_events`
"""
def after_update(self, mapper, connection, target):
"""Receive an object instance after an UPDATE statement
is emitted corresponding to that instance.
This event is used to modify in-Python-only
state on the instance after an UPDATE occurs, as well
as to emit additional SQL statements on the given
connection.
This method is called for all instances that are
marked as "dirty", *even those which have no net changes
to their column-based attributes*, and for which
no UPDATE statement has proceeded. An object is marked
as dirty when any of its column-based attributes have a
"set attribute" operation called or when any of its
collections are modified. If, at update time, no
column-based attributes have any net changes, no UPDATE
statement will be issued. This means that an instance
being sent to :meth:`~.MapperEvents.after_update` is
*not* a guarantee that an UPDATE statement has been
issued.
To detect if the column-based attributes on the object have net
changes, and therefore resulted in an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
The event is often called for a batch of objects of the
same class after their UPDATE statements have been emitted at
once in a previous step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`_engine.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`_orm.Mapper` which is the target
of this event.
:param connection: the :class:`_engine.Connection` being used to
emit UPDATE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def before_delete(self, mapper, connection, target):
"""Receive an object instance before a DELETE statement
is emitted corresponding to that instance.
This event is used to emit additional SQL statements on
the given connection as well as to perform application
specific bookkeeping related to a deletion event.
The event is often called for a batch of objects of the
same class before their DELETE statements are emitted at
once in a later step.
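        For example, a minimal sketch of a listener that performs
        bookkeeping with additional SQL (the ``SomeClass`` mapped class and
        the ``archive`` table are illustrative)::

            from sqlalchemy import event

            @event.listens_for(SomeClass, "before_delete")
            def receive_before_delete(mapper, connection, target):
                # emit additional SQL within the same transaction;
                # "archive" is a hypothetical Table object
                connection.execute(
                    archive.insert().values(obj_id=target.id)
                )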
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`_engine.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`_orm.Mapper` which is the target
of this event.
:param connection: the :class:`_engine.Connection` being used to
emit DELETE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being deleted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def after_delete(self, mapper, connection, target):
"""Receive an object instance after a DELETE statement
has been emitted corresponding to that instance.
This event is used to emit additional SQL statements on
the given connection as well as to perform application
specific bookkeeping related to a deletion event.
The event is often called for a batch of objects of the
same class after their DELETE statements have been emitted at
once in a previous step.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`_engine.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`_orm.Mapper` which is the target
of this event.
:param connection: the :class:`_engine.Connection` being used to
emit DELETE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being deleted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
class _MapperEventsHold(_EventsHold):
all_holds = weakref.WeakKeyDictionary()
def resolve(self, class_):
return _mapper_or_none(class_)
class HoldMapperEvents(_EventsHold.HoldEvents, MapperEvents):
pass
dispatch = event.dispatcher(HoldMapperEvents)
_sessionevents_lifecycle_event_names = set()
class SessionEvents(event.Events):
"""Define events specific to :class:`.Session` lifecycle.
e.g.::
from sqlalchemy import event
from sqlalchemy.orm import sessionmaker
def my_before_commit(session):
print("before commit!")
Session = sessionmaker()
event.listen(Session, "before_commit", my_before_commit)
The :func:`~.event.listen` function will accept
:class:`.Session` objects as well as the return result
of :class:`~.sessionmaker()` and :class:`~.scoped_session()`.
Additionally, it accepts the :class:`.Session` class which
will apply listeners to all :class:`.Session` instances
globally.
:param raw=False: When True, the "target" argument passed
to applicable event listener functions that work on individual
objects will be the instance's :class:`.InstanceState` management
object, rather than the mapped instance itself.
.. versionadded:: 1.3.14
:param restore_load_context=False: Applies to the
:meth:`.SessionEvents.loaded_as_persistent` event. Restores the loader
context of the object when the event hook is complete, so that ongoing
eager load operations continue to target the object appropriately. A
warning is emitted if the object is moved to a new loader context from
within this event if this flag is not set.
.. versionadded:: 1.3.14
"""
_target_class_doc = "SomeSessionOrFactory"
_dispatch_target = Session
def _lifecycle_event(fn):
_sessionevents_lifecycle_event_names.add(fn.__name__)
return fn
@classmethod
def _accept_with(cls, target):
if isinstance(target, scoped_session):
target = target.session_factory
if not isinstance(target, sessionmaker) and (
not isinstance(target, type) or not issubclass(target, Session)
):
raise exc.ArgumentError(
"Session event listen on a scoped_session "
"requires that its creation callable "
"is associated with the Session class."
)
if isinstance(target, sessionmaker):
return target.class_
elif isinstance(target, type):
if issubclass(target, scoped_session):
return Session
elif issubclass(target, Session):
return target
elif isinstance(target, Session):
return target
else:
return None
@classmethod
def _listen(cls, event_key, raw=False, restore_load_context=False, **kw):
is_instance_event = (
event_key.identifier in _sessionevents_lifecycle_event_names
)
if is_instance_event:
if not raw or restore_load_context:
fn = event_key._listen_fn
def wrap(session, state, *arg, **kw):
if not raw:
target = state.obj()
if target is None:
# existing behavior is that if the object is
# garbage collected, no event is emitted
return
else:
target = state
if restore_load_context:
runid = state.runid
try:
return fn(session, target, *arg, **kw)
finally:
if restore_load_context:
state.runid = runid
event_key = event_key.with_wrapper(wrap)
event_key.base_listen(**kw)
def after_transaction_create(self, session, transaction):
"""Execute when a new :class:`.SessionTransaction` is created.
This event differs from :meth:`~.SessionEvents.after_begin`
in that it occurs for each :class:`.SessionTransaction`
overall, as opposed to when transactions are begun
on individual database connections. It is also invoked
for nested transactions and subtransactions, and is always
matched by a corresponding
:meth:`~.SessionEvents.after_transaction_end` event
(assuming normal operation of the :class:`.Session`).
:param session: the target :class:`.Session`.
:param transaction: the target :class:`.SessionTransaction`.
To detect if this is the outermost
:class:`.SessionTransaction`, as opposed to a "subtransaction" or a
SAVEPOINT, test that the :attr:`.SessionTransaction.parent` attribute
is ``None``::
@event.listens_for(session, "after_transaction_create")
def after_transaction_create(session, transaction):
if transaction.parent is None:
# work with top-level transaction
To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the
:attr:`.SessionTransaction.nested` attribute::
@event.listens_for(session, "after_transaction_create")
def after_transaction_create(session, transaction):
if transaction.nested:
# work with SAVEPOINT transaction
.. seealso::
:class:`.SessionTransaction`
:meth:`~.SessionEvents.after_transaction_end`
"""
def after_transaction_end(self, session, transaction):
"""Execute when the span of a :class:`.SessionTransaction` ends.
This event differs from :meth:`~.SessionEvents.after_commit`
in that it corresponds to all :class:`.SessionTransaction`
objects in use, including those for nested transactions
and subtransactions, and is always matched by a corresponding
:meth:`~.SessionEvents.after_transaction_create` event.
:param session: the target :class:`.Session`.
:param transaction: the target :class:`.SessionTransaction`.
To detect if this is the outermost
:class:`.SessionTransaction`, as opposed to a "subtransaction" or a
SAVEPOINT, test that the :attr:`.SessionTransaction.parent` attribute
is ``None``::
@event.listens_for(session, "after_transaction_create")
def after_transaction_end(session, transaction):
if transaction.parent is None:
# work with top-level transaction
To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the
:attr:`.SessionTransaction.nested` attribute::
@event.listens_for(session, "after_transaction_create")
def after_transaction_end(session, transaction):
if transaction.nested:
# work with SAVEPOINT transaction
.. seealso::
:class:`.SessionTransaction`
:meth:`~.SessionEvents.after_transaction_create`
"""
def before_commit(self, session):
"""Execute before commit is called.
.. note::
The :meth:`~.SessionEvents.before_commit` hook is *not* per-flush,
that is, the :class:`.Session` can emit SQL to the database
many times within the scope of a transaction.
For interception of these events, use the
:meth:`~.SessionEvents.before_flush`,
:meth:`~.SessionEvents.after_flush`, or
:meth:`~.SessionEvents.after_flush_postexec`
events.
:param session: The target :class:`.Session`.
.. seealso::
:meth:`~.SessionEvents.after_commit`
:meth:`~.SessionEvents.after_begin`
:meth:`~.SessionEvents.after_transaction_create`
:meth:`~.SessionEvents.after_transaction_end`
"""
def after_commit(self, session):
"""Execute after a commit has occurred.
.. note::
The :meth:`~.SessionEvents.after_commit` hook is *not* per-flush,
that is, the :class:`.Session` can emit SQL to the database
many times within the scope of a transaction.
For interception of these events, use the
:meth:`~.SessionEvents.before_flush`,
:meth:`~.SessionEvents.after_flush`, or
:meth:`~.SessionEvents.after_flush_postexec`
events.
.. note::
The :class:`.Session` is not in an active transaction
when the :meth:`~.SessionEvents.after_commit` event is invoked,
and therefore can not emit SQL. To emit SQL corresponding to
every transaction, use the :meth:`~.SessionEvents.before_commit`
event.
:param session: The target :class:`.Session`.
.. seealso::
:meth:`~.SessionEvents.before_commit`
:meth:`~.SessionEvents.after_begin`
:meth:`~.SessionEvents.after_transaction_create`
:meth:`~.SessionEvents.after_transaction_end`
"""
def after_rollback(self, session):
"""Execute after a real DBAPI rollback has occurred.
Note that this event only fires when the *actual* rollback against
the database occurs - it does *not* fire each time the
:meth:`.Session.rollback` method is called, if the underlying
DBAPI transaction has already been rolled back. In many
cases, the :class:`.Session` will not be in
an "active" state during this event, as the current
transaction is not valid. To acquire a :class:`.Session`
which is active after the outermost rollback has proceeded,
use the :meth:`.SessionEvents.after_soft_rollback` event, checking the
:attr:`.Session.is_active` flag.
:param session: The target :class:`.Session`.
"""
def after_soft_rollback(self, session, previous_transaction):
"""Execute after any rollback has occurred, including "soft"
rollbacks that don't actually emit at the DBAPI level.
This corresponds to both nested and outer rollbacks, i.e.
the innermost rollback that calls the DBAPI's
rollback() method, as well as the enclosing rollback
calls that only pop themselves from the transaction stack.
The given :class:`.Session` can be used to invoke SQL and
:meth:`.Session.query` operations after an outermost rollback
by first checking the :attr:`.Session.is_active` flag::
@event.listens_for(Session, "after_soft_rollback")
def do_something(session, previous_transaction):
if session.is_active:
session.execute("select * from some_table")
:param session: The target :class:`.Session`.
:param previous_transaction: The :class:`.SessionTransaction`
transactional marker object which was just closed. The current
:class:`.SessionTransaction` for the given :class:`.Session` is
available via the :attr:`.Session.transaction` attribute.
"""
def before_flush(self, session, flush_context, instances):
"""Execute before flush process has started.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
:param instances: Usually ``None``, this is the collection of
objects which can be passed to the :meth:`.Session.flush` method
(note this usage is deprecated).
.. seealso::
:meth:`~.SessionEvents.after_flush`
:meth:`~.SessionEvents.after_flush_postexec`
:ref:`session_persistence_events`
"""
def after_flush(self, session, flush_context):
"""Execute after flush has completed, but before commit has been
called.
Note that the session's state is still in pre-flush, i.e. 'new',
'dirty', and 'deleted' lists still show pre-flush state as well
as the history settings on instance attributes.
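        For example, a sketch of a listener that reads the still-present
        pre-flush collections::

            from sqlalchemy import event
            from sqlalchemy.orm import Session

            @event.listens_for(Session, "after_flush")
            def receive_after_flush(session, flush_context):
                for obj in session.dirty:
                    print("was updated in this flush: %s" % obj)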
.. warning:: This event runs after the :class:`.Session` has emitted
SQL to modify the database, but **before** it has altered its
internal state to reflect those changes, including that newly
inserted objects are placed into the identity map. ORM operations
emitted within this event such as loads of related items
may produce new identity map entries that will immediately
be replaced, sometimes causing confusing results. SQLAlchemy will
emit a warning for this condition as of version 1.3.9.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
.. seealso::
:meth:`~.SessionEvents.before_flush`
:meth:`~.SessionEvents.after_flush_postexec`
:ref:`session_persistence_events`
"""
def after_flush_postexec(self, session, flush_context):
"""Execute after flush has completed, and after the post-exec
state occurs.
This will be when the 'new', 'dirty', and 'deleted' lists are in
their final state. An actual commit() may or may not have
occurred, depending on whether or not the flush started its own
transaction or participated in a larger transaction.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
.. seealso::
:meth:`~.SessionEvents.before_flush`
:meth:`~.SessionEvents.after_flush`
:ref:`session_persistence_events`
"""
def after_begin(self, session, transaction, connection):
"""Execute after a transaction is begun on a connection
:param session: The target :class:`.Session`.
:param transaction: The :class:`.SessionTransaction`.
:param connection: The :class:`_engine.Connection` object
which will be used for SQL statements.
.. seealso::
:meth:`~.SessionEvents.before_commit`
:meth:`~.SessionEvents.after_commit`
:meth:`~.SessionEvents.after_transaction_create`
:meth:`~.SessionEvents.after_transaction_end`
"""
@_lifecycle_event
def before_attach(self, session, instance):
"""Execute before an instance is attached to a session.
This is called before an add, delete or merge causes
the object to be part of the session.
.. seealso::
:meth:`~.SessionEvents.after_attach`
:ref:`session_lifecycle_events`
"""
@_lifecycle_event
def after_attach(self, session, instance):
"""Execute after an instance is attached to a session.
This is called after an add, delete or merge.
.. note::
As of 0.8, this event fires off *after* the item
has been fully associated with the session, which is
different than previous releases. For event
handlers that require the object not yet
be part of session state (such as handlers which
may autoflush while the target object is not
yet complete) consider the
new :meth:`.before_attach` event.
.. seealso::
:meth:`~.SessionEvents.before_attach`
:ref:`session_lifecycle_events`
"""
@event._legacy_signature(
"0.9",
["session", "query", "query_context", "result"],
lambda update_context: (
update_context.session,
update_context.query,
update_context.context,
update_context.result,
),
)
def after_bulk_update(self, update_context):
"""Execute after a bulk update operation to the session.
This is called as a result of the :meth:`_query.Query.update` method.
:param update_context: an "update context" object which contains
details about the update, including these attributes:
         * ``session`` - the :class:`.Session` involved
         * ``query`` - the :class:`_query.Query` object that this update
           operation was called upon.
         * ``values`` - the "values" dictionary that was passed to
           :meth:`_query.Query.update`.
         * ``context`` - the :class:`.QueryContext` object, corresponding
           to the invocation of an ORM query.
         * ``result`` - the :class:`_engine.ResultProxy` returned as a
           result of the bulk UPDATE operation.
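        For example, a sketch of a listener that reads the attributes
        above::

            from sqlalchemy import event
            from sqlalchemy.orm import Session

            @event.listens_for(Session, "after_bulk_update")
            def receive_after_bulk_update(update_context):
                print(
                    "bulk UPDATE matched %d rows"
                    % update_context.result.rowcount
                )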
.. seealso::
:meth:`.QueryEvents.before_compile_update`
:meth:`.SessionEvents.after_bulk_delete`
"""
@event._legacy_signature(
"0.9",
["session", "query", "query_context", "result"],
lambda delete_context: (
delete_context.session,
delete_context.query,
delete_context.context,
delete_context.result,
),
)
def after_bulk_delete(self, delete_context):
"""Execute after a bulk delete operation to the session.
This is called as a result of the :meth:`_query.Query.delete` method.
:param delete_context: a "delete context" object which contains
details about the update, including these attributes:
* ``session`` - the :class:`.Session` involved
* ``query`` -the :class:`_query.Query`
object that this update operation
was called upon.
* ``context`` The :class:`.QueryContext` object, corresponding
to the invocation of an ORM query.
* ``result`` the :class:`_engine.ResultProxy`
returned as a result of the
bulk DELETE operation.
.. seealso::
:meth:`.QueryEvents.before_compile_delete`
:meth:`.SessionEvents.after_bulk_update`
"""
@_lifecycle_event
def transient_to_pending(self, session, instance):
"""Intercept the "transient to pending" transition for a specific object.
This event is a specialization of the
:meth:`.SessionEvents.after_attach` event which is only invoked
for this specific transition. It is invoked typically during the
:meth:`.Session.add` call.
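        A minimal sketch of a listener for this transition::

            from sqlalchemy import event
            from sqlalchemy.orm import Session

            @event.listens_for(Session, "transient_to_pending")
            def receive_transient_to_pending(session, instance):
                print("now pending: %s" % instance)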
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
@_lifecycle_event
def pending_to_transient(self, session, instance):
"""Intercept the "pending to transient" transition for a specific object.
        This less common transition occurs when a pending object that has
not been flushed is evicted from the session; this can occur
when the :meth:`.Session.rollback` method rolls back the transaction,
or when the :meth:`.Session.expunge` method is used.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
@_lifecycle_event
def persistent_to_transient(self, session, instance):
"""Intercept the "persistent to transient" transition for a specific object.
        This less common transition occurs when a pending object that
        has been flushed is evicted from the session; this can occur
when the :meth:`.Session.rollback` method rolls back the transaction.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
@_lifecycle_event
def pending_to_persistent(self, session, instance):
"""Intercept the "pending to persistent"" transition for a specific object.
This event is invoked within the flush process, and is
similar to scanning the :attr:`.Session.new` collection within
the :meth:`.SessionEvents.after_flush` event. However, in this
case the object has already been moved to the persistent state
when the event is called.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
@_lifecycle_event
def detached_to_persistent(self, session, instance):
"""Intercept the "detached to persistent" transition for a specific object.
This event is a specialization of the
:meth:`.SessionEvents.after_attach` event which is only invoked
for this specific transition. It is invoked typically during the
:meth:`.Session.add` call, as well as during the
:meth:`.Session.delete` call if the object was not previously
associated with the
:class:`.Session` (note that an object marked as "deleted" remains
in the "persistent" state until the flush proceeds).
.. note::
If the object becomes persistent as part of a call to
:meth:`.Session.delete`, the object is **not** yet marked as
deleted when this event is called. To detect deleted objects,
            check the ``deleted`` flag sent to the
            :meth:`.SessionEvents.persistent_to_detached` event after the
flush proceeds, or check the :attr:`.Session.deleted` collection
within the :meth:`.SessionEvents.before_flush` event if deleted
objects need to be intercepted before the flush.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
@_lifecycle_event
def loaded_as_persistent(self, session, instance):
"""Intercept the "loaded as persistent" transition for a specific object.
This event is invoked within the ORM loading process, and is invoked
very similarly to the :meth:`.InstanceEvents.load` event. However,
the event here is linkable to a :class:`.Session` class or instance,
rather than to a mapper or class hierarchy, and integrates
with the other session lifecycle events smoothly. The object
is guaranteed to be present in the session's identity map when
this event is called.
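        A minimal sketch of a listener for this event::

            from sqlalchemy import event
            from sqlalchemy.orm import Session

            @event.listens_for(Session, "loaded_as_persistent")
            def receive_loaded_as_persistent(session, instance):
                print("loaded as persistent: %s" % instance)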
.. note:: This event is invoked within the loader process before
eager loaders may have been completed, and the object's state may
not be complete. Additionally, invoking row-level refresh
operations on the object will place the object into a new loader
context, interfering with the existing load context. See the note
on :meth:`.InstanceEvents.load` for background on making use of the
:paramref:`.SessionEvents.restore_load_context` parameter, which
works in the same manner as that of
:paramref:`.InstanceEvents.restore_load_context`, in order to
resolve this scenario.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
@_lifecycle_event
def persistent_to_deleted(self, session, instance):
"""Intercept the "persistent to deleted" transition for a specific object.
This event is invoked when a persistent object's identity
is deleted from the database within a flush, however the object
still remains associated with the :class:`.Session` until the
transaction completes.
If the transaction is rolled back, the object moves again
to the persistent state, and the
:meth:`.SessionEvents.deleted_to_persistent` event is called.
If the transaction is committed, the object becomes detached,
which will emit the :meth:`.SessionEvents.deleted_to_detached`
event.
Note that while the :meth:`.Session.delete` method is the primary
public interface to mark an object as deleted, many objects
get deleted due to cascade rules, which are not always determined
until flush time. Therefore, there's no way to catch
        every object that will be deleted until the flush has proceeded;
        the :meth:`.SessionEvents.persistent_to_deleted` event is therefore
        invoked at the end of a flush.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
@_lifecycle_event
def deleted_to_persistent(self, session, instance):
"""Intercept the "deleted to persistent" transition for a specific object.
This transition occurs only when an object that's been deleted
successfully in a flush is restored due to a call to
:meth:`.Session.rollback`. The event is not called under
any other circumstances.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
@_lifecycle_event
def deleted_to_detached(self, session, instance):
"""Intercept the "deleted to detached" transition for a specific object.
This event is invoked when a deleted object is evicted
from the session. The typical case when this occurs is when
the transaction for a :class:`.Session` in which the object
was deleted is committed; the object moves from the deleted
state to the detached state.
It is also invoked for objects that were deleted in a flush
when the :meth:`.Session.expunge_all` or :meth:`.Session.close`
events are called, as well as if the object is individually
expunged from its deleted state via :meth:`.Session.expunge`.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
@_lifecycle_event
def persistent_to_detached(self, session, instance):
"""Intercept the "persistent to detached" transition for a specific object.
This event is invoked when a persistent object is evicted
from the session. There are many conditions that cause this
to happen, including:
* using a method such as :meth:`.Session.expunge`
or :meth:`.Session.close`
* Calling the :meth:`.Session.rollback` method, when the object
was part of an INSERT statement for that session's transaction
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
:param deleted: boolean. If True, indicates this object moved
to the detached state because it was marked as deleted and flushed.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
class AttributeEvents(event.Events):
r"""Define events for object attributes.
These are typically defined on the class-bound descriptor for the
target class.
e.g.::
from sqlalchemy import event
@event.listens_for(MyClass.collection, 'append', propagate=True)
def my_append_listener(target, value, initiator):
print("received append event for target: %s" % target)
Listeners have the option to return a possibly modified version of the
value, when the :paramref:`.AttributeEvents.retval` flag is passed to
:func:`.event.listen` or :func:`.event.listens_for`::
def validate_phone(target, value, oldvalue, initiator):
"Strip non-numeric characters from a phone number"
return re.sub(r'\D', '', value)
# setup listener on UserContact.phone attribute, instructing
# it to use the return value
listen(UserContact.phone, 'set', validate_phone, retval=True)
A validation function like the above can also raise an exception
such as :exc:`ValueError` to halt the operation.
The :paramref:`.AttributeEvents.propagate` flag is also important when
applying listeners to mapped classes that also have mapped subclasses,
as when using mapper inheritance patterns::
@event.listens_for(MySuperClass.attr, 'set', propagate=True)
def receive_set(target, value, initiator):
print("value set: %s" % target)
The full list of modifiers available to the :func:`.event.listen`
and :func:`.event.listens_for` functions are below.
:param active_history=False: When True, indicates that the
"set" event would like to receive the "old" value being
replaced unconditionally, even if this requires firing off
database loads. Note that ``active_history`` can also be
set directly via :func:`.column_property` and
:func:`_orm.relationship`.
:param propagate=False: When True, the listener function will
be established not just for the class attribute given, but
for attributes of the same name on all current subclasses
of that class, as well as all future subclasses of that
class, using an additional listener that listens for
instrumentation events.
:param raw=False: When True, the "target" argument to the
event will be the :class:`.InstanceState` management
object, rather than the mapped instance itself.
:param retval=False: when True, the user-defined event
listening must return the "value" argument from the
function. This gives the listening function the opportunity
to change the value that is ultimately used for a "set"
or "append" event.
"""
_target_class_doc = "SomeClass.some_attribute"
_dispatch_target = QueryableAttribute
@staticmethod
def _set_dispatch(cls, dispatch_cls):
dispatch = event.Events._set_dispatch(cls, dispatch_cls)
dispatch_cls._active_history = False
return dispatch
@classmethod
def _accept_with(cls, target):
# TODO: coverage
if isinstance(target, interfaces.MapperProperty):
return getattr(target.parent.class_, target.key)
else:
return target
@classmethod
def _listen(
cls,
event_key,
active_history=False,
raw=False,
retval=False,
propagate=False,
):
target, fn = event_key.dispatch_target, event_key._listen_fn
if active_history:
target.dispatch._active_history = True
if not raw or not retval:
def wrap(target, *arg):
if not raw:
target = target.obj()
if not retval:
if arg:
value = arg[0]
else:
value = None
fn(target, *arg)
return value
else:
return fn(target, *arg)
event_key = event_key.with_wrapper(wrap)
event_key.base_listen(propagate=propagate)
if propagate:
manager = instrumentation.manager_of_class(target.class_)
for mgr in manager.subclass_managers(True):
event_key.with_dispatch_target(mgr[target.key]).base_listen(
propagate=True
)
if active_history:
mgr[target.key].dispatch._active_history = True
def append(self, target, value, initiator):
"""Receive a collection append event.
The append event is invoked for each element as it is appended
to the collection. This occurs for single-item appends as well
as for a "bulk replace" operation.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being appended. If this listener
is registered with ``retval=True``, the listener
function must return this value, or a new value which
replaces it.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event. May be modified
from its original value by backref handlers in order to control
chained event propagation, as well as be inspected for information
about the source of the event.
:return: if the event was registered with ``retval=True``,
the given value, or a new effective value, should be returned.
.. seealso::
:class:`.AttributeEvents` - background on listener options such
as propagation to subclasses.
:meth:`.AttributeEvents.bulk_replace`
"""
def bulk_replace(self, target, values, initiator):
"""Receive a collection 'bulk replace' event.
This event is invoked for a sequence of values as they are incoming
to a bulk collection set operation, which can be
modified in place before the values are treated as ORM objects.
This is an "early hook" that runs before the bulk replace routine
attempts to reconcile which objects are already present in the
collection and which are being removed by the net replace operation.
It is typical that this method be combined with use of the
:meth:`.AttributeEvents.append` event. When using both of these
events, note that a bulk replace operation will invoke
the :meth:`.AttributeEvents.append` event for all new items,
even after :meth:`.AttributeEvents.bulk_replace` has been invoked
for the collection as a whole. In order to determine if an
:meth:`.AttributeEvents.append` event is part of a bulk replace,
use the symbol :attr:`~.attributes.OP_BULK_REPLACE` to test the
incoming initiator::
from sqlalchemy.orm.attributes import OP_BULK_REPLACE
@event.listens_for(SomeObject.collection, "bulk_replace")
def process_collection(target, values, initiator):
values[:] = [_make_value(value) for value in values]
@event.listens_for(SomeObject.collection, "append", retval=True)
def process_collection(target, value, initiator):
# make sure bulk_replace didn't already do it
if initiator is None or initiator.op is not OP_BULK_REPLACE:
return _make_value(value)
else:
return value
.. versionadded:: 1.2
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
        :param values: a sequence (e.g. a list) of the values being set. The
handler can modify this list in place.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event.
.. seealso::
:class:`.AttributeEvents` - background on listener options such
as propagation to subclasses.
"""
def remove(self, target, value, initiator):
"""Receive a collection remove event.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being removed.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event. May be modified
from its original value by backref handlers in order to control
chained event propagation.
.. versionchanged:: 0.9.0 the ``initiator`` argument is now
passed as a :class:`.attributes.Event` object, and may be
modified by backref handlers within a chain of backref-linked
events.
:return: No return value is defined for this event.
.. seealso::
:class:`.AttributeEvents` - background on listener options such
as propagation to subclasses.
"""
def set(self, target, value, oldvalue, initiator):
"""Receive a scalar set event.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being set. If this listener
is registered with ``retval=True``, the listener
function must return this value, or a new value which
replaces it.
:param oldvalue: the previous value being replaced. This
may also be the symbol ``NEVER_SET`` or ``NO_VALUE``.
If the listener is registered with ``active_history=True``,
the previous value of the attribute will be loaded from
the database if the existing value is currently unloaded
or expired.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event. May be modified
from its original value by backref handlers in order to control
chained event propagation.
.. versionchanged:: 0.9.0 the ``initiator`` argument is now
passed as a :class:`.attributes.Event` object, and may be
modified by backref handlers within a chain of backref-linked
events.
:return: if the event was registered with ``retval=True``,
the given value, or a new effective value, should be returned.
.. seealso::
:class:`.AttributeEvents` - background on listener options such
as propagation to subclasses.
"""
def init_scalar(self, target, value, dict_):
r"""Receive a scalar "init" event.
This event is invoked when an uninitialized, unpersisted scalar
attribute is accessed, e.g. read::
x = my_object.some_attribute
The ORM's default behavior when this occurs for an un-initialized
attribute is to return the value ``None``; note this differs from
Python's usual behavior of raising ``AttributeError``. The
event here can be used to customize what value is actually returned,
with the assumption that the event listener would be mirroring
a default generator that is configured on the Core
:class:`_schema.Column`
object as well.
Since a default generator on a :class:`_schema.Column`
might also produce
a changing value such as a timestamp, the
:meth:`.AttributeEvents.init_scalar`
event handler can also be used to **set** the newly returned value, so
that a Core-level default generation function effectively fires off
only once, but at the moment the attribute is accessed on the
non-persisted object. Normally, no change to the object's state
is made when an uninitialized attribute is accessed (much older
SQLAlchemy versions did in fact change the object's state).
If a default generator on a column returned a particular constant,
a handler might be used as follows::
SOME_CONSTANT = 3.1415926
class MyClass(Base):
# ...
some_attribute = Column(Numeric, default=SOME_CONSTANT)
@event.listens_for(
MyClass.some_attribute, "init_scalar",
retval=True, propagate=True)
            def _init_some_attribute(target, value, dict_):
dict_['some_attribute'] = SOME_CONSTANT
return SOME_CONSTANT
Above, we initialize the attribute ``MyClass.some_attribute`` to the
value of ``SOME_CONSTANT``. The above code includes the following
features:
* By setting the value ``SOME_CONSTANT`` in the given ``dict_``,
we indicate that this value is to be persisted to the database.
This supersedes the use of ``SOME_CONSTANT`` in the default generator
for the :class:`_schema.Column`. The ``active_column_defaults.py``
example given at :ref:`examples_instrumentation` illustrates using
the same approach for a changing default, e.g. a timestamp
generator. In this particular example, it is not strictly
necessary to do this since ``SOME_CONSTANT`` would be part of the
INSERT statement in either case.
* By establishing the ``retval=True`` flag, the value we return
from the function will be returned by the attribute getter.
Without this flag, the event is assumed to be a passive observer
and the return value of our function is ignored.
* The ``propagate=True`` flag is significant if the mapped class
includes inheriting subclasses, which would also make use of this
event listener. Without this flag, an inheriting subclass will
not use our event handler.
In the above example, the attribute set event
:meth:`.AttributeEvents.set` as well as the related validation feature
provided by :obj:`_orm.validates` is **not** invoked when we apply our
        value to the given ``dict_``. To have these events invoked in
response to our newly generated value, apply the value to the given
object as a normal attribute set operation::
SOME_CONSTANT = 3.1415926
@event.listens_for(
MyClass.some_attribute, "init_scalar",
retval=True, propagate=True)
            def _init_some_attribute(target, value, dict_):
# will also fire off attribute set events
target.some_attribute = SOME_CONSTANT
return SOME_CONSTANT
When multiple listeners are set up, the generation of the value
is "chained" from one listener to the next by passing the value
returned by the previous listener that specifies ``retval=True``
as the ``value`` argument of the next listener.
.. versionadded:: 1.1
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value that is to be returned before this event
         listener is invoked. This value begins as the value ``None``,
however will be the return value of the previous event handler
function if multiple listeners are present.
:param dict\_: the attribute dictionary of this mapped object.
This is normally the ``__dict__`` of the object, but in all cases
represents the destination that the attribute system uses to get
at the actual value of this attribute. Placing the value in this
dictionary has the effect that the value will be used in the
INSERT statement generated by the unit of work.
.. seealso::
:class:`.AttributeEvents` - background on listener options such
as propagation to subclasses.
:ref:`examples_instrumentation` - see the
``active_column_defaults.py`` example.
"""
def init_collection(self, target, collection, collection_adapter):
"""Receive a 'collection init' event.
This event is triggered for a collection-based attribute, when
the initial "empty collection" is first generated for a blank
attribute, as well as for when the collection is replaced with
a new one, such as via a set event.
E.g., given that ``User.addresses`` is a relationship-based
collection, the event is triggered here::
u1 = User()
u1.addresses.append(a1) # <- new collection
and also during replace operations::
u1.addresses = [a2, a3] # <- new collection
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param collection: the new collection. This will always be generated
from what was specified as
:paramref:`_orm.relationship.collection_class`, and will always
be empty.
:param collection_adapter: the :class:`.CollectionAdapter` that will
mediate internal access to the collection.
.. versionadded:: 1.0.0 the :meth:`.AttributeEvents.init_collection`
and :meth:`.AttributeEvents.dispose_collection` events supersede
the :class:`.orm.collection.linker` hook.
.. seealso::
:class:`.AttributeEvents` - background on listener options such
as propagation to subclasses.
"""
def dispose_collection(self, target, collection, collection_adapter):
"""Receive a 'collection dispose' event.
This event is triggered for a collection-based attribute when
a collection is replaced, that is::
u1.addresses.append(a1)
u1.addresses = [a2, a3] # <- old collection is disposed
The old collection received will contain its previous contents.
.. versionchanged:: 1.2 The collection passed to
:meth:`.AttributeEvents.dispose_collection` will now have its
contents before the dispose intact; previously, the collection
would be empty.
.. versionadded:: 1.0.0 the :meth:`.AttributeEvents.init_collection`
and :meth:`.AttributeEvents.dispose_collection` events supersede
the :class:`.collection.linker` hook.
.. seealso::
:class:`.AttributeEvents` - background on listener options such
as propagation to subclasses.
"""
def modified(self, target, initiator):
"""Receive a 'modified' event.
This event is triggered when the :func:`.attributes.flag_modified`
function is used to trigger a modify event on an attribute without
any specific value being set.
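        A sketch of a listener paired with a call to
        :func:`.attributes.flag_modified` (the ``SomeClass.data`` attribute
        is illustrative)::

            from sqlalchemy import event
            from sqlalchemy.orm.attributes import flag_modified

            @event.listens_for(SomeClass.data, "modified")
            def receive_modified(target, initiator):
                print("flag_modified() called for %s" % target)

            some_object.data["key"] = "new"     # not detected automatically
            flag_modified(some_object, "data")  # invokes the event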
.. versionadded:: 1.2
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event.
.. seealso::
:class:`.AttributeEvents` - background on listener options such
as propagation to subclasses.
"""
class QueryEvents(event.Events):
"""Represent events within the construction of a :class:`_query.Query`
object.
The events here are intended to be used with an as-yet-unreleased
inspection system for :class:`_query.Query`. Some very basic operations
are possible now, however the inspection system is intended to allow
complex query manipulations to be automated.
.. versionadded:: 1.0.0
"""
_target_class_doc = "SomeQuery"
_dispatch_target = Query
def before_compile(self, query):
"""Receive the :class:`_query.Query`
object before it is composed into a
core :class:`_expression.Select` object.
This event is intended to allow changes to the query given::
@event.listens_for(Query, "before_compile", retval=True)
def no_deleted(query):
for desc in query.column_descriptions:
if desc['type'] is User:
entity = desc['entity']
query = query.filter(entity.deleted == False)
return query
The event should normally be listened with the ``retval=True``
parameter set, so that the modified query may be returned.
The :meth:`.QueryEvents.before_compile` event by default
will disallow "baked" queries from caching a query, if the event
hook returns a new :class:`_query.Query` object.
This affects both direct
use of the baked query extension as well as its operation within
lazy loaders and eager loaders for relationships. In order to
re-establish the query being cached, apply the event adding the
``bake_ok`` flag::
@event.listens_for(
Query, "before_compile", retval=True, bake_ok=True)
def my_event(query):
for desc in query.column_descriptions:
if desc['type'] is User:
entity = desc['entity']
query = query.filter(entity.deleted == False)
return query
When ``bake_ok`` is set to True, the event hook will only be invoked
once, and not called for subsequent invocations of a particular query
that is being cached.
.. versionadded:: 1.3.11 - added the "bake_ok" flag to the
:meth:`.QueryEvents.before_compile` event and disallowed caching via
the "baked" extension from occurring for event handlers that
return a new :class:`_query.Query` object if this flag is not set.
.. seealso::
:meth:`.QueryEvents.before_compile_update`
:meth:`.QueryEvents.before_compile_delete`
:ref:`baked_with_before_compile`
"""
def before_compile_update(self, query, update_context):
"""Allow modifications to the :class:`_query.Query` object within
:meth:`_query.Query.update`.
Like the :meth:`.QueryEvents.before_compile` event, if the event
is to be used to alter the :class:`_query.Query` object, it should
be configured with ``retval=True``, and the modified
:class:`_query.Query` object returned, as in ::
@event.listens_for(Query, "before_compile_update", retval=True)
def no_deleted(query, update_context):
for desc in query.column_descriptions:
if desc['type'] is User:
entity = desc['entity']
query = query.filter(entity.deleted == False)
update_context.values['timestamp'] = datetime.utcnow()
return query
The ``.values`` dictionary of the "update context" object can also
be modified in place as illustrated above.
:param query: a :class:`_query.Query` instance; this is also
the ``.query`` attribute of the given "update context"
object.
:param update_context: an "update context" object which is
the same kind of object as described in
:paramref:`.QueryEvents.after_bulk_update.update_context`.
The object has a ``.values`` attribute in an UPDATE context which is
the dictionary of parameters passed to :meth:`_query.Query.update`.
This
dictionary can be modified to alter the VALUES clause of the
resulting UPDATE statement.
.. versionadded:: 1.2.17
.. seealso::
:meth:`.QueryEvents.before_compile`
:meth:`.QueryEvents.before_compile_delete`
"""
def before_compile_delete(self, query, delete_context):
"""Allow modifications to the :class:`_query.Query` object within
:meth:`_query.Query.delete`.
Like the :meth:`.QueryEvents.before_compile` event, this event
should be configured with ``retval=True``, and the modified
:class:`_query.Query` object returned, as in ::
@event.listens_for(Query, "before_compile_delete", retval=True)
def no_deleted(query, delete_context):
for desc in query.column_descriptions:
if desc['type'] is User:
entity = desc['entity']
query = query.filter(entity.deleted == False)
return query
:param query: a :class:`_query.Query` instance; this is also
the ``.query`` attribute of the given "delete context"
object.
:param delete_context: a "delete context" object which is
the same kind of object as described in
:paramref:`.QueryEvents.after_bulk_delete.delete_context`.
.. versionadded:: 1.2.17
.. seealso::
:meth:`.QueryEvents.before_compile`
:meth:`.QueryEvents.before_compile_update`
"""
@classmethod
def _listen(cls, event_key, retval=False, bake_ok=False, **kw):
fn = event_key._listen_fn
if not retval:
def wrap(*arg, **kw):
if not retval:
query = arg[0]
fn(*arg, **kw)
return query
else:
return fn(*arg, **kw)
event_key = event_key.with_wrapper(wrap)
else:
# don't assume we can apply an attribute to the callable
def wrap(*arg, **kw):
return fn(*arg, **kw)
event_key = event_key.with_wrapper(wrap)
wrap._bake_ok = bake_ok
event_key.base_listen(**kw)
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/mapper.py
# orm/mapper.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Logic to map Python classes to and from selectables.
Defines the :class:`~sqlalchemy.orm.mapper.Mapper` class, the central
configurational unit which associates a class with a database table.
This is a semi-private module; the main configurational API of the ORM is
available in :mod:`~sqlalchemy.orm`.
"""
from __future__ import absolute_import
from collections import deque
from itertools import chain
import sys
import types
import weakref
from . import attributes
from . import exc as orm_exc
from . import instrumentation
from . import loading
from . import properties
from . import util as orm_util
from .base import _class_to_mapper
from .base import _INSTRUMENTOR
from .base import _state_mapper
from .base import class_mapper
from .base import state_str
from .interfaces import _MappedAttribute
from .interfaces import EXT_SKIP
from .interfaces import InspectionAttr
from .interfaces import MapperProperty
from .path_registry import PathRegistry
from .. import event
from .. import exc as sa_exc
from .. import inspection
from .. import log
from .. import schema
from .. import sql
from .. import util
from ..sql import expression
from ..sql import operators
from ..sql import util as sql_util
from ..sql import visitors
_mapper_registry = weakref.WeakKeyDictionary()
_already_compiling = False
_memoized_configured_property = util.group_expirable_memoized_property()
# a constant returned by _get_attr_by_column to indicate
# this mapper is not handling an attribute for a particular
# column
NO_ATTRIBUTE = util.symbol("NO_ATTRIBUTE")
# lock used to synchronize the "mapper configure" step
_CONFIGURE_MUTEX = util.threading.RLock()
@inspection._self_inspects
@log.class_logger
class Mapper(InspectionAttr):
"""Define the correlation of class attributes to database table
columns.
The :class:`_orm.Mapper` object is instantiated using the
:func:`~sqlalchemy.orm.mapper` function. For information
about instantiating new :class:`_orm.Mapper` objects, see
that function's documentation.
When :func:`.mapper` is used
explicitly to link a user defined class with table
metadata, this is referred to as *classical mapping*.
Modern SQLAlchemy usage tends to favor the
:mod:`sqlalchemy.ext.declarative` extension for class
configuration, which
    makes use of :func:`.mapper` behind the scenes.
Given a particular class known to be mapped by the ORM,
the :class:`_orm.Mapper` which maintains it can be acquired
using the :func:`_sa.inspect` function::
from sqlalchemy import inspect
mapper = inspect(MyClass)
A class which was mapped by the :mod:`sqlalchemy.ext.declarative`
extension will also have its mapper available via the ``__mapper__``
attribute.
"""
_new_mappers = False
_dispose_called = False
@util.deprecated_params(
extension=(
"0.7",
":class:`.MapperExtension` is deprecated in favor of the "
":class:`.MapperEvents` listener interface. The "
":paramref:`.mapper.extension` parameter will be "
"removed in a future release.",
),
order_by=(
"1.1",
"The :paramref:`.mapper.order_by` parameter "
"is deprecated, and will be removed in a future release. "
"Use :meth:`_query.Query.order_by` "
"to determine the ordering of a "
"result set.",
),
non_primary=(
"1.3",
"The :paramref:`.mapper.non_primary` parameter is deprecated, "
"and will be removed in a future release. The functionality "
"of non primary mappers is now better suited using the "
":class:`.AliasedClass` construct, which can also be used "
"as the target of a :func:`_orm.relationship` in 1.3.",
),
)
def __init__(
self,
class_,
local_table=None,
properties=None,
primary_key=None,
non_primary=False,
inherits=None,
inherit_condition=None,
inherit_foreign_keys=None,
extension=None,
order_by=False,
always_refresh=False,
version_id_col=None,
version_id_generator=None,
polymorphic_on=None,
_polymorphic_map=None,
polymorphic_identity=None,
concrete=False,
with_polymorphic=None,
polymorphic_load=None,
allow_partial_pks=True,
batch=True,
column_prefix=None,
include_properties=None,
exclude_properties=None,
passive_updates=True,
passive_deletes=False,
confirm_deleted_rows=True,
eager_defaults=False,
legacy_is_orphan=False,
_compiled_cache_size=100,
):
r"""Return a new :class:`_orm.Mapper` object.
This function is typically used behind the scenes
via the Declarative extension. When using Declarative,
many of the usual :func:`.mapper` arguments are handled
by the Declarative extension itself, including ``class_``,
``local_table``, ``properties``, and ``inherits``.
Other options are passed to :func:`.mapper` using
the ``__mapper_args__`` class variable::
class MyClass(Base):
__tablename__ = 'my_table'
id = Column(Integer, primary_key=True)
type = Column(String(50))
alt = Column("some_alt", Integer)
__mapper_args__ = {
'polymorphic_on' : type
}
Explicit use of :func:`.mapper`
is often referred to as *classical mapping*. The above
declarative example is equivalent in classical form to::
my_table = Table("my_table", metadata,
Column('id', Integer, primary_key=True),
Column('type', String(50)),
Column("some_alt", Integer)
)
class MyClass(object):
pass
mapper(MyClass, my_table,
polymorphic_on=my_table.c.type,
properties={
'alt':my_table.c.some_alt
})
.. seealso::
:ref:`classical_mapping` - discussion of direct usage of
:func:`.mapper`
:param class\_: The class to be mapped. When using Declarative,
this argument is automatically passed as the declared class
itself.
:param local_table: The :class:`_schema.Table` or other selectable
to which the class is mapped. May be ``None`` if
this mapper inherits from another mapper using single-table
inheritance. When using Declarative, this argument is
automatically passed by the extension, based on what
is configured via the ``__table__`` argument or via the
:class:`_schema.Table`
produced as a result of the ``__tablename__``
and :class:`_schema.Column` arguments present.
:param always_refresh: If True, all query operations for this mapped
class will overwrite all data within object instances that already
exist within the session, erasing any in-memory changes with
whatever information was loaded from the database. Usage of this
flag is highly discouraged; as an alternative, see the method
:meth:`_query.Query.populate_existing`.
:param allow_partial_pks: Defaults to True. Indicates that a
composite primary key with some NULL values should be considered as
possibly existing within the database. This affects whether a
mapper will assign an incoming row to an existing identity, as well
as if :meth:`.Session.merge` will check the database first for a
particular primary key value. A "partial primary key" can occur if
one has mapped to an OUTER JOIN, for example.
:param batch: Defaults to ``True``, indicating that save operations
of multiple entities can be batched together for efficiency.
Setting to False indicates
that an instance will be fully saved before saving the next
instance. This is used in the extremely rare case that a
:class:`.MapperEvents` listener requires being called
in between individual row persistence operations.
:param column_prefix: A string which will be prepended
to the mapped attribute name when :class:`_schema.Column`
objects are automatically assigned as attributes to the
mapped class. Does not affect explicitly specified
column-based properties.
See the section :ref:`column_prefix` for an example.
:param concrete: If True, indicates this mapper should use concrete
table inheritance with its parent mapper.
See the section :ref:`concrete_inheritance` for an example.
:param confirm_deleted_rows: defaults to True; when a DELETE occurs
          of one or more rows based on specific primary keys, a warning is
emitted when the number of rows matched does not equal the number
of rows expected. This parameter may be set to False to handle the
case where database ON DELETE CASCADE rules may be deleting some of
those rows automatically. The warning may be changed to an
exception in a future release.
.. versionadded:: 0.9.4 - added
:paramref:`.mapper.confirm_deleted_rows` as well as conditional
matched row checking on delete.
:param eager_defaults: if True, the ORM will immediately fetch the
value of server-generated default values after an INSERT or UPDATE,
rather than leaving them as expired to be fetched on next access.
This can be used for event schemes where the server-generated values
are needed immediately before the flush completes. By default,
this scheme will emit an individual ``SELECT`` statement per row
          inserted or updated, which can add significant performance
overhead. However, if the
target database supports :term:`RETURNING`, the default values will
be returned inline with the INSERT or UPDATE statement, which can
greatly enhance performance for an application that needs frequent
access to just-generated server defaults.
.. seealso::
:ref:`orm_server_defaults`
.. versionchanged:: 0.9.0 The ``eager_defaults`` option can now
make use of :term:`RETURNING` for backends which support it.
:param exclude_properties: A list or set of string column names to
be excluded from mapping.
See :ref:`include_exclude_cols` for an example.
:param extension: A :class:`.MapperExtension` instance or
list of :class:`.MapperExtension` instances which will be applied
to all operations by this :class:`_orm.Mapper`.
:param include_properties: An inclusive list or set of string column
names to map.
See :ref:`include_exclude_cols` for an example.
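For example, hypothetical classic mappings that restrict which
table columns are mapped::

    # map only these two columns of user_table
    mapper(User, user_table,
        include_properties=['user_id', 'user_name'])

    # map every column of address_table except these two
    mapper(Address, address_table,
        exclude_properties=['street', 'city'])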
:param inherits: A mapped class or the corresponding
:class:`_orm.Mapper`
of one, indicating the superclass from which this
:class:`_orm.Mapper` should *inherit*. The mapped class here must be a subclass
of the other mapper's class. When using Declarative, this argument
is passed automatically as a result of the natural class
hierarchy of the declared classes.
.. seealso::
:ref:`inheritance_toplevel`
:param inherit_condition: For joined table inheritance, a SQL
expression which will
define how the two tables are joined; defaults to a natural join
between the two tables.
:param inherit_foreign_keys: When ``inherit_condition`` is used and
the columns present are missing a :class:`_schema.ForeignKey`
configuration, this parameter can be used to specify which columns
are "foreign". In most cases can be left as ``None``.
:param legacy_is_orphan: Boolean, defaults to ``False``.
When ``True``, specifies that "legacy" orphan consideration
is to be applied to objects mapped by this mapper, which means
that a pending (that is, not persistent) object is auto-expunged
from an owning :class:`.Session` only when it is de-associated
from *all* parents that specify a ``delete-orphan`` cascade towards
this mapper. The new default behavior is that the object is
auto-expunged when it is de-associated with *any* of its parents
that specify ``delete-orphan`` cascade. This behavior is more
consistent with that of a persistent object, and allows behavior to
be consistent in more scenarios independently of whether or not an
orphanable object has been flushed yet.
See the change note and example at :ref:`legacy_is_orphan_addition`
for more detail on this change.
:param non_primary: Specify that this :class:`_orm.Mapper`
is in addition
to the "primary" mapper, that is, the one used for persistence.
The :class:`_orm.Mapper` created here may be used for ad-hoc
mapping of the class to an alternate selectable, for loading
only.
:paramref:`_orm.Mapper.non_primary` is not an often used option, but
is useful in some specific :func:`_orm.relationship` cases.
.. seealso::
:ref:`relationship_non_primary_mapper`
:param order_by: A single :class:`_schema.Column` or list of
:class:`_schema.Column`
objects which selection operations should use as the default
ordering for entities. By default mappers have no pre-defined
ordering.
:param passive_deletes: Indicates DELETE behavior of foreign key
columns when a joined-table inheritance entity is being deleted.
Defaults to ``False`` for a base mapper; for an inheriting mapper,
defaults to ``False`` unless the value is set to ``True``
on the superclass mapper.
When ``True``, it is assumed that ON DELETE CASCADE is configured
on the foreign key relationships that link this mapper's table
to its superclass table, so that when the unit of work attempts
to delete the entity, it need only emit a DELETE statement for the
superclass table, and not this table.
When ``False``, a DELETE statement is emitted for this mapper's
table individually. If the primary key attributes local to this
table are unloaded, then a SELECT must be emitted in order to
validate these attributes; note that the primary key columns
of a joined-table subclass are not part of the "primary key" of
the object as a whole.
Note that a value of ``True`` is **always** forced onto the
subclass mappers; that is, it's not possible for a superclass
to specify passive_deletes without this taking effect for
all subclass mappers.
.. versionadded:: 1.1
.. seealso::
:ref:`passive_deletes` - description of similar feature as
used with :func:`_orm.relationship`
:paramref:`.mapper.passive_updates` - supporting ON UPDATE
CASCADE for joined-table inheritance mappers
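A joined-table inheritance sketch that relies upon ON DELETE
CASCADE configured at the database level; all names here are
illustrative::

    class Employee(Base):
        __tablename__ = 'employee'

        id = Column(Integer, primary_key=True)

    class Engineer(Employee):
        __tablename__ = 'engineer'

        # ON DELETE CASCADE removes this row when the
        # employee row is deleted
        id = Column(
            ForeignKey('employee.id', ondelete="CASCADE"),
            primary_key=True)

        __mapper_args__ = {"passive_deletes": True}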
:param passive_updates: Indicates UPDATE behavior of foreign key
columns when a primary key column changes on a joined-table
inheritance mapping. Defaults to ``True``.
When True, it is assumed that ON UPDATE CASCADE is configured on
the foreign key in the database, and that the database will handle
propagation of an UPDATE from a source column to dependent columns
on joined-table rows.
When False, it is assumed that the database does not enforce
referential integrity and will not be issuing its own CASCADE
operation for an update. The unit of work process will
emit an UPDATE statement for the dependent columns during a
primary key change.
.. seealso::
:ref:`passive_updates` - description of a similar feature as
used with :func:`_orm.relationship`
:paramref:`.mapper.passive_deletes` - supporting ON DELETE
CASCADE for joined-table inheritance mappers
:param polymorphic_load: Specifies "polymorphic loading" behavior
for a subclass in an inheritance hierarchy (joined and single
table inheritance only). Valid values are:
* "'inline'" - specifies this class should be part of the
"with_polymorphic" mappers, e.g. its columns will be included
in a SELECT query against the base.
* "'selectin'" - specifies that when instances of this class
are loaded, an additional SELECT will be emitted to retrieve
the columns specific to this subclass. The SELECT uses
IN to fetch multiple subclasses at once.
.. versionadded:: 1.2
.. seealso::
:ref:`with_polymorphic_mapper_config`
:ref:`polymorphic_selectin`
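For example, a subclass in a joined inheritance hierarchy may
request "selectin" loading of its own columns; names here are
illustrative::

    class Engineer(Employee):
        __tablename__ = 'engineer'

        id = Column(ForeignKey('employee.id'), primary_key=True)

        __mapper_args__ = {
            "polymorphic_identity": "engineer",
            "polymorphic_load": "selectin"
        }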
:param polymorphic_on: Specifies the column, attribute, or
SQL expression used to determine the target class for an
incoming row, when inheriting classes are present.
This value is commonly a :class:`_schema.Column` object that's
present in the mapped :class:`_schema.Table`::
class Employee(Base):
    __tablename__ = 'employee'

    id = Column(Integer, primary_key=True)
    discriminator = Column(String(50))

    __mapper_args__ = {
        "polymorphic_on": discriminator,
        "polymorphic_identity": "employee"
    }
It may also be specified
as a SQL expression, as in this example where we
use the :func:`.case` construct to provide a conditional
approach::
class Employee(Base):
    __tablename__ = 'employee'

    id = Column(Integer, primary_key=True)
    discriminator = Column(String(50))

    __mapper_args__ = {
        "polymorphic_on": case([
            (discriminator == "EN", "engineer"),
            (discriminator == "MA", "manager"),
        ], else_="employee"),
        "polymorphic_identity": "employee"
    }
It may also refer to any attribute
configured with :func:`.column_property`, or to the
string name of one::
class Employee(Base):
    __tablename__ = 'employee'

    id = Column(Integer, primary_key=True)
    discriminator = Column(String(50))

    employee_type = column_property(
        case([
            (discriminator == "EN", "engineer"),
            (discriminator == "MA", "manager"),
        ], else_="employee")
    )

    __mapper_args__ = {
        "polymorphic_on": employee_type,
        "polymorphic_identity": "employee"
    }
When setting ``polymorphic_on`` to reference an
attribute or expression that's not present in the
locally mapped :class:`_schema.Table`, yet the value
of the discriminator should be persisted to the database,
the value of the
discriminator is not automatically set on new
instances; this must be handled by the user,
either through manual means or via event listeners.
A typical approach to establishing such a listener
looks like::
from sqlalchemy import event
from sqlalchemy.orm import object_mapper

@event.listens_for(Employee, "init", propagate=True)
def set_identity(instance, *arg, **kw):
    mapper = object_mapper(instance)
    instance.discriminator = mapper.polymorphic_identity
Where above, we assign the value of ``polymorphic_identity``
for the mapped class to the ``discriminator`` attribute,
thus persisting the value to the ``discriminator`` column
in the database.
.. warning::
Currently, **only one discriminator column may be set**, typically
on the base-most class in the hierarchy. "Cascading" polymorphic
columns are not yet supported.
.. seealso::
:ref:`inheritance_toplevel`
:param polymorphic_identity: Specifies the value which
identifies this particular class as returned by the
column expression referred to by the ``polymorphic_on``
setting. As rows are received, the value corresponding
to the ``polymorphic_on`` column expression is compared
to this value, indicating which subclass should
be used for the newly reconstructed object.
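Continuing from the illustrative ``Employee`` mappings above, a
single-table inheriting subclass would supply only its own
identity value::

    class Manager(Employee):
        __mapper_args__ = {"polymorphic_identity": "manager"}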
:param properties: A dictionary mapping the string names of object
attributes to :class:`.MapperProperty` instances, which define the
persistence behavior of that attribute. Note that
:class:`_schema.Column`
objects present in
the mapped :class:`_schema.Table` are automatically placed into
``ColumnProperty`` instances upon mapping, unless overridden.
When using Declarative, this argument is passed automatically,
based on all those :class:`.MapperProperty` instances declared
in the declared class body.
:param primary_key: A list of :class:`_schema.Column`
objects which define
the primary key to be used against this mapper's selectable unit.
This is normally simply the primary key of the ``local_table``, but
can be overridden here.
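For example, a hypothetical classic mapping against a table or
view that lacks a formal PRIMARY KEY constraint::

    mapper(User, user_table,
        primary_key=[user_table.c.user_id])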
:param version_id_col: A :class:`_schema.Column`
that will be used to keep a running version id of rows
in the table. This is used to detect concurrent updates or
the presence of stale data in a flush. If an UPDATE statement
does not match the last known version id, a
:class:`~sqlalchemy.orm.exc.StaleDataError` exception is
raised.
By default, the column must be of :class:`.Integer` type,
unless ``version_id_generator`` specifies an alternative version
generator.
.. seealso::
:ref:`mapper_version_counter` - discussion of version counting
and rationale.
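A minimal declarative sketch using the default integer counting
scheme; the ``version_id`` column name is illustrative::

    class User(Base):
        __tablename__ = 'user'

        id = Column(Integer, primary_key=True)
        version_id = Column(Integer, nullable=False)

        __mapper_args__ = {"version_id_col": version_id}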
:param version_id_generator: Define how new version ids should
be generated. Defaults to ``None``, which indicates that
a simple integer counting scheme be employed. To provide a custom
versioning scheme, provide a callable function of the form::
def generate_version(version):
    return next_version
Alternatively, server-side versioning functions such as triggers,
or programmatic versioning schemes outside of the version id
generator may be used, by specifying the value ``False``.
Please see :ref:`server_side_version_counter` for a discussion
of important points when using this option.
.. versionadded:: 0.9.0 ``version_id_generator`` supports
server-side version number generation.
.. seealso::
:ref:`custom_version_counter`
:ref:`server_side_version_counter`
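For example, a hypothetical scheme using randomly generated hex
strings rather than an integer counter::

    import uuid

    class User(Base):
        __tablename__ = 'user'

        id = Column(Integer, primary_key=True)
        version_uuid = Column(String(32), nullable=False)

        __mapper_args__ = {
            "version_id_col": version_uuid,
            # a new random value replaces the previous one
            # on each UPDATE
            "version_id_generator": lambda version: uuid.uuid4().hex
        }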
:param with_polymorphic: A tuple in the form ``(<classes>,
<selectable>)`` indicating the default style of "polymorphic"
loading, that is, which tables are queried at once. <classes> is
any single or list of mappers and/or classes indicating the
inherited classes that should be loaded at once. The special value
``'*'`` may be used to indicate all descending classes should be
loaded immediately. The second tuple argument <selectable>
indicates a selectable that will be used to query for multiple
classes.
.. seealso::
:ref:`with_polymorphic` - discussion of polymorphic querying
techniques.
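For example, a base mapper may request that queries against it
join in all subclass tables by default; names here are
illustrative::

    class Employee(Base):
        __tablename__ = 'employee'

        id = Column(Integer, primary_key=True)
        type = Column(String(50))

        __mapper_args__ = {
            "polymorphic_on": type,
            "polymorphic_identity": "employee",
            "with_polymorphic": "*"
        }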
"""
self.class_ = util.assert_arg_type(class_, type, "class_")
self.class_manager = None
self._primary_key_argument = util.to_list(primary_key)
self.non_primary = non_primary
if order_by is not False:
self.order_by = util.to_list(order_by)
else:
self.order_by = order_by
self.always_refresh = always_refresh
if isinstance(version_id_col, MapperProperty):
self.version_id_prop = version_id_col
self.version_id_col = None
else:
self.version_id_col = version_id_col
if version_id_generator is False:
self.version_id_generator = False
elif version_id_generator is None:
self.version_id_generator = lambda x: (x or 0) + 1
else:
self.version_id_generator = version_id_generator
self.concrete = concrete
self.single = False
self.inherits = inherits
self.local_table = local_table
self.inherit_condition = inherit_condition
self.inherit_foreign_keys = inherit_foreign_keys
self._init_properties = properties or {}
self._delete_orphans = []
self.batch = batch
self.eager_defaults = eager_defaults
self.column_prefix = column_prefix
self.polymorphic_on = expression._clause_element_as_expr(
polymorphic_on
)
self._dependency_processors = []
self.validators = util.immutabledict()
self.passive_updates = passive_updates
self.passive_deletes = passive_deletes
self.legacy_is_orphan = legacy_is_orphan
self._clause_adapter = None
self._requires_row_aliasing = False
self._inherits_equated_pairs = None
self._memoized_values = {}
self._compiled_cache_size = _compiled_cache_size
self._reconstructor = None
self._deprecated_extensions = util.to_list(extension or [])
self.allow_partial_pks = allow_partial_pks
if self.inherits and not self.concrete:
self.confirm_deleted_rows = False
else:
self.confirm_deleted_rows = confirm_deleted_rows
if isinstance(self.local_table, expression.SelectBase):
raise sa_exc.InvalidRequestError(
"When mapping against a select() construct, map against "
"an alias() of the construct instead."
"This because several databases don't allow a "
"SELECT from a subquery that does not have an alias."
)
self._set_with_polymorphic(with_polymorphic)
self.polymorphic_load = polymorphic_load
# our 'polymorphic identity', a string name that when located in a
# result set row indicates this Mapper should be used to construct
# the object instance for that row.
self.polymorphic_identity = polymorphic_identity
# a dictionary of 'polymorphic identity' names, associating those
# names with Mappers that will be used to construct object instances
# upon a select operation.
if _polymorphic_map is None:
self.polymorphic_map = {}
else:
self.polymorphic_map = _polymorphic_map
if include_properties is not None:
self.include_properties = util.to_set(include_properties)
else:
self.include_properties = None
if exclude_properties:
self.exclude_properties = util.to_set(exclude_properties)
else:
self.exclude_properties = None
self.configured = False
# prevent this mapper from being constructed
# while a configure_mappers() is occurring (and defer a
# configure_mappers() until construction succeeds)
_CONFIGURE_MUTEX.acquire()
try:
self.dispatch._events._new_mapper_instance(class_, self)
self._configure_inheritance()
self._configure_legacy_instrument_class()
self._configure_class_instrumentation()
self._configure_listeners()
self._configure_properties()
self._configure_polymorphic_setter()
self._configure_pks()
Mapper._new_mappers = True
self._log("constructed")
self._expire_memoizations()
finally:
_CONFIGURE_MUTEX.release()
# major attributes initialized at the classlevel so that
# they can be Sphinx-documented.
is_mapper = True
"""Part of the inspection API."""
represents_outer_join = False
@property
def mapper(self):
"""Part of the inspection API.
Returns self.
"""
return self
@property
def entity(self):
r"""Part of the inspection API.
Returns self.class\_.
"""
return self.class_
local_table = None
"""The :class:`expression.Selectable` which this :class:`_orm.Mapper`
manages.
Typically is an instance of :class:`_schema.Table` or
:class:`_expression.Alias`.
May also be ``None``.
The "local" table is the
selectable that the :class:`_orm.Mapper` is directly responsible for
managing from an attribute access and flush perspective. For
non-inheriting mappers, the local table is the same as the
"mapped" table. For joined-table inheritance mappers, local_table
will be the particular sub-table of the overall "join" which
this :class:`_orm.Mapper` represents. If this mapper is a
single-table inheriting mapper, local_table will be the same
table as that of the inherited mapper.
.. seealso::
:attr:`_orm.Mapper.persist_selectable`.
"""
persist_selectable = None
"""The :class:`expression.Selectable` to which this :class:`_orm.Mapper`
is mapped.
Typically an instance of :class:`_schema.Table`,
:class:`_expression.Join`, or :class:`_expression.Alias`.
The :attr:`_orm.Mapper.persist_selectable` is separate from
:attr:`_orm.Mapper.selectable` in that the former represents columns
that are mapped on this class or its superclasses, whereas the
latter may be a "polymorphic" selectable that contains additional columns
which are in fact mapped on subclasses only.
"persist selectable" is the "thing the mapper writes to" and
"selectable" is the "thing the mapper selects from".
:attr:`_orm.Mapper.persist_selectable` is also separate from
:attr:`_orm.Mapper.local_table`, which represents the set of columns that
are locally mapped on this class directly.
.. seealso::
:attr:`_orm.Mapper.selectable`.
:attr:`_orm.Mapper.local_table`.
"""
inherits = None
"""References the :class:`_orm.Mapper` which this :class:`_orm.Mapper`
inherits from, if any.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
configured = None
"""Represent ``True`` if this :class:`_orm.Mapper` has been configured.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
.. seealso::
:func:`.configure_mappers`.
"""
concrete = None
"""Represent ``True`` if this :class:`_orm.Mapper` is a concrete
inheritance mapper.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
tables = None
"""An iterable containing the collection of :class:`_schema.Table` objects
which this :class:`_orm.Mapper` is aware of.
If the mapper is mapped to a :class:`_expression.Join`, or an
:class:`_expression.Alias`
representing a :class:`_expression.Select`, the individual
:class:`_schema.Table`
objects that comprise the full construct will be represented here.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
primary_key = None
"""An iterable containing the collection of :class:`_schema.Column`
objects
which comprise the 'primary key' of the mapped table, from the
perspective of this :class:`_orm.Mapper`.
This list is against the selectable in
:attr:`_orm.Mapper.persist_selectable`.
In the case of inheriting mappers, some columns may be managed by a
superclass mapper. For example, in the case of a
:class:`_expression.Join`, the
primary key is determined by all of the primary key columns across all
tables referenced by the :class:`_expression.Join`.
The list is also not necessarily the same as the primary key column
collection associated with the underlying tables; the :class:`_orm.Mapper`
features a ``primary_key`` argument that can override what the
:class:`_orm.Mapper` considers as primary key columns.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
class_ = None
"""The Python class which this :class:`_orm.Mapper` maps.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
class_manager = None
"""The :class:`.ClassManager` which maintains event listeners
and class-bound descriptors for this :class:`_orm.Mapper`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
single = None
"""Represent ``True`` if this :class:`_orm.Mapper` is a single table
inheritance mapper.
:attr:`_orm.Mapper.local_table` will be the same table as that of
the inherited mapper if this flag is set.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
non_primary = None
"""Represent ``True`` if this :class:`_orm.Mapper` is a "non-primary"
mapper, e.g. a mapper that is used only to select rows but not for
persistence management.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_on = None
"""The :class:`_schema.Column` or SQL expression specified as the
``polymorphic_on`` argument
for this :class:`_orm.Mapper`, within an inheritance scenario.
This attribute is normally a :class:`_schema.Column` instance but
may also be an expression, such as one derived from
:func:`.cast`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_map = None
"""A mapping of "polymorphic identity" identifiers mapped to
:class:`_orm.Mapper` instances, within an inheritance scenario.
The identifiers can be of any type which is comparable to the
type of column represented by :attr:`_orm.Mapper.polymorphic_on`.
An inheritance chain of mappers will all reference the same
polymorphic map object. The object is used to correlate incoming
result rows to target mappers.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_identity = None
"""Represent an identifier which is matched against the
:attr:`_orm.Mapper.polymorphic_on` column during result row loading.
Used only with inheritance, this object can be of any type which is
comparable to the type of column represented by
:attr:`_orm.Mapper.polymorphic_on`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
base_mapper = None
"""The base-most :class:`_orm.Mapper` in an inheritance chain.
In a non-inheriting scenario, this attribute will always be this
:class:`_orm.Mapper`. In an inheritance scenario, it references
the :class:`_orm.Mapper` which is parent to all other :class:`_orm.Mapper`
objects in the inheritance chain.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
columns = None
"""A collection of :class:`_schema.Column` or other scalar expression
objects maintained by this :class:`_orm.Mapper`.
The collection behaves the same as that of the ``c`` attribute on
any :class:`_schema.Table` object,
except that only those columns included in
this mapping are present, and are keyed based on the attribute name
defined in the mapping, not necessarily the ``key`` attribute of the
:class:`_schema.Column` itself. Additionally, scalar expressions mapped
by :func:`.column_property` are also present here.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
validators = None
"""An immutable dictionary of attributes which have been decorated
using the :func:`_orm.validates` decorator.
The dictionary contains string attribute names as keys
mapped to the actual validation method.
"""
c = None
"""A synonym for :attr:`_orm.Mapper.columns`."""
@property
@util.deprecated("1.3", "Use .persist_selectable")
def mapped_table(self):
return self.persist_selectable
@util.memoized_property
def _path_registry(self):
return PathRegistry.per_mapper(self)
def _configure_inheritance(self):
"""Configure settings related to inheriting and/or inherited mappers
being present."""
# a set of all mappers which inherit from this one.
self._inheriting_mappers = util.WeakSequence()
if self.inherits:
if isinstance(self.inherits, type):
self.inherits = class_mapper(self.inherits, configure=False)
if not issubclass(self.class_, self.inherits.class_):
raise sa_exc.ArgumentError(
"Class '%s' does not inherit from '%s'"
% (self.class_.__name__, self.inherits.class_.__name__)
)
if self.non_primary != self.inherits.non_primary:
np = "primary" if not self.non_primary else "non-primary"
raise sa_exc.ArgumentError(
"Inheritance of %s mapper for class '%s' is "
"only allowed from a %s mapper"
% (np, self.class_.__name__, np)
)
# inherit_condition is optional.
if self.local_table is None:
self.local_table = self.inherits.local_table
self.persist_selectable = self.inherits.persist_selectable
self.single = True
elif self.local_table is not self.inherits.local_table:
if self.concrete:
self.persist_selectable = self.local_table
for mapper in self.iterate_to_root():
if mapper.polymorphic_on is not None:
mapper._requires_row_aliasing = True
else:
if self.inherit_condition is None:
# figure out inherit condition from our table to the
# immediate table of the inherited mapper, not its
# full table which could pull in other stuff we don't
# want (allows test/inheritance.InheritTest4 to pass)
self.inherit_condition = sql_util.join_condition(
self.inherits.local_table, self.local_table
)
self.persist_selectable = sql.join(
self.inherits.persist_selectable,
self.local_table,
self.inherit_condition,
)
fks = util.to_set(self.inherit_foreign_keys)
self._inherits_equated_pairs = sql_util.criterion_as_pairs(
self.persist_selectable.onclause,
consider_as_foreign_keys=fks,
)
else:
self.persist_selectable = self.local_table
if self.polymorphic_identity is not None and not self.concrete:
self._identity_class = self.inherits._identity_class
else:
self._identity_class = self.class_
if self.version_id_col is None:
self.version_id_col = self.inherits.version_id_col
self.version_id_generator = self.inherits.version_id_generator
elif (
self.inherits.version_id_col is not None
and self.version_id_col is not self.inherits.version_id_col
):
util.warn(
"Inheriting version_id_col '%s' does not match inherited "
"version_id_col '%s' and will not automatically populate "
"the inherited versioning column. "
"version_id_col should only be specified on "
"the base-most mapper that includes versioning."
% (
self.version_id_col.description,
self.inherits.version_id_col.description,
)
)
if (
self.order_by is False
and not self.concrete
and self.inherits.order_by is not False
):
self.order_by = self.inherits.order_by
self.polymorphic_map = self.inherits.polymorphic_map
self.batch = self.inherits.batch
self.inherits._inheriting_mappers.append(self)
self.base_mapper = self.inherits.base_mapper
self.passive_updates = self.inherits.passive_updates
self.passive_deletes = (
self.inherits.passive_deletes or self.passive_deletes
)
self._all_tables = self.inherits._all_tables
if self.polymorphic_identity is not None:
if self.polymorphic_identity in self.polymorphic_map:
util.warn(
"Reassigning polymorphic association for identity %r "
"from %r to %r: Check for duplicate use of %r as "
"value for polymorphic_identity."
% (
self.polymorphic_identity,
self.polymorphic_map[self.polymorphic_identity],
self,
self.polymorphic_identity,
)
)
self.polymorphic_map[self.polymorphic_identity] = self
if self.polymorphic_load and self.concrete:
raise sa_exc.ArgumentError(
"polymorphic_load is not currently supported "
"with concrete table inheritance"
)
if self.polymorphic_load == "inline":
self.inherits._add_with_polymorphic_subclass(self)
elif self.polymorphic_load == "selectin":
pass
elif self.polymorphic_load is not None:
raise sa_exc.ArgumentError(
"unknown argument for polymorphic_load: %r"
% self.polymorphic_load
)
else:
self._all_tables = set()
self.base_mapper = self
self.persist_selectable = self.local_table
if self.polymorphic_identity is not None:
self.polymorphic_map[self.polymorphic_identity] = self
self._identity_class = self.class_
if self.persist_selectable is None:
raise sa_exc.ArgumentError(
"Mapper '%s' does not have a persist_selectable specified."
% self
)
def _set_with_polymorphic(self, with_polymorphic):
if with_polymorphic == "*":
self.with_polymorphic = ("*", None)
elif isinstance(with_polymorphic, (tuple, list)):
if isinstance(
with_polymorphic[0], util.string_types + (tuple, list)
):
self.with_polymorphic = with_polymorphic
else:
self.with_polymorphic = (with_polymorphic, None)
elif with_polymorphic is not None:
raise sa_exc.ArgumentError("Invalid setting for with_polymorphic")
else:
self.with_polymorphic = None
if isinstance(self.local_table, expression.SelectBase):
raise sa_exc.InvalidRequestError(
"When mapping against a select() construct, map against "
"an alias() of the construct instead."
"This because several databases don't allow a "
"SELECT from a subquery that does not have an alias."
)
if self.with_polymorphic and isinstance(
self.with_polymorphic[1], expression.SelectBase
):
self.with_polymorphic = (
self.with_polymorphic[0],
self.with_polymorphic[1].alias(),
)
if self.configured:
self._expire_memoizations()
def _add_with_polymorphic_subclass(self, mapper):
subcl = mapper.class_
if self.with_polymorphic is None:
self._set_with_polymorphic((subcl,))
elif self.with_polymorphic[0] != "*":
self._set_with_polymorphic(
(self.with_polymorphic[0] + (subcl,), self.with_polymorphic[1])
)
def _set_concrete_base(self, mapper):
"""Set the given :class:`_orm.Mapper` as the 'inherits' for this
:class:`_orm.Mapper`, assuming this :class:`_orm.Mapper` is concrete
and does not already have an inherits."""
assert self.concrete
assert not self.inherits
assert isinstance(mapper, Mapper)
self.inherits = mapper
self.inherits.polymorphic_map.update(self.polymorphic_map)
self.polymorphic_map = self.inherits.polymorphic_map
for mapper in self.iterate_to_root():
if mapper.polymorphic_on is not None:
mapper._requires_row_aliasing = True
self.batch = self.inherits.batch
for mp in self.self_and_descendants:
mp.base_mapper = self.inherits.base_mapper
self.inherits._inheriting_mappers.append(self)
self.passive_updates = self.inherits.passive_updates
self._all_tables = self.inherits._all_tables
for key, prop in mapper._props.items():
if key not in self._props and not self._should_exclude(
key, key, local=False, column=None
):
self._adapt_inherited_property(key, prop, False)
def _set_polymorphic_on(self, polymorphic_on):
self.polymorphic_on = polymorphic_on
self._configure_polymorphic_setter(True)
def _configure_legacy_instrument_class(self):
if self.inherits:
self.dispatch._update(self.inherits.dispatch)
super_extensions = set(
chain(
*[
m._deprecated_extensions
for m in self.inherits.iterate_to_root()
]
)
)
else:
super_extensions = set()
for ext in self._deprecated_extensions:
if ext not in super_extensions:
ext._adapt_instrument_class(self, ext)
def _configure_listeners(self):
if self.inherits:
super_extensions = set(
chain(
*[
m._deprecated_extensions
for m in self.inherits.iterate_to_root()
]
)
)
else:
super_extensions = set()
for ext in self._deprecated_extensions:
if ext not in super_extensions:
ext._adapt_listener(self, ext)
def _configure_class_instrumentation(self):
"""If this mapper is to be a primary mapper (i.e. the
non_primary flag is not set), associate this Mapper with the
given class and entity name.
Subsequent calls to ``class_mapper()`` for the ``class_`` / ``entity``
name combination will return this mapper. Also decorate the
`__init__` method on the mapped class to include optional
auto-session attachment logic.
"""
manager = attributes.manager_of_class(self.class_)
if self.non_primary:
if not manager or not manager.is_mapped:
raise sa_exc.InvalidRequestError(
"Class %s has no primary mapper configured. Configure "
"a primary mapper first before setting up a non primary "
"Mapper." % self.class_
)
self.class_manager = manager
self._identity_class = manager.mapper._identity_class
_mapper_registry[self] = True
return
if manager is not None:
assert manager.class_ is self.class_
if manager.is_mapped:
raise sa_exc.ArgumentError(
"Class '%s' already has a primary mapper defined. "
"Use non_primary=True to "
"create a non primary Mapper. clear_mappers() will "
"remove *all* current mappers from all classes."
% self.class_
)
# else:
# a ClassManager may already exist as
# ClassManager.instrument_attribute() creates
# new managers for each subclass if they don't yet exist.
_mapper_registry[self] = True
# note: this *must be called before instrumentation.register_class*
# to maintain the documented behavior of instrument_class
self.dispatch.instrument_class(self, self.class_)
if manager is None:
manager = instrumentation.register_class(self.class_)
self.class_manager = manager
manager.mapper = self
manager.deferred_scalar_loader = util.partial(
loading.load_scalar_attributes, self
)
# The remaining members can be added by any mapper,
# e_name None or not.
if manager.info.get(_INSTRUMENTOR, False):
return
event.listen(manager, "first_init", _event_on_first_init, raw=True)
event.listen(manager, "init", _event_on_init, raw=True)
for key, method in util.iterate_attributes(self.class_):
if key == "__init__" and hasattr(method, "_sa_original_init"):
method = method._sa_original_init
if isinstance(method, types.MethodType):
method = method.im_func
if isinstance(method, types.FunctionType):
if hasattr(method, "__sa_reconstructor__"):
self._reconstructor = method
event.listen(manager, "load", _event_on_load, raw=True)
elif hasattr(method, "__sa_validators__"):
validation_opts = method.__sa_validation_opts__
for name in method.__sa_validators__:
if name in self.validators:
raise sa_exc.InvalidRequestError(
"A validation function for mapped "
"attribute %r on mapper %s already exists."
% (name, self)
)
self.validators = self.validators.union(
{name: (method, validation_opts)}
)
manager.info[_INSTRUMENTOR] = self
@classmethod
def _configure_all(cls):
"""Class-level path to the :func:`.configure_mappers` call.
"""
configure_mappers()
def dispose(self):
# Disable any attribute-based compilation.
self.configured = True
self._dispose_called = True
if hasattr(self, "_configure_failed"):
del self._configure_failed
if (
not self.non_primary
and self.class_manager is not None
and self.class_manager.is_mapped
and self.class_manager.mapper is self
):
instrumentation.unregister_class(self.class_)
def _configure_pks(self):
self.tables = sql_util.find_tables(self.persist_selectable)
self._pks_by_table = {}
self._cols_by_table = {}
all_cols = util.column_set(
chain(*[col.proxy_set for col in self._columntoproperty])
)
pk_cols = util.column_set(c for c in all_cols if c.primary_key)
# identify primary key columns which are also mapped by this mapper.
tables = set(self.tables + [self.persist_selectable])
self._all_tables.update(tables)
for t in tables:
if t.primary_key and pk_cols.issuperset(t.primary_key):
# ordering is important since it determines the ordering of
# mapper.primary_key (and therefore query.get())
self._pks_by_table[t] = util.ordered_column_set(
t.primary_key
).intersection(pk_cols)
self._cols_by_table[t] = util.ordered_column_set(t.c).intersection(
all_cols
)
# if explicit PK argument sent, add those columns to the
# primary key mappings
if self._primary_key_argument:
for k in self._primary_key_argument:
if k.table not in self._pks_by_table:
self._pks_by_table[k.table] = util.OrderedSet()
self._pks_by_table[k.table].add(k)
# otherwise, see that we got a full PK for the mapped table
elif (
self.persist_selectable not in self._pks_by_table
or len(self._pks_by_table[self.persist_selectable]) == 0
):
raise sa_exc.ArgumentError(
"Mapper %s could not assemble any primary "
"key columns for mapped table '%s'"
% (self, self.persist_selectable.description)
)
elif self.local_table not in self._pks_by_table and isinstance(
self.local_table, schema.Table
):
util.warn(
"Could not assemble any primary "
"keys for locally mapped table '%s' - "
"no rows will be persisted in this Table."
% self.local_table.description
)
if (
self.inherits
and not self.concrete
and not self._primary_key_argument
):
# if inheriting, the "primary key" for this mapper is
# that of the inheriting (unless concrete or explicit)
self.primary_key = self.inherits.primary_key
else:
# determine primary key from argument or persist_selectable pks -
# reduce to the minimal set of columns
if self._primary_key_argument:
primary_key = sql_util.reduce_columns(
[
self.persist_selectable.corresponding_column(c)
for c in self._primary_key_argument
],
ignore_nonexistent_tables=True,
)
else:
primary_key = sql_util.reduce_columns(
self._pks_by_table[self.persist_selectable],
ignore_nonexistent_tables=True,
)
if len(primary_key) == 0:
raise sa_exc.ArgumentError(
"Mapper %s could not assemble any primary "
"key columns for mapped table '%s'"
% (self, self.persist_selectable.description)
)
self.primary_key = tuple(primary_key)
self._log("Identified primary key columns: %s", primary_key)
# determine cols that aren't expressed within our tables; mark these
# as "read only" properties which are refreshed upon INSERT/UPDATE
self._readonly_props = set(
self._columntoproperty[col]
for col in self._columntoproperty
if self._columntoproperty[col] not in self._identity_key_props
and (
not hasattr(col, "table")
or col.table not in self._cols_by_table
)
)
def _configure_properties(self):
# Column and other ClauseElement objects which are mapped
self.columns = self.c = util.OrderedProperties()
# object attribute names mapped to MapperProperty objects
self._props = util.OrderedDict()
# table columns mapped to lists of MapperProperty objects
# using a list allows a single column to be defined as
# populating multiple object attributes
self._columntoproperty = _ColumnMapping(self)
# load custom properties
if self._init_properties:
for key, prop in self._init_properties.items():
self._configure_property(key, prop, False)
# pull properties from the inherited mapper if any.
if self.inherits:
for key, prop in self.inherits._props.items():
if key not in self._props and not self._should_exclude(
key, key, local=False, column=None
):
self._adapt_inherited_property(key, prop, False)
# create properties for each column in the mapped table,
# for those columns which don't already map to a property
for column in self.persist_selectable.columns:
if column in self._columntoproperty:
continue
column_key = (self.column_prefix or "") + column.key
if self._should_exclude(
column.key,
column_key,
local=self.local_table.c.contains_column(column),
column=column,
):
continue
# adjust the "key" used for this column to that
# of the inheriting mapper
for mapper in self.iterate_to_root():
if column in mapper._columntoproperty:
column_key = mapper._columntoproperty[column].key
self._configure_property(
column_key, column, init=False, setparent=True
)
def _configure_polymorphic_setter(self, init=False):
"""Configure an attribute on the mapper representing the
'polymorphic_on' column, if applicable, and not
already generated by _configure_properties (which is typical).
Also create a setter function which will assign this
attribute to the value of the 'polymorphic_identity'
upon instance construction, also if applicable. This
routine will run when an instance is created.
"""
setter = False
if self.polymorphic_on is not None:
setter = True
if isinstance(self.polymorphic_on, util.string_types):
# polymorphic_on specified as a string - link
# it to mapped ColumnProperty
try:
self.polymorphic_on = self._props[self.polymorphic_on]
except KeyError as err:
util.raise_(
sa_exc.ArgumentError(
"Can't determine polymorphic_on "
"value '%s' - no attribute is "
"mapped to this name." % self.polymorphic_on
),
replace_context=err,
)
if self.polymorphic_on in self._columntoproperty:
# polymorphic_on is a column that is already mapped
# to a ColumnProperty
prop = self._columntoproperty[self.polymorphic_on]
elif isinstance(self.polymorphic_on, MapperProperty):
# polymorphic_on is directly a MapperProperty,
# ensure it's a ColumnProperty
if not isinstance(
self.polymorphic_on, properties.ColumnProperty
):
raise sa_exc.ArgumentError(
"Only direct column-mapped "
"property or SQL expression "
"can be passed for polymorphic_on"
)
prop = self.polymorphic_on
elif not expression._is_column(self.polymorphic_on):
# polymorphic_on is not a Column and not a ColumnProperty;
# not supported right now.
raise sa_exc.ArgumentError(
"Only direct column-mapped "
"property or SQL expression "
"can be passed for polymorphic_on"
)
else:
# polymorphic_on is a Column or SQL expression and
# doesn't appear to be mapped. this means it can be 1.
# only present in the with_polymorphic selectable or
# 2. a totally standalone SQL expression which we'd
# hope is compatible with this mapper's persist_selectable
col = self.persist_selectable.corresponding_column(
self.polymorphic_on
)
if col is None:
# polymorphic_on doesn't derive from any
# column/expression isn't present in the mapped
# table. we will make a "hidden" ColumnProperty
# for it. Just check that if it's directly a
# schema.Column and we have with_polymorphic, it's
# likely a user error if the schema.Column isn't
# represented somehow in either persist_selectable or
# with_polymorphic. Otherwise as of 0.7.4 we
# just go with it and assume the user wants it
# that way (i.e. a CASE statement)
setter = False
instrument = False
col = self.polymorphic_on
if isinstance(col, schema.Column) and (
self.with_polymorphic is None
or self.with_polymorphic[1].corresponding_column(col)
is None
):
raise sa_exc.InvalidRequestError(
"Could not map polymorphic_on column "
"'%s' to the mapped table - polymorphic "
"loads will not function properly"
% col.description
)
else:
# column/expression that polymorphic_on derives from
# is present in our mapped table
# and is probably mapped, but polymorphic_on itself
# is not. This happens when
# the polymorphic_on is only directly present in the
# with_polymorphic selectable, as when using
# polymorphic_union.
# we'll make a separate ColumnProperty for it.
instrument = True
key = getattr(col, "key", None)
if key:
if self._should_exclude(col.key, col.key, False, col):
raise sa_exc.InvalidRequestError(
"Cannot exclude or override the "
"discriminator column %r" % col.key
)
else:
self.polymorphic_on = col = col.label("_sa_polymorphic_on")
key = col.key
prop = properties.ColumnProperty(col, _instrument=instrument)
self._configure_property(key, prop, init=init, setparent=True)
# the actual polymorphic_on should be the first public-facing
# column in the property
self.polymorphic_on = prop.columns[0]
polymorphic_key = prop.key
else:
# no polymorphic_on was set.
# check inheriting mappers for one.
for mapper in self.iterate_to_root():
# determine if polymorphic_on of the parent
# should be propagated here. If the col
# is present in our mapped table, or if our mapped
# table is the same as the parent (i.e. single table
# inheritance), we can use it
if mapper.polymorphic_on is not None:
if self.persist_selectable is mapper.persist_selectable:
self.polymorphic_on = mapper.polymorphic_on
else:
self.polymorphic_on = (
self.persist_selectable
).corresponding_column(mapper.polymorphic_on)
# we can use the parent mapper's _set_polymorphic_identity
# directly; it ensures the polymorphic_identity of the
# instance's mapper is used, so it is portable to subclasses.
if self.polymorphic_on is not None:
self._set_polymorphic_identity = (
mapper._set_polymorphic_identity
)
self._validate_polymorphic_identity = (
mapper._validate_polymorphic_identity
)
else:
self._set_polymorphic_identity = None
return
if setter:
def _set_polymorphic_identity(state):
dict_ = state.dict
state.get_impl(polymorphic_key).set(
state,
dict_,
state.manager.mapper.polymorphic_identity,
None,
)
def _validate_polymorphic_identity(mapper, state, dict_):
if (
polymorphic_key in dict_
and dict_[polymorphic_key]
not in mapper._acceptable_polymorphic_identities
):
util.warn_limited(
"Flushing object %s with "
"incompatible polymorphic identity %r; the "
"object may not refresh and/or load correctly",
(state_str(state), dict_[polymorphic_key]),
)
self._set_polymorphic_identity = _set_polymorphic_identity
self._validate_polymorphic_identity = (
_validate_polymorphic_identity
)
else:
self._set_polymorphic_identity = None
_validate_polymorphic_identity = None
@_memoized_configured_property
def _version_id_prop(self):
if self.version_id_col is not None:
return self._columntoproperty[self.version_id_col]
else:
return None
@_memoized_configured_property
def _acceptable_polymorphic_identities(self):
identities = set()
stack = deque([self])
while stack:
item = stack.popleft()
if item.persist_selectable is self.persist_selectable:
identities.add(item.polymorphic_identity)
stack.extend(item._inheriting_mappers)
return identities
@_memoized_configured_property
def _prop_set(self):
return frozenset(self._props.values())
def _adapt_inherited_property(self, key, prop, init):
if not self.concrete:
self._configure_property(key, prop, init=False, setparent=False)
elif key not in self._props:
# determine if the class implements this attribute; if not,
# or if it is implemented by the attribute that is handling the
# given superclass-mapped property, then we need to report that we
# can't use this at the instance level since we are a concrete
# mapper and we don't map this. don't trip user-defined
# descriptors that might have side effects when invoked.
implementing_attribute = self.class_manager._get_class_attr_mro(
key, prop
)
if implementing_attribute is prop or (
isinstance(
implementing_attribute, attributes.InstrumentedAttribute
)
and implementing_attribute._parententity is prop.parent
):
self._configure_property(
key,
properties.ConcreteInheritedProperty(),
init=init,
setparent=True,
)
def _configure_property(self, key, prop, init=True, setparent=True):
self._log("_configure_property(%s, %s)", key, prop.__class__.__name__)
if not isinstance(prop, MapperProperty):
prop = self._property_from_column(key, prop)
if isinstance(prop, properties.ColumnProperty):
col = self.persist_selectable.corresponding_column(prop.columns[0])
# if the column is not present in the mapped table,
# test if a column has been added after the fact to the
# parent table (or their parent, etc.) [ticket:1570]
if col is None and self.inherits:
path = [self]
for m in self.inherits.iterate_to_root():
col = m.local_table.corresponding_column(prop.columns[0])
if col is not None:
for m2 in path:
m2.persist_selectable._reset_exported()
col = self.persist_selectable.corresponding_column(
prop.columns[0]
)
break
path.append(m)
# subquery expression, column not present in the mapped
# selectable.
if col is None:
col = prop.columns[0]
# column is coming in after _readonly_props was
# initialized; check for 'readonly'
if hasattr(self, "_readonly_props") and (
not hasattr(col, "table")
or col.table not in self._cols_by_table
):
self._readonly_props.add(prop)
else:
# if column is coming in after _cols_by_table was
# initialized, ensure the col is in the right set
if (
hasattr(self, "_cols_by_table")
and col.table in self._cols_by_table
and col not in self._cols_by_table[col.table]
):
self._cols_by_table[col.table].add(col)
# if this properties.ColumnProperty represents the "polymorphic
# discriminator" column, mark it. We'll need this when rendering
# columns in SELECT statements.
if not hasattr(prop, "_is_polymorphic_discriminator"):
prop._is_polymorphic_discriminator = (
col is self.polymorphic_on
or prop.columns[0] is self.polymorphic_on
)
self.columns[key] = col
for col in prop.columns + prop._orig_columns:
for col in col.proxy_set:
self._columntoproperty[col] = prop
prop.key = key
if setparent:
prop.set_parent(self, init)
if key in self._props and getattr(
self._props[key], "_mapped_by_synonym", False
):
syn = self._props[key]._mapped_by_synonym
raise sa_exc.ArgumentError(
"Can't call map_column=True for synonym %r=%r, "
"a ColumnProperty already exists keyed to the name "
"%r for column %r" % (syn, key, key, syn)
)
if (
key in self._props
and not isinstance(prop, properties.ColumnProperty)
and not isinstance(
self._props[key],
(
properties.ColumnProperty,
properties.ConcreteInheritedProperty,
),
)
):
util.warn(
"Property %s on %s being replaced with new "
"property %s; the old property will be discarded"
% (self._props[key], self, prop)
)
oldprop = self._props[key]
self._path_registry.pop(oldprop, None)
self._props[key] = prop
if not self.non_primary:
prop.instrument_class(self)
for mapper in self._inheriting_mappers:
mapper._adapt_inherited_property(key, prop, init)
if init:
prop.init()
prop.post_instrument_class(self)
if self.configured:
self._expire_memoizations()
def _property_from_column(self, key, prop):
"""generate/update a :class:`.ColumnProprerty` given a
:class:`_schema.Column` object. """
# we were passed a Column or a list of Columns;
# generate a properties.ColumnProperty
columns = util.to_list(prop)
column = columns[0]
if not expression._is_column(column):
raise sa_exc.ArgumentError(
"%s=%r is not an instance of MapperProperty or Column"
% (key, prop)
)
prop = self._props.get(key, None)
if isinstance(prop, properties.ColumnProperty):
if (
(
not self._inherits_equated_pairs
or (prop.columns[0], column)
not in self._inherits_equated_pairs
)
and not prop.columns[0].shares_lineage(column)
and prop.columns[0] is not self.version_id_col
and column is not self.version_id_col
):
warn_only = prop.parent is not self
msg = (
"Implicitly combining column %s with column "
"%s under attribute '%s'. Please configure one "
"or more attributes for these same-named columns "
"explicitly." % (prop.columns[-1], column, key)
)
if warn_only:
util.warn(msg)
else:
raise sa_exc.InvalidRequestError(msg)
# existing properties.ColumnProperty from an inheriting
# mapper. make a copy and append our column to it
prop = prop.copy()
prop.columns.insert(0, column)
self._log(
"inserting column to existing list "
"in properties.ColumnProperty %s" % (key)
)
return prop
elif prop is None or isinstance(
prop, properties.ConcreteInheritedProperty
):
mapped_column = []
for c in columns:
mc = self.persist_selectable.corresponding_column(c)
if mc is None:
mc = self.local_table.corresponding_column(c)
if mc is not None:
# if the column is in the local table but not the
# mapped table, this corresponds to adding a
# column after the fact to the local table.
# [ticket:1523]
self.persist_selectable._reset_exported()
mc = self.persist_selectable.corresponding_column(c)
if mc is None:
raise sa_exc.ArgumentError(
"When configuring property '%s' on %s, "
"column '%s' is not represented in the mapper's "
"table. Use the `column_property()` function to "
"force this column to be mapped as a read-only "
"attribute." % (key, self, c)
)
mapped_column.append(mc)
return properties.ColumnProperty(*mapped_column)
else:
raise sa_exc.ArgumentError(
"WARNING: when configuring property '%s' on %s, "
"column '%s' conflicts with property '%r'. "
"To resolve this, map the column to the class under a "
"different name in the 'properties' dictionary. Or, "
"to remove all awareness of the column entirely "
"(including its availability as a foreign key), "
"use the 'include_properties' or 'exclude_properties' "
"mapper arguments to control specifically which table "
"columns get mapped." % (key, self, column.key, prop)
)
def _post_configure_properties(self):
"""Call the ``init()`` method on all ``MapperProperties``
attached to this mapper.
This is a deferred configuration step which is intended
to execute once all mappers have been constructed.
"""
self._log("_post_configure_properties() started")
for key, prop in list(self._props.items()):
self._log("initialize prop %s", key)
if prop.parent is self and not prop._configure_started:
prop.init()
if prop._configure_finished:
prop.post_instrument_class(self)
self._log("_post_configure_properties() complete")
self.configured = True
def add_properties(self, dict_of_properties):
"""Add the given dictionary of properties to this mapper,
using `add_property`.
"""
for key, value in dict_of_properties.items():
self.add_property(key, value)
def add_property(self, key, prop):
"""Add an individual MapperProperty to this mapper.
If the mapper has not been configured yet, just adds the
property to the initial properties dictionary sent to the
constructor. If this Mapper has already been configured, then
the given MapperProperty is configured immediately.
"""
self._init_properties[key] = prop
self._configure_property(key, prop, init=self.configured)
def _expire_memoizations(self):
for mapper in self.iterate_to_root():
_memoized_configured_property.expire_instance(mapper)
@property
def _log_desc(self):
return (
"("
+ self.class_.__name__
+ "|"
+ (
self.local_table is not None
and self.local_table.description
or str(self.local_table)
)
+ (self.non_primary and "|non-primary" or "")
+ ")"
)
def _log(self, msg, *args):
self.logger.info("%s " + msg, *((self._log_desc,) + args))
def _log_debug(self, msg, *args):
self.logger.debug("%s " + msg, *((self._log_desc,) + args))
def __repr__(self):
return "<Mapper at 0x%x; %s>" % (id(self), self.class_.__name__)
def __str__(self):
return "mapped class %s%s->%s" % (
self.class_.__name__,
self.non_primary and " (non-primary)" or "",
self.local_table.description
if self.local_table is not None
else self.persist_selectable.description,
)
def _is_orphan(self, state):
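# An object is considered an orphan based on its membership in
# parent collections/references that establish a delete-orphan
# cascade towards this mapper.  Under the default behavior, losing
# *any* such parent makes the object an orphan; under
# legacy_is_orphan, the object must be de-associated from *all*
# such parents.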
orphan_possible = False
for mapper in self.iterate_to_root():
for (key, cls) in mapper._delete_orphans:
orphan_possible = True
has_parent = attributes.manager_of_class(cls).has_parent(
state, key, optimistic=state.has_identity
)
if self.legacy_is_orphan and has_parent:
return False
elif not self.legacy_is_orphan and not has_parent:
return True
if self.legacy_is_orphan:
return orphan_possible
else:
return False
def has_property(self, key):
return key in self._props
def get_property(self, key, _configure_mappers=True):
"""return a MapperProperty associated with the given key.
"""
if _configure_mappers and Mapper._new_mappers:
configure_mappers()
try:
return self._props[key]
except KeyError as err:
util.raise_(
sa_exc.InvalidRequestError(
"Mapper '%s' has no property '%s'" % (self, key)
),
replace_context=err,
)
def get_property_by_column(self, column):
"""Given a :class:`_schema.Column` object, return the
:class:`.MapperProperty` which maps this column."""
return self._columntoproperty[column]
@property
def iterate_properties(self):
"""return an iterator of all MapperProperty objects."""
if Mapper._new_mappers:
configure_mappers()
return iter(self._props.values())
def _mappers_from_spec(self, spec, selectable):
"""given a with_polymorphic() argument, return the set of mappers it
represents.
Trims the list of mappers to just those represented within the given
selectable, if present. This helps some more legacy-ish mappings.
"""
if spec == "*":
mappers = list(self.self_and_descendants)
elif spec:
mappers = set()
for m in util.to_list(spec):
m = _class_to_mapper(m)
if not m.isa(self):
raise sa_exc.InvalidRequestError(
"%r does not inherit from %r" % (m, self)
)
if selectable is None:
mappers.update(m.iterate_to_root())
else:
mappers.add(m)
mappers = [m for m in self.self_and_descendants if m in mappers]
else:
mappers = []
if selectable is not None:
tables = set(
sql_util.find_tables(selectable, include_aliases=True)
)
mappers = [m for m in mappers if m.local_table in tables]
return mappers
def _selectable_from_mappers(self, mappers, innerjoin):
"""given a list of mappers (assumed to be within this mapper's
inheritance hierarchy), construct an outerjoin amongst those mappers'
mapped tables.
"""
from_obj = self.persist_selectable
for m in mappers:
if m is self:
continue
if m.concrete:
raise sa_exc.InvalidRequestError(
"'with_polymorphic()' requires 'selectable' argument "
"when concrete-inheriting mappers are used."
)
elif not m.single:
if innerjoin:
from_obj = from_obj.join(
m.local_table, m.inherit_condition
)
else:
from_obj = from_obj.outerjoin(
m.local_table, m.inherit_condition
)
return from_obj
@_memoized_configured_property
def _single_table_criterion(self):
if self.single and self.inherits and self.polymorphic_on is not None:
return self.polymorphic_on._annotate({"parentmapper": self}).in_(
m.polymorphic_identity for m in self.self_and_descendants
)
else:
return None
@_memoized_configured_property
def _with_polymorphic_mappers(self):
if Mapper._new_mappers:
configure_mappers()
if not self.with_polymorphic:
return []
return self._mappers_from_spec(*self.with_polymorphic)
@_memoized_configured_property
def _with_polymorphic_selectable(self):
if not self.with_polymorphic:
return self.persist_selectable
spec, selectable = self.with_polymorphic
if selectable is not None:
return selectable
else:
return self._selectable_from_mappers(
self._mappers_from_spec(spec, selectable), False
)
with_polymorphic_mappers = _with_polymorphic_mappers
"""The list of :class:`_orm.Mapper` objects included in the
default "polymorphic" query.
"""
@_memoized_configured_property
def _insert_cols_evaluating_none(self):
return dict(
(
table,
frozenset(
col for col in columns if col.type.should_evaluate_none
),
)
for table, columns in self._cols_by_table.items()
)
@_memoized_configured_property
def _insert_cols_as_none(self):
return dict(
(
table,
frozenset(
col.key
for col in columns
if not col.primary_key
and not col.server_default
and not col.default
and not col.type.should_evaluate_none
),
)
for table, columns in self._cols_by_table.items()
)
@_memoized_configured_property
def _propkey_to_col(self):
return dict(
(
table,
dict(
(self._columntoproperty[col].key, col) for col in columns
),
)
for table, columns in self._cols_by_table.items()
)
@_memoized_configured_property
def _pk_keys_by_table(self):
return dict(
(table, frozenset([col.key for col in pks]))
for table, pks in self._pks_by_table.items()
)
@_memoized_configured_property
def _pk_attr_keys_by_table(self):
return dict(
(
table,
frozenset([self._columntoproperty[col].key for col in pks]),
)
for table, pks in self._pks_by_table.items()
)
@_memoized_configured_property
def _server_default_cols(self):
return dict(
(
table,
frozenset(
[
col.key
for col in columns
if col.server_default is not None
]
),
)
for table, columns in self._cols_by_table.items()
)
@_memoized_configured_property
def _server_default_plus_onupdate_propkeys(self):
result = set()
for table, columns in self._cols_by_table.items():
for col in columns:
if (
col.server_default is not None
or col.server_onupdate is not None
) and col in self._columntoproperty:
result.add(self._columntoproperty[col].key)
return result
@_memoized_configured_property
def _server_onupdate_default_cols(self):
return dict(
(
table,
frozenset(
[
col.key
for col in columns
if col.server_onupdate is not None
]
),
)
for table, columns in self._cols_by_table.items()
)
@property
def selectable(self):
"""The :func:`_expression.select` construct this :class:`_orm.Mapper`
selects from
by default.
Normally, this is equivalent to :attr:`.persist_selectable`, unless
the ``with_polymorphic`` feature is in use, in which case the
full "polymorphic" selectable is returned.
"""
return self._with_polymorphic_selectable
def _with_polymorphic_args(
self, spec=None, selectable=False, innerjoin=False
):
if self.with_polymorphic:
if not spec:
spec = self.with_polymorphic[0]
if selectable is False:
selectable = self.with_polymorphic[1]
elif selectable is False:
selectable = None
mappers = self._mappers_from_spec(spec, selectable)
if selectable is not None:
return mappers, selectable
else:
return mappers, self._selectable_from_mappers(mappers, innerjoin)
@_memoized_configured_property
def _polymorphic_properties(self):
return list(
self._iterate_polymorphic_properties(
self._with_polymorphic_mappers
)
)
def _iterate_polymorphic_properties(self, mappers=None):
"""Return an iterator of MapperProperty objects which will render into
a SELECT."""
if mappers is None:
mappers = self._with_polymorphic_mappers
if not mappers:
for c in self.iterate_properties:
yield c
else:
# in the polymorphic case, filter out discriminator columns
# from other mappers, as these are sometimes dependent on that
# mapper's polymorphic selectable (which we don't want rendered)
for c in util.unique_list(
chain(
*[
list(mapper.iterate_properties)
for mapper in [self] + mappers
]
)
):
if getattr(c, "_is_polymorphic_discriminator", False) and (
self.polymorphic_on is None
or c.columns[0] is not self.polymorphic_on
):
continue
yield c
@_memoized_configured_property
def attrs(self):
"""A namespace of all :class:`.MapperProperty` objects
        associated with this mapper.
This is an object that provides each property based on
its key name. For instance, the mapper for a
``User`` class which has ``User.name`` attribute would
provide ``mapper.attrs.name``, which would be the
:class:`.ColumnProperty` representing the ``name``
column. The namespace object can also be iterated,
which would yield each :class:`.MapperProperty`.
:class:`_orm.Mapper` has several pre-filtered views
of this attribute which limit the types of properties
returned, including :attr:`.synonyms`, :attr:`.column_attrs`,
:attr:`.relationships`, and :attr:`.composites`.
.. warning::
The :attr:`_orm.Mapper.attrs` accessor namespace is an
instance of :class:`.OrderedProperties`. This is
a dictionary-like object which includes a small number of
named methods such as :meth:`.OrderedProperties.items`
and :meth:`.OrderedProperties.values`. When
accessing attributes dynamically, favor using the dict-access
scheme, e.g. ``mapper.attrs[somename]`` over
``getattr(mapper.attrs, somename)`` to avoid name collisions.
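        For example, properties may be looked up by key or iterated
        (a minimal sketch; the mapped ``User`` class and its ``name``
        attribute are illustrative only)::

            from sqlalchemy import inspect

            mapper = inspect(User)
            name_prop = mapper.attrs["name"]  # ColumnProperty for "name"
            for prop in mapper.attrs:
                print(prop.key)
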
.. seealso::
:attr:`_orm.Mapper.all_orm_descriptors`
"""
if Mapper._new_mappers:
configure_mappers()
return util.ImmutableProperties(self._props)
@_memoized_configured_property
def all_orm_descriptors(self):
"""A namespace of all :class:`.InspectionAttr` attributes associated
with the mapped class.
These attributes are in all cases Python :term:`descriptors`
associated with the mapped class or its superclasses.
This namespace includes attributes that are mapped to the class
as well as attributes declared by extension modules.
It includes any Python descriptor type that inherits from
:class:`.InspectionAttr`. This includes
:class:`.QueryableAttribute`, as well as extension types such as
:class:`.hybrid_property`, :class:`.hybrid_method` and
:class:`.AssociationProxy`.
To distinguish between mapped attributes and extension attributes,
the attribute :attr:`.InspectionAttr.extension_type` will refer
to a constant that distinguishes between different extension types.
When dealing with a :class:`.QueryableAttribute`, the
:attr:`.QueryableAttribute.property` attribute refers to the
:class:`.MapperProperty` property, which is what you get when
referring to the collection of mapped properties via
:attr:`_orm.Mapper.attrs`.
.. warning::
The :attr:`_orm.Mapper.all_orm_descriptors`
accessor namespace is an
instance of :class:`.OrderedProperties`. This is
a dictionary-like object which includes a small number of
named methods such as :meth:`.OrderedProperties.items`
and :meth:`.OrderedProperties.values`. When
accessing attributes dynamically, favor using the dict-access
scheme, e.g. ``mapper.all_orm_descriptors[somename]`` over
``getattr(mapper.all_orm_descriptors, somename)`` to avoid name
collisions.
.. seealso::
:attr:`_orm.Mapper.attrs`
"""
return util.ImmutableProperties(
dict(self.class_manager._all_sqla_attributes())
)
@_memoized_configured_property
def synonyms(self):
"""Return a namespace of all :class:`.SynonymProperty`
properties maintained by this :class:`_orm.Mapper`.
.. seealso::
:attr:`_orm.Mapper.attrs` - namespace of all
:class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.SynonymProperty)
@_memoized_configured_property
def column_attrs(self):
"""Return a namespace of all :class:`.ColumnProperty`
properties maintained by this :class:`_orm.Mapper`.
.. seealso::
:attr:`_orm.Mapper.attrs` - namespace of all
:class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.ColumnProperty)
@_memoized_configured_property
def relationships(self):
"""A namespace of all :class:`.RelationshipProperty` properties
maintained by this :class:`_orm.Mapper`.
.. warning::
the :attr:`_orm.Mapper.relationships` accessor namespace is an
instance of :class:`.OrderedProperties`. This is
a dictionary-like object which includes a small number of
named methods such as :meth:`.OrderedProperties.items`
and :meth:`.OrderedProperties.values`. When
accessing attributes dynamically, favor using the dict-access
scheme, e.g. ``mapper.relationships[somename]`` over
``getattr(mapper.relationships, somename)`` to avoid name
collisions.
.. seealso::
:attr:`_orm.Mapper.attrs` - namespace of all
:class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.RelationshipProperty)
@_memoized_configured_property
def composites(self):
"""Return a namespace of all :class:`.CompositeProperty`
properties maintained by this :class:`_orm.Mapper`.
.. seealso::
:attr:`_orm.Mapper.attrs` - namespace of all
:class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.CompositeProperty)
def _filter_properties(self, type_):
if Mapper._new_mappers:
configure_mappers()
return util.ImmutableProperties(
util.OrderedDict(
(k, v) for k, v in self._props.items() if isinstance(v, type_)
)
)
@_memoized_configured_property
def _get_clause(self):
"""create a "get clause" based on the primary key. this is used
by query.get() and many-to-one lazyloads to load this item
by primary key.
"""
params = [
(primary_key, sql.bindparam(None, type_=primary_key.type))
for primary_key in self.primary_key
]
return (
sql.and_(*[k == v for (k, v) in params]),
util.column_dict(params),
)
@_memoized_configured_property
def _equivalent_columns(self):
"""Create a map of all equivalent columns, based on
the determination of column pairs that are equated to
one another based on inherit condition. This is designed
to work with the queries that util.polymorphic_union
comes up with, which often don't include the columns from
the base table directly (including the subclass table columns
only).
The resulting structure is a dictionary of columns mapped
to lists of equivalent columns, e.g.::
{
tablea.col1:
{tableb.col1, tablec.col1},
tablea.col2:
{tabled.col2}
}
"""
result = util.column_dict()
def visit_binary(binary):
if binary.operator == operators.eq:
if binary.left in result:
result[binary.left].add(binary.right)
else:
result[binary.left] = util.column_set((binary.right,))
if binary.right in result:
result[binary.right].add(binary.left)
else:
result[binary.right] = util.column_set((binary.left,))
for mapper in self.base_mapper.self_and_descendants:
if mapper.inherit_condition is not None:
visitors.traverse(
mapper.inherit_condition, {}, {"binary": visit_binary}
)
return result
def _is_userland_descriptor(self, obj):
if isinstance(
obj,
(
_MappedAttribute,
instrumentation.ClassManager,
expression.ColumnElement,
),
):
return False
else:
return True
def _should_exclude(self, name, assigned_name, local, column):
"""determine whether a particular property should be implicitly
present on the class.
This occurs when properties are propagated from an inherited class, or
are applied from the columns present in the mapped table.
"""
# check for class-bound attributes and/or descriptors,
# either local or from an inherited class
if local:
if self.class_.__dict__.get(
assigned_name, None
) is not None and self._is_userland_descriptor(
self.class_.__dict__[assigned_name]
):
return True
else:
attr = self.class_manager._get_class_attr_mro(assigned_name, None)
if attr is not None and self._is_userland_descriptor(attr):
return True
if (
self.include_properties is not None
and name not in self.include_properties
and (column is None or column not in self.include_properties)
):
self._log("not including property %s" % (name))
return True
if self.exclude_properties is not None and (
name in self.exclude_properties
or (column is not None and column in self.exclude_properties)
):
self._log("excluding property %s" % (name))
return True
return False
def common_parent(self, other):
"""Return true if the given mapper shares a
common inherited parent as this mapper."""
return self.base_mapper is other.base_mapper
def _canload(self, state, allow_subtypes):
s = self.primary_mapper()
if self.polymorphic_on is not None or allow_subtypes:
return _state_mapper(state).isa(s)
else:
return _state_mapper(state) is s
def isa(self, other):
"""Return True if the this mapper inherits from the given mapper."""
m = self
while m and m is not other:
m = m.inherits
return bool(m)
def iterate_to_root(self):
m = self
while m:
yield m
m = m.inherits
@_memoized_configured_property
def self_and_descendants(self):
"""The collection including this mapper and all descendant mappers.
This includes not just the immediately inheriting mappers but
all their inheriting mappers as well.
"""
descendants = []
stack = deque([self])
while stack:
item = stack.popleft()
descendants.append(item)
stack.extend(item._inheriting_mappers)
return util.WeakSequence(descendants)
def polymorphic_iterator(self):
"""Iterate through the collection including this mapper and
all descendant mappers.
This includes not just the immediately inheriting mappers but
all their inheriting mappers as well.
To iterate through an entire hierarchy, use
``mapper.base_mapper.polymorphic_iterator()``.
"""
return iter(self.self_and_descendants)
def primary_mapper(self):
"""Return the primary mapper corresponding to this mapper's class key
(class)."""
return self.class_manager.mapper
@property
def primary_base_mapper(self):
return self.class_manager.mapper.base_mapper
def _result_has_identity_key(self, result, adapter=None):
pk_cols = self.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
for col in pk_cols:
if not result._has_key(col):
return False
else:
return True
def identity_key_from_row(self, row, identity_token=None, adapter=None):
"""Return an identity-map key for use in storing/retrieving an
item from the identity map.
:param row: A :class:`.RowProxy` instance. The columns which are
mapped by this :class:`_orm.Mapper` should be locatable in the row,
preferably via the :class:`_schema.Column`
object directly (as is the case
when a :func:`_expression.select` construct is executed),
or via string names of
the form ``<tablename>_<colname>``.
"""
pk_cols = self.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
return (
self._identity_class,
tuple(row[column] for column in pk_cols),
identity_token,
)
def identity_key_from_primary_key(self, primary_key, identity_token=None):
"""Return an identity-map key for use in storing/retrieving an
item from an identity map.
:param primary_key: A list of values indicating the identifier.
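        For example, assuming an illustrative mapped class ``User`` with
        a single integer primary key column, the key takes the form of
        a tuple of the class, the primary key values, and the identity
        token::

            >>> from sqlalchemy import inspect
            >>> inspect(User).identity_key_from_primary_key([5])
            (<class 'User'>, (5,), None)
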
"""
return self._identity_class, tuple(primary_key), identity_token
def identity_key_from_instance(self, instance):
"""Return the identity key for the given instance, based on
its primary key attributes.
If the instance's state is expired, calling this method
will result in a database check to see if the object has been deleted.
If the row no longer exists,
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
This value is typically also found on the instance state under the
        attribute name ``key``.
"""
state = attributes.instance_state(instance)
return self._identity_key_from_state(state, attributes.PASSIVE_OFF)
def _identity_key_from_state(
self, state, passive=attributes.PASSIVE_RETURN_NEVER_SET
):
dict_ = state.dict
manager = state.manager
return (
self._identity_class,
tuple(
[
manager[prop.key].impl.get(state, dict_, passive)
for prop in self._identity_key_props
]
),
state.identity_token,
)
def primary_key_from_instance(self, instance):
"""Return the list of primary key values for the given
instance.
If the instance's state is expired, calling this method
will result in a database check to see if the object has been deleted.
If the row no longer exists,
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
"""
state = attributes.instance_state(instance)
identity_key = self._identity_key_from_state(
state, attributes.PASSIVE_OFF
)
return identity_key[1]
@_memoized_configured_property
def _persistent_sortkey_fn(self):
key_fns = [col.type.sort_key_function for col in self.primary_key]
if set(key_fns).difference([None]):
def key(state):
return tuple(
key_fn(val) if key_fn is not None else val
for key_fn, val in zip(key_fns, state.key[1])
)
else:
def key(state):
return state.key[1]
return key
@_memoized_configured_property
def _identity_key_props(self):
return [self._columntoproperty[col] for col in self.primary_key]
@_memoized_configured_property
def _all_pk_props(self):
collection = set()
for table in self.tables:
collection.update(self._pks_by_table[table])
return collection
@_memoized_configured_property
def _should_undefer_in_wildcard(self):
cols = set(self.primary_key)
if self.polymorphic_on is not None:
cols.add(self.polymorphic_on)
return cols
@_memoized_configured_property
def _primary_key_propkeys(self):
return {prop.key for prop in self._all_pk_props}
def _get_state_attr_by_column(
self, state, dict_, column, passive=attributes.PASSIVE_RETURN_NEVER_SET
):
prop = self._columntoproperty[column]
return state.manager[prop.key].impl.get(state, dict_, passive=passive)
def _set_committed_state_attr_by_column(self, state, dict_, column, value):
prop = self._columntoproperty[column]
state.manager[prop.key].impl.set_committed_value(state, dict_, value)
def _set_state_attr_by_column(self, state, dict_, column, value):
prop = self._columntoproperty[column]
state.manager[prop.key].impl.set(state, dict_, value, None)
def _get_committed_attr_by_column(self, obj, column):
state = attributes.instance_state(obj)
dict_ = attributes.instance_dict(obj)
return self._get_committed_state_attr_by_column(
state, dict_, column, passive=attributes.PASSIVE_OFF
)
def _get_committed_state_attr_by_column(
self, state, dict_, column, passive=attributes.PASSIVE_RETURN_NEVER_SET
):
prop = self._columntoproperty[column]
return state.manager[prop.key].impl.get_committed_value(
state, dict_, passive=passive
)
def _optimized_get_statement(self, state, attribute_names):
"""assemble a WHERE clause which retrieves a given state by primary
key, using a minimized set of tables.
Applies to a joined-table inheritance mapper where the
requested attribute names are only present on joined tables,
not the base table. The WHERE clause attempts to include
only those tables to minimize joins.
"""
props = self._props
tables = set(
chain(
*[
sql_util.find_tables(c, check_columns=True)
for key in attribute_names
for c in props[key].columns
]
)
)
if self.base_mapper.local_table in tables:
return None
def visit_binary(binary):
leftcol = binary.left
rightcol = binary.right
if leftcol is None or rightcol is None:
return
if leftcol.table not in tables:
leftval = self._get_committed_state_attr_by_column(
state,
state.dict,
leftcol,
passive=attributes.PASSIVE_NO_INITIALIZE,
)
if leftval in orm_util._none_set:
raise _OptGetColumnsNotAvailable()
binary.left = sql.bindparam(
None, leftval, type_=binary.right.type
)
elif rightcol.table not in tables:
rightval = self._get_committed_state_attr_by_column(
state,
state.dict,
rightcol,
passive=attributes.PASSIVE_NO_INITIALIZE,
)
if rightval in orm_util._none_set:
raise _OptGetColumnsNotAvailable()
binary.right = sql.bindparam(
None, rightval, type_=binary.right.type
)
allconds = []
try:
start = False
for mapper in reversed(list(self.iterate_to_root())):
if mapper.local_table in tables:
start = True
elif not isinstance(
mapper.local_table, expression.TableClause
):
return None
if start and not mapper.single:
allconds.append(
visitors.cloned_traverse(
mapper.inherit_condition,
{},
{"binary": visit_binary},
)
)
except _OptGetColumnsNotAvailable:
return None
cond = sql.and_(*allconds)
cols = []
for key in attribute_names:
cols.extend(props[key].columns)
return sql.select(cols, cond, use_labels=True)
def _iterate_to_target_viawpoly(self, mapper):
if self.isa(mapper):
prev = self
for m in self.iterate_to_root():
yield m
if m is not prev and prev not in m._with_polymorphic_mappers:
break
prev = m
if m is mapper:
break
def _should_selectin_load(self, enabled_via_opt, polymorphic_from):
if not enabled_via_opt:
# common case, takes place for all polymorphic loads
mapper = polymorphic_from
for m in self._iterate_to_target_viawpoly(mapper):
if m.polymorphic_load == "selectin":
return m
else:
# uncommon case, selectin load options were used
enabled_via_opt = set(enabled_via_opt)
enabled_via_opt_mappers = {e.mapper: e for e in enabled_via_opt}
for entity in enabled_via_opt.union([polymorphic_from]):
mapper = entity.mapper
for m in self._iterate_to_target_viawpoly(mapper):
if (
m.polymorphic_load == "selectin"
or m in enabled_via_opt_mappers
):
return enabled_via_opt_mappers.get(m, m)
return None
@util.dependencies(
"sqlalchemy.ext.baked", "sqlalchemy.orm.strategy_options"
)
def _subclass_load_via_in(self, baked, strategy_options, entity):
"""Assemble a BakedQuery that can load the columns local to
this subclass as a SELECT with IN.
"""
assert self.inherits
polymorphic_prop = self._columntoproperty[self.polymorphic_on]
keep_props = set([polymorphic_prop] + self._identity_key_props)
disable_opt = strategy_options.Load(entity)
enable_opt = strategy_options.Load(entity)
for prop in self.attrs:
if prop.parent is self or prop in keep_props:
# "enable" options, to turn on the properties that we want to
# load by default (subject to options from the query)
enable_opt.set_generic_strategy(
(prop.key,), dict(prop.strategy_key)
)
else:
# "disable" options, to turn off the properties from the
# superclass that we *don't* want to load, applied after
# the options from the query to override them
disable_opt.set_generic_strategy(
(prop.key,), {"do_nothing": True}
)
if len(self.primary_key) > 1:
in_expr = sql.tuple_(*self.primary_key)
else:
in_expr = self.primary_key[0]
if entity.is_aliased_class:
assert entity.mapper is self
q = baked.BakedQuery(
self._compiled_cache,
lambda session: session.query(entity)
.select_entity_from(entity.selectable)
._adapt_all_clauses(),
(self,),
)
q.spoil()
else:
q = baked.BakedQuery(
self._compiled_cache,
lambda session: session.query(self),
(self,),
)
q += lambda q: q.filter(
in_expr.in_(sql.bindparam("primary_keys", expanding=True))
).order_by(*self.primary_key)
return q, enable_opt, disable_opt
@_memoized_configured_property
def _subclass_load_via_in_mapper(self):
return self._subclass_load_via_in(self)
def cascade_iterator(self, type_, state, halt_on=None):
r"""Iterate each element and its mapper in an object graph,
for all relationships that meet the given cascade rule.
:param type\_:
The name of the cascade rule (i.e. ``"save-update"``, ``"delete"``,
etc.).
.. note:: the ``"all"`` cascade is not accepted here. For a generic
object traversal function, see :ref:`faq_walk_objects`.
:param state:
          The lead InstanceState. Child items will be processed per
the relationships defined for this object's mapper.
        :return: the method yields 4-tuples in the form
          ``(object, mapper, state, dict)``, one for each traversed
          object.
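        For example, to traverse all objects reachable from an object
        via the "save-update" cascade (a sketch; ``some_object`` is
        illustrative)::

            from sqlalchemy import inspect

            state = inspect(some_object)
            iterator = state.mapper.cascade_iterator("save-update", state)
            for obj, mapper, obj_state, obj_dict in iterator:
                print(obj)
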
.. seealso::
:ref:`unitofwork_cascades`
:ref:`faq_walk_objects` - illustrates a generic function to
traverse all objects without relying on cascades.
"""
visited_states = set()
prp, mpp = object(), object()
assert state.mapper.isa(self)
visitables = deque(
[(deque(state.mapper._props.values()), prp, state, state.dict)]
)
while visitables:
iterator, item_type, parent_state, parent_dict = visitables[-1]
if not iterator:
visitables.pop()
continue
if item_type is prp:
prop = iterator.popleft()
if type_ not in prop.cascade:
continue
queue = deque(
prop.cascade_iterator(
type_,
parent_state,
parent_dict,
visited_states,
halt_on,
)
)
if queue:
visitables.append((queue, mpp, None, None))
elif item_type is mpp:
(
instance,
instance_mapper,
corresponding_state,
corresponding_dict,
) = iterator.popleft()
yield (
instance,
instance_mapper,
corresponding_state,
corresponding_dict,
)
visitables.append(
(
deque(instance_mapper._props.values()),
prp,
corresponding_state,
corresponding_dict,
)
)
@_memoized_configured_property
def _compiled_cache(self):
return util.LRUCache(self._compiled_cache_size)
@_memoized_configured_property
def _sorted_tables(self):
table_to_mapper = {}
for mapper in self.base_mapper.self_and_descendants:
for t in mapper.tables:
table_to_mapper.setdefault(t, mapper)
extra_dependencies = []
for table, mapper in table_to_mapper.items():
super_ = mapper.inherits
if super_:
extra_dependencies.extend(
[(super_table, table) for super_table in super_.tables]
)
def skip(fk):
# attempt to skip dependencies that are not
# significant to the inheritance chain
# for two tables that are related by inheritance.
# while that dependency may be important, it's technically
# not what we mean to sort on here.
parent = table_to_mapper.get(fk.parent.table)
dep = table_to_mapper.get(fk.column.table)
if (
parent is not None
and dep is not None
and dep is not parent
and dep.inherit_condition is not None
):
cols = set(sql_util._find_columns(dep.inherit_condition))
if parent.inherit_condition is not None:
cols = cols.union(
sql_util._find_columns(parent.inherit_condition)
)
return fk.parent not in cols and fk.column not in cols
else:
return fk.parent not in cols
return False
sorted_ = sql_util.sort_tables(
table_to_mapper,
skip_fn=skip,
extra_dependencies=extra_dependencies,
)
ret = util.OrderedDict()
for t in sorted_:
ret[t] = table_to_mapper[t]
return ret
def _memo(self, key, callable_):
if key in self._memoized_values:
return self._memoized_values[key]
else:
self._memoized_values[key] = value = callable_()
return value
@util.memoized_property
def _table_to_equated(self):
"""memoized map of tables to collections of columns to be
synchronized upwards to the base mapper."""
result = util.defaultdict(list)
for table in self._sorted_tables:
cols = set(table.c)
for m in self.iterate_to_root():
if m._inherits_equated_pairs and cols.intersection(
util.reduce(
set.union,
[l.proxy_set for l, r in m._inherits_equated_pairs],
)
):
result[table].append((m, m._inherits_equated_pairs))
return result
class _OptGetColumnsNotAvailable(Exception):
pass
def configure_mappers():
"""Initialize the inter-mapper relationships of all mappers that
have been constructed thus far.
This function can be called any number of times, but in
most cases is invoked automatically, the first time mappings are used,
as well as whenever mappings are used and additional not-yet-configured
mappers have been constructed.
    Points at which this occurs include when a mapped class is instantiated
into an instance, as well as when the :meth:`.Session.query` method
is used.
The :func:`.configure_mappers` function provides several event hooks
that can be used to augment its functionality. These methods include:
* :meth:`.MapperEvents.before_configured` - called once before
:func:`.configure_mappers` does any work; this can be used to establish
additional options, properties, or related mappings before the operation
proceeds.
* :meth:`.MapperEvents.mapper_configured` - called as each individual
:class:`_orm.Mapper` is configured within the process; will include all
mapper state except for backrefs set up by other mappers that are still
to be configured.
* :meth:`.MapperEvents.after_configured` - called once after
:func:`.configure_mappers` is complete; at this stage, all
:class:`_orm.Mapper` objects that are known to SQLAlchemy will be fully
configured. Note that the calling application may still have other
mappings that haven't been produced yet, such as if they are in modules
as yet unimported.
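    For example, a listener for the first hook may be attached using the
    :func:`.event.listens_for` decorator (a minimal sketch)::

        from sqlalchemy import event
        from sqlalchemy.orm import mapper

        @event.listens_for(mapper, "before_configured")
        def _on_before_configured():
            print("configure_mappers() is about to run")
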
"""
if not Mapper._new_mappers:
return
_CONFIGURE_MUTEX.acquire()
try:
global _already_compiling
if _already_compiling:
return
_already_compiling = True
try:
# double-check inside mutex
if not Mapper._new_mappers:
return
has_skip = False
Mapper.dispatch._for_class(Mapper).before_configured()
# initialize properties on all mappers
# note that _mapper_registry is unordered, which
# may randomly conceal/reveal issues related to
# the order of mapper compilation
for mapper in list(_mapper_registry):
run_configure = None
for fn in mapper.dispatch.before_mapper_configured:
run_configure = fn(mapper, mapper.class_)
if run_configure is EXT_SKIP:
has_skip = True
break
if run_configure is EXT_SKIP:
continue
if getattr(mapper, "_configure_failed", False):
e = sa_exc.InvalidRequestError(
"One or more mappers failed to initialize - "
"can't proceed with initialization of other "
"mappers. Triggering mapper: '%s'. "
"Original exception was: %s"
% (mapper, mapper._configure_failed)
)
e._configure_failed = mapper._configure_failed
raise e
if not mapper.configured:
try:
mapper._post_configure_properties()
mapper._expire_memoizations()
mapper.dispatch.mapper_configured(
mapper, mapper.class_
)
except Exception:
exc = sys.exc_info()[1]
if not hasattr(exc, "_configure_failed"):
mapper._configure_failed = exc
raise
if not has_skip:
Mapper._new_mappers = False
finally:
_already_compiling = False
finally:
_CONFIGURE_MUTEX.release()
Mapper.dispatch._for_class(Mapper).after_configured()
def reconstructor(fn):
"""Decorate a method as the 'reconstructor' hook.
Designates a method as the "reconstructor", an ``__init__``-like
method that will be called by the ORM after the instance has been
loaded from the database or otherwise reconstituted.
The reconstructor will be invoked with no arguments. Scalar
(non-collection) database-mapped attributes of the instance will
be available for use within the function. Eagerly-loaded
collections are generally not yet available and will usually only
contain the first element. ORM state changes made to objects at
this stage will not be recorded for the next flush() operation, so
the activity within a reconstructor should be conservative.
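    E.g., a minimal sketch, assuming a declarative ``Base`` and an
    illustrative ``User`` class::

        from sqlalchemy import Column, Integer
        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy.orm import reconstructor

        Base = declarative_base()

        class User(Base):
            __tablename__ = "user"
            id = Column(Integer, primary_key=True)

            @reconstructor
            def init_on_load(self):
                # invoked after the instance is loaded from the database
                self.cached_data = []
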
.. seealso::
:ref:`mapping_constructors`
:meth:`.InstanceEvents.load`
"""
fn.__sa_reconstructor__ = True
return fn
def validates(*names, **kw):
r"""Decorate a method as a 'validator' for one or more named properties.
Designates a method as a validator, a method which receives the
name of the attribute as well as a value to be assigned, or in the
case of a collection, the value to be added to the collection.
The function can then raise validation exceptions to halt the
process from continuing (where Python's built-in ``ValueError``
and ``AssertionError`` exceptions are reasonable choices), or can
modify or replace the value before proceeding. The function should
otherwise return the given value.
Note that a validator for a collection **cannot** issue a load of that
collection within the validation routine - this usage raises
an assertion to avoid recursion overflows. This is a reentrant
condition which is not supported.
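    E.g., a minimal sketch, assuming a declarative ``Base`` and an
    illustrative ``User`` class::

        from sqlalchemy import Column, Integer, String
        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy.orm import validates

        Base = declarative_base()

        class User(Base):
            __tablename__ = "user"
            id = Column(Integer, primary_key=True)
            email = Column(String)

            @validates("email")
            def validate_email(self, key, value):
                # reject values that are clearly not email addresses
                if "@" not in value:
                    raise ValueError("invalid email address: %r" % value)
                return value
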
:param \*names: list of attribute names to be validated.
:param include_removes: if True, "remove" events will be
sent as well - the validation function must accept an additional
argument "is_remove" which will be a boolean.
:param include_backrefs: defaults to ``True``; if ``False``, the
validation function will not emit if the originator is an attribute
event related via a backref. This can be used for bi-directional
:func:`.validates` usage where only one validator should emit per
attribute operation.
.. versionadded:: 0.9.0
.. seealso::
:ref:`simple_validators` - usage examples for :func:`.validates`
"""
include_removes = kw.pop("include_removes", False)
include_backrefs = kw.pop("include_backrefs", True)
def wrap(fn):
fn.__sa_validators__ = names
fn.__sa_validation_opts__ = {
"include_removes": include_removes,
"include_backrefs": include_backrefs,
}
return fn
return wrap
def _event_on_load(state, ctx):
    # invoke the mapped class's reconstructor hook, if one is defined,
    # when an instance is loaded from the database
instrumenting_mapper = state.manager.info[_INSTRUMENTOR]
if instrumenting_mapper._reconstructor:
instrumenting_mapper._reconstructor(state.obj())
def _event_on_first_init(manager, cls):
"""Initial mapper compilation trigger.
    Instrumentation calls this when an InstanceState is first
    generated; the hook is needed for legacy mutable attributes
    to work.
"""
instrumenting_mapper = manager.info.get(_INSTRUMENTOR)
if instrumenting_mapper:
if Mapper._new_mappers:
configure_mappers()
def _event_on_init(state, args, kwargs):
"""Run init_instance hooks.
This also includes mapper compilation, normally not needed
here but helps with some piecemeal configuration
scenarios (such as in the ORM tutorial).
"""
instrumenting_mapper = state.manager.info.get(_INSTRUMENTOR)
if instrumenting_mapper:
if Mapper._new_mappers:
configure_mappers()
if instrumenting_mapper._set_polymorphic_identity:
instrumenting_mapper._set_polymorphic_identity(state)
class _ColumnMapping(dict):
"""Error reporting helper for mapper._columntoproperty."""
__slots__ = ("mapper",)
def __init__(self, mapper):
self.mapper = mapper
def __missing__(self, column):
prop = self.mapper._props.get(column)
if prop:
raise orm_exc.UnmappedColumnError(
"Column '%s.%s' is not available, due to "
"conflicting property '%s':%r"
% (column.table.name, column.name, column.key, prop)
)
raise orm_exc.UnmappedColumnError(
"No column %s is configured on mapper %s..."
% (column, self.mapper)
)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/scoping.py
|
# orm/scoping.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import class_mapper
from . import exc as orm_exc
from .session import Session
from .. import exc as sa_exc
from ..util import ScopedRegistry
from ..util import ThreadLocalRegistry
from ..util import warn
__all__ = ["scoped_session"]
class scoped_session(object):
"""Provides scoped management of :class:`.Session` objects.
See :ref:`unitofwork_contextual` for a tutorial.
"""
session_factory = None
"""The `session_factory` provided to `__init__` is stored in this
attribute and may be accessed at a later time. This can be useful when
a new non-scoped :class:`.Session` or :class:`_engine.Connection` to the
database is needed."""
def __init__(self, session_factory, scopefunc=None):
"""Construct a new :class:`.scoped_session`.
:param session_factory: a factory to create new :class:`.Session`
instances. This is usually, but not necessarily, an instance
of :class:`.sessionmaker`.
:param scopefunc: optional function which defines
the current scope. If not passed, the :class:`.scoped_session`
object assumes "thread-local" scope, and will use
a Python ``threading.local()`` in order to maintain the current
:class:`.Session`. If passed, the function should return
a hashable token; this token will be used as the key in a
dictionary in order to store and retrieve the current
:class:`.Session`.
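        For example, a custom ``scopefunc`` may scope sessions to the
        current request of a web framework (a sketch;
        ``get_current_request`` and ``some_engine`` are illustrative)::

            Session = scoped_session(
                sessionmaker(bind=some_engine),
                scopefunc=get_current_request,
            )
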
"""
self.session_factory = session_factory
if scopefunc:
self.registry = ScopedRegistry(session_factory, scopefunc)
else:
self.registry = ThreadLocalRegistry(session_factory)
def __call__(self, **kw):
r"""Return the current :class:`.Session`, creating it
using the :attr:`.scoped_session.session_factory` if not present.
:param \**kw: Keyword arguments will be passed to the
:attr:`.scoped_session.session_factory` callable, if an existing
:class:`.Session` is not present. If the :class:`.Session` is present
and keyword arguments have been passed,
:exc:`~sqlalchemy.exc.InvalidRequestError` is raised.
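        E.g. (a sketch; ``some_engine`` is illustrative)::

            Session = scoped_session(sessionmaker(bind=some_engine))

            session = Session()   # creates the scope's Session
            session2 = Session()  # returns the same Session
            assert session is session2
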
"""
if kw:
if self.registry.has():
raise sa_exc.InvalidRequestError(
"Scoped session is already present; "
"no new arguments may be specified."
)
else:
sess = self.session_factory(**kw)
self.registry.set(sess)
return sess
else:
return self.registry()
def remove(self):
"""Dispose of the current :class:`.Session`, if present.
This will first call :meth:`.Session.close` method
on the current :class:`.Session`, which releases any existing
transactional/connection resources still being held; transactions
specifically are rolled back. The :class:`.Session` is then
discarded. Upon next usage within the same scope,
the :class:`.scoped_session` will produce a new
:class:`.Session` object.
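        E.g., in a web application, this method is typically invoked at
        the end of each request (a sketch; ``handle_request`` is an
        illustrative function)::

            try:
                handle_request(Session())
            finally:
                Session.remove()
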
"""
if self.registry.has():
self.registry().close()
self.registry.clear()
def configure(self, **kwargs):
"""reconfigure the :class:`.sessionmaker` used by this
:class:`.scoped_session`.
See :meth:`.sessionmaker.configure`.
"""
if self.registry.has():
warn(
"At least one scoped session is already present. "
" configure() can not affect sessions that have "
"already been created."
)
self.session_factory.configure(**kwargs)
def query_property(self, query_cls=None):
"""return a class property which produces a :class:`_query.Query`
object
against the class and the current :class:`.Session` when called.
e.g.::
Session = scoped_session(sessionmaker())
class MyClass(object):
query = Session.query_property()
# after mappers are defined
result = MyClass.query.filter(MyClass.name=='foo').all()
Produces instances of the session's configured query class by
default. To override and use a custom implementation, provide
a ``query_cls`` callable. The callable will be invoked with
the class's mapper as a positional argument and a session
keyword argument.
There is no limit to the number of query properties placed on
a class.
"""
class query(object):
def __get__(s, instance, owner):
try:
mapper = class_mapper(owner)
if mapper:
if query_cls:
# custom query class
return query_cls(mapper, session=self.registry())
else:
# session's configured query class
return self.registry().query(mapper)
except orm_exc.UnmappedClassError:
return None
return query()
ScopedSession = scoped_session
"""Old name for backwards compatibility."""
def instrument(name):
    # produce a method that delegates the named Session method to the
    # Session held in the current scoped registry
    def do(self, *args, **kwargs):
        return getattr(self.registry(), name)(*args, **kwargs)
    return do
# proxy each public Session method onto scoped_session
for meth in Session.public_methods:
    setattr(scoped_session, meth, instrument(meth))
def makeprop(name):
    # produce a property that reads and writes the named attribute on
    # the Session held in the current scoped registry
    def set_(self, attr):
        setattr(self.registry(), name, attr)
    def get(self):
        return getattr(self.registry(), name)
    return property(get, set_)
# proxy common Session attributes onto scoped_session
for prop in (
"bind",
"dirty",
"deleted",
"new",
"identity_map",
"is_active",
"autoflush",
"no_autoflush",
"info",
"autocommit",
):
setattr(scoped_session, prop, makeprop(prop))
def clslevel(name):
    # produce a classmethod that delegates to the Session class method
    def do(cls, *args, **kwargs):
        return getattr(Session, name)(*args, **kwargs)
    return classmethod(do)
for prop in ("close_all", "object_session", "identity_key"):
setattr(scoped_session, prop, clslevel(prop))
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/session.py
|
# orm/session.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides the Session class and related utilities."""
import itertools
import sys
import weakref
from . import attributes
from . import exc
from . import identity
from . import loading
from . import persistence
from . import query
from . import state as statelib
from .base import _class_to_mapper
from .base import _none_set
from .base import _state_mapper
from .base import instance_str
from .base import object_mapper
from .base import object_state
from .base import state_str
from .deprecated_interfaces import SessionExtension
from .unitofwork import UOWTransaction
from .. import engine
from .. import exc as sa_exc
from .. import sql
from .. import util
from ..inspection import inspect
from ..sql import expression
from ..sql import util as sql_util
__all__ = ["Session", "SessionTransaction", "SessionExtension", "sessionmaker"]
_sessions = weakref.WeakValueDictionary()
"""Weak-referencing dictionary of :class:`.Session` objects.
"""
def _state_session(state):
"""Given an :class:`.InstanceState`, return the :class:`.Session`
associated, if any.
"""
if state.session_id:
try:
return _sessions[state.session_id]
except KeyError:
pass
return None
class _SessionClassMethods(object):
"""Class-level methods for :class:`.Session`, :class:`.sessionmaker`."""
@classmethod
@util.deprecated(
"1.3",
"The :meth:`.Session.close_all` method is deprecated and will be "
"removed in a future release. Please refer to "
":func:`.session.close_all_sessions`.",
)
def close_all(cls):
"""Close *all* sessions in memory."""
close_all_sessions()
@classmethod
@util.dependencies("sqlalchemy.orm.util")
def identity_key(cls, orm_util, *args, **kwargs):
"""Return an identity key.
This is an alias of :func:`.util.identity_key`.
"""
return orm_util.identity_key(*args, **kwargs)
@classmethod
def object_session(cls, instance):
"""Return the :class:`.Session` to which an object belongs.
This is an alias of :func:`.object_session`.
"""
return object_session(instance)
ACTIVE = util.symbol("ACTIVE")
PREPARED = util.symbol("PREPARED")
COMMITTED = util.symbol("COMMITTED")
DEACTIVE = util.symbol("DEACTIVE")
CLOSED = util.symbol("CLOSED")
class SessionTransaction(object):
"""A :class:`.Session`-level transaction.
:class:`.SessionTransaction` is a mostly behind-the-scenes object
not normally referenced directly by application code. It coordinates
among multiple :class:`_engine.Connection` objects, maintaining a database
transaction for each one individually, committing or rolling them
back all at once. It also provides optional two-phase commit behavior
which can augment this coordination operation.
The :attr:`.Session.transaction` attribute of :class:`.Session`
refers to the current :class:`.SessionTransaction` object in use, if any.
The :attr:`.SessionTransaction.parent` attribute refers to the parent
:class:`.SessionTransaction` in the stack of :class:`.SessionTransaction`
objects. If this attribute is ``None``, then this is the top of the stack.
If non-``None``, then this :class:`.SessionTransaction` refers either
to a so-called "subtransaction" or a "nested" transaction. A
"subtransaction" is a scoping concept that demarcates an inner portion
of the outermost "real" transaction. A nested transaction, which
is indicated when the :attr:`.SessionTransaction.nested`
attribute is also True, indicates that this :class:`.SessionTransaction`
corresponds to a SAVEPOINT.
**Life Cycle**
A :class:`.SessionTransaction` is associated with a :class:`.Session`
in its default mode of ``autocommit=False`` immediately, associated
with no database connections. As the :class:`.Session` is called upon
to emit SQL on behalf of various :class:`_engine.Engine` or
:class:`_engine.Connection`
objects, a corresponding :class:`_engine.Connection` and associated
:class:`.Transaction` is added to a collection within the
:class:`.SessionTransaction` object, becoming one of the
connection/transaction pairs maintained by the
:class:`.SessionTransaction`. The start of a :class:`.SessionTransaction`
can be tracked using the :meth:`.SessionEvents.after_transaction_create`
event.
The lifespan of the :class:`.SessionTransaction` ends when the
:meth:`.Session.commit`, :meth:`.Session.rollback` or
:meth:`.Session.close` methods are called. At this point, the
:class:`.SessionTransaction` removes its association with its parent
:class:`.Session`. A :class:`.Session` that is in ``autocommit=False``
mode will create a new :class:`.SessionTransaction` to replace it
immediately, whereas a :class:`.Session` that's in ``autocommit=True``
mode will remain without a :class:`.SessionTransaction` until the
:meth:`.Session.begin` method is called. The end of a
:class:`.SessionTransaction` can be tracked using the
:meth:`.SessionEvents.after_transaction_end` event.
**Nesting and Subtransactions**
Another detail of :class:`.SessionTransaction` behavior is that it is
capable of "nesting". This means that the :meth:`.Session.begin` method
can be called while an existing :class:`.SessionTransaction` is already
present, producing a new :class:`.SessionTransaction` that temporarily
replaces the parent :class:`.SessionTransaction`. When a
:class:`.SessionTransaction` is produced as nested, it assigns itself to
the :attr:`.Session.transaction` attribute, and it additionally will assign
    the previous :class:`.SessionTransaction` to its
    :attr:`.SessionTransaction.parent`
attribute. The behavior is effectively a
stack, where :attr:`.Session.transaction` refers to the current head of
the stack, and the :attr:`.SessionTransaction.parent` attribute allows
traversal up the stack until :attr:`.SessionTransaction.parent` is
``None``, indicating the top of the stack.
When the scope of :class:`.SessionTransaction` is ended via
:meth:`.Session.commit` or :meth:`.Session.rollback`, it restores its
parent :class:`.SessionTransaction` back onto the
:attr:`.Session.transaction` attribute.
The purpose of this stack is to allow nesting of
:meth:`.Session.rollback` or :meth:`.Session.commit` calls in context
with various flavors of :meth:`.Session.begin`. This nesting behavior
applies to when :meth:`.Session.begin_nested` is used to emit a
SAVEPOINT transaction, and is also used to produce a so-called
"subtransaction" which allows a block of code to use a
begin/rollback/commit sequence regardless of whether or not its enclosing
code block has begun a transaction. The :meth:`.flush` method, whether
called explicitly or via autoflush, is the primary consumer of the
"subtransaction" feature, in that it wishes to guarantee that it works
    within a transaction block regardless of whether or not the
:class:`.Session` is in transactional mode when the method is called.
Note that the flush process that occurs within the "autoflush" feature
as well as when the :meth:`.Session.flush` method is used **always**
creates a :class:`.SessionTransaction` object. This object is normally
a subtransaction, unless the :class:`.Session` is in autocommit mode
and no transaction exists at all, in which case it's the outermost
transaction. Any event-handling logic or other inspection logic
needs to take into account whether a :class:`.SessionTransaction`
is the outermost transaction, a subtransaction, or a "nested" / SAVEPOINT
transaction.
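    For example, a SAVEPOINT produced by :meth:`.Session.begin_nested`
    may be rolled back without affecting the enclosing transaction
    (a sketch; ``session`` and ``some_object`` are illustrative)::

        session.begin_nested()   # establishes a SAVEPOINT
        session.add(some_object)
        session.rollback()       # rolls back to the SAVEPOINT only;
                                 # the enclosing transaction stays open
        session.commit()         # commits the enclosing transaction
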
.. seealso::
:meth:`.Session.rollback`
:meth:`.Session.commit`
:meth:`.Session.begin`
:meth:`.Session.begin_nested`
:attr:`.Session.is_active`
:meth:`.SessionEvents.after_transaction_create`
:meth:`.SessionEvents.after_transaction_end`
:meth:`.SessionEvents.after_commit`
:meth:`.SessionEvents.after_rollback`
:meth:`.SessionEvents.after_soft_rollback`
"""
_rollback_exception = None
def __init__(self, session, parent=None, nested=False):
self.session = session
self._connections = {}
self._parent = parent
self.nested = nested
self._state = ACTIVE
if not parent and nested:
raise sa_exc.InvalidRequestError(
"Can't start a SAVEPOINT transaction when no existing "
"transaction is in progress"
)
if self.session._enable_transaction_accounting:
self._take_snapshot()
self.session.dispatch.after_transaction_create(self.session, self)
@property
def parent(self):
"""The parent :class:`.SessionTransaction` of this
:class:`.SessionTransaction`.
If this attribute is ``None``, indicates this
:class:`.SessionTransaction` is at the top of the stack, and
corresponds to a real "COMMIT"/"ROLLBACK"
block. If non-``None``, then this is either a "subtransaction"
or a "nested" / SAVEPOINT transaction. If the
:attr:`.SessionTransaction.nested` attribute is ``True``, then
        this is a SAVEPOINT, and if ``False``, indicates that this is a
        subtransaction.
.. versionadded:: 1.0.16 - use ._parent for previous versions
"""
return self._parent
nested = False
"""Indicates if this is a nested, or SAVEPOINT, transaction.
When :attr:`.SessionTransaction.nested` is True, it is expected
    that :attr:`.SessionTransaction.parent` will be non-``None`` as well.
"""
@property
def is_active(self):
return self.session is not None and self._state is ACTIVE
def _assert_active(
self,
prepared_ok=False,
rollback_ok=False,
deactive_ok=False,
closed_msg="This transaction is closed",
):
if self._state is COMMITTED:
raise sa_exc.InvalidRequestError(
"This session is in 'committed' state; no further "
"SQL can be emitted within this transaction."
)
elif self._state is PREPARED:
if not prepared_ok:
raise sa_exc.InvalidRequestError(
"This session is in 'prepared' state; no further "
"SQL can be emitted within this transaction."
)
elif self._state is DEACTIVE:
if not deactive_ok and not rollback_ok:
if self._rollback_exception:
raise sa_exc.InvalidRequestError(
"This Session's transaction has been rolled back "
"due to a previous exception during flush."
" To begin a new transaction with this Session, "
"first issue Session.rollback()."
" Original exception was: %s"
% self._rollback_exception,
code="7s2a",
)
elif not deactive_ok:
raise sa_exc.InvalidRequestError(
"This session is in 'inactive' state, due to the "
"SQL transaction being rolled back; no further "
"SQL can be emitted within this transaction."
)
elif self._state is CLOSED:
raise sa_exc.ResourceClosedError(closed_msg)
@property
def _is_transaction_boundary(self):
return self.nested or not self._parent
def connection(self, bindkey, execution_options=None, **kwargs):
self._assert_active()
bind = self.session.get_bind(bindkey, **kwargs)
return self._connection_for_bind(bind, execution_options)
def _begin(self, nested=False):
self._assert_active()
return SessionTransaction(self.session, self, nested=nested)
def _iterate_self_and_parents(self, upto=None):
current = self
result = ()
while current:
result += (current,)
if current._parent is upto:
break
elif current._parent is None:
raise sa_exc.InvalidRequestError(
"Transaction %s is not on the active transaction list"
% (upto)
)
else:
current = current._parent
return result
def _take_snapshot(self):
if not self._is_transaction_boundary:
self._new = self._parent._new
self._deleted = self._parent._deleted
self._dirty = self._parent._dirty
self._key_switches = self._parent._key_switches
return
if not self.session._flushing:
self.session.flush()
self._new = weakref.WeakKeyDictionary()
self._deleted = weakref.WeakKeyDictionary()
self._dirty = weakref.WeakKeyDictionary()
self._key_switches = weakref.WeakKeyDictionary()
def _restore_snapshot(self, dirty_only=False):
"""Restore the restoration state taken before a transaction began.
Corresponds to a rollback.
"""
assert self._is_transaction_boundary
to_expunge = set(self._new).union(self.session._new)
self.session._expunge_states(to_expunge, to_transient=True)
for s, (oldkey, newkey) in self._key_switches.items():
# we probably can do this conditionally based on
# if we expunged or not, but safe_discard does that anyway
self.session.identity_map.safe_discard(s)
# restore the old key
s.key = oldkey
# now restore the object, but only if we didn't expunge
if s not in to_expunge:
self.session.identity_map.replace(s)
for s in set(self._deleted).union(self.session._deleted):
self.session._update_impl(s, revert_deletion=True)
assert not self.session._deleted
for s in self.session.identity_map.all_states():
if not dirty_only or s.modified or s in self._dirty:
s._expire(s.dict, self.session.identity_map._modified)
def _remove_snapshot(self):
"""Remove the restoration state taken before a transaction began.
Corresponds to a commit.
"""
assert self._is_transaction_boundary
if not self.nested and self.session.expire_on_commit:
for s in self.session.identity_map.all_states():
s._expire(s.dict, self.session.identity_map._modified)
statelib.InstanceState._detach_states(
list(self._deleted), self.session
)
self._deleted.clear()
elif self.nested:
self._parent._new.update(self._new)
self._parent._dirty.update(self._dirty)
self._parent._deleted.update(self._deleted)
self._parent._key_switches.update(self._key_switches)
def _connection_for_bind(self, bind, execution_options):
self._assert_active()
if bind in self._connections:
if execution_options:
util.warn(
"Connection is already established for the "
"given bind; execution_options ignored"
)
return self._connections[bind][0]
local_connect = False
if self._parent:
conn = self._parent._connection_for_bind(bind, execution_options)
if not self.nested:
return conn
else:
if isinstance(bind, engine.Connection):
conn = bind
if conn.engine in self._connections:
raise sa_exc.InvalidRequestError(
"Session already has a Connection associated for the "
"given Connection's Engine"
)
else:
conn = bind._contextual_connect()
local_connect = True
try:
if execution_options:
conn = conn.execution_options(**execution_options)
if self.session.twophase and self._parent is None:
transaction = conn.begin_twophase()
elif self.nested:
transaction = conn.begin_nested()
else:
transaction = conn.begin()
except:
            # the connection will not be associated with this Session;
            # close it immediately so that it isn't left to be closed
            # only at garbage collection time
if local_connect:
conn.close()
raise
else:
self._connections[conn] = self._connections[conn.engine] = (
conn,
transaction,
conn is not bind,
)
self.session.dispatch.after_begin(self.session, self, conn)
return conn
def prepare(self):
if self._parent is not None or not self.session.twophase:
raise sa_exc.InvalidRequestError(
"'twophase' mode not enabled, or not root transaction; "
"can't prepare."
)
self._prepare_impl()
def _prepare_impl(self):
self._assert_active()
if self._parent is None or self.nested:
self.session.dispatch.before_commit(self.session)
stx = self.session.transaction
if stx is not self:
for subtransaction in stx._iterate_self_and_parents(upto=self):
subtransaction.commit()
if not self.session._flushing:
for _flush_guard in range(100):
if self.session._is_clean():
break
self.session.flush()
else:
raise exc.FlushError(
"Over 100 subsequent flushes have occurred within "
"session.commit() - is an after_flush() hook "
"creating new objects?"
)
if self._parent is None and self.session.twophase:
try:
for t in set(self._connections.values()):
t[1].prepare()
except:
with util.safe_reraise():
self.rollback()
self._state = PREPARED
def commit(self):
self._assert_active(prepared_ok=True)
if self._state is not PREPARED:
self._prepare_impl()
if self._parent is None or self.nested:
for t in set(self._connections.values()):
t[1].commit()
self._state = COMMITTED
self.session.dispatch.after_commit(self.session)
if self.session._enable_transaction_accounting:
self._remove_snapshot()
self.close()
return self._parent
def rollback(self, _capture_exception=False):
self._assert_active(prepared_ok=True, rollback_ok=True)
stx = self.session.transaction
if stx is not self:
for subtransaction in stx._iterate_self_and_parents(upto=self):
subtransaction.close()
boundary = self
rollback_err = None
if self._state in (ACTIVE, PREPARED):
for transaction in self._iterate_self_and_parents():
if transaction._parent is None or transaction.nested:
try:
for t in set(transaction._connections.values()):
t[1].rollback()
transaction._state = DEACTIVE
self.session.dispatch.after_rollback(self.session)
except:
rollback_err = sys.exc_info()
finally:
transaction._state = DEACTIVE
if self.session._enable_transaction_accounting:
transaction._restore_snapshot(
dirty_only=transaction.nested
)
boundary = transaction
break
else:
transaction._state = DEACTIVE
sess = self.session
if (
not rollback_err
and sess._enable_transaction_accounting
and not sess._is_clean()
):
# if items were added, deleted, or mutated
# here, we need to re-restore the snapshot
util.warn(
"Session's state has been changed on "
"a non-active transaction - this state "
"will be discarded."
)
boundary._restore_snapshot(dirty_only=boundary.nested)
self.close()
if self._parent and _capture_exception:
self._parent._rollback_exception = sys.exc_info()[1]
if rollback_err:
util.raise_(rollback_err[1], with_traceback=rollback_err[2])
sess.dispatch.after_soft_rollback(sess, self)
return self._parent
def close(self, invalidate=False):
self.session.transaction = self._parent
if self._parent is None:
for connection, transaction, autoclose in set(
self._connections.values()
):
if invalidate:
connection.invalidate()
if autoclose:
connection.close()
else:
transaction.close()
self._state = CLOSED
self.session.dispatch.after_transaction_end(self.session, self)
if self._parent is None:
if not self.session.autocommit:
self.session.begin()
self.session = None
self._connections = None
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self._assert_active(deactive_ok=True, prepared_ok=True)
if self.session.transaction is None:
return
if type_ is None:
try:
self.commit()
except:
with util.safe_reraise():
self.rollback()
else:
self.rollback()
class Session(_SessionClassMethods):
"""Manages persistence operations for ORM-mapped objects.
The Session's usage paradigm is described at :doc:`/orm/session`.
"""
public_methods = (
"__contains__",
"__iter__",
"add",
"add_all",
"begin",
"begin_nested",
"close",
"commit",
"connection",
"delete",
"execute",
"expire",
"expire_all",
"expunge",
"expunge_all",
"flush",
"get_bind",
"is_modified",
"bulk_save_objects",
"bulk_insert_mappings",
"bulk_update_mappings",
"merge",
"query",
"refresh",
"rollback",
"scalar",
)
@util.deprecated_params(
weak_identity_map=(
"1.0",
"The :paramref:`.Session.weak_identity_map` parameter as well as "
"the strong-referencing identity map are deprecated, and will be "
"removed in a future release. For the use case where objects "
"present in a :class:`.Session` need to be automatically strong "
"referenced, see the recipe at "
":ref:`session_referencing_behavior` for an event-based approach "
"to maintaining strong identity references. ",
),
_enable_transaction_accounting=(
"0.7",
"The :paramref:`.Session._enable_transaction_accounting` "
"parameter is deprecated and will be removed in a future release.",
),
extension=(
"0.7",
":class:`.SessionExtension` is deprecated in favor of the "
":class:`.SessionEvents` listener interface. The "
":paramref:`.Session.extension` parameter will be "
"removed in a future release.",
),
)
def __init__(
self,
bind=None,
autoflush=True,
expire_on_commit=True,
_enable_transaction_accounting=True,
autocommit=False,
twophase=False,
weak_identity_map=None,
binds=None,
extension=None,
enable_baked_queries=True,
info=None,
query_cls=None,
):
r"""Construct a new Session.
See also the :class:`.sessionmaker` function which is used to
generate a :class:`.Session`-producing callable with a given
set of arguments.
:param autocommit:
.. warning::
The autocommit flag is **not for general use**, and if it is
used, queries should only be invoked within the span of a
:meth:`.Session.begin` / :meth:`.Session.commit` pair. Executing
queries outside of a demarcated transaction is a legacy mode
of usage, and can in some cases lead to concurrent connection
checkouts.
Defaults to ``False``. When ``True``, the
:class:`.Session` does not keep a persistent transaction running,
and will acquire connections from the engine on an as-needed basis,
returning them immediately after their use. Flushes will begin and
commit (or possibly rollback) their own transaction if no
transaction is present. When using this mode, the
:meth:`.Session.begin` method is used to explicitly start
transactions.
.. seealso::
:ref:`session_autocommit`
:param autoflush: When ``True``, all query operations will issue a
:meth:`~.Session.flush` call to this ``Session`` before proceeding.
This is a convenience feature so that :meth:`~.Session.flush` need
not be called repeatedly in order for database queries to retrieve
results. It's typical that ``autoflush`` is used in conjunction
with ``autocommit=False``. In this scenario, explicit calls to
:meth:`~.Session.flush` are rarely needed; you usually only need to
call :meth:`~.Session.commit` (which flushes) to finalize changes.
:param bind: An optional :class:`_engine.Engine` or
:class:`_engine.Connection` to
which this ``Session`` should be bound. When specified, all SQL
operations performed by this session will execute via this
connectable.
:param binds: A dictionary which may specify any number of
:class:`_engine.Engine` or :class:`_engine.Connection`
objects as the source of
connectivity for SQL operations on a per-entity basis. The keys
of the dictionary consist of any series of mapped classes,
arbitrary Python classes that are bases for mapped classes,
:class:`_schema.Table` objects and :class:`_orm.Mapper` objects.
The
values of the dictionary are then instances of
:class:`_engine.Engine`
or less commonly :class:`_engine.Connection` objects.
Operations which
proceed relative to a particular mapped class will consult this
dictionary for the closest matching entity in order to determine
which :class:`_engine.Engine` should be used for a particular SQL
operation. The complete heuristics for resolution are
described at :meth:`.Session.get_bind`. Usage looks like::
Session = sessionmaker(binds={
SomeMappedClass: create_engine('postgresql://engine1'),
SomeDeclarativeBase: create_engine('postgresql://engine2'),
some_mapper: create_engine('postgresql://engine3'),
some_table: create_engine('postgresql://engine4'),
})
.. seealso::
:ref:`session_partitioning`
:meth:`.Session.bind_mapper`
:meth:`.Session.bind_table`
:meth:`.Session.get_bind`
:param \class_: Specify an alternate class other than
``sqlalchemy.orm.session.Session`` which should be used by the
returned class. This is the only argument that is local to the
:class:`.sessionmaker` function, and is not sent directly to the
constructor for ``Session``.
:param enable_baked_queries: defaults to ``True``. A flag consumed
by the :mod:`sqlalchemy.ext.baked` extension to determine if
"baked queries" should be cached, as is the normal operation
of this extension. When set to ``False``, all caching is disabled,
including baked queries defined by the calling application as
well as those used internally. Setting this flag to ``False``
can significantly reduce memory use, however will also degrade
performance for those areas that make use of baked queries
(such as relationship loaders). Additionally, baked query
logic in the calling application or potentially within the ORM
that may be malfunctioning due to cache key collisions or similar
can be flagged by observing if this flag resolves the issue.
.. versionadded:: 1.2
:param _enable_transaction_accounting: A
legacy-only flag which when ``False`` disables *all* 0.5-style
object accounting on transaction boundaries.
:param expire_on_commit: Defaults to ``True``. When ``True``, all
instances will be fully expired after each :meth:`~.commit`,
so that all attribute/object access subsequent to a completed
transaction will load from the most recent database state.
:param extension: An optional
:class:`~.SessionExtension` instance, or a list
of such instances, which will receive pre- and post- commit and
flush events, as well as a post-rollback event.
:param info: optional dictionary of arbitrary data to be associated
with this :class:`.Session`. Is available via the
:attr:`.Session.info` attribute. Note the dictionary is copied at
construction time so that modifications to the per-
:class:`.Session` dictionary will be local to that
:class:`.Session`.
.. versionadded:: 0.9.0
:param query_cls: Class which should be used to create new Query
objects, as returned by the :meth:`~.Session.query` method.
Defaults to :class:`_query.Query`.
:param twophase: When ``True``, all transactions will be started as
a "two phase" transaction, i.e. using the "two phase" semantics
of the database in use along with an XID. During a
:meth:`~.commit`, after :meth:`~.flush` has been issued for all
attached databases, the :meth:`~.TwoPhaseTransaction.prepare`
method on each database's :class:`.TwoPhaseTransaction` will be
called. This allows each database to roll back the entire
transaction, before each transaction is committed.
:param weak_identity_map: Defaults to ``True`` - when set to
``False``, objects placed in the :class:`.Session` will be
strongly referenced until explicitly removed or the
:class:`.Session` is closed.
"""
if weak_identity_map in (True, None):
self._identity_cls = identity.WeakInstanceDict
else:
self._identity_cls = identity.StrongInstanceDict
self.identity_map = self._identity_cls()
self._new = {} # InstanceState->object, strong refs object
self._deleted = {} # same
self.bind = bind
self.__binds = {}
self._flushing = False
self._warn_on_events = False
self.transaction = None
self.hash_key = _new_sessionid()
self.autoflush = autoflush
self.autocommit = autocommit
self.expire_on_commit = expire_on_commit
self.enable_baked_queries = enable_baked_queries
self._enable_transaction_accounting = _enable_transaction_accounting
self.twophase = twophase
self._query_cls = query_cls if query_cls else query.Query
if info:
self.info.update(info)
if extension:
for ext in util.to_list(extension):
SessionExtension._adapt_listener(self, ext)
if binds is not None:
for key, bind in binds.items():
self._add_bind(key, bind)
if not self.autocommit:
self.begin()
_sessions[self.hash_key] = self
connection_callable = None
transaction = None
"""The current active or inactive :class:`.SessionTransaction`."""
@util.memoized_property
def info(self):
"""A user-modifiable dictionary.
The initial value of this dictionary can be populated using the
``info`` argument to the :class:`.Session` constructor or
:class:`.sessionmaker` constructor or factory methods. The dictionary
here is always local to this :class:`.Session` and can be modified
independently of all other :class:`.Session` objects.
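For illustration only, a minimal sketch (the ``request_id`` key is an
arbitrary placeholder)::
    session = Session(info={"request_id": "abc123"})
    session.info["processed"] = True  # local to this Session only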
.. versionadded:: 0.9.0
"""
return {}
def begin(self, subtransactions=False, nested=False):
"""Begin a transaction on this :class:`.Session`.
.. warning::
The :meth:`.Session.begin` method is part of a larger pattern
of use with the :class:`.Session` known as **autocommit mode**.
This is essentially a **legacy mode of use** and is
not necessary for new applications. The :class:`.Session`
normally handles the work of "begin" transparently, which in
turn relies upon the Python DBAPI to transparently "begin"
transactions; there is **no need to explicitly begin transactions**
when using modern :class:`.Session` programming patterns.
In its default mode of ``autocommit=False``, the
:class:`.Session` does all of its work within
the context of a transaction, so as soon as you call
:meth:`.Session.commit`, the next transaction is implicitly
started when the next database operation is invoked. See
:ref:`session_autocommit` for further background.
The method will raise an error if this :class:`.Session` is already
inside of a transaction, unless
:paramref:`~.Session.begin.subtransactions` or
:paramref:`~.Session.begin.nested` are specified. A "subtransaction"
is essentially a code embedding pattern that does not affect the
transactional state of the database connection unless a rollback is
emitted, in which case the whole transaction is rolled back. For
documentation on subtransactions, please see
:ref:`session_subtransactions`.
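As an illustrative sketch only, assuming a :class:`.Session` in
autocommit mode and a hypothetical mapped class ``User``
(``some_engine`` is also a placeholder), the returned
:class:`.SessionTransaction` may be used as a context manager::
    session = Session(bind=some_engine, autocommit=True)
    with session.begin():
        # runs inside an explicit transaction; committed on
        # success, rolled back on error
        session.add(User(name="some name"))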
:param subtransactions: if True, indicates that this
:meth:`~.Session.begin` can create a "subtransaction".
:param nested: if True, begins a SAVEPOINT transaction and is
equivalent to calling :meth:`~.Session.begin_nested`. For
documentation on SAVEPOINT transactions, please see
:ref:`session_begin_nested`.
:return: the :class:`.SessionTransaction` object. Note that
:class:`.SessionTransaction`
acts as a Python context manager, allowing :meth:`.Session.begin`
to be used in a "with" block. See :ref:`session_autocommit` for
an example.
.. seealso::
:ref:`session_autocommit`
:meth:`.Session.begin_nested`
"""
if self.transaction is not None:
if subtransactions or nested:
self.transaction = self.transaction._begin(nested=nested)
else:
raise sa_exc.InvalidRequestError(
"A transaction is already begun. Use "
"subtransactions=True to allow subtransactions."
)
else:
self.transaction = SessionTransaction(self, nested=nested)
return self.transaction # needed for __enter__/__exit__ hook
def begin_nested(self):
"""Begin a "nested" transaction on this Session, e.g. SAVEPOINT.
The target database(s) and associated drivers must support SQL
SAVEPOINT for this method to function correctly.
For documentation on SAVEPOINT
transactions, please see :ref:`session_begin_nested`.
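A minimal sketch, assuming a hypothetical mapped class ``User`` and an
already-configured ``session``::
    session.add(User(name="u1"))
    try:
        with session.begin_nested():  # emits SAVEPOINT
            session.add(User(name="u2"))
            raise Exception("rolls back to the SAVEPOINT only")
    except Exception:
        pass
    session.commit()  # "u1" is committed; "u2" was rolled back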
:return: the :class:`.SessionTransaction` object. Note that
:class:`.SessionTransaction` acts as a context manager, allowing
:meth:`.Session.begin_nested` to be used in a "with" block.
See :ref:`session_begin_nested` for a usage example.
.. seealso::
:ref:`session_begin_nested`
:ref:`pysqlite_serializable` - special workarounds required
with the SQLite driver in order for SAVEPOINT to work
correctly.
"""
return self.begin(nested=True)
def rollback(self):
"""Rollback the current transaction in progress.
If no transaction is in progress, this method is a pass-through.
This method rolls back the current transaction or nested transaction
regardless of subtransactions being in effect. All subtransactions up
to the first real transaction are closed. Subtransactions occur when
:meth:`.begin` is called multiple times.
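The conventional usage pattern, sketched here with placeholder names,
pairs :meth:`.Session.commit` with a rollback upon failure::
    try:
        session.add(some_object)
        session.commit()
    except Exception:
        session.rollback()
        raise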
.. seealso::
:ref:`session_rollback`
"""
if self.transaction is not None:
self.transaction.rollback()
def commit(self):
"""Flush pending changes and commit the current transaction.
If no transaction is in progress, this method raises an
:exc:`~sqlalchemy.exc.InvalidRequestError`.
By default, the :class:`.Session` also expires all database
loaded state on all ORM-managed attributes after transaction commit.
This is so that subsequent operations load the most recent
data from the database. This behavior can be disabled using
the ``expire_on_commit=False`` option to :class:`.sessionmaker` or
the :class:`.Session` constructor.
If a subtransaction is in effect (which occurs when begin() is called
multiple times), the subtransaction will be closed, and the next call
to ``commit()`` will operate on the enclosing transaction.
When using the :class:`.Session` in its default mode of
``autocommit=False``, a new transaction will
be begun immediately after the commit, but note that the newly begun
transaction does *not* use any connection resources until the first
SQL is actually emitted.
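A minimal sketch, assuming a hypothetical mapped class ``User``::
    session.add(User(name="some name"))
    session.commit()  # flush pending changes, then COMMIT
    # with the default expire_on_commit=True, attributes are
    # expired and re-load from the database on next access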
.. seealso::
:ref:`session_committing`
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.commit()
def prepare(self):
"""Prepare the current transaction in progress for two phase commit.
If no transaction is in progress, this method raises an
:exc:`~sqlalchemy.exc.InvalidRequestError`.
Only root transactions of two phase sessions can be prepared. If the
current transaction is not such, an
:exc:`~sqlalchemy.exc.InvalidRequestError` is raised.
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.prepare()
def connection(
self,
mapper=None,
clause=None,
bind=None,
close_with_result=False,
execution_options=None,
**kw
):
r"""Return a :class:`_engine.Connection` object corresponding to this
:class:`.Session` object's transactional state.
If this :class:`.Session` is configured with ``autocommit=False``,
either the :class:`_engine.Connection` corresponding to the current
transaction is returned, or if no transaction is in progress, a new
one is begun and the :class:`_engine.Connection`
returned (note that no
transactional state is established with the DBAPI until the first
SQL statement is emitted).
Alternatively, if this :class:`.Session` is configured with
``autocommit=True``, an ad-hoc :class:`_engine.Connection` is returned
using :meth:`_engine.Engine.connect` on the underlying
:class:`_engine.Engine`.
Ambiguity in multi-bind or unbound :class:`.Session` objects can be
resolved through any of the optional keyword arguments. This
ultimately makes use of the :meth:`.get_bind` method for resolution.
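For illustration, a sketch of executing a Core statement on the
connection associated with the current transaction (the table name is
a placeholder)::
    from sqlalchemy import text
    connection = session.connection()
    result = connection.execute(text("SELECT count(*) FROM user"))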
:param bind:
Optional :class:`_engine.Engine` to be used as the bind. If
this engine is already involved in an ongoing transaction,
that connection will be used. This argument takes precedence
over ``mapper``, ``clause``.
:param mapper:
Optional :func:`.mapper` mapped class, used to identify
the appropriate bind. This argument takes precedence over
``clause``.
:param clause:
A :class:`_expression.ClauseElement` (i.e.
:func:`_expression.select`,
:func:`_expression.text`,
etc.) which will be used to locate a bind, if a bind
cannot otherwise be identified.
:param close_with_result: Passed to :meth:`_engine.Engine.connect`,
indicating the :class:`_engine.Connection` should be considered
"single use", automatically closing when the first result set is
closed. This flag only has an effect if this :class:`.Session` is
configured with ``autocommit=True`` and does not already have a
transaction in progress.
:param execution_options: a dictionary of execution options that will
be passed to :meth:`_engine.Connection.execution_options`, **when the
connection is first procured only**. If the connection is already
present within the :class:`.Session`, a warning is emitted and
the arguments are ignored.
.. versionadded:: 0.9.9
.. seealso::
:ref:`session_transaction_isolation`
:param \**kw:
Additional keyword arguments are sent to :meth:`get_bind()`,
allowing additional arguments to be passed to custom
implementations of :meth:`get_bind`.
"""
if bind is None:
bind = self.get_bind(mapper, clause=clause, **kw)
return self._connection_for_bind(
bind,
close_with_result=close_with_result,
execution_options=execution_options,
)
def _connection_for_bind(self, engine, execution_options=None, **kw):
if self.transaction is not None:
return self.transaction._connection_for_bind(
engine, execution_options
)
else:
conn = engine._contextual_connect(**kw)
if execution_options:
conn = conn.execution_options(**execution_options)
return conn
def execute(self, clause, params=None, mapper=None, bind=None, **kw):
r"""Execute a SQL expression construct or string statement within
the current transaction.
Returns a :class:`_engine.ResultProxy` representing
results of the statement execution, in the same manner as that of an
:class:`_engine.Engine` or
:class:`_engine.Connection`.
E.g.::
result = session.execute(
user_table.select().where(user_table.c.id == 5)
)
:meth:`~.Session.execute` accepts any executable clause construct,
such as :func:`_expression.select`,
:func:`_expression.insert`,
:func:`_expression.update`,
:func:`_expression.delete`, and
:func:`_expression.text`. Plain SQL strings can be passed
as well, which in the case of :meth:`.Session.execute` only
will be interpreted the same as if it were passed via a
:func:`_expression.text` construct. That is, the following usage::
result = session.execute(
"SELECT * FROM user WHERE id=:param",
{"param":5}
)
is equivalent to::
from sqlalchemy import text
result = session.execute(
text("SELECT * FROM user WHERE id=:param"),
{"param":5}
)
The second positional argument to :meth:`.Session.execute` is an
optional parameter set. Similar to that of
:meth:`_engine.Connection.execute`, whether this is passed as a single
dictionary, or a sequence of dictionaries, determines whether the DBAPI
cursor's ``execute()`` or ``executemany()`` is used to execute the
statement. An INSERT construct may be invoked for a single row::
result = session.execute(
users.insert(), {"id": 7, "name": "somename"})
or for multiple rows::
result = session.execute(users.insert(), [
{"id": 7, "name": "somename7"},
{"id": 8, "name": "somename8"},
{"id": 9, "name": "somename9"}
])
The statement is executed within the current transactional context of
this :class:`.Session`. The :class:`_engine.Connection`
which is used
to execute the statement can also be acquired directly by
calling the :meth:`.Session.connection` method. Both methods use
a rule-based resolution scheme in order to determine the
:class:`_engine.Connection`,
which in the average case is derived directly
from the "bind" of the :class:`.Session` itself, and in other cases
can be based on the :func:`.mapper`
and :class:`_schema.Table` objects passed to the method; see the
documentation for :meth:`.Session.get_bind` for a full description of
this scheme.
The :meth:`.Session.execute` method does *not* invoke autoflush.
The :class:`_engine.ResultProxy` returned by the
:meth:`.Session.execute`
method is returned with the "close_with_result" flag set to true;
the significance of this flag is that if this :class:`.Session` is
autocommitting and does not have a transaction-dedicated
:class:`_engine.Connection` available, a temporary
:class:`_engine.Connection` is
established for the statement execution, which is closed (meaning,
returned to the connection pool) when the :class:`_engine.ResultProxy`
has
consumed all available data. This applies *only* when the
:class:`.Session` is configured with autocommit=True and no
transaction has been started.
:param clause:
An executable statement (i.e. an :class:`.Executable` expression
such as :func:`_expression.select`) or string SQL statement
to be executed.
:param params:
Optional dictionary, or list of dictionaries, containing
bound parameter values. If a single dictionary, single-row
execution occurs; if a list of dictionaries, an
"executemany" will be invoked. The keys in each dictionary
must correspond to parameter names present in the statement.
:param mapper:
Optional :func:`.mapper` or mapped class, used to identify
the appropriate bind. This argument takes precedence over
``clause`` when locating a bind. See :meth:`.Session.get_bind`
for more details.
:param bind:
Optional :class:`_engine.Engine` to be used as the bind. If
this engine is already involved in an ongoing transaction,
that connection will be used. This argument takes
precedence over ``mapper`` and ``clause`` when locating
a bind.
:param \**kw:
Additional keyword arguments are sent to :meth:`.Session.get_bind()`
to allow extensibility of "bind" schemes.
.. seealso::
:ref:`sqlexpression_toplevel` - Tutorial on using Core SQL
constructs.
:ref:`connections_toplevel` - Further information on direct
statement execution.
:meth:`_engine.Connection.execute`
- core level statement execution
method, which is :meth:`.Session.execute` ultimately uses
in order to execute the statement.
"""
clause = expression._literal_as_text(
clause, allow_coercion_to_text=True
)
if bind is None:
bind = self.get_bind(mapper, clause=clause, **kw)
return self._connection_for_bind(bind, close_with_result=True).execute(
clause, params or {}
)
def scalar(self, clause, params=None, mapper=None, bind=None, **kw):
"""Like :meth:`~.Session.execute` but return a scalar result."""
return self.execute(
clause, params=params, mapper=mapper, bind=bind, **kw
).scalar()
def close(self):
"""Close this Session.
This clears all items and ends any transaction in progress.
If this session was created with ``autocommit=False``, a new
transaction is immediately begun. Note that this new transaction does
not use any connection resources until they are first needed.
"""
self._close_impl(invalidate=False)
def invalidate(self):
"""Close this Session, using connection invalidation.
This is a variant of :meth:`.Session.close` that will additionally
ensure that the :meth:`_engine.Connection.invalidate`
method will be called
on all :class:`_engine.Connection` objects. This can be called when
the database is known to be in a state where the connections are
no longer safe to be used.
E.g.::
try:
sess = Session()
sess.add(User())
sess.commit()
except gevent.Timeout:
sess.invalidate()
raise
except:
sess.rollback()
raise
This clears all items and ends any transaction in progress.
If this session was created with ``autocommit=False``, a new
transaction is immediately begun. Note that this new transaction does
not use any connection resources until they are first needed.
.. versionadded:: 0.9.9
"""
self._close_impl(invalidate=True)
def _close_impl(self, invalidate):
self.expunge_all()
if self.transaction is not None:
for transaction in self.transaction._iterate_self_and_parents():
transaction.close(invalidate)
def expunge_all(self):
"""Remove all object instances from this ``Session``.
This is equivalent to calling ``expunge(obj)`` on all objects in this
``Session``.
"""
all_states = self.identity_map.all_states() + list(self._new)
self.identity_map = self._identity_cls()
self._new = {}
self._deleted = {}
statelib.InstanceState._detach_states(all_states, self)
def _add_bind(self, key, bind):
try:
insp = inspect(key)
except sa_exc.NoInspectionAvailable as err:
if not isinstance(key, type):
util.raise_(
sa_exc.ArgumentError(
"Not an acceptable bind target: %s" % key
),
replace_context=err,
)
else:
self.__binds[key] = bind
else:
if insp.is_selectable:
self.__binds[insp] = bind
elif insp.is_mapper:
self.__binds[insp.class_] = bind
for selectable in insp._all_tables:
self.__binds[selectable] = bind
else:
raise sa_exc.ArgumentError(
"Not an acceptable bind target: %s" % key
)
def bind_mapper(self, mapper, bind):
"""Associate a :class:`_orm.Mapper` or arbitrary Python class with a
"bind", e.g. an :class:`_engine.Engine` or :class:`_engine.Connection`
.
The given entity is added to a lookup used by the
:meth:`.Session.get_bind` method.
:param mapper: a :class:`_orm.Mapper` object,
or an instance of a mapped
class, or any Python class that is the base of a set of mapped
classes.
:param bind: an :class:`_engine.Engine` or :class:`_engine.Connection`
object.
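A sketch, assuming hypothetical mapped classes and engines::
    session = Session()
    session.bind_mapper(User, engine_one)       # User -> engine_one
    session.bind_mapper(Analytics, engine_two)  # Analytics -> engine_two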
.. seealso::
:ref:`session_partitioning`
:paramref:`.Session.binds`
:meth:`.Session.bind_table`
"""
self._add_bind(mapper, bind)
def bind_table(self, table, bind):
"""Associate a :class:`_schema.Table` with a "bind", e.g. an
:class:`_engine.Engine`
or :class:`_engine.Connection`.
The given :class:`_schema.Table` is added to a lookup used by the
:meth:`.Session.get_bind` method.
:param table: a :class:`_schema.Table` object,
which is typically the target
of an ORM mapping, or is present within a selectable that is
mapped.
:param bind: an :class:`_engine.Engine` or :class:`_engine.Connection`
object.
.. seealso::
:ref:`session_partitioning`
:paramref:`.Session.binds`
:meth:`.Session.bind_mapper`
"""
self._add_bind(table, bind)
def get_bind(self, mapper=None, clause=None):
"""Return a "bind" to which this :class:`.Session` is bound.
The "bind" is usually an instance of :class:`_engine.Engine`,
except in the case where the :class:`.Session` has been
explicitly bound directly to a :class:`_engine.Connection`.
For a multiply-bound or unbound :class:`.Session`, the
``mapper`` or ``clause`` arguments are used to determine the
appropriate bind to return.
Note that the "mapper" argument is usually present
when :meth:`.Session.get_bind` is called via an ORM
operation such as a :meth:`.Session.query`, each
individual INSERT/UPDATE/DELETE operation within a
:meth:`.Session.flush` call, etc.
The order of resolution is:
1. if mapper given and session.binds is present,
locate a bind based first on the mapper in use, then
on the mapped class in use, then on any base classes that are
present in the ``__mro__`` of the mapped class, from more specific
superclasses to more general.
2. if clause given and session.binds is present,
locate a bind based on :class:`_schema.Table` objects
found in the given clause present in session.binds.
3. if session.bind is present, return that.
4. if clause given, attempt to return a bind
linked to the :class:`_schema.MetaData` ultimately
associated with the clause.
5. if mapper given, attempt to return a bind
linked to the :class:`_schema.MetaData` ultimately
associated with the :class:`_schema.Table` or other
selectable to which the mapper is mapped.
6. No bind can be found, :exc:`~sqlalchemy.exc.UnboundExecutionError`
is raised.
Note that the :meth:`.Session.get_bind` method can be overridden on
a user-defined subclass of :class:`.Session` to provide any kind
of bind resolution scheme. See the example at
:ref:`session_custom_partitioning`.
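As a sketch of such an override (the engines and the routing rule are
hypothetical)::
    class RoutingSession(Session):
        def get_bind(self, mapper=None, clause=None):
            # route reporting entities to a separate engine;
            # everything else uses the default
            if mapper and issubclass(mapper.class_, ReportingModel):
                return reporting_engine
            else:
                return default_engine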
:param mapper:
Optional :func:`.mapper` mapped class or instance of
:class:`_orm.Mapper`. The bind can be derived from a
:class:`_orm.Mapper`
first by consulting the "binds" map associated with this
:class:`.Session`, and secondly by consulting the
:class:`_schema.MetaData`
associated with the :class:`_schema.Table` to which the
:class:`_orm.Mapper`
is mapped for a bind.
:param clause:
A :class:`_expression.ClauseElement` (i.e.
:func:`_expression.select`,
:func:`_expression.text`,
etc.). If the ``mapper`` argument is not present or could not
produce a bind, the given expression construct will be searched
for a bound element, typically a :class:`_schema.Table`
associated with
bound :class:`_schema.MetaData`.
.. seealso::
:ref:`session_partitioning`
:paramref:`.Session.binds`
:meth:`.Session.bind_mapper`
:meth:`.Session.bind_table`
"""
if mapper is clause is None:
if self.bind:
return self.bind
else:
raise sa_exc.UnboundExecutionError(
"This session is not bound to a single Engine or "
"Connection, and no context was provided to locate "
"a binding."
)
if mapper is not None:
try:
mapper = inspect(mapper)
except sa_exc.NoInspectionAvailable as err:
if isinstance(mapper, type):
util.raise_(
exc.UnmappedClassError(mapper), replace_context=err,
)
else:
raise
if self.__binds:
if mapper:
for cls in mapper.class_.__mro__:
if cls in self.__binds:
return self.__binds[cls]
if clause is None:
clause = mapper.persist_selectable
if clause is not None:
for t in sql_util.find_tables(clause, include_crud=True):
if t in self.__binds:
return self.__binds[t]
if self.bind:
return self.bind
if isinstance(clause, sql.expression.ClauseElement) and clause.bind:
return clause.bind
if mapper and mapper.persist_selectable.bind:
return mapper.persist_selectable.bind
context = []
if mapper is not None:
context.append("mapper %s" % mapper)
if clause is not None:
context.append("SQL expression")
raise sa_exc.UnboundExecutionError(
"Could not locate a bind configured on %s or this Session"
% (", ".join(context))
)
def query(self, *entities, **kwargs):
"""Return a new :class:`_query.Query` object corresponding to this
:class:`.Session`."""
return self._query_cls(entities, self, **kwargs)
@property
@util.contextmanager
def no_autoflush(self):
"""Return a context manager that disables autoflush.
e.g.::
with session.no_autoflush:
some_object = SomeClass()
session.add(some_object)
# won't autoflush
some_object.related_thing = session.query(SomeRelated).first()
Operations that proceed within the ``with:`` block
will not be subject to flushes occurring upon query
access. This is useful when initializing a series
of objects which involve existing database queries,
where the uncompleted object should not yet be flushed.
"""
autoflush = self.autoflush
self.autoflush = False
try:
yield self
finally:
self.autoflush = autoflush
def _autoflush(self):
if self.autoflush and not self._flushing:
try:
self.flush()
except sa_exc.StatementError as e:
# note we are reraising StatementError as opposed to
# raising FlushError with "chaining" to remain compatible
# with code that catches StatementError, IntegrityError,
# etc.
e.add_detail(
"raised as a result of Query-invoked autoflush; "
"consider using a session.no_autoflush block if this "
"flush is occurring prematurely"
)
util.raise_(e, with_traceback=sys.exc_info()[2])
def refresh(
self,
instance,
attribute_names=None,
with_for_update=None,
lockmode=None,
):
"""Expire and refresh the attributes on the given instance.
A query will be issued to the database and all attributes will be
refreshed with their current database value.
Lazy-loaded relational attributes will remain lazily loaded, so that
the instance-wide refresh operation will be followed immediately by
the lazy load of that attribute.
Eagerly-loaded relational attributes will eagerly load within the
single refresh operation.
Note that a highly isolated transaction will return the same values as
were previously read in that same transaction, regardless of changes
in database state outside of that transaction - usage of
:meth:`~Session.refresh` usually only makes sense if non-ORM SQL
statements were emitted in the ongoing transaction, or if autocommit
mode is turned on.
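A short sketch, with placeholder names::
    session.refresh(some_object)            # reload all attributes
    session.refresh(some_object, ["name"])  # reload only "name"
    session.refresh(
        some_object, with_for_update=True
    )  # SELECT ... FOR UPDATE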
:param attribute_names: optional. An iterable collection of
string attribute names indicating a subset of attributes to
be refreshed.
:param with_for_update: optional boolean ``True`` indicating FOR UPDATE
should be used, or may be a dictionary containing flags to
indicate a more specific set of FOR UPDATE flags for the SELECT;
flags should match the parameters of
:meth:`_query.Query.with_for_update`.
Supersedes the :paramref:`.Session.refresh.lockmode` parameter.
.. versionadded:: 1.2
:param lockmode: Passed to the :class:`~sqlalchemy.orm.query.Query`
as used by :meth:`~sqlalchemy.orm.query.Query.with_lockmode`.
Superseded by :paramref:`.Session.refresh.with_for_update`.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.expire_all`
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
self._expire_state(state, attribute_names)
if with_for_update == {}:
raise sa_exc.ArgumentError(
"with_for_update should be the boolean value "
"True, or a dictionary with options. "
"A blank dictionary is ambiguous."
)
if lockmode:
with_for_update = query.LockmodeArg.parse_legacy_query(lockmode)
elif with_for_update is not None:
if with_for_update is True:
with_for_update = query.LockmodeArg()
elif with_for_update:
with_for_update = query.LockmodeArg(**with_for_update)
else:
with_for_update = None
if (
loading.load_on_ident(
self.query(object_mapper(instance)),
state.key,
refresh_state=state,
with_for_update=with_for_update,
only_load_props=attribute_names,
)
is None
):
raise sa_exc.InvalidRequestError(
"Could not refresh instance '%s'" % instance_str(instance)
)
def expire_all(self):
"""Expires all persistent instances within this Session.
When any attribute on a persistent instance is next accessed,
a query will be issued using the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire individual objects and individual attributes
on those objects, use :meth:`Session.expire`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire_all` should not be needed when
autocommit is ``False``, assuming the transaction is isolated.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.refresh`
"""
for state in self.identity_map.all_states():
state._expire(state.dict, self.identity_map._modified)
def expire(self, instance, attribute_names=None):
"""Expire the attributes on an instance.
Marks the attributes of an instance as out of date. When an expired
attribute is next accessed, a query will be issued to the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire all objects in the :class:`.Session` simultaneously,
use :meth:`Session.expire_all`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire` only makes sense for the specific
case that a non-ORM SQL statement was emitted in the current
transaction.
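A sketch with placeholder names, following a non-ORM UPDATE emitted in
the same transaction::
    from sqlalchemy import text
    session.execute(text("UPDATE some_table SET value = 5"))
    session.expire(some_object)  # mark all attributes as stale
    print(some_object.value)     # emits a SELECT, loads fresh value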
:param instance: The instance to be refreshed.
:param attribute_names: optional list of string attribute names
indicating a subset of attributes to be expired.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.refresh`
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
self._expire_state(state, attribute_names)
def _expire_state(self, state, attribute_names):
self._validate_persistent(state)
if attribute_names:
state._expire_attributes(state.dict, attribute_names)
else:
# pre-fetch the full cascade since the expire is going to
# remove associations
cascaded = list(
state.manager.mapper.cascade_iterator("refresh-expire", state)
)
self._conditional_expire(state)
for o, m, st_, dct_ in cascaded:
self._conditional_expire(st_)
def _conditional_expire(self, state):
"""Expire a state if persistent, else expunge if pending"""
if state.key:
state._expire(state.dict, self.identity_map._modified)
elif state in self._new:
self._new.pop(state)
state._detach(self)
@util.deprecated(
"0.7",
"The :meth:`.Session.prune` method is deprecated along with "
":paramref:`.Session.weak_identity_map`. This method will be "
"removed in a future release.",
)
def prune(self):
"""Remove unreferenced instances cached in the identity map.
Note that this method is only meaningful if "weak_identity_map" is set
to False. The default weak identity map is self-pruning.
Removes any object in this Session's identity map that is not
referenced in user code, modified, new or scheduled for deletion.
Returns the number of objects pruned.
"""
return self.identity_map.prune()
def expunge(self, instance):
"""Remove the `instance` from this ``Session``.
This will free all internal references to the instance. Cascading
will be applied according to the *expunge* cascade rule.
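A sketch with a placeholder instance::
    session.expunge(some_object)
    # some_object is now "detached" and no longer tracked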
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
if state.session_id is not self.hash_key:
raise sa_exc.InvalidRequestError(
"Instance %s is not present in this Session" % state_str(state)
)
cascaded = list(
state.manager.mapper.cascade_iterator("expunge", state)
)
self._expunge_states([state] + [st_ for o, m, st_, dct_ in cascaded])
def _expunge_states(self, states, to_transient=False):
for state in states:
if state in self._new:
self._new.pop(state)
elif self.identity_map.contains_state(state):
self.identity_map.safe_discard(state)
self._deleted.pop(state, None)
elif self.transaction:
# state is "detached" from being deleted, but still present
# in the transaction snapshot
self.transaction._deleted.pop(state, None)
statelib.InstanceState._detach_states(
states, self, to_transient=to_transient
)
def _register_persistent(self, states):
"""Register all persistent objects from a flush.
This is used both for pending objects moving to the persistent
state as well as already persistent objects.
"""
pending_to_persistent = self.dispatch.pending_to_persistent or None
for state in states:
mapper = _state_mapper(state)
# prevent against last minute dereferences of the object
obj = state.obj()
if obj is not None:
instance_key = mapper._identity_key_from_state(state)
if (
_none_set.intersection(instance_key[1])
and not mapper.allow_partial_pks
or _none_set.issuperset(instance_key[1])
):
raise exc.FlushError(
"Instance %s has a NULL identity key. If this is an "
"auto-generated value, check that the database table "
"allows generation of new primary key values, and "
"that the mapped Column object is configured to "
"expect these generated values. Ensure also that "
"this flush() is not occurring at an inappropriate "
"time, such as within a load() event."
% state_str(state)
)
if state.key is None:
state.key = instance_key
elif state.key != instance_key:
# primary key switch. use safe_discard() in case another
# state has already replaced this one in the identity
# map (see test/orm/test_naturalpks.py ReversePKsTest)
self.identity_map.safe_discard(state)
if state in self.transaction._key_switches:
orig_key = self.transaction._key_switches[state][0]
else:
orig_key = state.key
self.transaction._key_switches[state] = (
orig_key,
instance_key,
)
state.key = instance_key
# there can be an existing state in the identity map
# that is replaced when the primary keys of two instances
# are swapped; see test/orm/test_naturalpks.py -> test_reverse
old = self.identity_map.replace(state)
if (
old is not None
and mapper._identity_key_from_state(old) == instance_key
and old.obj() is not None
):
util.warn(
"Identity map already had an identity for %s, "
"replacing it with newly flushed object. Are there "
"load operations occurring inside of an event handler "
"within the flush?" % (instance_key,)
)
state._orphaned_outside_of_session = False
statelib.InstanceState._commit_all_states(
((state, state.dict) for state in states), self.identity_map
)
self._register_altered(states)
if pending_to_persistent is not None:
for state in states.intersection(self._new):
pending_to_persistent(self, state)
# remove from new last, might be the last strong ref
for state in set(states).intersection(self._new):
self._new.pop(state)
def _register_altered(self, states):
if self._enable_transaction_accounting and self.transaction:
for state in states:
if state in self._new:
self.transaction._new[state] = True
else:
self.transaction._dirty[state] = True
def _remove_newly_deleted(self, states):
persistent_to_deleted = self.dispatch.persistent_to_deleted or None
for state in states:
if self._enable_transaction_accounting and self.transaction:
self.transaction._deleted[state] = True
if persistent_to_deleted is not None:
# get a strong reference before we pop out of
# self._deleted
obj = state.obj() # noqa
self.identity_map.safe_discard(state)
self._deleted.pop(state, None)
state._deleted = True
# can't call state._detach() here, because this state
# is still in the transaction snapshot and needs to be
# tracked as part of that
if persistent_to_deleted is not None:
persistent_to_deleted(self, state)
def add(self, instance, _warn=True):
"""Place an object in the ``Session``.
Its state will be persisted to the database on the next flush
operation.
Repeated calls to ``add()`` will be ignored. The opposite of ``add()``
is ``expunge()``.
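A sketch, assuming a hypothetical mapped class ``User``::
    user = User(name="some name")
    session.add(user)  # user is now "pending"
    session.commit()   # INSERT emitted; user becomes "persistent"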
"""
if _warn and self._warn_on_events:
self._flush_warning("Session.add()")
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
self._save_or_update_state(state)
def add_all(self, instances):
"""Add the given collection of instances to this ``Session``."""
if self._warn_on_events:
self._flush_warning("Session.add_all()")
for instance in instances:
self.add(instance, _warn=False)
def _save_or_update_state(self, state):
state._orphaned_outside_of_session = False
self._save_or_update_impl(state)
mapper = _state_mapper(state)
for o, m, st_, dct_ in mapper.cascade_iterator(
"save-update", state, halt_on=self._contains_state
):
self._save_or_update_impl(st_)
def delete(self, instance):
"""Mark an instance as deleted.
The database delete operation occurs upon ``flush()``.
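A sketch, assuming a hypothetical mapped class ``User``::
    user = session.query(User).get(5)
    session.delete(user)  # marked for deletion
    session.commit()      # DELETE is emitted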
"""
if self._warn_on_events:
self._flush_warning("Session.delete()")
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
self._delete_impl(state, instance, head=True)
def _delete_impl(self, state, obj, head):
if state.key is None:
if head:
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persisted" % state_str(state)
)
else:
return
to_attach = self._before_attach(state, obj)
if state in self._deleted:
return
self.identity_map.add(state)
if to_attach:
self._after_attach(state, obj)
if head:
# grab the cascades before adding the item to the deleted list
# so that autoflush does not delete the item
# the strong reference to the instance itself is significant here
cascade_states = list(
state.manager.mapper.cascade_iterator("delete", state)
)
self._deleted[state] = obj
if head:
for o, m, st_, dct_ in cascade_states:
self._delete_impl(st_, o, False)
def merge(self, instance, load=True):
"""Copy the state of a given instance into a corresponding instance
within this :class:`.Session`.
:meth:`.Session.merge` examines the primary key attributes of the
source instance, and attempts to reconcile it with an instance of the
same primary key in the session. If not found locally, it attempts
to load the object from the database based on primary key, and if
none can be located, creates a new instance. The state of each
attribute on the source instance is then copied to the target
instance. The resulting target instance is then returned by the
method; the original source instance is left unmodified, and
un-associated with the :class:`.Session` if not already.
This operation cascades to associated instances if the association is
mapped with ``cascade="merge"``.
See :ref:`unitofwork_merging` for a detailed discussion of merging.
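A sketch with placeholder names; the primary key value is assumed to
exist in the database::
    detached_user = User(id=5, name="updated name")
    merged = session.merge(detached_user)
    # "merged" is the Session-local instance; "detached_user"
    # itself remains outside the Session
    session.commit()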
.. versionchanged:: 1.1 - :meth:`.Session.merge` will now reconcile
pending objects with overlapping primary keys in the same way
as persistent. See :ref:`change_3601` for discussion.
:param instance: Instance to be merged.
:param load: Boolean, when False, :meth:`.merge` switches into
a "high performance" mode which causes it to forego emitting history
events as well as all database access. This flag is used for
cases such as transferring graphs of objects into a :class:`.Session`
from a second level cache, or to transfer just-loaded objects
into the :class:`.Session` owned by a worker thread or process
without re-querying the database.
The ``load=False`` use case adds the caveat that the given
object has to be in a "clean" state, that is, has no pending changes
to be flushed - even if the incoming object is detached from any
:class:`.Session`. This is so that when
the merge operation populates local attributes and
cascades to related objects and
collections, the values can be "stamped" onto the
target object as is, without generating any history or attribute
events, and without the need to reconcile the incoming data with
any existing related objects or collections that might not
be loaded. The resulting objects from ``load=False`` are always
produced as "clean", so it is only appropriate that the given objects
should be "clean" as well, else this suggests a mis-use of the
method.
.. seealso::
:func:`.make_transient_to_detached` - provides for an alternative
means of "merging" a single object into the :class:`.Session`
"""
if self._warn_on_events:
self._flush_warning("Session.merge()")
_recursive = {}
_resolve_conflict_map = {}
if load:
# flush current contents if we expect to load data
self._autoflush()
object_mapper(instance) # verify mapped
autoflush = self.autoflush
try:
self.autoflush = False
return self._merge(
attributes.instance_state(instance),
attributes.instance_dict(instance),
load=load,
_recursive=_recursive,
_resolve_conflict_map=_resolve_conflict_map,
)
finally:
self.autoflush = autoflush
def _merge(
self,
state,
state_dict,
load=True,
_recursive=None,
_resolve_conflict_map=None,
):
mapper = _state_mapper(state)
if state in _recursive:
return _recursive[state]
new_instance = False
key = state.key
if key is None:
if state in self._new:
util.warn(
"Instance %s is already pending in this Session yet is "
"being merged again; this is probably not what you want "
"to do" % state_str(state)
)
if not load:
raise sa_exc.InvalidRequestError(
"merge() with load=False option does not support "
"objects transient (i.e. unpersisted) objects. flush() "
"all changes on mapped instances before merging with "
"load=False."
)
key = mapper._identity_key_from_state(state)
key_is_persistent = attributes.NEVER_SET not in key[1] and (
not _none_set.intersection(key[1])
or (
mapper.allow_partial_pks
and not _none_set.issuperset(key[1])
)
)
else:
key_is_persistent = True
if key in self.identity_map:
try:
merged = self.identity_map[key]
except KeyError:
# object was GC'ed right as we checked for it
merged = None
else:
merged = None
if merged is None:
if key_is_persistent and key in _resolve_conflict_map:
merged = _resolve_conflict_map[key]
elif not load:
if state.modified:
raise sa_exc.InvalidRequestError(
"merge() with load=False option does not support "
"objects marked as 'dirty'. flush() all changes on "
"mapped instances before merging with load=False."
)
merged = mapper.class_manager.new_instance()
merged_state = attributes.instance_state(merged)
merged_state.key = key
self._update_impl(merged_state)
new_instance = True
elif key_is_persistent:
merged = self.query(mapper.class_).get(key[1])
if merged is None:
merged = mapper.class_manager.new_instance()
merged_state = attributes.instance_state(merged)
merged_dict = attributes.instance_dict(merged)
new_instance = True
self._save_or_update_state(merged_state)
else:
merged_state = attributes.instance_state(merged)
merged_dict = attributes.instance_dict(merged)
_recursive[state] = merged
_resolve_conflict_map[key] = merged
# check that we didn't just pull the exact same
# state out.
if state is not merged_state:
# version check if applicable
if mapper.version_id_col is not None:
existing_version = mapper._get_state_attr_by_column(
state,
state_dict,
mapper.version_id_col,
passive=attributes.PASSIVE_NO_INITIALIZE,
)
merged_version = mapper._get_state_attr_by_column(
merged_state,
merged_dict,
mapper.version_id_col,
passive=attributes.PASSIVE_NO_INITIALIZE,
)
if (
existing_version is not attributes.PASSIVE_NO_RESULT
and merged_version is not attributes.PASSIVE_NO_RESULT
and existing_version != merged_version
):
raise exc.StaleDataError(
"Version id '%s' on merged state %s "
"does not match existing version '%s'. "
"Leave the version attribute unset when "
"merging to update the most recent version."
% (
existing_version,
state_str(merged_state),
merged_version,
)
)
merged_state.load_path = state.load_path
merged_state.load_options = state.load_options
# since we are copying load_options, we need to copy
# the callables_ that would have been generated by those
# load_options.
# assumes that the callables we put in state.callables_
# are not instance-specific (which they should not be)
merged_state._copy_callables(state)
for prop in mapper.iterate_properties:
prop.merge(
self,
state,
state_dict,
merged_state,
merged_dict,
load,
_recursive,
_resolve_conflict_map,
)
if not load:
# remove any history
merged_state._commit_all(merged_dict, self.identity_map)
if new_instance:
merged_state.manager.dispatch.load(merged_state, None)
return merged
def _validate_persistent(self, state):
if not self.identity_map.contains_state(state):
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persistent within this Session"
% state_str(state)
)
def _save_impl(self, state):
if state.key is not None:
raise sa_exc.InvalidRequestError(
"Object '%s' already has an identity - "
"it can't be registered as pending" % state_str(state)
)
obj = state.obj()
to_attach = self._before_attach(state, obj)
if state not in self._new:
self._new[state] = obj
state.insert_order = len(self._new)
if to_attach:
self._after_attach(state, obj)
def _update_impl(self, state, revert_deletion=False):
if state.key is None:
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persisted" % state_str(state)
)
if state._deleted:
if revert_deletion:
if not state._attached:
return
del state._deleted
else:
raise sa_exc.InvalidRequestError(
"Instance '%s' has been deleted. "
"Use the make_transient() "
"function to send this object back "
"to the transient state." % state_str(state)
)
obj = state.obj()
# check for late gc
if obj is None:
return
to_attach = self._before_attach(state, obj)
self._deleted.pop(state, None)
if revert_deletion:
self.identity_map.replace(state)
else:
self.identity_map.add(state)
if to_attach:
self._after_attach(state, obj)
elif revert_deletion:
self.dispatch.deleted_to_persistent(self, state)
def _save_or_update_impl(self, state):
if state.key is None:
self._save_impl(state)
else:
self._update_impl(state)
def enable_relationship_loading(self, obj):
"""Associate an object with this :class:`.Session` for related
object loading.
.. warning::
:meth:`.enable_relationship_loading` exists to serve special
use cases and is not recommended for general use.
Accesses of attributes mapped with :func:`_orm.relationship`
will attempt to load a value from the database using this
:class:`.Session` as the source of connectivity. The values
will be loaded based on foreign key and primary key values
present on this object - if not present, then those relationships
will be unavailable.
The object will be attached to this session, but will
**not** participate in any persistence operations; its state
for almost all purposes will remain either "transient" or
"detached", except for the case of relationship loading.
Also note that backrefs will often not work as expected.
Altering a relationship-bound attribute on the target object
may not fire off a backref event, if the effective value
is what was already loaded from a foreign-key-holding value.
The :meth:`.Session.enable_relationship_loading` method is
similar to the ``load_on_pending`` flag on :func:`_orm.relationship`.
Unlike that flag, :meth:`.Session.enable_relationship_loading` allows
an object to remain transient while still being able to load
related items.
To make a transient object associated with a :class:`.Session`
via :meth:`.Session.enable_relationship_loading` pending, add
it to the :class:`.Session` using :meth:`.Session.add` normally.
If the object instead represents an existing identity in the database,
it should be merged using :meth:`.Session.merge`.
:meth:`.Session.enable_relationship_loading` does not improve
behavior when the ORM is used normally - object references should be
constructed at the object level, not at the foreign key level, so
that they are present in an ordinary way before flush()
proceeds. This method is not intended for general use.
.. seealso::
``load_on_pending`` at :func:`_orm.relationship` - this flag
allows per-relationship loading of many-to-ones on items that
are pending.
:func:`.make_transient_to_detached` - allows for an object to
be added to a :class:`.Session` without SQL emitted, which then
will unexpire attributes on access.
"""
state = attributes.instance_state(obj)
to_attach = self._before_attach(state, obj)
state._load_pending = True
if to_attach:
self._after_attach(state, obj)
def _before_attach(self, state, obj):
if state.session_id == self.hash_key:
return False
if state.session_id and state.session_id in _sessions:
raise sa_exc.InvalidRequestError(
"Object '%s' is already attached to session '%s' "
"(this is '%s')"
% (state_str(state), state.session_id, self.hash_key)
)
self.dispatch.before_attach(self, state)
return True
def _after_attach(self, state, obj):
state.session_id = self.hash_key
if state.modified and state._strong_obj is None:
state._strong_obj = obj
self.dispatch.after_attach(self, state)
if state.key:
self.dispatch.detached_to_persistent(self, state)
else:
self.dispatch.transient_to_pending(self, state)
def __contains__(self, instance):
"""Return True if the instance is associated with this session.
A result of True indicates that the instance is either pending or
persistent within this Session.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
return self._contains_state(state)
def __iter__(self):
"""Iterate over all pending or persistent instances within this
Session.
"""
return iter(
list(self._new.values()) + list(self.identity_map.values())
)
def _contains_state(self, state):
return state in self._new or self.identity_map.contains_state(state)
def flush(self, objects=None):
"""Flush all the object changes to the database.
Writes out all pending object creations, deletions and modifications
to the database as INSERTs, DELETEs, UPDATEs, etc. Operations are
automatically ordered by the Session's unit of work dependency
solver.
Database operations will be issued in the current transactional
context and do not affect the state of the transaction, unless an
error occurs, in which case the entire transaction is rolled back.
You may flush() as often as you like within a transaction to move
changes from Python to the database's transaction buffer.
For ``autocommit`` Sessions with no active manual transaction, flush()
will create a transaction on the fly that surrounds the entire set of
operations into the flush.
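A sketch, assuming a hypothetical mapped class ``User`` with a
server-generated primary key::
    user = User(name="some name")
    session.add(user)
    session.flush()  # INSERT emitted; transaction remains open
    print(user.id)   # newly generated primary key is now available
    session.commit()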
:param objects: Optional; restricts the flush operation to operate
only on elements that are in the given collection.
This feature is for an extremely narrow set of use cases where
particular objects may need to be operated upon before the
full flush() occurs. It is not intended for general use.
"""
if self._flushing:
raise sa_exc.InvalidRequestError("Session is already flushing")
if self._is_clean():
return
try:
self._flushing = True
self._flush(objects)
finally:
self._flushing = False
def _flush_warning(self, method):
util.warn(
"Usage of the '%s' operation is not currently supported "
"within the execution stage of the flush process. "
"Results may not be consistent. Consider using alternative "
"event listeners or connection-level operations instead." % method
)
def _is_clean(self):
return (
not self.identity_map.check_modified()
and not self._deleted
and not self._new
)
def _flush(self, objects=None):
dirty = self._dirty_states
if not dirty and not self._deleted and not self._new:
self.identity_map._modified.clear()
return
flush_context = UOWTransaction(self)
if self.dispatch.before_flush:
self.dispatch.before_flush(self, flush_context, objects)
# re-establish "dirty states" in case the listeners
# added
dirty = self._dirty_states
deleted = set(self._deleted)
new = set(self._new)
dirty = set(dirty).difference(deleted)
# create the set of all objects we want to operate upon
if objects:
# specific list passed in
objset = set()
for o in objects:
try:
state = attributes.instance_state(o)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(o), replace_context=err,
)
objset.add(state)
else:
objset = None
# store objects whose fate has been decided
processed = set()
# put all saves/updates into the flush context. detect top-level
# orphans and throw them into deleted.
if objset:
proc = new.union(dirty).intersection(objset).difference(deleted)
else:
proc = new.union(dirty).difference(deleted)
for state in proc:
is_orphan = _state_mapper(state)._is_orphan(state)
is_persistent_orphan = is_orphan and state.has_identity
if (
is_orphan
and not is_persistent_orphan
and state._orphaned_outside_of_session
):
self._expunge_states([state])
else:
_reg = flush_context.register_object(
state, isdelete=is_persistent_orphan
)
assert _reg, "Failed to add object to the flush context!"
processed.add(state)
# put all remaining deletes into the flush context.
if objset:
proc = deleted.intersection(objset).difference(processed)
else:
proc = deleted.difference(processed)
for state in proc:
_reg = flush_context.register_object(state, isdelete=True)
assert _reg, "Failed to add object to the flush context!"
if not flush_context.has_work:
return
flush_context.transaction = transaction = self.begin(
subtransactions=True
)
try:
self._warn_on_events = True
try:
flush_context.execute()
finally:
self._warn_on_events = False
self.dispatch.after_flush(self, flush_context)
flush_context.finalize_flush_changes()
if not objects and self.identity_map._modified:
len_ = len(self.identity_map._modified)
statelib.InstanceState._commit_all_states(
[
(state, state.dict)
for state in self.identity_map._modified
],
instance_dict=self.identity_map,
)
util.warn(
"Attribute history events accumulated on %d "
"previously clean instances "
"within inner-flush event handlers have been "
"reset, and will not result in database updates. "
"Consider using set_committed_value() within "
"inner-flush event handlers to avoid this warning." % len_
)
# useful assertions:
# if not objects:
# assert not self.identity_map._modified
# else:
# assert self.identity_map._modified == \
# self.identity_map._modified.difference(objects)
self.dispatch.after_flush_postexec(self, flush_context)
transaction.commit()
except:
with util.safe_reraise():
transaction.rollback(_capture_exception=True)
def bulk_save_objects(
self,
objects,
return_defaults=False,
update_changed_only=True,
preserve_order=True,
):
"""Perform a bulk save of the given list of objects.
The bulk save feature allows mapped objects to be used as the
source of simple INSERT and UPDATE operations which can be more easily
grouped together into higher performing "executemany"
operations; the extraction of data from the objects is also performed
using a lower-latency process that ignores whether or not attributes
have actually been modified in the case of UPDATEs, and also ignores
SQL expressions.
The objects as given are not added to the session and no additional
state is established on them, unless the ``return_defaults`` flag
is also set, in which case primary key attributes and server-side
default values will be populated.
.. versionadded:: 1.0.0
.. warning::
The bulk save feature allows for a lower-latency INSERT/UPDATE
of rows at the expense of most other unit-of-work features.
Features such as object management, relationship handling,
and SQL clause support are **silently omitted** in favor of raw
INSERT/UPDATES of records.
**Please read the list of caveats at** :ref:`bulk_operations`
**before using this method, and fully test and confirm the
functionality of all code developed using these systems.**
:param objects: a sequence of mapped object instances. The mapped
objects are persisted as is, and are **not** associated with the
:class:`.Session` afterwards.
For each object, whether the object is sent as an INSERT or an
UPDATE is dependent on the same rules used by the :class:`.Session`
in traditional operation; if the object has the
:attr:`.InstanceState.key`
attribute set, then the object is assumed to be "detached" and
will result in an UPDATE. Otherwise, an INSERT is used.
In the case of an UPDATE, statements are grouped based on which
attributes have changed, and are thus to be the subject of each
SET clause. If ``update_changed_only`` is False, then all
attributes present within each object are applied to the UPDATE
statement, which may help in allowing the statements to be grouped
together into a larger executemany(), and will also reduce the
overhead of checking history on attributes.
:param return_defaults: when True, rows that are missing values which
generate defaults, namely integer primary key defaults and sequences,
will be inserted **one at a time**, so that the primary key value
is available. In particular this will allow joined-inheritance
and other multi-table mappings to insert correctly without the need
to provide primary key values ahead of time; however,
:paramref:`.Session.bulk_save_objects.return_defaults` **greatly
reduces the performance gains** of the method overall.
:param update_changed_only: when True, UPDATE statements are rendered
based on those attributes in each state that have logged changes.
When False, all attributes present are rendered into the SET clause
with the exception of primary key attributes.
:param preserve_order: when True, the order of inserts and updates
matches exactly the order in which the objects are given. When
False, common types of objects are grouped into inserts
and updates, to allow for more batching opportunities.
.. versionadded:: 1.3
.. seealso::
:ref:`bulk_operations`
:meth:`.Session.bulk_insert_mappings`
:meth:`.Session.bulk_update_mappings`
"""
def key(state):
return (state.mapper, state.key is not None)
obj_states = (attributes.instance_state(obj) for obj in objects)
if not preserve_order:
obj_states = sorted(obj_states, key=key)
for (mapper, isupdate), states in itertools.groupby(obj_states, key):
self._bulk_save_mappings(
mapper,
states,
isupdate,
True,
return_defaults,
update_changed_only,
False,
)
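    # Illustrative sketch only (not part of the original source); assumes a
    # hypothetical mapped ``User`` class and an already-configured ``session``.
    # Transient objects are grouped into "executemany" INSERT statements:
    #
    #     users = [User(name="user%d" % i) for i in range(10000)]
    #     session.bulk_save_objects(users)
    #     session.commit()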
def bulk_insert_mappings(
self, mapper, mappings, return_defaults=False, render_nulls=False
):
"""Perform a bulk insert of the given list of mapping dictionaries.
The bulk insert feature allows plain Python dictionaries to be used as
the source of simple INSERT operations which can be more easily
grouped together into higher performing "executemany"
operations. Using dictionaries, there is no "history" or session
state management features in use, reducing latency when inserting
large numbers of simple rows.
The values within the dictionaries as given are typically passed
without modification into Core :meth:`_expression.Insert` constructs,
after
organizing the values within them across the tables to which
the given mapper is mapped.
.. versionadded:: 1.0.0
.. warning::
The bulk insert feature allows for a lower-latency INSERT
of rows at the expense of most other unit-of-work features.
Features such as object management, relationship handling,
and SQL clause support are **silently omitted** in favor of raw
INSERT of records.
**Please read the list of caveats at** :ref:`bulk_operations`
**before using this method, and fully test and confirm the
functionality of all code developed using these systems.**
:param mapper: a mapped class, or the actual :class:`_orm.Mapper`
object,
representing the single kind of object represented within the mapping
list.
:param mappings: a sequence of dictionaries, each one containing the
state of the mapped row to be inserted, in terms of the attribute
names on the mapped class. If the mapping refers to multiple tables,
such as a joined-inheritance mapping, each dictionary must contain all
keys to be populated into all tables.
:param return_defaults: when True, rows that are missing values which
generate defaults, namely integer primary key defaults and sequences,
will be inserted **one at a time**, so that the primary key value
is available. In particular this will allow joined-inheritance
and other multi-table mappings to insert correctly without the need
to provide primary
key values ahead of time; however,
:paramref:`.Session.bulk_insert_mappings.return_defaults`
**greatly reduces the performance gains** of the method overall.
If the rows
to be inserted only refer to a single table, then there is no
reason this flag should be set as the returned default information
is not used.
:param render_nulls: When True, a value of ``None`` will result
in a NULL value being included in the INSERT statement, rather
than the column being omitted from the INSERT. This allows all
the rows being INSERTed to have the identical set of columns which
allows the full set of rows to be batched to the DBAPI. Normally,
each column-set that contains a different combination of NULL values
than the previous row must omit a different series of columns from
the rendered INSERT statement, which means it must be emitted as a
separate statement. By passing this flag, the full set of rows
are guaranteed to be batchable into one batch; the cost however is
that server-side defaults which are invoked by an omitted column will
be skipped, so care must be taken to ensure that these are not
necessary.
.. warning::
When this flag is set, **server side default SQL values will
not be invoked** for those columns that are inserted as NULL;
the NULL value will be sent explicitly. Care must be taken
to ensure that no server-side default functions need to be
invoked for the operation as a whole.
.. versionadded:: 1.1
.. seealso::
:ref:`bulk_operations`
:meth:`.Session.bulk_save_objects`
:meth:`.Session.bulk_update_mappings`
"""
self._bulk_save_mappings(
mapper,
mappings,
False,
False,
return_defaults,
False,
render_nulls,
)
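    # Illustrative sketch only (not part of the original source); assumes a
    # hypothetical mapped ``User`` class. Plain dictionaries keyed on mapped
    # attribute names are inserted without any unit-of-work bookkeeping:
    #
    #     session.bulk_insert_mappings(
    #         User,
    #         [{"name": "user%d" % i} for i in range(10000)],
    #     )
    #     session.commit()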
def bulk_update_mappings(self, mapper, mappings):
"""Perform a bulk update of the given list of mapping dictionaries.
The bulk update feature allows plain Python dictionaries to be used as
the source of simple UPDATE operations which can be more easily
grouped together into higher performing "executemany"
operations. Using dictionaries, there is no "history" or session
state management features in use, reducing latency when updating
large numbers of simple rows.
.. versionadded:: 1.0.0
.. warning::
The bulk update feature allows for a lower-latency UPDATE
of rows at the expense of most other unit-of-work features.
Features such as object management, relationship handling,
and SQL clause support are **silently omitted** in favor of raw
UPDATES of records.
**Please read the list of caveats at** :ref:`bulk_operations`
**before using this method, and fully test and confirm the
functionality of all code developed using these systems.**
:param mapper: a mapped class, or the actual :class:`_orm.Mapper`
object,
representing the single kind of object represented within the mapping
list.
:param mappings: a sequence of dictionaries, each one containing the
state of the mapped row to be updated, in terms of the attribute names
on the mapped class. If the mapping refers to multiple tables, such
as a joined-inheritance mapping, each dictionary may contain keys
corresponding to all tables. All those keys which are present and
are not part of the primary key are applied to the SET clause of the
UPDATE statement; the primary key values, which are required, are
applied to the WHERE clause.
.. seealso::
:ref:`bulk_operations`
:meth:`.Session.bulk_insert_mappings`
:meth:`.Session.bulk_save_objects`
"""
self._bulk_save_mappings(
mapper, mappings, True, False, False, False, False
)
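    # Illustrative sketch only (not part of the original source); assumes a
    # hypothetical mapped ``User`` class with an integer primary key ``id``.
    # Primary key values route to the WHERE clause, remaining keys to SET:
    #
    #     session.bulk_update_mappings(
    #         User,
    #         [{"id": 1, "name": "renamed"}, {"id": 2, "name": "other"}],
    #     )
    #     session.commit()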
def _bulk_save_mappings(
self,
mapper,
mappings,
isupdate,
isstates,
return_defaults,
update_changed_only,
render_nulls,
):
mapper = _class_to_mapper(mapper)
self._flushing = True
transaction = self.begin(subtransactions=True)
try:
if isupdate:
persistence._bulk_update(
mapper,
mappings,
transaction,
isstates,
update_changed_only,
)
else:
persistence._bulk_insert(
mapper,
mappings,
transaction,
isstates,
return_defaults,
render_nulls,
)
transaction.commit()
except:
with util.safe_reraise():
transaction.rollback(_capture_exception=True)
finally:
self._flushing = False
@util.deprecated_params(
passive=(
"0.8",
"The :paramref:`.Session.is_modified.passive` flag is deprecated "
"and will be removed in a future release. The flag is no longer "
"used and is ignored.",
)
)
def is_modified(self, instance, include_collections=True, passive=None):
r"""Return ``True`` if the given instance has locally
modified attributes.
This method retrieves the history for each instrumented
attribute on the instance and performs a comparison of the current
value to its previously committed value, if any.
It is in effect a more expensive and accurate
version of checking for the given instance in the
:attr:`.Session.dirty` collection; a full test for
each attribute's net "dirty" status is performed.
E.g.::
return session.is_modified(someobject)
A few caveats to this method apply:
* Instances present in the :attr:`.Session.dirty` collection may
report ``False`` when tested with this method. This is because
the object may have received change events via attribute mutation,
thus placing it in :attr:`.Session.dirty`, but ultimately the state
is the same as that loaded from the database, resulting in no net
change here.
* Scalar attributes may not have recorded the previously set
value when a new value was applied, if the attribute was not loaded,
or was expired, at the time the new value was received - in these
cases, the attribute is assumed to have a change, even if there is
ultimately no net change against its database value. SQLAlchemy in
most cases does not need the "old" value when a set event occurs, so
it skips the expense of a SQL call if the old value isn't present,
based on the assumption that an UPDATE of the scalar value is
usually needed, and in those few cases where it isn't, is less
expensive on average than issuing a defensive SELECT.
The "old" value is fetched unconditionally upon set only if the
attribute container has the ``active_history`` flag set to ``True``.
This flag is set typically for primary key attributes and scalar
object references that are not a simple many-to-one. To set this
flag for any arbitrary mapped column, use the ``active_history``
argument with :func:`.column_property`.
:param instance: mapped instance to be tested for pending changes.
:param include_collections: Indicates if multivalued collections
should be included in the operation. Setting this to ``False`` is a
way to detect only local-column based properties (i.e. scalar columns
or many-to-one foreign keys) that would result in an UPDATE for this
instance upon flush.
:param passive: not used
"""
state = object_state(instance)
if not state.modified:
return False
dict_ = state.dict
for attr in state.manager.attributes:
if (
not include_collections
and hasattr(attr.impl, "get_collection")
) or not hasattr(attr.impl, "get_history"):
continue
(added, unchanged, deleted) = attr.impl.get_history(
state, dict_, passive=attributes.NO_CHANGE
)
if added or deleted:
return True
else:
return False
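    # Illustrative sketch only (not part of the original source); assumes
    # ``obj`` is a persistent instance with its ``name`` attribute loaded.
    # A no-op assignment typically places the object in ``session.dirty``
    # while ``is_modified()`` still reports no net change:
    #
    #     obj.name = obj.name
    #     assert obj in session.dirty
    #     assert not session.is_modified(obj)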
@property
def is_active(self):
"""True if this :class:`.Session` is in "transaction mode" and
is not in "partial rollback" state.
The :class:`.Session` in its default mode of ``autocommit=False``
is essentially always in "transaction mode", in that a
:class:`.SessionTransaction` is associated with it as soon as
it is instantiated. This :class:`.SessionTransaction` is immediately
replaced with a new one as soon as it is ended, due to a rollback,
commit, or close operation.
"Transaction mode" does *not* indicate whether
or not actual database connection resources are in use; the
:class:`.SessionTransaction` object coordinates among zero or more
actual database transactions, and starts out with none, accumulating
individual DBAPI connections as different data sources are used
within its scope. The best way to track when a particular
:class:`.Session` has actually begun to use DBAPI resources is to
implement a listener using the :meth:`.SessionEvents.after_begin`
method, which will deliver both the :class:`.Session` as well as the
target :class:`_engine.Connection` to a user-defined event listener.
The "partial rollback" state refers to when an "inner" transaction,
typically used during a flush, encounters an error and emits a
rollback of the DBAPI connection. At this point, the
:class:`.Session` is in "partial rollback" and waits for the user to
call :meth:`.Session.rollback`, in order to close out the
transaction stack. It is in this "partial rollback" period that the
:attr:`.is_active` flag returns False. After the call to
:meth:`.Session.rollback`, the :class:`.SessionTransaction` is
replaced with a new one and :attr:`.is_active` returns ``True`` again.
When a :class:`.Session` is used in ``autocommit=True`` mode, the
:class:`.SessionTransaction` is only instantiated within the scope
of a flush call, or when :meth:`.Session.begin` is called. So
:attr:`.is_active` will always be ``False`` outside of a flush or
:meth:`.Session.begin` block in this mode, and will be ``True``
within the :meth:`.Session.begin` block as long as it doesn't enter
"partial rollback" state.
From all the above, it follows that the only purpose of this flag is
for application frameworks that wish to detect if a "rollback" is
necessary within a generic error handling routine, for
:class:`.Session` objects that would otherwise be in
"partial rollback" mode. In a typical integration case, this is also
not necessary as it is standard practice to emit
:meth:`.Session.rollback` unconditionally within the outermost
exception catch.
To track the transactional state of a :class:`.Session` fully,
use event listeners, primarily the :meth:`.SessionEvents.after_begin`,
:meth:`.SessionEvents.after_commit`,
:meth:`.SessionEvents.after_rollback` and related events.
"""
return self.transaction and self.transaction.is_active
identity_map = None
"""A mapping of object identities to objects themselves.
Iterating through ``Session.identity_map.values()`` provides
access to the full set of persistent objects (i.e., those
that have row identity) currently in the session.
.. seealso::
:func:`.identity_key` - helper function to produce the keys used
in this dictionary.
"""
@property
def _dirty_states(self):
"""The set of all persistent states considered dirty.
This method returns all states that were modified including
those that were possibly deleted.
"""
return self.identity_map._dirty_states()
@property
def dirty(self):
"""The set of all persistent instances considered dirty.
E.g.::
some_mapped_object in session.dirty
Instances are considered dirty when they were modified but not
deleted.
Note that this 'dirty' calculation is 'optimistic'; most
attribute-setting or collection modification operations will
mark an instance as 'dirty' and place it in this set, even if
there is no net change to the attribute's value. At flush
time, the value of each attribute is compared to its
previously saved value, and if there's no net change, no SQL
operation will occur (this is a more expensive operation so
it's only done at flush time).
To check if an instance has actionable net changes to its
attributes, use the :meth:`.Session.is_modified` method.
"""
return util.IdentitySet(
[
state.obj()
for state in self._dirty_states
if state not in self._deleted
]
)
@property
def deleted(self):
"The set of all instances marked as 'deleted' within this ``Session``"
return util.IdentitySet(list(self._deleted.values()))
@property
def new(self):
"The set of all instances marked as 'new' within this ``Session``."
return util.IdentitySet(list(self._new.values()))
class sessionmaker(_SessionClassMethods):
"""A configurable :class:`.Session` factory.
The :class:`.sessionmaker` factory generates new
:class:`.Session` objects when called, creating them given
the configurational arguments established here.
e.g.::
# global scope
Session = sessionmaker(autoflush=False)
# later, in a local scope, create and use a session:
sess = Session()
Any keyword arguments sent to the constructor itself will override the
"configured" keywords::
Session = sessionmaker()
# bind an individual session to a connection
sess = Session(bind=connection)
The class also includes a method :meth:`.configure`, which can
be used to specify additional keyword arguments to the factory, which
will take effect for subsequent :class:`.Session` objects generated.
This is usually used to associate one or more :class:`_engine.Engine`
objects
with an existing :class:`.sessionmaker` factory before it is first
used::
# application starts
Session = sessionmaker()
# ... later
engine = create_engine('sqlite:///foo.db')
Session.configure(bind=engine)
sess = Session()
.. seealso::
:ref:`session_getting` - introductory text on creating
sessions using :class:`.sessionmaker`.
"""
def __init__(
self,
bind=None,
class_=Session,
autoflush=True,
autocommit=False,
expire_on_commit=True,
info=None,
**kw
):
r"""Construct a new :class:`.sessionmaker`.
All arguments here except for ``class_`` correspond to arguments
accepted by :class:`.Session` directly. See the
:meth:`.Session.__init__` docstring for more details on parameters.
:param bind: a :class:`_engine.Engine` or other :class:`.Connectable`
with
which newly created :class:`.Session` objects will be associated.
:param class\_: class to use in order to create new :class:`.Session`
objects. Defaults to :class:`.Session`.
:param autoflush: The autoflush setting to use with newly created
:class:`.Session` objects.
:param autocommit: The autocommit setting to use with newly created
:class:`.Session` objects.
:param expire_on_commit=True: the expire_on_commit setting to use
with newly created :class:`.Session` objects.
:param info: optional dictionary of information that will be available
via :attr:`.Session.info`. Note this dictionary is *updated*, not
replaced, when the ``info`` parameter is specified to the specific
:class:`.Session` construction operation.
.. versionadded:: 0.9.0
:param \**kw: all other keyword arguments are passed to the
constructor of newly created :class:`.Session` objects.
"""
kw["bind"] = bind
kw["autoflush"] = autoflush
kw["autocommit"] = autocommit
kw["expire_on_commit"] = expire_on_commit
if info is not None:
kw["info"] = info
self.kw = kw
# make our own subclass of the given class, so that
# events can be associated with it specifically.
self.class_ = type(class_.__name__, (class_,), {})
def __call__(self, **local_kw):
"""Produce a new :class:`.Session` object using the configuration
established in this :class:`.sessionmaker`.
In Python, the ``__call__`` method is invoked on an object when
it is "called" in the same way as a function::
Session = sessionmaker()
session = Session() # invokes sessionmaker.__call__()
"""
for k, v in self.kw.items():
if k == "info" and "info" in local_kw:
d = v.copy()
d.update(local_kw["info"])
local_kw["info"] = d
else:
local_kw.setdefault(k, v)
return self.class_(**local_kw)
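    # Illustrative sketch only (not part of the original source): keyword
    # arguments passed at call time override the configured defaults, while
    # ``info`` dictionaries are merged rather than replaced:
    #
    #     Session = sessionmaker(autoflush=True, info={"app": "demo"})
    #     sess = Session(autoflush=False, info={"request_id": 42})
    #     # sess.info == {"app": "demo", "request_id": 42}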
def configure(self, **new_kw):
"""(Re)configure the arguments for this sessionmaker.
e.g.::
Session = sessionmaker()
Session.configure(bind=create_engine('sqlite://'))
"""
self.kw.update(new_kw)
def __repr__(self):
return "%s(class_=%r, %s)" % (
self.__class__.__name__,
self.class_.__name__,
", ".join("%s=%r" % (k, v) for k, v in self.kw.items()),
)
def close_all_sessions():
"""Close all sessions in memory.
This function consults a global registry of all :class:`.Session` objects
and calls :meth:`.Session.close` on them, which resets them to a clean
state.
This function is not for general use but may be useful for test suites
within the teardown scheme.
.. versionadded:: 1.3
"""
for sess in _sessions.values():
sess.close()
def make_transient(instance):
"""Alter the state of the given instance so that it is :term:`transient`.
.. note::
:func:`.make_transient` is a special-case function for
advanced use cases only.
The given mapped instance is assumed to be in the :term:`persistent` or
:term:`detached` state. The function will remove its association with any
:class:`.Session` as well as its :attr:`.InstanceState.identity`. The
effect is that the object will behave as though it were newly constructed,
except retaining any attribute / collection values that were loaded at the
time of the call. The :attr:`.InstanceState.deleted` flag is also reset
if this object had been deleted as a result of using
:meth:`.Session.delete`.
.. warning::
:func:`.make_transient` does **not** "unexpire" or otherwise eagerly
load ORM-mapped attributes that are not currently loaded at the time
the function is called. This includes attributes which:
* were expired via :meth:`.Session.expire`
* were expired as the natural effect of committing a session
transaction, e.g. :meth:`.Session.commit`
* are normally :term:`lazy loaded` but are not currently loaded
* are "deferred" via :ref:`deferred` and are not yet loaded
* were not present in the query which loaded this object, such as that
which is common in joined table inheritance and other scenarios.
After :func:`.make_transient` is called, unloaded attributes such
as those above will normally resolve to the value ``None`` when
accessed, or an empty collection for a collection-oriented attribute.
As the object is transient and un-associated with any database
identity, it will no longer retrieve these values.
.. seealso::
:func:`.make_transient_to_detached`
"""
state = attributes.instance_state(instance)
s = _state_session(state)
if s:
s._expunge_states([state])
# remove expired state
state.expired_attributes.clear()
# remove deferred callables
if state.callables:
del state.callables
if state.key:
del state.key
if state._deleted:
del state._deleted
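# Illustrative sketch only (not part of the original source); assumes
# ``obj`` is a persistent instance. Making it transient strips its identity
# so that a subsequent add() INSERTs it as a brand new row:
#
#     make_transient(obj)
#     obj.id = None  # assuming a database-generated surrogate primary key
#     session.add(obj)
#     session.commit()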
def make_transient_to_detached(instance):
"""Make the given transient instance :term:`detached`.
.. note::
:func:`.make_transient_to_detached` is a special-case function for
advanced use cases only.
All attribute history on the given instance
will be reset as though the instance were freshly loaded
from a query. Missing attributes will be marked as expired.
The primary key attributes of the object, which are required, will be made
into the "key" of the instance.
The object can then be added to a session, or merged
possibly with the load=False flag, at which point it will look
as if it were loaded that way, without emitting SQL.
This is a special use case function that differs from a normal
call to :meth:`.Session.merge` in that a given persistent state
can be manufactured without any SQL calls.
.. versionadded:: 0.9.5
.. seealso::
:func:`.make_transient`
:meth:`.Session.enable_relationship_loading`
"""
state = attributes.instance_state(instance)
if state.session_id or state.key:
raise sa_exc.InvalidRequestError("Given object must be transient")
state.key = state.mapper._identity_key_from_state(state)
if state._deleted:
del state._deleted
state._commit_all(state.dict)
state._expire_attributes(state.dict, state.unloaded_expirable)
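# Illustrative sketch only (not part of the original source); ``User`` is a
# hypothetical mapped class. A manually constructed object can be given a
# detached identity and merged without emitting a SELECT:
#
#     obj = User(id=5, name="cached name")
#     make_transient_to_detached(obj)
#     obj = session.merge(obj, load=False)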
def object_session(instance):
"""Return the :class:`.Session` to which the given instance belongs.
This is essentially the same as the :attr:`.InstanceState.session`
accessor. See that attribute for details.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
else:
return _state_session(state)
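# Illustrative sketch only (not part of the original source); ``User`` is a
# hypothetical mapped class. A transient instance returns None; an instance
# owned by a session returns that session:
#
#     assert object_session(User()) is None
#     session.add(obj)
#     assert object_session(obj) is session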
_new_sessionid = util.counter()
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/__init__.py
|
# orm/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Functional constructs for ORM configuration.
See the SQLAlchemy object relational tutorial and mapper configuration
documentation for an overview of how this module is used.
"""
from . import exc # noqa
from . import mapper as mapperlib # noqa
from . import strategy_options
from .deprecated_interfaces import AttributeExtension # noqa
from .deprecated_interfaces import MapperExtension # noqa
from .deprecated_interfaces import SessionExtension # noqa
from .descriptor_props import ComparableProperty # noqa
from .descriptor_props import CompositeProperty # noqa
from .descriptor_props import SynonymProperty # noqa
from .interfaces import EXT_CONTINUE # noqa
from .interfaces import EXT_SKIP # noqa
from .interfaces import EXT_STOP # noqa
from .interfaces import PropComparator # noqa
from .mapper import _mapper_registry
from .mapper import class_mapper # noqa
from .mapper import configure_mappers # noqa
from .mapper import Mapper # noqa
from .mapper import reconstructor # noqa
from .mapper import validates # noqa
from .properties import ColumnProperty # noqa
from .query import AliasOption # noqa
from .query import Bundle # noqa
from .query import Query # noqa
from .relationships import foreign # noqa
from .relationships import RelationshipProperty # noqa
from .relationships import remote # noqa
from .scoping import scoped_session # noqa
from .session import close_all_sessions # noqa
from .session import make_transient # noqa
from .session import make_transient_to_detached # noqa
from .session import object_session # noqa
from .session import Session # noqa
from .session import sessionmaker # noqa
from .strategy_options import Load # noqa
from .util import aliased # noqa
from .util import join # noqa
from .util import object_mapper # noqa
from .util import outerjoin # noqa
from .util import polymorphic_union # noqa
from .util import was_deleted # noqa
from .util import with_parent # noqa
from .util import with_polymorphic # noqa
from .. import sql as _sql
from .. import util as _sa_util
from ..util.langhelpers import public_factory
def create_session(bind=None, **kwargs):
r"""Create a new :class:`.Session`
with no automation enabled by default.
This function is used primarily for testing. The usual
route to :class:`.Session` creation is via its constructor
or the :func:`.sessionmaker` function.
:param bind: optional, a single Connectable to use for all
database access in the created
:class:`~sqlalchemy.orm.session.Session`.
:param \*\*kwargs: optional, passed through to the
:class:`.Session` constructor.
:returns: an :class:`~sqlalchemy.orm.session.Session` instance
The defaults of create_session() are the opposite of that of
:func:`sessionmaker`; ``autoflush`` and ``expire_on_commit`` are
False, ``autocommit`` is True. In this sense the session acts
more like the "classic" SQLAlchemy 0.3 session with these defaults.
Usage::
>>> from sqlalchemy.orm import create_session
>>> session = create_session()
It is recommended to use :func:`sessionmaker` instead of
create_session().
"""
kwargs.setdefault("autoflush", False)
kwargs.setdefault("autocommit", True)
kwargs.setdefault("expire_on_commit", False)
return Session(bind=bind, **kwargs)
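# Illustrative sketch only (not part of the original source); assumes an
# in-memory SQLite engine. The resulting session autocommits and does not
# autoflush or expire on commit:
#
#     from sqlalchemy import create_engine
#     engine = create_engine("sqlite://")
#     session = create_session(bind=engine)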
relationship = public_factory(RelationshipProperty, ".orm.relationship")
def relation(*arg, **kw):
"""A synonym for :func:`relationship`."""
return relationship(*arg, **kw)
def dynamic_loader(argument, **kw):
"""Construct a dynamically-loading mapper property.
This is essentially the same as
using the ``lazy='dynamic'`` argument with :func:`relationship`::
dynamic_loader(SomeClass)
# is the same as
relationship(SomeClass, lazy="dynamic")
See the section :ref:`dynamic_relationship` for more details
on dynamic loading.
"""
kw["lazy"] = "dynamic"
return relationship(argument, **kw)
column_property = public_factory(ColumnProperty, ".orm.column_property")
composite = public_factory(CompositeProperty, ".orm.composite")
def backref(name, **kwargs):
"""Create a back reference with explicit keyword arguments, which are the
same arguments one can send to :func:`relationship`.
Used with the ``backref`` keyword argument to :func:`relationship` in
place of a string argument, e.g.::
'items':relationship(
SomeItem, backref=backref('parent', lazy='subquery'))
.. seealso::
:ref:`relationships_backref`
"""
return (name, kwargs)
def deferred(*columns, **kw):
r"""Indicate a column-based mapped attribute that by default will
not load unless accessed.
:param \*columns: columns to be mapped. This is typically a single
:class:`_schema.Column` object,
however a collection is supported in order
to support multiple columns mapped under the same attribute.
:param \**kw: additional keyword arguments passed to
:class:`.ColumnProperty`.
.. seealso::
:ref:`deferred`
"""
return ColumnProperty(deferred=True, *columns, **kw)
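# Illustrative sketch only (not part of the original source); ``Base`` is a
# hypothetical declarative base. The large ``text`` column loads only upon
# first attribute access:
#
#     class Book(Base):
#         __tablename__ = "book"
#         id = Column(Integer, primary_key=True)
#         text = deferred(Column(Text))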
def query_expression(default_expr=_sql.null()):
"""Indicate an attribute that populates from a query-time SQL expression.
:param default_expr: Optional SQL expression object that will be used in
all cases if not assigned later with :func:`_orm.with_expression`.
E.g.::
from sqlalchemy.sql import literal
class C(Base):
#...
my_expr = query_expression(literal(1))
.. versionadded:: 1.3.18
.. versionadded:: 1.2
.. seealso::
:ref:`mapper_querytime_expression`
"""
prop = ColumnProperty(default_expr)
prop.strategy_key = (("query_expression", True),)
return prop
mapper = public_factory(Mapper, ".orm.mapper")
synonym = public_factory(SynonymProperty, ".orm.synonym")
comparable_property = public_factory(
ComparableProperty, ".orm.comparable_property"
)
@_sa_util.deprecated(
"0.7",
message=":func:`.compile_mappers` is deprecated and will be removed "
"in a future release. Please use :func:`.configure_mappers`",
)
def compile_mappers():
"""Initialize the inter-mapper relationships of all mappers that have
been defined.
"""
configure_mappers()
def clear_mappers():
"""Remove all mappers from all classes.
This function removes all instrumentation from classes and disposes
of their associated mappers. Once called, the classes are unmapped
and can be later re-mapped with new mappers.
:func:`.clear_mappers` is *not* for normal use, as there is literally no
valid usage for it outside of very specific testing scenarios. Normally,
mappers are permanent structural components of user-defined classes, and
are never discarded independently of their class. If a mapped class
itself is garbage collected, its mapper is automatically disposed of as
well. As such, :func:`.clear_mappers` is only for usage in test suites
that re-use the same classes with different mappings, which is itself an
extremely rare use case - the only such use case is in fact SQLAlchemy's
own test suite, and possibly the test suites of other ORM extension
libraries which intend to test various combinations of mapper construction
upon a fixed set of classes.
"""
mapperlib._CONFIGURE_MUTEX.acquire()
try:
while _mapper_registry:
try:
# can't even reliably call list(weakdict) in jython
mapper, b = _mapper_registry.popitem()
mapper.dispose()
except KeyError:
pass
finally:
mapperlib._CONFIGURE_MUTEX.release()
joinedload = strategy_options.joinedload._unbound_fn
joinedload_all = strategy_options.joinedload._unbound_all_fn
contains_eager = strategy_options.contains_eager._unbound_fn
defer = strategy_options.defer._unbound_fn
undefer = strategy_options.undefer._unbound_fn
undefer_group = strategy_options.undefer_group._unbound_fn
with_expression = strategy_options.with_expression._unbound_fn
load_only = strategy_options.load_only._unbound_fn
lazyload = strategy_options.lazyload._unbound_fn
lazyload_all = strategy_options.lazyload_all._unbound_all_fn
subqueryload = strategy_options.subqueryload._unbound_fn
subqueryload_all = strategy_options.subqueryload_all._unbound_all_fn
selectinload = strategy_options.selectinload._unbound_fn
selectinload_all = strategy_options.selectinload_all._unbound_all_fn
immediateload = strategy_options.immediateload._unbound_fn
noload = strategy_options.noload._unbound_fn
raiseload = strategy_options.raiseload._unbound_fn
defaultload = strategy_options.defaultload._unbound_fn
selectin_polymorphic = strategy_options.selectin_polymorphic._unbound_fn
def eagerload(*args, **kwargs):
"""A synonym for :func:`joinedload()`."""
return joinedload(*args, **kwargs)
def eagerload_all(*args, **kwargs):
"""A synonym for :func:`joinedload_all()`"""
return joinedload_all(*args, **kwargs)
contains_alias = public_factory(AliasOption, ".orm.contains_alias")
def __go(lcls):
global __all__
from .. import util as sa_util # noqa
from . import dynamic # noqa
from . import events # noqa
from . import loading # noqa
import inspect as _inspect
__all__ = sorted(
name
for name, obj in lcls.items()
if not (name.startswith("_") or _inspect.ismodule(obj))
)
_sa_util.dependencies.resolve_all("sqlalchemy.orm")
_sa_util.dependencies.resolve_all("sqlalchemy.ext")
__go(locals())
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/dynamic.py
|
# orm/dynamic.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Dynamic collection API.
Dynamic collections act like Query() objects for read operations and support
basic add/delete mutation.
"""
from . import attributes
from . import exc as orm_exc
from . import interfaces
from . import object_mapper
from . import object_session
from . import properties
from . import strategies
from . import util as orm_util
from .query import Query
from .. import exc
from .. import log
from .. import util
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="dynamic")
class DynaLoader(strategies.AbstractRelationshipLoader):
def init_class_attribute(self, mapper):
self.is_class_level = True
if not self.uselist:
raise exc.InvalidRequestError(
"On relationship %s, 'dynamic' loaders cannot be used with "
"many-to-one/one-to-one relationships and/or "
"uselist=False." % self.parent_property
)
elif self.parent_property.direction not in (
interfaces.ONETOMANY,
interfaces.MANYTOMANY,
):
util.warn(
"On relationship %s, 'dynamic' loaders cannot be used with "
"many-to-one/one-to-one relationships and/or "
"uselist=False. This warning will be an exception in a "
"future release." % self.parent_property
)
strategies._register_attribute(
self.parent_property,
mapper,
useobject=True,
impl_class=DynamicAttributeImpl,
target_mapper=self.parent_property.mapper,
order_by=self.parent_property.order_by,
query_class=self.parent_property.query_class,
)
class DynamicAttributeImpl(attributes.AttributeImpl):
uses_objects = True
default_accepts_scalar_loader = False
supports_population = False
collection = False
dynamic = True
def __init__(
self,
class_,
key,
typecallable,
dispatch,
target_mapper,
order_by,
query_class=None,
**kw
):
super(DynamicAttributeImpl, self).__init__(
class_, key, typecallable, dispatch, **kw
)
self.target_mapper = target_mapper
self.order_by = order_by
if not query_class:
self.query_class = AppenderQuery
elif AppenderMixin in query_class.mro():
self.query_class = query_class
else:
self.query_class = mixin_user_query(query_class)
def get(self, state, dict_, passive=attributes.PASSIVE_OFF):
if not passive & attributes.SQL_OK:
return self._get_collection_history(
state, attributes.PASSIVE_NO_INITIALIZE
).added_items
else:
return self.query_class(self, state)
def get_collection(
self,
state,
dict_,
user_data=None,
passive=attributes.PASSIVE_NO_INITIALIZE,
):
if not passive & attributes.SQL_OK:
return self._get_collection_history(state, passive).added_items
else:
history = self._get_collection_history(state, passive)
return history.added_plus_unchanged
@util.memoized_property
def _append_token(self):
return attributes.Event(self, attributes.OP_APPEND)
@util.memoized_property
def _remove_token(self):
return attributes.Event(self, attributes.OP_REMOVE)
def fire_append_event(
self, state, dict_, value, initiator, collection_history=None
):
if collection_history is None:
collection_history = self._modified_event(state, dict_)
collection_history.add_added(value)
for fn in self.dispatch.append:
value = fn(state, value, initiator or self._append_token)
if self.trackparent and value is not None:
self.sethasparent(attributes.instance_state(value), state, True)
def fire_remove_event(
self, state, dict_, value, initiator, collection_history=None
):
if collection_history is None:
collection_history = self._modified_event(state, dict_)
collection_history.add_removed(value)
if self.trackparent and value is not None:
self.sethasparent(attributes.instance_state(value), state, False)
for fn in self.dispatch.remove:
fn(state, value, initiator or self._remove_token)
def _modified_event(self, state, dict_):
if self.key not in state.committed_state:
state.committed_state[self.key] = CollectionHistory(self, state)
state._modified_event(dict_, self, attributes.NEVER_SET)
# this is a hack to allow the fixtures.ComparableEntity fixture
# to work
dict_[self.key] = True
return state.committed_state[self.key]
def set(
self,
state,
dict_,
value,
initiator=None,
passive=attributes.PASSIVE_OFF,
check_old=None,
pop=False,
_adapt=True,
):
if initiator and initiator.parent_token is self.parent_token:
return
if pop and value is None:
return
iterable = value
new_values = list(iterable)
if state.has_identity:
old_collection = util.IdentitySet(self.get(state, dict_))
collection_history = self._modified_event(state, dict_)
if not state.has_identity:
old_collection = collection_history.added_items
else:
old_collection = old_collection.union(
collection_history.added_items
)
idset = util.IdentitySet
constants = old_collection.intersection(new_values)
additions = idset(new_values).difference(constants)
removals = old_collection.difference(constants)
for member in new_values:
if member in additions:
self.fire_append_event(
state,
dict_,
member,
None,
collection_history=collection_history,
)
for member in removals:
self.fire_remove_event(
state,
dict_,
member,
None,
collection_history=collection_history,
)
def delete(self, *args, **kwargs):
raise NotImplementedError()
def set_committed_value(self, state, dict_, value):
raise NotImplementedError(
"Dynamic attributes don't support " "collection population."
)
def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF):
c = self._get_collection_history(state, passive)
return c.as_history()
def get_all_pending(
self, state, dict_, passive=attributes.PASSIVE_NO_INITIALIZE
):
c = self._get_collection_history(state, passive)
return [(attributes.instance_state(x), x) for x in c.all_items]
def _get_collection_history(self, state, passive=attributes.PASSIVE_OFF):
if self.key in state.committed_state:
c = state.committed_state[self.key]
else:
c = CollectionHistory(self, state)
if state.has_identity and (passive & attributes.INIT_OK):
return CollectionHistory(self, state, apply_to=c)
else:
return c
def append(
self, state, dict_, value, initiator, passive=attributes.PASSIVE_OFF
):
if initiator is not self:
self.fire_append_event(state, dict_, value, initiator)
def remove(
self, state, dict_, value, initiator, passive=attributes.PASSIVE_OFF
):
if initiator is not self:
self.fire_remove_event(state, dict_, value, initiator)
def pop(
self, state, dict_, value, initiator, passive=attributes.PASSIVE_OFF
):
self.remove(state, dict_, value, initiator, passive=passive)
class AppenderMixin(object):
query_class = None
def __init__(self, attr, state):
super(AppenderMixin, self).__init__(attr.target_mapper, None)
self.instance = instance = state.obj()
self.attr = attr
mapper = object_mapper(instance)
prop = mapper._props[self.attr.key]
if prop.secondary is not None:
# this is a hack right now. The Query only knows how to
# make subsequent joins() without a given left-hand side
# from self._from_obj[0]. We need to ensure prop.secondary
# is in the FROM. So we purposely put the mapper selectable
# in _from_obj[0] to ensure a user-defined join() later on
# doesn't fail, and secondary is then in _from_obj[1].
self._from_obj = (prop.mapper.selectable, prop.secondary)
self._criterion = prop._with_parent(instance, alias_secondary=False)
if self.attr.order_by:
self._order_by = self.attr.order_by
def session(self):
sess = object_session(self.instance)
if (
sess is not None
and self.autoflush
and sess.autoflush
and self.instance in sess
):
sess.flush()
if not orm_util.has_identity(self.instance):
return None
else:
return sess
session = property(session, lambda s, x: None)
def __iter__(self):
sess = self.session
if sess is None:
return iter(
self.attr._get_collection_history(
attributes.instance_state(self.instance),
attributes.PASSIVE_NO_INITIALIZE,
).added_items
)
else:
return iter(self._clone(sess))
def __getitem__(self, index):
sess = self.session
if sess is None:
return self.attr._get_collection_history(
attributes.instance_state(self.instance),
attributes.PASSIVE_NO_INITIALIZE,
).indexed(index)
else:
return self._clone(sess).__getitem__(index)
def count(self):
sess = self.session
if sess is None:
return len(
self.attr._get_collection_history(
attributes.instance_state(self.instance),
attributes.PASSIVE_NO_INITIALIZE,
).added_items
)
else:
return self._clone(sess).count()
def _clone(self, sess=None):
# note we're returning an entirely new Query class instance
# here without any assignment capabilities; the class of this
# query is determined by the session.
instance = self.instance
if sess is None:
sess = object_session(instance)
if sess is None:
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session, and no "
"contextual session is established; lazy load operation "
"of attribute '%s' cannot proceed"
% (orm_util.instance_str(instance), self.attr.key)
)
if self.query_class:
query = self.query_class(self.attr.target_mapper, session=sess)
else:
query = sess.query(self.attr.target_mapper)
query._criterion = self._criterion
query._from_obj = self._from_obj
query._order_by = self._order_by
return query
def extend(self, iterator):
for item in iterator:
self.attr.append(
attributes.instance_state(self.instance),
attributes.instance_dict(self.instance),
item,
None,
)
def append(self, item):
self.attr.append(
attributes.instance_state(self.instance),
attributes.instance_dict(self.instance),
item,
None,
)
def remove(self, item):
self.attr.remove(
attributes.instance_state(self.instance),
attributes.instance_dict(self.instance),
item,
None,
)
class AppenderQuery(AppenderMixin, Query):
"""A dynamic query that supports basic collection storage operations."""
def mixin_user_query(cls):
"""Return a new class with AppenderQuery functionality layered over."""
name = "Appender" + cls.__name__
return type(name, (AppenderMixin, cls), {"query_class": cls})
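# Illustrative sketch only (not part of the original source): a user-defined
# Query subclass passed via the ``query_class`` argument of a
# ``lazy='dynamic'`` relationship is wrapped this way so that it also gains
# the appender behavior:
#
#     class MyQuery(Query):
#         pass
#
#     AppenderMyQuery = mixin_user_query(MyQuery)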
class CollectionHistory(object):
"""Overrides AttributeHistory to receive append/remove events directly."""
def __init__(self, attr, state, apply_to=None):
if apply_to:
coll = AppenderQuery(attr, state).autoflush(False)
self.unchanged_items = util.OrderedIdentitySet(coll)
self.added_items = apply_to.added_items
self.deleted_items = apply_to.deleted_items
self._reconcile_collection = True
else:
self.deleted_items = util.OrderedIdentitySet()
self.added_items = util.OrderedIdentitySet()
self.unchanged_items = util.OrderedIdentitySet()
self._reconcile_collection = False
@property
def added_plus_unchanged(self):
return list(self.added_items.union(self.unchanged_items))
@property
def all_items(self):
return list(
self.added_items.union(self.unchanged_items).union(
self.deleted_items
)
)
def as_history(self):
if self._reconcile_collection:
added = self.added_items.difference(self.unchanged_items)
deleted = self.deleted_items.intersection(self.unchanged_items)
unchanged = self.unchanged_items.difference(deleted)
else:
added, unchanged, deleted = (
self.added_items,
self.unchanged_items,
self.deleted_items,
)
return attributes.History(list(added), list(unchanged), list(deleted))
def indexed(self, index):
return list(self.added_items)[index]
def add_added(self, value):
self.added_items.add(value)
def add_removed(self, value):
if value in self.added_items:
self.added_items.remove(value)
else:
self.deleted_items.add(value)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/exc.py
|
# orm/exc.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQLAlchemy ORM exceptions."""
from .. import exc as sa_exc
from .. import util
NO_STATE = (AttributeError, KeyError)
"""Exception types that may be raised by instrumentation implementations."""
class StaleDataError(sa_exc.SQLAlchemyError):
"""An operation encountered database state that is unaccounted for.
Conditions which cause this to happen include:
* A flush may have attempted to update or delete rows
and an unexpected number of rows were matched during
the UPDATE or DELETE statement. Note that when
version_id_col is used, rows in UPDATE or DELETE statements
are also matched against the current known version
identifier.
* A mapped object with version_id_col was refreshed,
and the version number coming back from the database does
not match that of the object itself.
* An object is detached from its parent object, however
the object was previously attached to a different parent
identity which was garbage collected, and a decision
cannot be made if the new parent was really the most
recent "parent".
"""
ConcurrentModificationError = StaleDataError
class FlushError(sa_exc.SQLAlchemyError):
"""A invalid condition was detected during flush()."""
class UnmappedError(sa_exc.InvalidRequestError):
"""Base for exceptions that involve expected mappings not present."""
class ObjectDereferencedError(sa_exc.SQLAlchemyError):
"""An operation cannot complete due to an object being garbage
collected.
"""
class DetachedInstanceError(sa_exc.SQLAlchemyError):
"""An attempt to access unloaded attributes on a
mapped instance that is detached."""
code = "bhk3"
class UnmappedInstanceError(UnmappedError):
"""An mapping operation was requested for an unknown instance."""
@util.dependencies("sqlalchemy.orm.base")
def __init__(self, base, obj, msg=None):
if not msg:
try:
base.class_mapper(type(obj))
name = _safe_cls_name(type(obj))
msg = (
"Class %r is mapped, but this instance lacks "
"instrumentation. This occurs when the instance "
"is created before sqlalchemy.orm.mapper(%s) "
"was called." % (name, name)
)
except UnmappedClassError:
msg = _default_unmapped(type(obj))
if isinstance(obj, type):
msg += (
"; was a class (%s) supplied where an instance was "
"required?" % _safe_cls_name(obj)
)
UnmappedError.__init__(self, msg)
def __reduce__(self):
return self.__class__, (None, self.args[0])
class UnmappedClassError(UnmappedError):
"""An mapping operation was requested for an unknown class."""
def __init__(self, cls, msg=None):
if not msg:
msg = _default_unmapped(cls)
UnmappedError.__init__(self, msg)
def __reduce__(self):
return self.__class__, (None, self.args[0])
class ObjectDeletedError(sa_exc.InvalidRequestError):
"""A refresh operation failed to retrieve the database
row corresponding to an object's known primary key identity.
A refresh operation proceeds when an expired attribute is
accessed on an object, or when :meth:`_query.Query.get` is
used to retrieve an object which is, upon retrieval, detected
as expired. A SELECT is emitted for the target row
based on primary key; if no row is returned, this
exception is raised.
The true meaning of this exception is simply that
no row exists for the primary key identifier associated
with a persistent object. The row may have been
deleted, or in some cases the primary key updated
to a new value, outside of the ORM's management of the target
object.
"""
@util.dependencies("sqlalchemy.orm.base")
def __init__(self, base, state, msg=None):
if not msg:
msg = (
"Instance '%s' has been deleted, or its "
"row is otherwise not present." % base.state_str(state)
)
sa_exc.InvalidRequestError.__init__(self, msg)
def __reduce__(self):
return self.__class__, (None, self.args[0])
class UnmappedColumnError(sa_exc.InvalidRequestError):
"""Mapping operation was requested on an unknown column."""
class NoResultFound(sa_exc.InvalidRequestError):
"""A database result was required but none was found."""
class MultipleResultsFound(sa_exc.InvalidRequestError):
"""A single database result was required but more than one were found."""
class LoaderStrategyException(sa_exc.InvalidRequestError):
"""A loader strategy for an attribute does not exist."""
def __init__(
self,
applied_to_property_type,
requesting_property,
applies_to,
actual_strategy_type,
strategy_key,
):
if actual_strategy_type is None:
sa_exc.InvalidRequestError.__init__(
self,
"Can't find strategy %s for %s"
% (strategy_key, requesting_property),
)
else:
sa_exc.InvalidRequestError.__init__(
self,
'Can\'t apply "%s" strategy to property "%s", '
'which is a "%s"; this loader strategy is intended '
'to be used with a "%s".'
% (
util.clsname_as_plain_name(actual_strategy_type),
requesting_property,
util.clsname_as_plain_name(applied_to_property_type),
util.clsname_as_plain_name(applies_to),
),
)
def _safe_cls_name(cls):
try:
cls_name = ".".join((cls.__module__, cls.__name__))
except AttributeError:
cls_name = getattr(cls, "__name__", None)
if cls_name is None:
cls_name = repr(cls)
return cls_name
@util.dependencies("sqlalchemy.orm.base")
def _default_unmapped(base, cls):
try:
mappers = base.manager_of_class(cls).mappers
except NO_STATE:
mappers = {}
except TypeError:
mappers = {}
name = _safe_cls_name(cls)
if not mappers:
return "Class '%s' is not mapped" % name
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/collections.py
|
# orm/collections.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for collections of mapped entities.
The collections package supplies the machinery used to inform the ORM of
collection membership changes. An instrumentation via decoration approach is
used, allowing arbitrary types (including built-ins) to be used as entity
collections without requiring inheritance from a base class.
Instrumentation decoration relays membership change events to the
:class:`.CollectionAttributeImpl` that is currently managing the collection.
The decorators observe function call arguments and return values, tracking
entities entering or leaving the collection. Two decorator approaches are
provided. One is a bundle of generic decorators that map function arguments
and return values to events::
from sqlalchemy.orm.collections import collection
class MyClass(object):
# ...
@collection.adds(1)
def store(self, item):
self.data.append(item)
@collection.removes_return()
def pop(self):
return self.data.pop()
The second approach is a bundle of targeted decorators that wrap appropriate
append and remove notifiers around the mutation methods present in the
standard Python ``list``, ``set`` and ``dict`` interfaces. These could be
specified in terms of generic decorator recipes, but are instead hand-tooled
for increased efficiency. The targeted decorators occasionally implement
adapter-like behavior, such as mapping bulk-set methods (``extend``,
``update``, ``__setslice__``, etc.) into the series of atomic mutation events
that the ORM requires.
The targeted decorators are used internally for automatic instrumentation of
entity collection classes. Every collection class goes through a
transformation process roughly like so:
1. If the class is a built-in, substitute a trivial sub-class
2. Is this class already instrumented?
3. Add in generic decorators
4. Sniff out the collection interface through duck-typing
5. Add targeted decoration to any undecorated interface method
This process modifies the class at runtime, decorating methods and adding some
bookkeeping properties. This isn't possible (or desirable) for built-in
classes like ``list``, so trivial sub-classes are substituted to hold
decoration::
class InstrumentedList(list):
pass
Collection classes can be specified in ``relationship(collection_class=)`` as
types or a function that returns an instance. Collection classes are
inspected and instrumented during the mapper compilation phase. The
collection_class callable will be executed once to produce a specimen
instance, and the type of that specimen will be instrumented. Functions that
return built-in types like ``lists`` will be adapted to produce instrumented
instances.
When extending a known type like ``list``, additional decorations are
generally not needed. Odds are, the extension method will delegate to a
method that's already instrumented. For example::
class QueueIsh(list):
def push(self, item):
self.append(item)
def shift(self):
return self.pop(0)
There's no need to decorate these methods. ``append`` and ``pop`` are already
instrumented as part of the ``list`` interface. Decorating them would fire
duplicate events, which should be avoided.
The targeted decoration tries not to rely on other methods in the underlying
collection class, but some are unavoidable. Many depend on 'read' methods
being present to properly instrument a 'write', for example, ``__setitem__``
needs ``__getitem__``. "Bulk" methods like ``update`` and ``extend`` may also
be reimplemented in terms of atomic appends and removes, so the ``extend``
decoration will actually perform many ``append`` operations and not call the
underlying method at all.
Tight control over bulk operation and the firing of events is also possible by
implementing the instrumentation internally in your methods. The basic
instrumentation package works under the general assumption that collection
mutation will not raise unusual exceptions. If you want to closely
orchestrate append and remove events with exception management, internal
instrumentation may be the answer. Within your method,
``collection_adapter(self)`` will retrieve an object that you can use for
explicit control over triggering append and remove events.
The owning object and :class:`.CollectionAttributeImpl` are also reachable
through the adapter, allowing for some very sophisticated behavior.
"""
import operator
import weakref
from sqlalchemy.util.compat import inspect_getfullargspec
from . import base
from .. import exc as sa_exc
from .. import util
from ..sql import expression
__all__ = [
"collection",
"collection_adapter",
"mapped_collection",
"column_mapped_collection",
"attribute_mapped_collection",
]
__instrumentation_mutex = util.threading.Lock()
class _PlainColumnGetter(object):
"""Plain column getter, stores collection of Column objects
directly.
Serializes to a :class:`._SerializableColumnGetterV2`
which has more expensive __call__() performance
and some rare caveats.
"""
def __init__(self, cols):
self.cols = cols
self.composite = len(cols) > 1
def __reduce__(self):
return _SerializableColumnGetterV2._reduce_from_cols(self.cols)
def _cols(self, mapper):
return self.cols
def __call__(self, value):
state = base.instance_state(value)
m = base._state_mapper(state)
key = [
m._get_state_attr_by_column(state, state.dict, col)
for col in self._cols(m)
]
if self.composite:
return tuple(key)
else:
return key[0]
class _SerializableColumnGetter(object):
"""Column-based getter used in version 0.7.6 only.
Remains here for pickle compatibility with 0.7.6.
"""
def __init__(self, colkeys):
self.colkeys = colkeys
self.composite = len(colkeys) > 1
def __reduce__(self):
return _SerializableColumnGetter, (self.colkeys,)
def __call__(self, value):
state = base.instance_state(value)
m = base._state_mapper(state)
key = [
m._get_state_attr_by_column(
state, state.dict, m.mapped_table.columns[k]
)
for k in self.colkeys
]
if self.composite:
return tuple(key)
else:
return key[0]
class _SerializableColumnGetterV2(_PlainColumnGetter):
"""Updated serializable getter which deals with
multi-table mapped classes.
Two extremely unusual cases are not supported.
Mappings which have tables across multiple metadata
objects, or which are mapped to non-Table selectables
linked across inheriting mappers may fail to function
here.
"""
def __init__(self, colkeys):
self.colkeys = colkeys
self.composite = len(colkeys) > 1
def __reduce__(self):
return self.__class__, (self.colkeys,)
@classmethod
def _reduce_from_cols(cls, cols):
def _table_key(c):
if not isinstance(c.table, expression.TableClause):
return None
else:
return c.table.key
colkeys = [(c.key, _table_key(c)) for c in cols]
return _SerializableColumnGetterV2, (colkeys,)
def _cols(self, mapper):
cols = []
metadata = getattr(mapper.local_table, "metadata", None)
for (ckey, tkey) in self.colkeys:
if tkey is None or metadata is None or tkey not in metadata:
cols.append(mapper.local_table.c[ckey])
else:
cols.append(metadata.tables[tkey].c[ckey])
return cols
def column_mapped_collection(mapping_spec):
"""A dictionary-based collection type with column-based keying.
Returns a :class:`.MappedCollection` factory with a keying function
generated from mapping_spec, which may be a Column or a sequence
of Columns.
The key value must be immutable for the lifetime of the object. You
can not, for example, map on foreign key values if those key values will
change during the session, i.e. from None to a database-assigned integer
after a session flush.
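    For example, a brief sketch, assuming a mapped ``Note`` class whose
    table ``note_table`` has a ``keyword`` column::

        notes = relationship(
            "Note",
            collection_class=column_mapped_collection(note_table.c.keyword),
        )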
"""
cols = [
expression._only_column_elements(q, "mapping_spec")
for q in util.to_list(mapping_spec)
]
keyfunc = _PlainColumnGetter(cols)
return lambda: MappedCollection(keyfunc)
class _SerializableAttrGetter(object):
def __init__(self, name):
self.name = name
self.getter = operator.attrgetter(name)
def __call__(self, target):
return self.getter(target)
def __reduce__(self):
return _SerializableAttrGetter, (self.name,)
def attribute_mapped_collection(attr_name):
"""A dictionary-based collection type with attribute-based keying.
Returns a :class:`.MappedCollection` factory with a keying based on the
'attr_name' attribute of entities in the collection, where ``attr_name``
is the string name of the attribute.
The key value must be immutable for the lifetime of the object. You
can not, for example, map on foreign key values if those key values will
change during the session, i.e. from None to a database-assigned integer
after a session flush.
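    For example, a brief sketch, assuming a mapped ``Note`` class with a
    ``keyword`` attribute::

        notes = relationship(
            "Note",
            collection_class=attribute_mapped_collection("keyword"),
        )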
"""
getter = _SerializableAttrGetter(attr_name)
return lambda: MappedCollection(getter)
def mapped_collection(keyfunc):
"""A dictionary-based collection type with arbitrary keying.
Returns a :class:`.MappedCollection` factory with a keying function
generated from keyfunc, a callable that takes an entity and returns a
key value.
The key value must be immutable for the lifetime of the object. You
can not, for example, map on foreign key values if those key values will
change during the session, i.e. from None to a database-assigned integer
after a session flush.
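    For example, a brief sketch using an assumed ``Note`` class::

        notes = relationship(
            "Note",
            collection_class=mapped_collection(lambda note: note.text[:10]),
        )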
"""
return lambda: MappedCollection(keyfunc)
class collection(object):
"""Decorators for entity collection classes.
The decorators fall into two groups: annotations and interception recipes.
The annotating decorators (appender, remover, iterator, linker, converter,
internally_instrumented) indicate the method's purpose and take no
arguments. They are not written with parens::
@collection.appender
def append(self, append): ...
The recipe decorators all require parens, even those that take no
arguments::
@collection.adds('entity')
def insert(self, position, entity): ...
@collection.removes_return()
def popitem(self): ...
"""
# Bundled as a class solely for ease of use: packaging, doc strings,
# importability.
@staticmethod
def appender(fn):
"""Tag the method as the collection appender.
The appender method is called with one positional argument: the value
to append. The method will be automatically decorated with 'adds(1)'
if not already decorated::
@collection.appender
def add(self, append): ...
# or, equivalently
@collection.appender
@collection.adds(1)
def add(self, append): ...
# for mapping type, an 'append' may kick out a previous value
# that occupies that slot. consider d['a'] = 'foo'- any previous
# value in d['a'] is discarded.
@collection.appender
@collection.replaces(1)
def add(self, entity):
key = some_key_func(entity)
previous = None
if key in self:
previous = self[key]
self[key] = entity
return previous
If the value to append is not allowed in the collection, you may
raise an exception. Something to remember is that the appender
will be called for each object mapped by a database query. If the
database contains rows that violate your collection semantics, you
will need to get creative to fix the problem, as access via the
collection will not work.
If the appender method is internally instrumented, you must also
receive the keyword argument '_sa_initiator' and ensure its
promulgation to collection events.
"""
fn._sa_instrument_role = "appender"
return fn
@staticmethod
def remover(fn):
"""Tag the method as the collection remover.
The remover method is called with one positional argument: the value
to remove. The method will be automatically decorated with
:meth:`removes_return` if not already decorated::
@collection.remover
def zap(self, entity): ...
# or, equivalently
@collection.remover
@collection.removes_return()
            def zap(self, entity): ...
If the value to remove is not present in the collection, you may
raise an exception or return None to ignore the error.
If the remove method is internally instrumented, you must also
receive the keyword argument '_sa_initiator' and ensure its
promulgation to collection events.
"""
fn._sa_instrument_role = "remover"
return fn
@staticmethod
def iterator(fn):
"""Tag the method as the collection remover.
The iterator method is called with no arguments. It is expected to
return an iterator over all collection members::
@collection.iterator
def __iter__(self): ...
"""
fn._sa_instrument_role = "iterator"
return fn
@staticmethod
def internally_instrumented(fn):
"""Tag the method as instrumented.
This tag will prevent any decoration from being applied to the
method. Use this if you are orchestrating your own calls to
:func:`.collection_adapter` in one of the basic SQLAlchemy
interface methods, or to prevent an automatic ABC method
decoration from wrapping your implementation::
# normally an 'extend' method on a list-like class would be
# automatically intercepted and re-implemented in terms of
# SQLAlchemy events and append(). your implementation will
# never be called, unless:
@collection.internally_instrumented
def extend(self, items): ...
"""
fn._sa_instrumented = True
return fn
@staticmethod
@util.deprecated(
"1.0",
"The :meth:`.collection.linker` handler is deprecated and will "
"be removed in a future release. Please refer to the "
":meth:`.AttributeEvents.init_collection` "
"and :meth:`.AttributeEvents.dispose_collection` event handlers. ",
)
def linker(fn):
"""Tag the method as a "linked to attribute" event handler.
This optional event handler will be called when the collection class
is linked to or unlinked from the InstrumentedAttribute. It is
invoked immediately after the '_sa_adapter' property is set on
the instance. A single argument is passed: the collection adapter
that has been linked, or None if unlinking.
"""
fn._sa_instrument_role = "linker"
return fn
link = linker
"""Synonym for :meth:`.collection.linker`.
.. deprecated:: 1.0 - :meth:`.collection.link` is deprecated and will be
removed in a future release.
"""
@staticmethod
@util.deprecated(
"1.3",
"The :meth:`.collection.converter` handler is deprecated and will "
"be removed in a future release. Please refer to the "
":class:`.AttributeEvents.bulk_replace` listener interface in "
"conjunction with the :func:`.event.listen` function.",
)
def converter(fn):
"""Tag the method as the collection converter.
This optional method will be called when a collection is being
replaced entirely, as in::
myobj.acollection = [newvalue1, newvalue2]
The converter method will receive the object being assigned and should
return an iterable of values suitable for use by the ``appender``
        method. A converter must not assign values or mutate the collection;
        its sole job is to adapt the value the user provides into an iterable
        of values for the ORM's use.
        The default converter implementation will use duck-typing to do the
        conversion. A dict-like collection will be converted into an iterable
of dictionary values, and other types will simply be iterated::
@collection.converter
def convert(self, other): ...
If the duck-typing of the object does not match the type of this
collection, a TypeError is raised.
Supply an implementation of this method if you want to expand the
range of possible types that can be assigned in bulk or perform
validation on the values about to be assigned.
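        For example, a hypothetical sketch that also accepts a single
        instance in addition to iterables (for illustration only)::

            @collection.converter
            def convert(self, other):
                # duck-type: treat non-iterables as a single member
                if hasattr(other, "__iter__"):
                    return iter(other)
                return iter([other])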
"""
fn._sa_instrument_role = "converter"
return fn
@staticmethod
def adds(arg):
"""Mark the method as adding an entity to the collection.
Adds "add to collection" handling to the method. The decorator
argument indicates which method argument holds the SQLAlchemy-relevant
value. Arguments can be specified positionally (i.e. integer) or by
name::
@collection.adds(1)
def push(self, item): ...
@collection.adds('entity')
def do_stuff(self, thing, entity=None): ...
"""
def decorator(fn):
fn._sa_instrument_before = ("fire_append_event", arg)
return fn
return decorator
@staticmethod
def replaces(arg):
"""Mark the method as replacing an entity in the collection.
Adds "add to collection" and "remove from collection" handling to
the method. The decorator argument indicates which method argument
        holds the SQLAlchemy-relevant value to be added, and the return
        value, if any, will be considered the value to remove.
Arguments can be specified positionally (i.e. integer) or by name::
@collection.replaces(2)
def __setitem__(self, index, item): ...
"""
def decorator(fn):
fn._sa_instrument_before = ("fire_append_event", arg)
fn._sa_instrument_after = "fire_remove_event"
return fn
return decorator
@staticmethod
def removes(arg):
"""Mark the method as removing an entity in the collection.
Adds "remove from collection" handling to the method. The decorator
argument indicates which method argument holds the SQLAlchemy-relevant
value to be removed. Arguments can be specified positionally (i.e.
integer) or by name::
@collection.removes(1)
def zap(self, item): ...
For methods where the value to remove is not known at call-time, use
collection.removes_return.
"""
def decorator(fn):
fn._sa_instrument_before = ("fire_remove_event", arg)
return fn
return decorator
@staticmethod
def removes_return():
"""Mark the method as removing an entity in the collection.
Adds "remove from collection" handling to the method. The return
value of the method, if any, is considered the value to remove. The
method arguments are not inspected::
@collection.removes_return()
def pop(self): ...
For methods where the value to remove is known at call-time, use
        collection.removes.
"""
def decorator(fn):
fn._sa_instrument_after = "fire_remove_event"
return fn
return decorator
collection_adapter = operator.attrgetter("_sa_adapter")
"""Fetch the :class:`.CollectionAdapter` for a collection."""
class CollectionAdapter(object):
"""Bridges between the ORM and arbitrary Python collections.
Proxies base-level collection operations (append, remove, iterate)
to the underlying Python collection, and emits add/remove events for
entities entering or leaving the collection.
The ORM uses :class:`.CollectionAdapter` exclusively for interaction with
entity collections.
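    For example, a minimal sketch, assuming an already-instrumented
    collection on a persistent object::

        adapter = collection_adapter(someobj.addresses)
        adapter.append_with_event(some_address)  # fires the ORM append event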
"""
__slots__ = (
"attr",
"_key",
"_data",
"owner_state",
"_converter",
"invalidated",
)
def __init__(self, attr, owner_state, data):
self.attr = attr
self._key = attr.key
self._data = weakref.ref(data)
self.owner_state = owner_state
data._sa_adapter = self
self._converter = data._sa_converter
self.invalidated = False
def _warn_invalidated(self):
util.warn("This collection has been invalidated.")
@property
def data(self):
"The entity collection being adapted."
return self._data()
@property
def _referenced_by_owner(self):
"""return True if the owner state still refers to this collection.
This will return False within a bulk replace operation,
where this collection is the one being replaced.
"""
return self.owner_state.dict[self._key] is self._data()
def bulk_appender(self):
return self._data()._sa_appender
def append_with_event(self, item, initiator=None):
"""Add an entity to the collection, firing mutation events."""
self._data()._sa_appender(item, _sa_initiator=initiator)
def append_without_event(self, item):
"""Add or restore an entity to the collection, firing no events."""
self._data()._sa_appender(item, _sa_initiator=False)
def append_multiple_without_event(self, items):
"""Add or restore an entity to the collection, firing no events."""
appender = self._data()._sa_appender
for item in items:
appender(item, _sa_initiator=False)
def bulk_remover(self):
return self._data()._sa_remover
def remove_with_event(self, item, initiator=None):
"""Remove an entity from the collection, firing mutation events."""
self._data()._sa_remover(item, _sa_initiator=initiator)
def remove_without_event(self, item):
"""Remove an entity from the collection, firing no events."""
self._data()._sa_remover(item, _sa_initiator=False)
def clear_with_event(self, initiator=None):
"""Empty the collection, firing a mutation event for each entity."""
remover = self._data()._sa_remover
for item in list(self):
remover(item, _sa_initiator=initiator)
def clear_without_event(self):
"""Empty the collection, firing no events."""
remover = self._data()._sa_remover
for item in list(self):
remover(item, _sa_initiator=False)
def __iter__(self):
"""Iterate over entities in the collection."""
return iter(self._data()._sa_iterator())
def __len__(self):
"""Count entities in the collection."""
return len(list(self._data()._sa_iterator()))
def __bool__(self):
return True
__nonzero__ = __bool__
def fire_append_event(self, item, initiator=None):
"""Notify that a entity has entered the collection.
Initiator is a token owned by the InstrumentedAttribute that
initiated the membership mutation, and should be left as None
unless you are passing along an initiator value from a chained
operation.
"""
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
return self.attr.fire_append_event(
self.owner_state, self.owner_state.dict, item, initiator
)
else:
return item
def fire_remove_event(self, item, initiator=None):
"""Notify that a entity has been removed from the collection.
Initiator is the InstrumentedAttribute that initiated the membership
mutation, and should be left as None unless you are passing along
an initiator value from a chained operation.
"""
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
self.attr.fire_remove_event(
self.owner_state, self.owner_state.dict, item, initiator
)
def fire_pre_remove_event(self, initiator=None):
"""Notify that an entity is about to be removed from the collection.
Only called if the entity cannot be removed after calling
fire_remove_event().
"""
if self.invalidated:
self._warn_invalidated()
self.attr.fire_pre_remove_event(
self.owner_state, self.owner_state.dict, initiator=initiator
)
def __getstate__(self):
return {
"key": self._key,
"owner_state": self.owner_state,
"owner_cls": self.owner_state.class_,
"data": self.data,
"invalidated": self.invalidated,
}
def __setstate__(self, d):
self._key = d["key"]
self.owner_state = d["owner_state"]
self._data = weakref.ref(d["data"])
self._converter = d["data"]._sa_converter
d["data"]._sa_adapter = self
self.invalidated = d["invalidated"]
self.attr = getattr(d["owner_cls"], self._key).impl
def bulk_replace(values, existing_adapter, new_adapter, initiator=None):
"""Load a new collection, firing events based on prior like membership.
Appends instances in ``values`` onto the ``new_adapter``. Events will be
fired for any instance not present in the ``existing_adapter``. Any
instances in ``existing_adapter`` not present in ``values`` will have
remove events fired upon them.
:param values: An iterable of collection member instances
:param existing_adapter: A :class:`.CollectionAdapter` of
instances to be replaced
:param new_adapter: An empty :class:`.CollectionAdapter`
to load with ``values``
"""
assert isinstance(values, list)
idset = util.IdentitySet
existing_idset = idset(existing_adapter or ())
constants = existing_idset.intersection(values or ())
additions = idset(values or ()).difference(constants)
removals = existing_idset.difference(constants)
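    # e.g. given values=[a, b] and an existing collection holding [b, c]:
    # constants={b}, additions={a}, removals={c}; "a" gets an append
    # event, "c" gets a remove event, "b" is re-appended with no event.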
appender = new_adapter.bulk_appender()
for member in values or ():
if member in additions:
appender(member, _sa_initiator=initiator)
elif member in constants:
appender(member, _sa_initiator=False)
if existing_adapter:
for member in removals:
existing_adapter.fire_remove_event(member, initiator=initiator)
def prepare_instrumentation(factory):
"""Prepare a callable for future use as a collection class factory.
Given a collection class factory (either a type or no-arg callable),
return another factory that will produce compatible instances when
called.
This function is responsible for converting collection_class=list
into the run-time behavior of collection_class=InstrumentedList.
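    For example, a brief sketch of the run-time effect::

        factory = prepare_instrumentation(list)
        collection = factory()
        # type(collection) is InstrumentedList, not list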
"""
# Convert a builtin to 'Instrumented*'
if factory in __canned_instrumentation:
factory = __canned_instrumentation[factory]
# Create a specimen
cls = type(factory())
# Did factory callable return a builtin?
if cls in __canned_instrumentation:
# Wrap it so that it returns our 'Instrumented*'
factory = __converting_factory(cls, factory)
cls = factory()
# Instrument the class if needed.
if __instrumentation_mutex.acquire():
try:
if getattr(cls, "_sa_instrumented", None) != id(cls):
_instrument_class(cls)
finally:
__instrumentation_mutex.release()
return factory
def __converting_factory(specimen_cls, original_factory):
"""Return a wrapper that converts a "canned" collection like
set, dict, list into the Instrumented* version.
"""
instrumented_cls = __canned_instrumentation[specimen_cls]
def wrapper():
collection = original_factory()
return instrumented_cls(collection)
# often flawed but better than nothing
wrapper.__name__ = "%sWrapper" % original_factory.__name__
wrapper.__doc__ = original_factory.__doc__
return wrapper
def _instrument_class(cls):
"""Modify methods in a class and install instrumentation."""
# In the normal call flow, a request for any of the 3 basic collection
# types is transformed into one of our trivial subclasses
# (e.g. InstrumentedList). Catch anything else that sneaks in here...
if cls.__module__ == "__builtin__":
raise sa_exc.ArgumentError(
"Can not instrument a built-in type. Use a "
"subclass, even a trivial one."
)
roles, methods = _locate_roles_and_methods(cls)
_setup_canned_roles(cls, roles, methods)
_assert_required_roles(cls, roles, methods)
_set_collection_attributes(cls, roles, methods)
def _locate_roles_and_methods(cls):
"""search for _sa_instrument_role-decorated methods in
method resolution order, assign to roles.
"""
roles = {}
methods = {}
for supercls in cls.__mro__:
for name, method in vars(supercls).items():
if not util.callable(method):
continue
# note role declarations
if hasattr(method, "_sa_instrument_role"):
role = method._sa_instrument_role
assert role in (
"appender",
"remover",
"iterator",
"linker",
"converter",
)
roles.setdefault(role, name)
# transfer instrumentation requests from decorated function
# to the combined queue
before, after = None, None
if hasattr(method, "_sa_instrument_before"):
op, argument = method._sa_instrument_before
assert op in ("fire_append_event", "fire_remove_event")
before = op, argument
if hasattr(method, "_sa_instrument_after"):
op = method._sa_instrument_after
assert op in ("fire_append_event", "fire_remove_event")
after = op
if before:
methods[name] = before + (after,)
elif after:
methods[name] = None, None, after
return roles, methods
def _setup_canned_roles(cls, roles, methods):
"""see if this class has "canned" roles based on a known
collection type (dict, set, list). Apply those roles
as needed to the "roles" dictionary, and also
prepare "decorator" methods
"""
collection_type = util.duck_type_collection(cls)
if collection_type in __interfaces:
canned_roles, decorators = __interfaces[collection_type]
for role, name in canned_roles.items():
roles.setdefault(role, name)
# apply ABC auto-decoration to methods that need it
for method, decorator in decorators.items():
fn = getattr(cls, method, None)
if (
fn
and method not in methods
and not hasattr(fn, "_sa_instrumented")
):
setattr(cls, method, decorator(fn))
def _assert_required_roles(cls, roles, methods):
"""ensure all roles are present, and apply implicit instrumentation if
needed
"""
if "appender" not in roles or not hasattr(cls, roles["appender"]):
raise sa_exc.ArgumentError(
"Type %s must elect an appender method to be "
"a collection class" % cls.__name__
)
elif roles["appender"] not in methods and not hasattr(
getattr(cls, roles["appender"]), "_sa_instrumented"
):
methods[roles["appender"]] = ("fire_append_event", 1, None)
if "remover" not in roles or not hasattr(cls, roles["remover"]):
raise sa_exc.ArgumentError(
"Type %s must elect a remover method to be "
"a collection class" % cls.__name__
)
elif roles["remover"] not in methods and not hasattr(
getattr(cls, roles["remover"]), "_sa_instrumented"
):
methods[roles["remover"]] = ("fire_remove_event", 1, None)
if "iterator" not in roles or not hasattr(cls, roles["iterator"]):
raise sa_exc.ArgumentError(
"Type %s must elect an iterator method to be "
"a collection class" % cls.__name__
)
def _set_collection_attributes(cls, roles, methods):
"""apply ad-hoc instrumentation from decorators, class-level defaults
and implicit role declarations
"""
for method_name, (before, argument, after) in methods.items():
setattr(
cls,
method_name,
_instrument_membership_mutator(
getattr(cls, method_name), before, argument, after
),
)
# intern the role map
for role, method_name in roles.items():
setattr(cls, "_sa_%s" % role, getattr(cls, method_name))
cls._sa_adapter = None
if not hasattr(cls, "_sa_converter"):
cls._sa_converter = None
cls._sa_instrumented = id(cls)
def _instrument_membership_mutator(method, before, argument, after):
"""Route method args and/or return value through the collection
adapter."""
# This isn't smart enough to handle @adds(1) for 'def fn(self, (a, b))'
if before:
fn_args = list(
util.flatten_iterator(inspect_getfullargspec(method)[0])
)
if isinstance(argument, int):
pos_arg = argument
            named_arg = fn_args[argument] if len(fn_args) > argument else None
else:
if argument in fn_args:
pos_arg = fn_args.index(argument)
else:
pos_arg = None
named_arg = argument
del fn_args
def wrapper(*args, **kw):
if before:
if pos_arg is None:
if named_arg not in kw:
raise sa_exc.ArgumentError(
"Missing argument %s" % argument
)
value = kw[named_arg]
else:
if len(args) > pos_arg:
value = args[pos_arg]
elif named_arg in kw:
value = kw[named_arg]
else:
raise sa_exc.ArgumentError(
"Missing argument %s" % argument
)
initiator = kw.pop("_sa_initiator", None)
if initiator is False:
executor = None
else:
executor = args[0]._sa_adapter
if before and executor:
getattr(executor, before)(value, initiator)
if not after or not executor:
return method(*args, **kw)
else:
res = method(*args, **kw)
if res is not None:
getattr(executor, after)(res, initiator)
return res
wrapper._sa_instrumented = True
if hasattr(method, "_sa_instrument_role"):
wrapper._sa_instrument_role = method._sa_instrument_role
wrapper.__name__ = method.__name__
wrapper.__doc__ = method.__doc__
return wrapper
def __set(collection, item, _sa_initiator=None):
"""Run set events.
This event always occurs before the collection is actually mutated.
"""
if _sa_initiator is not False:
executor = collection._sa_adapter
if executor:
item = executor.fire_append_event(item, _sa_initiator)
return item
def __del(collection, item, _sa_initiator=None):
"""Run del events.
This event occurs before the collection is actually mutated, *except*
in the case of a pop operation, in which case it occurs afterwards.
For pop operations, the __before_pop hook is called before the
operation occurs.
"""
if _sa_initiator is not False:
executor = collection._sa_adapter
if executor:
executor.fire_remove_event(item, _sa_initiator)
def __before_pop(collection, _sa_initiator=None):
"""An event which occurs on a before a pop() operation occurs."""
executor = collection._sa_adapter
if executor:
executor.fire_pre_remove_event(_sa_initiator)
def _list_decorators():
"""Tailored instrumentation wrappers for any list-like class."""
def _tidy(fn):
fn._sa_instrumented = True
fn.__doc__ = getattr(list, fn.__name__).__doc__
def append(fn):
def append(self, item, _sa_initiator=None):
item = __set(self, item, _sa_initiator)
fn(self, item)
_tidy(append)
return append
def remove(fn):
def remove(self, value, _sa_initiator=None):
__del(self, value, _sa_initiator)
# testlib.pragma exempt:__eq__
fn(self, value)
_tidy(remove)
return remove
def insert(fn):
def insert(self, index, value):
value = __set(self, value)
fn(self, index, value)
_tidy(insert)
return insert
def __setitem__(fn):
def __setitem__(self, index, value):
if not isinstance(index, slice):
existing = self[index]
if existing is not None:
__del(self, existing)
value = __set(self, value)
fn(self, index, value)
else:
# slice assignment requires __delitem__, insert, __len__
step = index.step or 1
start = index.start or 0
if start < 0:
start += len(self)
if index.stop is not None:
stop = index.stop
else:
stop = len(self)
if stop < 0:
stop += len(self)
if step == 1:
if value is self:
return
for i in range(start, stop, step):
if len(self) > start:
del self[start]
for i, item in enumerate(value):
self.insert(i + start, item)
else:
rng = list(range(start, stop, step))
if len(value) != len(rng):
raise ValueError(
"attempt to assign sequence of size %s to "
"extended slice of size %s"
% (len(value), len(rng))
)
for i, item in zip(rng, value):
self.__setitem__(i, item)
_tidy(__setitem__)
return __setitem__
def __delitem__(fn):
def __delitem__(self, index):
if not isinstance(index, slice):
item = self[index]
__del(self, item)
fn(self, index)
else:
                # slice deletion requires __getslice__ and a slice-grokking
# __getitem__ for stepped deletion
# note: not breaking this into atomic dels
for item in self[index]:
__del(self, item)
fn(self, index)
_tidy(__delitem__)
return __delitem__
if util.py2k:
def __setslice__(fn):
def __setslice__(self, start, end, values):
for value in self[start:end]:
__del(self, value)
values = [__set(self, value) for value in values]
fn(self, start, end, values)
_tidy(__setslice__)
return __setslice__
def __delslice__(fn):
def __delslice__(self, start, end):
for value in self[start:end]:
__del(self, value)
fn(self, start, end)
_tidy(__delslice__)
return __delslice__
def extend(fn):
def extend(self, iterable):
for value in iterable:
self.append(value)
_tidy(extend)
return extend
def __iadd__(fn):
def __iadd__(self, iterable):
# list.__iadd__ takes any iterable and seems to let TypeError
# raise as-is instead of returning NotImplemented
for value in iterable:
self.append(value)
return self
_tidy(__iadd__)
return __iadd__
def pop(fn):
def pop(self, index=-1):
__before_pop(self)
item = fn(self, index)
__del(self, item)
return item
_tidy(pop)
return pop
if not util.py2k:
def clear(fn):
            def clear(self):
for item in self:
__del(self, item)
fn(self)
_tidy(clear)
return clear
# __imul__ : not wrapping this. all members of the collection are already
# present, so no need to fire appends... wrapping it with an explicit
# decorator is still possible, so events on *= can be had if they're
# desired. hard to imagine a use case for __imul__, though.
l = locals().copy()
l.pop("_tidy")
return l
def _dict_decorators():
"""Tailored instrumentation wrappers for any dict-like mapping class."""
def _tidy(fn):
fn._sa_instrumented = True
fn.__doc__ = getattr(dict, fn.__name__).__doc__
Unspecified = util.symbol("Unspecified")
def __setitem__(fn):
def __setitem__(self, key, value, _sa_initiator=None):
if key in self:
__del(self, self[key], _sa_initiator)
value = __set(self, value, _sa_initiator)
fn(self, key, value)
_tidy(__setitem__)
return __setitem__
def __delitem__(fn):
def __delitem__(self, key, _sa_initiator=None):
if key in self:
__del(self, self[key], _sa_initiator)
fn(self, key)
_tidy(__delitem__)
return __delitem__
def clear(fn):
def clear(self):
for key in self:
__del(self, self[key])
fn(self)
_tidy(clear)
return clear
def pop(fn):
def pop(self, key, default=Unspecified):
__before_pop(self)
_to_del = key in self
if default is Unspecified:
item = fn(self, key)
else:
item = fn(self, key, default)
if _to_del:
__del(self, item)
return item
_tidy(pop)
return pop
def popitem(fn):
def popitem(self):
__before_pop(self)
item = fn(self)
__del(self, item[1])
return item
_tidy(popitem)
return popitem
def setdefault(fn):
def setdefault(self, key, default=None):
if key not in self:
self.__setitem__(key, default)
return default
else:
return self.__getitem__(key)
_tidy(setdefault)
return setdefault
def update(fn):
def update(self, __other=Unspecified, **kw):
if __other is not Unspecified:
if hasattr(__other, "keys"):
for key in list(__other):
if key not in self or self[key] is not __other[key]:
self[key] = __other[key]
else:
for key, value in __other:
if key not in self or self[key] is not value:
self[key] = value
for key in kw:
if key not in self or self[key] is not kw[key]:
self[key] = kw[key]
_tidy(update)
return update
l = locals().copy()
l.pop("_tidy")
l.pop("Unspecified")
return l
_set_binop_bases = (set, frozenset)
def _set_binops_check_strict(self, obj):
"""Allow only set, frozenset and self.__class__-derived
objects in binops."""
return isinstance(obj, _set_binop_bases + (self.__class__,))
def _set_binops_check_loose(self, obj):
"""Allow anything set-like to participate in set binops."""
return (
isinstance(obj, _set_binop_bases + (self.__class__,))
or util.duck_type_collection(obj) == set
)
def _set_decorators():
"""Tailored instrumentation wrappers for any set-like class."""
def _tidy(fn):
fn._sa_instrumented = True
fn.__doc__ = getattr(set, fn.__name__).__doc__
Unspecified = util.symbol("Unspecified")
def add(fn):
def add(self, value, _sa_initiator=None):
if value not in self:
value = __set(self, value, _sa_initiator)
# testlib.pragma exempt:__hash__
fn(self, value)
_tidy(add)
return add
def discard(fn):
def discard(self, value, _sa_initiator=None):
# testlib.pragma exempt:__hash__
if value in self:
__del(self, value, _sa_initiator)
# testlib.pragma exempt:__hash__
fn(self, value)
_tidy(discard)
return discard
def remove(fn):
def remove(self, value, _sa_initiator=None):
# testlib.pragma exempt:__hash__
if value in self:
__del(self, value, _sa_initiator)
# testlib.pragma exempt:__hash__
fn(self, value)
_tidy(remove)
return remove
def pop(fn):
def pop(self):
__before_pop(self)
item = fn(self)
# for set in particular, we have no way to access the item
# that will be popped before pop is called.
__del(self, item)
return item
_tidy(pop)
return pop
def clear(fn):
def clear(self):
for item in list(self):
self.remove(item)
_tidy(clear)
return clear
def update(fn):
def update(self, value):
for item in value:
self.add(item)
_tidy(update)
return update
def __ior__(fn):
def __ior__(self, value):
if not _set_binops_check_strict(self, value):
return NotImplemented
for item in value:
self.add(item)
return self
_tidy(__ior__)
return __ior__
def difference_update(fn):
def difference_update(self, value):
for item in value:
self.discard(item)
_tidy(difference_update)
return difference_update
def __isub__(fn):
def __isub__(self, value):
if not _set_binops_check_strict(self, value):
return NotImplemented
for item in value:
self.discard(item)
return self
_tidy(__isub__)
return __isub__
def intersection_update(fn):
def intersection_update(self, other):
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
_tidy(intersection_update)
return intersection_update
def __iand__(fn):
def __iand__(self, other):
if not _set_binops_check_strict(self, other):
return NotImplemented
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
return self
_tidy(__iand__)
return __iand__
def symmetric_difference_update(fn):
def symmetric_difference_update(self, other):
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
_tidy(symmetric_difference_update)
return symmetric_difference_update
def __ixor__(fn):
def __ixor__(self, other):
if not _set_binops_check_strict(self, other):
return NotImplemented
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
return self
_tidy(__ixor__)
return __ixor__
l = locals().copy()
l.pop("_tidy")
l.pop("Unspecified")
return l
class InstrumentedList(list):
"""An instrumented version of the built-in list."""
class InstrumentedSet(set):
"""An instrumented version of the built-in set."""
class InstrumentedDict(dict):
"""An instrumented version of the built-in dict."""
__canned_instrumentation = {
list: InstrumentedList,
set: InstrumentedSet,
dict: InstrumentedDict,
}
__interfaces = {
list: (
{"appender": "append", "remover": "remove", "iterator": "__iter__"},
_list_decorators(),
),
set: (
{"appender": "add", "remover": "remove", "iterator": "__iter__"},
_set_decorators(),
),
# decorators are required for dicts and object collections.
dict: ({"iterator": "values"}, _dict_decorators())
if util.py3k
else ({"iterator": "itervalues"}, _dict_decorators()),
}
class MappedCollection(dict):
"""A basic dictionary-based collection class.
Extends dict with the minimal bag semantics that collection
classes require. ``set`` and ``remove`` are implemented in terms
of a keying function: any callable that takes an object and
returns an object for use as a dictionary key.
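    For example, a brief sketch using a hypothetical ``Note`` class::

        notes = MappedCollection(keyfunc=lambda note: note.keyword)
        notes.set(some_note)  # stored under some_note.keyword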
"""
def __init__(self, keyfunc):
"""Create a new collection with keying provided by keyfunc.
keyfunc may be any callable that takes an object and returns an object
for use as a dictionary key.
The keyfunc will be called every time the ORM needs to add a member by
        value only (such as when loading instances from the database) or
        remove a member. The usual cautions about dictionary keying apply:
``keyfunc(object)`` should return the same output for the life of the
collection. Keying based on mutable properties can result in
unreachable instances "lost" in the collection.
"""
self.keyfunc = keyfunc
@collection.appender
@collection.internally_instrumented
def set(self, value, _sa_initiator=None):
"""Add an item by value, consulting the keyfunc for the key."""
key = self.keyfunc(value)
self.__setitem__(key, value, _sa_initiator)
@collection.remover
@collection.internally_instrumented
def remove(self, value, _sa_initiator=None):
"""Remove an item by value, consulting the keyfunc for the key."""
key = self.keyfunc(value)
# Let self[key] raise if key is not in this collection
# testlib.pragma exempt:__ne__
if self[key] != value:
raise sa_exc.InvalidRequestError(
"Can not remove '%s': collection holds '%s' for key '%s'. "
"Possible cause: is the MappedCollection key function "
"based on mutable properties or properties that only obtain "
"values after flush?" % (value, self[key], key)
)
self.__delitem__(key, _sa_initiator)
# ensure instrumentation is associated with
# these built-in classes; if a user-defined class
# subclasses these and uses @internally_instrumented,
# the superclass is otherwise not instrumented.
# see [ticket:2406].
_instrument_class(MappedCollection)
_instrument_class(InstrumentedList)
_instrument_class(InstrumentedSet)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/dependency.py
|
# orm/dependency.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Relationship dependencies.
"""
from . import attributes
from . import exc
from . import sync
from . import unitofwork
from . import util as mapperutil
from .interfaces import MANYTOMANY
from .interfaces import MANYTOONE
from .interfaces import ONETOMANY
from .. import exc as sa_exc
from .. import sql
from .. import util
class DependencyProcessor(object):
def __init__(self, prop):
self.prop = prop
self.cascade = prop.cascade
self.mapper = prop.mapper
self.parent = prop.parent
self.secondary = prop.secondary
self.direction = prop.direction
self.post_update = prop.post_update
self.passive_deletes = prop.passive_deletes
self.passive_updates = prop.passive_updates
self.enable_typechecks = prop.enable_typechecks
if self.passive_deletes:
self._passive_delete_flag = attributes.PASSIVE_NO_INITIALIZE
else:
self._passive_delete_flag = attributes.PASSIVE_OFF
if self.passive_updates:
self._passive_update_flag = attributes.PASSIVE_NO_INITIALIZE
else:
self._passive_update_flag = attributes.PASSIVE_OFF
self.key = prop.key
if not self.prop.synchronize_pairs:
raise sa_exc.ArgumentError(
"Can't build a DependencyProcessor for relationship %s. "
"No target attributes to populate between parent and "
"child are present" % self.prop
)
@classmethod
def from_relationship(cls, prop):
return _direction_to_processor[prop.direction](prop)
def hasparent(self, state):
"""return True if the given object instance has a parent,
according to the ``InstrumentedAttribute`` handled by this
``DependencyProcessor``.
"""
return self.parent.class_manager.get_impl(self.key).hasparent(state)
def per_property_preprocessors(self, uow):
"""establish actions and dependencies related to a flush.
These actions will operate on all relevant states in
the aggregate.
"""
uow.register_preprocessor(self, True)
def per_property_flush_actions(self, uow):
after_save = unitofwork.ProcessAll(uow, self, False, True)
before_delete = unitofwork.ProcessAll(uow, self, True, True)
parent_saves = unitofwork.SaveUpdateAll(
uow, self.parent.primary_base_mapper
)
child_saves = unitofwork.SaveUpdateAll(
uow, self.mapper.primary_base_mapper
)
parent_deletes = unitofwork.DeleteAll(
uow, self.parent.primary_base_mapper
)
child_deletes = unitofwork.DeleteAll(
uow, self.mapper.primary_base_mapper
)
self.per_property_dependencies(
uow,
parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete,
)
def per_state_flush_actions(self, uow, states, isdelete):
"""establish actions and dependencies related to a flush.
These actions will operate on all relevant states
individually. This occurs only if there are cycles
in the 'aggregated' version of events.
"""
child_base_mapper = self.mapper.primary_base_mapper
child_saves = unitofwork.SaveUpdateAll(uow, child_base_mapper)
child_deletes = unitofwork.DeleteAll(uow, child_base_mapper)
# locate and disable the aggregate processors
# for this dependency
if isdelete:
before_delete = unitofwork.ProcessAll(uow, self, True, True)
before_delete.disabled = True
else:
after_save = unitofwork.ProcessAll(uow, self, False, True)
after_save.disabled = True
# check if the "child" side is part of the cycle
if child_saves not in uow.cycles:
# based on the current dependencies we use, the saves/
# deletes should always be in the 'cycles' collection
# together. if this changes, we will have to break up
# this method a bit more.
assert child_deletes not in uow.cycles
# child side is not part of the cycle, so we will link per-state
# actions to the aggregate "saves", "deletes" actions
child_actions = [(child_saves, False), (child_deletes, True)]
child_in_cycles = False
else:
child_in_cycles = True
# check if the "parent" side is part of the cycle
if not isdelete:
parent_saves = unitofwork.SaveUpdateAll(
uow, self.parent.base_mapper
)
parent_deletes = before_delete = None
if parent_saves in uow.cycles:
parent_in_cycles = True
else:
parent_deletes = unitofwork.DeleteAll(uow, self.parent.base_mapper)
parent_saves = after_save = None
if parent_deletes in uow.cycles:
parent_in_cycles = True
        # now create actions / dependencies for each state.
for state in states:
# detect if there's anything changed or loaded
# by a preprocessor on this state/attribute. In the
# case of deletes we may try to load missing items here as well.
sum_ = state.manager[self.key].impl.get_all_pending(
state,
state.dict,
self._passive_delete_flag
if isdelete
else attributes.PASSIVE_NO_INITIALIZE,
)
if not sum_:
continue
if isdelete:
before_delete = unitofwork.ProcessState(uow, self, True, state)
if parent_in_cycles:
parent_deletes = unitofwork.DeleteState(uow, state)
else:
after_save = unitofwork.ProcessState(uow, self, False, state)
if parent_in_cycles:
parent_saves = unitofwork.SaveUpdateState(uow, state)
if child_in_cycles:
child_actions = []
for child_state, child in sum_:
if child_state not in uow.states:
child_action = (None, None)
else:
(deleted, listonly) = uow.states[child_state]
if deleted:
child_action = (
unitofwork.DeleteState(uow, child_state),
True,
)
else:
child_action = (
unitofwork.SaveUpdateState(uow, child_state),
False,
)
child_actions.append(child_action)
# establish dependencies between our possibly per-state
# parent action and our possibly per-state child action.
for child_action, childisdelete in child_actions:
self.per_state_dependencies(
uow,
parent_saves,
parent_deletes,
child_action,
after_save,
before_delete,
isdelete,
childisdelete,
)
def presort_deletes(self, uowcommit, states):
return False
def presort_saves(self, uowcommit, states):
return False
def process_deletes(self, uowcommit, states):
pass
def process_saves(self, uowcommit, states):
pass
def prop_has_changes(self, uowcommit, states, isdelete):
if not isdelete or self.passive_deletes:
passive = attributes.PASSIVE_NO_INITIALIZE
elif self.direction is MANYTOONE:
passive = attributes.PASSIVE_NO_FETCH_RELATED
else:
passive = attributes.PASSIVE_OFF
for s in states:
# TODO: add a high speed method
# to InstanceState which returns: attribute
# has a non-None value, or had one
history = uowcommit.get_attribute_history(s, self.key, passive)
if history and not history.empty():
return True
else:
return (
states
and not self.prop._is_self_referential
and self.mapper in uowcommit.mappers
)
def _verify_canload(self, state):
if self.prop.uselist and state is None:
raise exc.FlushError(
"Can't flush None value found in "
"collection %s" % (self.prop,)
)
elif state is not None and not self.mapper._canload(
state, allow_subtypes=not self.enable_typechecks
):
if self.mapper._canload(state, allow_subtypes=True):
raise exc.FlushError(
"Attempting to flush an item of type "
"%(x)s as a member of collection "
'"%(y)s". Expected an object of type '
"%(z)s or a polymorphic subclass of "
"this type. If %(x)s is a subclass of "
'%(z)s, configure mapper "%(zm)s" to '
"load this subtype polymorphically, or "
"set enable_typechecks=False to allow "
"any subtype to be accepted for flush. "
% {
"x": state.class_,
"y": self.prop,
"z": self.mapper.class_,
"zm": self.mapper,
}
)
else:
raise exc.FlushError(
"Attempting to flush an item of type "
"%(x)s as a member of collection "
'"%(y)s". Expected an object of type '
"%(z)s or a polymorphic subclass of "
"this type."
% {
"x": state.class_,
"y": self.prop,
"z": self.mapper.class_,
}
)
def _synchronize(self, state, child, associationrow, clearkeys, uowcommit):
raise NotImplementedError()
def _get_reversed_processed_set(self, uow):
if not self.prop._reverse_property:
return None
process_key = tuple(
sorted([self.key] + [p.key for p in self.prop._reverse_property])
)
return uow.memo(("reverse_key", process_key), set)
def _post_update(self, state, uowcommit, related, is_m2o_delete=False):
for x in related:
if not is_m2o_delete or x is not None:
uowcommit.register_post_update(
state, [r for l, r in self.prop.synchronize_pairs]
)
break
def _pks_changed(self, uowcommit, state):
raise NotImplementedError()
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.prop)
class OneToManyDP(DependencyProcessor):
def per_property_dependencies(
self,
uow,
parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete,
):
if self.post_update:
child_post_updates = unitofwork.PostUpdateAll(
uow, self.mapper.primary_base_mapper, False
)
child_pre_updates = unitofwork.PostUpdateAll(
uow, self.mapper.primary_base_mapper, True
)
uow.dependencies.update(
[
(child_saves, after_save),
(parent_saves, after_save),
(after_save, child_post_updates),
(before_delete, child_pre_updates),
(child_pre_updates, parent_deletes),
(child_pre_updates, child_deletes),
]
)
else:
uow.dependencies.update(
[
(parent_saves, after_save),
(after_save, child_saves),
(after_save, child_deletes),
(child_saves, parent_deletes),
(child_deletes, parent_deletes),
(before_delete, child_saves),
(before_delete, child_deletes),
]
)
def per_state_dependencies(
self,
uow,
save_parent,
delete_parent,
child_action,
after_save,
before_delete,
isdelete,
childisdelete,
):
if self.post_update:
child_post_updates = unitofwork.PostUpdateAll(
uow, self.mapper.primary_base_mapper, False
)
child_pre_updates = unitofwork.PostUpdateAll(
uow, self.mapper.primary_base_mapper, True
)
# TODO: this whole block is not covered
# by any tests
if not isdelete:
if childisdelete:
uow.dependencies.update(
[
(child_action, after_save),
(after_save, child_post_updates),
]
)
else:
uow.dependencies.update(
[
(save_parent, after_save),
(child_action, after_save),
(after_save, child_post_updates),
]
)
else:
if childisdelete:
uow.dependencies.update(
[
(before_delete, child_pre_updates),
(child_pre_updates, delete_parent),
]
)
else:
uow.dependencies.update(
[
(before_delete, child_pre_updates),
(child_pre_updates, delete_parent),
]
)
elif not isdelete:
uow.dependencies.update(
[
(save_parent, after_save),
(after_save, child_action),
(save_parent, child_action),
]
)
else:
uow.dependencies.update(
[(before_delete, child_action), (child_action, delete_parent)]
)
def presort_deletes(self, uowcommit, states):
        # head object is being deleted, and we manage its list of
        # child objects; the child objects have to have their
        # foreign key to the parent set to NULL
should_null_fks = (
not self.cascade.delete and not self.passive_deletes == "all"
)
for state in states:
history = uowcommit.get_attribute_history(
state, self.key, self._passive_delete_flag
)
if history:
for child in history.deleted:
if child is not None and self.hasparent(child) is False:
if self.cascade.delete_orphan:
uowcommit.register_object(child, isdelete=True)
else:
uowcommit.register_object(child)
if should_null_fks:
for child in history.unchanged:
if child is not None:
uowcommit.register_object(
child, operation="delete", prop=self.prop
)
def presort_saves(self, uowcommit, states):
children_added = uowcommit.memo(("children_added", self), set)
should_null_fks = (
not self.cascade.delete_orphan
and not self.passive_deletes == "all"
)
for state in states:
pks_changed = self._pks_changed(uowcommit, state)
if not pks_changed or self.passive_updates:
passive = attributes.PASSIVE_NO_INITIALIZE
else:
passive = attributes.PASSIVE_OFF
history = uowcommit.get_attribute_history(state, self.key, passive)
if history:
for child in history.added:
if child is not None:
uowcommit.register_object(
child,
cancel_delete=True,
operation="add",
prop=self.prop,
)
children_added.update(history.added)
for child in history.deleted:
if not self.cascade.delete_orphan:
if should_null_fks:
uowcommit.register_object(
child,
isdelete=False,
operation="delete",
prop=self.prop,
)
elif self.hasparent(child) is False:
uowcommit.register_object(
child,
isdelete=True,
operation="delete",
prop=self.prop,
)
for c, m, st_, dct_ in self.mapper.cascade_iterator(
"delete", child
):
uowcommit.register_object(st_, isdelete=True)
if pks_changed:
if history:
for child in history.unchanged:
if child is not None:
uowcommit.register_object(
child,
False,
self.passive_updates,
operation="pk change",
prop=self.prop,
)
def process_deletes(self, uowcommit, states):
        # head object is being deleted, and we manage its list of
        # child objects; the child objects have to have their foreign
        # key to the parent set to NULL.  this phase can be called
        # safely for any cascade, but is unnecessary if delete cascade
        # is on.
if self.post_update or not self.passive_deletes == "all":
children_added = uowcommit.memo(("children_added", self), set)
for state in states:
history = uowcommit.get_attribute_history(
state, self.key, self._passive_delete_flag
)
if history:
for child in history.deleted:
if (
child is not None
and self.hasparent(child) is False
):
self._synchronize(
state, child, None, True, uowcommit, False
)
if self.post_update and child:
self._post_update(child, uowcommit, [state])
if self.post_update or not self.cascade.delete:
for child in set(history.unchanged).difference(
children_added
):
if child is not None:
self._synchronize(
state, child, None, True, uowcommit, False
)
if self.post_update and child:
self._post_update(
child, uowcommit, [state]
)
# technically, we can even remove each child from the
# collection here too. but this would be a somewhat
# inconsistent behavior since it wouldn't happen
# if the old parent wasn't deleted but child was moved.
def process_saves(self, uowcommit, states):
should_null_fks = (
not self.cascade.delete_orphan
and not self.passive_deletes == "all"
)
for state in states:
history = uowcommit.get_attribute_history(
state, self.key, attributes.PASSIVE_NO_INITIALIZE
)
if history:
for child in history.added:
self._synchronize(
state, child, None, False, uowcommit, False
)
if child is not None and self.post_update:
self._post_update(child, uowcommit, [state])
for child in history.deleted:
if (
should_null_fks
and not self.cascade.delete_orphan
and not self.hasparent(child)
):
self._synchronize(
state, child, None, True, uowcommit, False
)
if self._pks_changed(uowcommit, state):
for child in history.unchanged:
self._synchronize(
state, child, None, False, uowcommit, True
)
def _synchronize(
self, state, child, associationrow, clearkeys, uowcommit, pks_changed
):
source = state
dest = child
self._verify_canload(child)
if dest is None or (
not self.post_update and uowcommit.is_deleted(dest)
):
return
if clearkeys:
sync.clear(dest, self.mapper, self.prop.synchronize_pairs)
else:
sync.populate(
source,
self.parent,
dest,
self.mapper,
self.prop.synchronize_pairs,
uowcommit,
self.passive_updates and pks_changed,
)
def _pks_changed(self, uowcommit, state):
return sync.source_modified(
uowcommit, state, self.parent, self.prop.synchronize_pairs
)
class ManyToOneDP(DependencyProcessor):
def __init__(self, prop):
DependencyProcessor.__init__(self, prop)
for mapper in self.mapper.self_and_descendants:
mapper._dependency_processors.append(DetectKeySwitch(prop))
def per_property_dependencies(
self,
uow,
parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete,
):
if self.post_update:
parent_post_updates = unitofwork.PostUpdateAll(
uow, self.parent.primary_base_mapper, False
)
parent_pre_updates = unitofwork.PostUpdateAll(
uow, self.parent.primary_base_mapper, True
)
uow.dependencies.update(
[
(child_saves, after_save),
(parent_saves, after_save),
(after_save, parent_post_updates),
(after_save, parent_pre_updates),
(before_delete, parent_pre_updates),
(parent_pre_updates, child_deletes),
(parent_pre_updates, parent_deletes),
]
)
else:
uow.dependencies.update(
[
(child_saves, after_save),
(after_save, parent_saves),
(parent_saves, child_deletes),
(parent_deletes, child_deletes),
]
)
def per_state_dependencies(
self,
uow,
save_parent,
delete_parent,
child_action,
after_save,
before_delete,
isdelete,
childisdelete,
):
if self.post_update:
if not isdelete:
parent_post_updates = unitofwork.PostUpdateAll(
uow, self.parent.primary_base_mapper, False
)
if childisdelete:
uow.dependencies.update(
[
(after_save, parent_post_updates),
(parent_post_updates, child_action),
]
)
else:
uow.dependencies.update(
[
(save_parent, after_save),
(child_action, after_save),
(after_save, parent_post_updates),
]
)
else:
parent_pre_updates = unitofwork.PostUpdateAll(
uow, self.parent.primary_base_mapper, True
)
uow.dependencies.update(
[
(before_delete, parent_pre_updates),
(parent_pre_updates, delete_parent),
(parent_pre_updates, child_action),
]
)
elif not isdelete:
if not childisdelete:
uow.dependencies.update(
[(child_action, after_save), (after_save, save_parent)]
)
else:
uow.dependencies.update([(after_save, save_parent)])
else:
if childisdelete:
uow.dependencies.update([(delete_parent, child_action)])
def presort_deletes(self, uowcommit, states):
if self.cascade.delete or self.cascade.delete_orphan:
for state in states:
history = uowcommit.get_attribute_history(
state, self.key, self._passive_delete_flag
)
if history:
if self.cascade.delete_orphan:
todelete = history.sum()
else:
todelete = history.non_deleted()
for child in todelete:
if child is None:
continue
uowcommit.register_object(
child,
isdelete=True,
operation="delete",
prop=self.prop,
)
t = self.mapper.cascade_iterator("delete", child)
for c, m, st_, dct_ in t:
uowcommit.register_object(st_, isdelete=True)
def presort_saves(self, uowcommit, states):
for state in states:
uowcommit.register_object(state, operation="add", prop=self.prop)
if self.cascade.delete_orphan:
history = uowcommit.get_attribute_history(
state, self.key, self._passive_delete_flag
)
if history:
for child in history.deleted:
if self.hasparent(child) is False:
uowcommit.register_object(
child,
isdelete=True,
operation="delete",
prop=self.prop,
)
t = self.mapper.cascade_iterator("delete", child)
for c, m, st_, dct_ in t:
uowcommit.register_object(st_, isdelete=True)
def process_deletes(self, uowcommit, states):
if (
self.post_update
and not self.cascade.delete_orphan
and not self.passive_deletes == "all"
):
# post_update means we have to update our
# row to not reference the child object
# before we can DELETE the row
for state in states:
self._synchronize(state, None, None, True, uowcommit)
if state and self.post_update:
history = uowcommit.get_attribute_history(
state, self.key, self._passive_delete_flag
)
if history:
self._post_update(
state, uowcommit, history.sum(), is_m2o_delete=True
)
def process_saves(self, uowcommit, states):
for state in states:
history = uowcommit.get_attribute_history(
state, self.key, attributes.PASSIVE_NO_INITIALIZE
)
if history:
if history.added:
for child in history.added:
self._synchronize(
state, child, None, False, uowcommit, "add"
)
elif history.deleted:
self._synchronize(
state, None, None, True, uowcommit, "delete"
)
if self.post_update:
self._post_update(state, uowcommit, history.sum())
def _synchronize(
self,
state,
child,
associationrow,
clearkeys,
uowcommit,
operation=None,
):
if state is None or (
not self.post_update and uowcommit.is_deleted(state)
):
return
if (
operation is not None
and child is not None
and not uowcommit.session._contains_state(child)
):
util.warn(
"Object of type %s not in session, %s "
"operation along '%s' won't proceed"
% (mapperutil.state_class_str(child), operation, self.prop)
)
return
if clearkeys or child is None:
sync.clear(state, self.parent, self.prop.synchronize_pairs)
else:
self._verify_canload(child)
sync.populate(
child,
self.mapper,
state,
self.parent,
self.prop.synchronize_pairs,
uowcommit,
False,
)
class DetectKeySwitch(DependencyProcessor):
"""For many-to-one relationships with no one-to-many backref,
searches for parents through the unit of work when a primary
key has changed and updates them.
Theoretically, this approach could be expanded to support transparent
deletion of objects referenced via many-to-one as well, although
the current attribute system doesn't do enough bookkeeping for this
to be efficient.
"""
def per_property_preprocessors(self, uow):
if self.prop._reverse_property:
if self.passive_updates:
return
else:
if False in (
prop.passive_updates
for prop in self.prop._reverse_property
):
return
uow.register_preprocessor(self, False)
def per_property_flush_actions(self, uow):
parent_saves = unitofwork.SaveUpdateAll(uow, self.parent.base_mapper)
after_save = unitofwork.ProcessAll(uow, self, False, False)
uow.dependencies.update([(parent_saves, after_save)])
def per_state_flush_actions(self, uow, states, isdelete):
pass
def presort_deletes(self, uowcommit, states):
pass
def presort_saves(self, uow, states):
if not self.passive_updates:
# for non-passive updates, register in the preprocess stage
# so that mapper save_obj() gets a hold of changes
self._process_key_switches(states, uow)
def prop_has_changes(self, uow, states, isdelete):
if not isdelete and self.passive_updates:
d = self._key_switchers(uow, states)
return bool(d)
return False
def process_deletes(self, uowcommit, states):
assert False
def process_saves(self, uowcommit, states):
        # for passive updates, register objects in the process stage
        # so that we avoid ManyToOneDP registering the object without
        # the listonly flag in its own preprocess stage (which results
        # in UPDATE statements being emitted)
assert self.passive_updates
self._process_key_switches(states, uowcommit)
def _key_switchers(self, uow, states):
switched, notswitched = uow.memo(
("pk_switchers", self), lambda: (set(), set())
)
allstates = switched.union(notswitched)
for s in states:
if s not in allstates:
if self._pks_changed(uow, s):
switched.add(s)
else:
notswitched.add(s)
return switched
def _process_key_switches(self, deplist, uowcommit):
switchers = self._key_switchers(uowcommit, deplist)
if switchers:
# if primary key values have actually changed somewhere, perform
# a linear search through the UOW in search of a parent.
for state in uowcommit.session.identity_map.all_states():
if not issubclass(state.class_, self.parent.class_):
continue
dict_ = state.dict
related = state.get_impl(self.key).get(
state, dict_, passive=self._passive_update_flag
)
if (
related is not attributes.PASSIVE_NO_RESULT
and related is not None
):
if self.prop.uselist:
if not related:
continue
related_obj = related[0]
else:
related_obj = related
related_state = attributes.instance_state(related_obj)
if related_state in switchers:
uowcommit.register_object(
state, False, self.passive_updates
)
sync.populate(
related_state,
self.mapper,
state,
self.parent,
self.prop.synchronize_pairs,
uowcommit,
self.passive_updates,
)
def _pks_changed(self, uowcommit, state):
return bool(state.key) and sync.source_modified(
uowcommit, state, self.mapper, self.prop.synchronize_pairs
)
class ManyToManyDP(DependencyProcessor):
def per_property_dependencies(
self,
uow,
parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete,
):
uow.dependencies.update(
[
(parent_saves, after_save),
(child_saves, after_save),
(after_save, child_deletes),
# a rowswitch on the parent from deleted to saved
# can make this one occur, as the "save" may remove
# an element from the
# "deleted" list before we have a chance to
# process its child rows
(before_delete, parent_saves),
(before_delete, parent_deletes),
(before_delete, child_deletes),
(before_delete, child_saves),
]
)
def per_state_dependencies(
self,
uow,
save_parent,
delete_parent,
child_action,
after_save,
before_delete,
isdelete,
childisdelete,
):
if not isdelete:
if childisdelete:
uow.dependencies.update(
[(save_parent, after_save), (after_save, child_action)]
)
else:
uow.dependencies.update(
[(save_parent, after_save), (child_action, after_save)]
)
else:
uow.dependencies.update(
[(before_delete, child_action), (before_delete, delete_parent)]
)
def presort_deletes(self, uowcommit, states):
# TODO: no tests fail if this whole
# thing is removed !!!!
if not self.passive_deletes:
# if no passive deletes, load history on
# the collection, so that prop_has_changes()
# returns True
for state in states:
uowcommit.get_attribute_history(
state, self.key, self._passive_delete_flag
)
def presort_saves(self, uowcommit, states):
if not self.passive_updates:
# if no passive updates, load history on
# each collection where parent has changed PK,
# so that prop_has_changes() returns True
for state in states:
if self._pks_changed(uowcommit, state):
history = uowcommit.get_attribute_history(
state, self.key, attributes.PASSIVE_OFF
)
if not self.cascade.delete_orphan:
return
# check for child items removed from the collection
# if delete_orphan check is turned on.
for state in states:
history = uowcommit.get_attribute_history(
state, self.key, attributes.PASSIVE_NO_INITIALIZE
)
if history:
for child in history.deleted:
if self.hasparent(child) is False:
uowcommit.register_object(
child,
isdelete=True,
operation="delete",
prop=self.prop,
)
for c, m, st_, dct_ in self.mapper.cascade_iterator(
"delete", child
):
uowcommit.register_object(st_, isdelete=True)
def process_deletes(self, uowcommit, states):
secondary_delete = []
secondary_insert = []
secondary_update = []
processed = self._get_reversed_processed_set(uowcommit)
tmp = set()
for state in states:
            # this history should be cached already, as
            # we loaded it in presort_deletes
history = uowcommit.get_attribute_history(
state, self.key, self._passive_delete_flag
)
if history:
for child in history.non_added():
if child is None or (
processed is not None and (state, child) in processed
):
continue
associationrow = {}
if not self._synchronize(
state,
child,
associationrow,
False,
uowcommit,
"delete",
):
continue
secondary_delete.append(associationrow)
tmp.update((c, state) for c in history.non_added())
if processed is not None:
processed.update(tmp)
self._run_crud(
uowcommit, secondary_insert, secondary_update, secondary_delete
)
def process_saves(self, uowcommit, states):
secondary_delete = []
secondary_insert = []
secondary_update = []
processed = self._get_reversed_processed_set(uowcommit)
tmp = set()
for state in states:
need_cascade_pks = not self.passive_updates and self._pks_changed(
uowcommit, state
)
if need_cascade_pks:
passive = attributes.PASSIVE_OFF
else:
passive = attributes.PASSIVE_NO_INITIALIZE
history = uowcommit.get_attribute_history(state, self.key, passive)
if history:
for child in history.added:
if processed is not None and (state, child) in processed:
continue
associationrow = {}
if not self._synchronize(
state, child, associationrow, False, uowcommit, "add"
):
continue
secondary_insert.append(associationrow)
for child in history.deleted:
if processed is not None and (state, child) in processed:
continue
associationrow = {}
if not self._synchronize(
state,
child,
associationrow,
False,
uowcommit,
"delete",
):
continue
secondary_delete.append(associationrow)
tmp.update((c, state) for c in history.added + history.deleted)
if need_cascade_pks:
for child in history.unchanged:
associationrow = {}
sync.update(
state,
self.parent,
associationrow,
"old_",
self.prop.synchronize_pairs,
)
sync.update(
child,
self.mapper,
associationrow,
"old_",
self.prop.secondary_synchronize_pairs,
)
secondary_update.append(associationrow)
if processed is not None:
processed.update(tmp)
self._run_crud(
uowcommit, secondary_insert, secondary_update, secondary_delete
)
def _run_crud(
self, uowcommit, secondary_insert, secondary_update, secondary_delete
):
connection = uowcommit.transaction.connection(self.mapper)
if secondary_delete:
associationrow = secondary_delete[0]
statement = self.secondary.delete(
sql.and_(
*[
c == sql.bindparam(c.key, type_=c.type)
for c in self.secondary.c
if c.key in associationrow
]
)
)
result = connection.execute(statement, secondary_delete)
if (
result.supports_sane_multi_rowcount()
) and result.rowcount != len(secondary_delete):
raise exc.StaleDataError(
"DELETE statement on table '%s' expected to delete "
"%d row(s); Only %d were matched."
% (
self.secondary.description,
len(secondary_delete),
result.rowcount,
)
)
if secondary_update:
associationrow = secondary_update[0]
statement = self.secondary.update(
sql.and_(
*[
c == sql.bindparam("old_" + c.key, type_=c.type)
for c in self.secondary.c
if c.key in associationrow
]
)
)
result = connection.execute(statement, secondary_update)
if (
result.supports_sane_multi_rowcount()
) and result.rowcount != len(secondary_update):
raise exc.StaleDataError(
"UPDATE statement on table '%s' expected to update "
"%d row(s); Only %d were matched."
% (
self.secondary.description,
len(secondary_update),
result.rowcount,
)
)
if secondary_insert:
statement = self.secondary.insert()
connection.execute(statement, secondary_insert)
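        # note: each execute() call above receives a list of parameter
        # dictionaries, which is dispatched via the DBAPI executemany()
        # path, so all association rows per branch go out in one batch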
def _synchronize(
self, state, child, associationrow, clearkeys, uowcommit, operation
):
# this checks for None if uselist=True
self._verify_canload(child)
# but if uselist=False we get here. If child is None,
# no association row can be generated, so return.
if child is None:
return False
if child is not None and not uowcommit.session._contains_state(child):
if not child.deleted:
util.warn(
"Object of type %s not in session, %s "
"operation along '%s' won't proceed"
% (mapperutil.state_class_str(child), operation, self.prop)
)
return False
sync.populate_dict(
state, self.parent, associationrow, self.prop.synchronize_pairs
)
sync.populate_dict(
child,
self.mapper,
associationrow,
self.prop.secondary_synchronize_pairs,
)
return True
def _pks_changed(self, uowcommit, state):
return sync.source_modified(
uowcommit, state, self.parent, self.prop.synchronize_pairs
)
_direction_to_processor = {
ONETOMANY: OneToManyDP,
MANYTOONE: ManyToOneDP,
MANYTOMANY: ManyToManyDP,
}
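# a sketch of how the unit of work picks a processor for a relationship
# (``prop`` being a RelationshipProperty; this mirrors the
# DependencyProcessor.from_relationship() classmethod):
#
#     processor = _direction_to_processor[prop.direction](prop)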
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/deprecated_interfaces.py
|
# orm/deprecated_interfaces.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .interfaces import EXT_CONTINUE
from .. import event
from .. import util
@util.langhelpers.dependency_for("sqlalchemy.orm.interfaces")
class MapperExtension(object):
"""Base implementation for :class:`_orm.Mapper` event hooks.
.. deprecated:: 0.7
:class:`.MapperExtension` is deprecated and will be removed in a future
release. Please refer to :func:`.event.listen` in conjunction with
the :class:`.MapperEvents` listener interface.
New extension classes subclass :class:`.MapperExtension` and are specified
using the ``extension`` mapper() argument, which is a single
:class:`.MapperExtension` or a list of such::
from sqlalchemy.orm.interfaces import MapperExtension
class MyExtension(MapperExtension):
def before_insert(self, mapper, connection, instance):
print "instance %s before insert !" % instance
m = mapper(User, users_table, extension=MyExtension())
A single mapper can maintain a chain of ``MapperExtension``
objects. When a particular mapping event occurs, the
corresponding method on each ``MapperExtension`` is invoked
serially, and each method has the ability to halt the chain
from proceeding further::
m = mapper(User, users_table, extension=[ext1, ext2, ext3])
Each ``MapperExtension`` method returns the symbol
EXT_CONTINUE by default. This symbol generally means "move
to the next ``MapperExtension`` for processing". For methods
that return objects like translated rows or new object
instances, EXT_CONTINUE means the result of the method
should be ignored. In some cases it's required for a
default mapper activity to be performed, such as adding a
new instance to a result list.
    The symbol EXT_STOP has significance within a chain
    of ``MapperExtension`` objects in that the chain will be stopped
    when this symbol is returned.  Like EXT_CONTINUE, it may also
    indicate in some cases that a default mapper activity will not
    be performed.
"""
@classmethod
def _adapt_instrument_class(cls, self, listener):
cls._adapt_listener_methods(self, listener, ("instrument_class",))
@classmethod
def _adapt_listener(cls, self, listener):
cls._adapt_listener_methods(
self,
listener,
(
"init_instance",
"init_failed",
"reconstruct_instance",
"before_insert",
"after_insert",
"before_update",
"after_update",
"before_delete",
"after_delete",
),
)
@classmethod
def _adapt_listener_methods(cls, self, listener, methods):
for meth in methods:
me_meth = getattr(MapperExtension, meth)
ls_meth = getattr(listener, meth)
if not util.methods_equivalent(me_meth, ls_meth):
util.warn_deprecated(
"MapperExtension.%s is deprecated. The "
"MapperExtension class will be removed in a future "
"release. Please transition to the @event interface, "
"using @event.listens_for(mapped_class, '%s')."
% (meth, meth)
)
if meth == "reconstruct_instance":
def go(ls_meth):
def reconstruct(instance, ctx):
ls_meth(self, instance)
return reconstruct
event.listen(
self.class_manager,
"load",
go(ls_meth),
raw=False,
propagate=True,
)
elif meth == "init_instance":
def go(ls_meth):
def init_instance(instance, args, kwargs):
ls_meth(
self,
self.class_,
self.class_manager.original_init,
instance,
args,
kwargs,
)
return init_instance
event.listen(
self.class_manager,
"init",
go(ls_meth),
raw=False,
propagate=True,
)
elif meth == "init_failed":
def go(ls_meth):
def init_failed(instance, args, kwargs):
util.warn_exception(
ls_meth,
self,
self.class_,
self.class_manager.original_init,
instance,
args,
kwargs,
)
return init_failed
event.listen(
self.class_manager,
"init_failure",
go(ls_meth),
raw=False,
propagate=True,
)
else:
event.listen(
self,
"%s" % meth,
ls_meth,
raw=False,
retval=True,
propagate=True,
)
def instrument_class(self, mapper, class_):
"""Receive a class when the mapper is first constructed, and has
applied instrumentation to the mapped class.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def init_instance(self, mapper, class_, oldinit, instance, args, kwargs):
"""Receive an instance when its constructor is called.
This method is only called during a userland construction of
an object. It is not called when an object is loaded from the
database.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def init_failed(self, mapper, class_, oldinit, instance, args, kwargs):
"""Receive an instance when its constructor has been called,
and raised an exception.
This method is only called during a userland construction of
an object. It is not called when an object is loaded from the
database.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def reconstruct_instance(self, mapper, instance):
"""Receive an object instance after it has been created via
``__new__``, and after initial attribute population has
occurred.
This typically occurs when the instance is created based on
incoming result rows, and is only called once for that
instance's lifetime.
Note that during a result-row load, this method is called upon
the first row received for this instance. Note that some
attributes and collections may or may not be loaded or even
initialized, depending on what's present in the result rows.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def before_insert(self, mapper, connection, instance):
"""Receive an object instance before that instance is inserted
into its table.
This is a good place to set up primary key values and such
that aren't handled otherwise.
Column-based attributes can be modified within this method
which will result in the new value being inserted. However
*no* changes to the overall flush plan can be made, and
manipulation of the ``Session`` will not have the desired effect.
To manipulate the ``Session`` within an extension, use
``SessionExtension``.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def after_insert(self, mapper, connection, instance):
"""Receive an object instance after that instance is inserted.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def before_update(self, mapper, connection, instance):
"""Receive an object instance before that instance is updated.
Note that this method is called for all instances that are marked as
"dirty", even those which have no net changes to their column-based
attributes. An object is marked as dirty when any of its column-based
attributes have a "set attribute" operation called or when any of its
collections are modified. If, at update time, no column-based
attributes have any net changes, no UPDATE statement will be issued.
This means that an instance being sent to before_update is *not* a
guarantee that an UPDATE statement will be issued (although you can
affect the outcome here).
To detect if the column-based attributes on the object have net
changes, and will therefore generate an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
Column-based attributes can be modified within this method
which will result in the new value being updated. However
*no* changes to the overall flush plan can be made, and
manipulation of the ``Session`` will not have the desired effect.
To manipulate the ``Session`` within an extension, use
``SessionExtension``.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def after_update(self, mapper, connection, instance):
"""Receive an object instance after that instance is updated.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def before_delete(self, mapper, connection, instance):
"""Receive an object instance before that instance is deleted.
Note that *no* changes to the overall flush plan can be made
here; and manipulation of the ``Session`` will not have the
desired effect. To manipulate the ``Session`` within an
extension, use ``SessionExtension``.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def after_delete(self, mapper, connection, instance):
"""Receive an object instance after that instance is deleted.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
@util.langhelpers.dependency_for("sqlalchemy.orm.interfaces")
class SessionExtension(object):
"""Base implementation for :class:`.Session` event hooks.
.. deprecated:: 0.7
:class:`.SessionExtension` is deprecated and will be removed in a future
release. Please refer to :func:`.event.listen` in conjunction with
the :class:`.SessionEvents` listener interface.
Subclasses may be installed into a :class:`.Session` (or
:class:`.sessionmaker`) using the ``extension`` keyword
argument::
from sqlalchemy.orm.interfaces import SessionExtension
class MySessionExtension(SessionExtension):
def before_commit(self, session):
print "before commit!"
Session = sessionmaker(extension=MySessionExtension())
The same :class:`.SessionExtension` instance can be used
with any number of sessions.
"""
@classmethod
def _adapt_listener(cls, self, listener):
for meth in [
"before_commit",
"after_commit",
"after_rollback",
"before_flush",
"after_flush",
"after_flush_postexec",
"after_begin",
"after_attach",
"after_bulk_update",
"after_bulk_delete",
]:
me_meth = getattr(SessionExtension, meth)
ls_meth = getattr(listener, meth)
if not util.methods_equivalent(me_meth, ls_meth):
util.warn_deprecated(
"SessionExtension.%s is deprecated. The "
"SessionExtension class will be removed in a future "
"release. Please transition to the @event interface, "
"using @event.listens_for(Session, '%s')." % (meth, meth)
)
event.listen(self, meth, getattr(listener, meth))
def before_commit(self, session):
"""Execute right before commit is called.
Note that this may not be per-flush if a longer running
transaction is ongoing."""
def after_commit(self, session):
"""Execute after a commit has occurred.
Note that this may not be per-flush if a longer running
transaction is ongoing."""
def after_rollback(self, session):
"""Execute after a rollback has occurred.
Note that this may not be per-flush if a longer running
transaction is ongoing."""
def before_flush(self, session, flush_context, instances):
"""Execute before flush process has started.
`instances` is an optional list of objects which were passed to
the ``flush()`` method. """
def after_flush(self, session, flush_context):
"""Execute after flush has completed, but before commit has been
called.
        Note that the session's state is still in pre-flush, i.e. the
        'new', 'dirty', and 'deleted' lists still show pre-flush state,
        as do the history settings on instance attributes."""
def after_flush_postexec(self, session, flush_context):
"""Execute after flush has completed, and after the post-exec
state occurs.
This will be when the 'new', 'dirty', and 'deleted' lists are in
their final state. An actual commit() may or may not have
occurred, depending on whether or not the flush started its own
transaction or participated in a larger transaction. """
def after_begin(self, session, transaction, connection):
"""Execute after a transaction is begun on a connection
`transaction` is the SessionTransaction. This method is called
after an engine level transaction is begun on a connection. """
def after_attach(self, session, instance):
"""Execute after an instance is attached to a session.
This is called after an add, delete or merge. """
def after_bulk_update(self, session, query, query_context, result):
"""Execute after a bulk update operation to the session.
This is called after a session.query(...).update()
`query` is the query object that this update operation was
called on. `query_context` was the query context object.
`result` is the result object returned from the bulk operation.
"""
def after_bulk_delete(self, session, query, query_context, result):
"""Execute after a bulk delete operation to the session.
This is called after a session.query(...).delete()
`query` is the query object that this delete operation was
called on. `query_context` was the query context object.
`result` is the result object returned from the bulk operation.
"""
@util.langhelpers.dependency_for("sqlalchemy.orm.interfaces")
class AttributeExtension(object):
"""Base implementation for :class:`.AttributeImpl` event hooks, events
that fire upon attribute mutations in user code.
.. deprecated:: 0.7
:class:`.AttributeExtension` is deprecated and will be removed in a
future release. Please refer to :func:`.event.listen` in conjunction
with the :class:`.AttributeEvents` listener interface.
:class:`.AttributeExtension` is used to listen for set,
remove, and append events on individual mapped attributes.
It is established on an individual mapped attribute using
the `extension` argument, available on
:func:`.column_property`, :func:`_orm.relationship`, and
others::
from sqlalchemy.orm.interfaces import AttributeExtension
from sqlalchemy.orm import mapper, relationship, column_property
class MyAttrExt(AttributeExtension):
def append(self, state, value, initiator):
print "append event !"
return value
def set(self, state, value, oldvalue, initiator):
print "set event !"
return value
mapper(SomeClass, sometable, properties={
'foo':column_property(sometable.c.foo, extension=MyAttrExt()),
'bar':relationship(Bar, extension=MyAttrExt())
})
Note that the :class:`.AttributeExtension` methods
:meth:`~.AttributeExtension.append` and
:meth:`~.AttributeExtension.set` need to return the
``value`` parameter. The returned value is used as the
effective value, and allows the extension to change what is
ultimately persisted.
AttributeExtension is assembled within the descriptors associated
with a mapped class.
"""
active_history = True
"""indicates that the set() method would like to receive the 'old' value,
even if it means firing lazy callables.
Note that ``active_history`` can also be set directly via
:func:`.column_property` and :func:`_orm.relationship`.
"""
@classmethod
def _adapt_listener(cls, self, listener):
for meth in ["append", "remove", "set"]:
me_meth = getattr(AttributeExtension, meth)
ls_meth = getattr(listener, meth)
if not util.methods_equivalent(me_meth, ls_meth):
util.warn_deprecated(
"AttributeExtension.%s is deprecated. The "
"AttributeExtension class will be removed in a future "
"release. Please transition to the @event interface, "
"using @event.listens_for(Class.attribute, '%s')."
% (meth, meth)
)
event.listen(
self,
"append",
listener.append,
active_history=listener.active_history,
raw=True,
retval=True,
)
event.listen(
self,
"remove",
listener.remove,
active_history=listener.active_history,
raw=True,
retval=True,
)
event.listen(
self,
"set",
listener.set,
active_history=listener.active_history,
raw=True,
retval=True,
)
def append(self, state, value, initiator):
"""Receive a collection append event.
The returned value will be used as the actual value to be
appended.
"""
return value
def remove(self, state, value, initiator):
"""Receive a remove event.
No return value is defined.
"""
pass
def set(self, state, value, oldvalue, initiator):
"""Receive a set event.
The returned value will be used as the actual value to be
set.
"""
return value
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/descriptor_props.py
|
# orm/descriptor_props.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Descriptor properties are more "auxiliary" properties
that exist as configurational elements, but don't participate
as actively in the load/persist ORM loop.
"""
from . import attributes
from . import properties
from . import query
from .interfaces import MapperProperty
from .interfaces import PropComparator
from .util import _none_set
from .. import event
from .. import exc as sa_exc
from .. import schema
from .. import sql
from .. import util
from ..sql import expression
class DescriptorProperty(MapperProperty):
""":class:`.MapperProperty` which proxies access to a
user-defined descriptor."""
doc = None
uses_objects = False
def instrument_class(self, mapper):
prop = self
class _ProxyImpl(object):
accepts_scalar_loader = False
expire_missing = True
collection = False
@property
def uses_objects(self):
return prop.uses_objects
def __init__(self, key):
self.key = key
if hasattr(prop, "get_history"):
def get_history(
self, state, dict_, passive=attributes.PASSIVE_OFF
):
return prop.get_history(state, dict_, passive)
if self.descriptor is None:
desc = getattr(mapper.class_, self.key, None)
if mapper._is_userland_descriptor(desc):
self.descriptor = desc
if self.descriptor is None:
def fset(obj, value):
setattr(obj, self.name, value)
def fdel(obj):
delattr(obj, self.name)
def fget(obj):
return getattr(obj, self.name)
self.descriptor = property(fget=fget, fset=fset, fdel=fdel)
proxy_attr = attributes.create_proxied_attribute(self.descriptor)(
self.parent.class_,
self.key,
self.descriptor,
lambda: self._comparator_factory(mapper),
doc=self.doc,
original_property=self,
)
proxy_attr.impl = _ProxyImpl(self.key)
mapper.class_manager.instrument_attribute(self.key, proxy_attr)
@util.langhelpers.dependency_for("sqlalchemy.orm.properties", add_to_all=True)
class CompositeProperty(DescriptorProperty):
"""Defines a "composite" mapped attribute, representing a collection
of columns as one attribute.
:class:`.CompositeProperty` is constructed using the :func:`.composite`
function.
.. seealso::
:ref:`mapper_composite`
"""
@util.deprecated_params(
extension=(
"0.7",
":class:`.AttributeExtension` is deprecated in favor of the "
":class:`.AttributeEvents` listener interface. The "
":paramref:`.composite.extension` parameter will be "
"removed in a future release.",
)
)
def __init__(self, class_, *attrs, **kwargs):
r"""Return a composite column-based property for use with a Mapper.
See the mapping documentation section :ref:`mapper_composite` for a
full usage example.
The :class:`.MapperProperty` returned by :func:`.composite`
is the :class:`.CompositeProperty`.
:param class\_:
The "composite type" class, or any classmethod or callable which
will produce a new instance of the composite object given the
column values in order.
:param \*cols:
List of Column objects to be mapped.
:param active_history=False:
When ``True``, indicates that the "previous" value for a
scalar attribute should be loaded when replaced, if not
already loaded. See the same flag on :func:`.column_property`.
:param group:
A group name for this property when marked as deferred.
:param deferred:
When True, the column property is "deferred", meaning that it does
not load immediately, and is instead loaded when the attribute is
first accessed on an instance. See also
:func:`~sqlalchemy.orm.deferred`.
:param comparator_factory: a class which extends
:class:`.CompositeProperty.Comparator` which provides custom SQL
clause generation for comparison operations.
:param doc:
optional string that will be applied as the doc on the
class-bound descriptor.
:param info: Optional data dictionary which will be populated into the
:attr:`.MapperProperty.info` attribute of this object.
:param extension:
an :class:`.AttributeExtension` instance,
or list of extensions, which will be prepended to the list of
attribute listeners for the resulting descriptor placed on the
class.
"""
super(CompositeProperty, self).__init__()
self.attrs = attrs
self.composite_class = class_
self.active_history = kwargs.get("active_history", False)
self.deferred = kwargs.get("deferred", False)
self.group = kwargs.get("group", None)
self.comparator_factory = kwargs.pop(
"comparator_factory", self.__class__.Comparator
)
if "info" in kwargs:
self.info = kwargs.pop("info")
util.set_creation_order(self)
self._create_descriptor()
def instrument_class(self, mapper):
super(CompositeProperty, self).instrument_class(mapper)
self._setup_event_handlers()
def do_init(self):
"""Initialization which occurs after the :class:`.CompositeProperty`
has been associated with its parent mapper.
"""
self._setup_arguments_on_columns()
def _create_descriptor(self):
"""Create the Python descriptor that will serve as
the access point on instances of the mapped class.
"""
def fget(instance):
dict_ = attributes.instance_dict(instance)
state = attributes.instance_state(instance)
if self.key not in dict_:
# key not present. Iterate through related
# attributes, retrieve their values. This
# ensures they all load.
values = [
getattr(instance, key) for key in self._attribute_keys
]
                # current expected behavior here is that the composite is
                # created on access if the object is persistent or if the
                # column attributes have non-None values.  This would be
                # better if the composite were created unconditionally,
                # but that would be a behavioral change.
if self.key not in dict_ and (
state.key is not None or not _none_set.issuperset(values)
):
dict_[self.key] = self.composite_class(*values)
state.manager.dispatch.refresh(state, None, [self.key])
return dict_.get(self.key, None)
def fset(instance, value):
dict_ = attributes.instance_dict(instance)
state = attributes.instance_state(instance)
attr = state.manager[self.key]
previous = dict_.get(self.key, attributes.NO_VALUE)
for fn in attr.dispatch.set:
value = fn(state, value, previous, attr.impl)
dict_[self.key] = value
if value is None:
for key in self._attribute_keys:
setattr(instance, key, None)
else:
for key, value in zip(
self._attribute_keys, value.__composite_values__()
):
setattr(instance, key, value)
def fdel(instance):
state = attributes.instance_state(instance)
dict_ = attributes.instance_dict(instance)
previous = dict_.pop(self.key, attributes.NO_VALUE)
attr = state.manager[self.key]
attr.dispatch.remove(state, previous, attr.impl)
for key in self._attribute_keys:
setattr(instance, key, None)
self.descriptor = property(fget, fset, fdel)
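        # illustrative behavior of the descriptor built above, assuming a
        # hypothetical ``Vertex.start`` composite over columns x1/y1:
        #
        #     v.start               # fget -> Point(v.x1, v.y1)
        #     v.start = Point(5, 6) # fset -> writes 5 and 6 to v.x1 / v.y1
        #     del v.start           # fdel -> sets v.x1 and v.y1 to None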
@util.memoized_property
def _comparable_elements(self):
return [getattr(self.parent.class_, prop.key) for prop in self.props]
@util.memoized_property
def props(self):
props = []
for attr in self.attrs:
            # use util.string_types so unicode names also work under py2k
            if isinstance(attr, util.string_types):
prop = self.parent.get_property(attr, _configure_mappers=False)
elif isinstance(attr, schema.Column):
prop = self.parent._columntoproperty[attr]
elif isinstance(attr, attributes.InstrumentedAttribute):
prop = attr.property
else:
raise sa_exc.ArgumentError(
"Composite expects Column objects or mapped "
"attributes/attribute names as arguments, got: %r"
% (attr,)
)
props.append(prop)
return props
@property
def columns(self):
return [a for a in self.attrs if isinstance(a, schema.Column)]
def _setup_arguments_on_columns(self):
"""Propagate configuration arguments made on this composite
to the target columns, for those that apply.
"""
for prop in self.props:
prop.active_history = self.active_history
if self.deferred:
prop.deferred = self.deferred
prop.strategy_key = (("deferred", True), ("instrument", True))
prop.group = self.group
def _setup_event_handlers(self):
"""Establish events that populate/expire the composite attribute."""
def load_handler(state, *args):
_load_refresh_handler(state, args, is_refresh=False)
def refresh_handler(state, *args):
_load_refresh_handler(state, args, is_refresh=True)
def _load_refresh_handler(state, args, is_refresh):
dict_ = state.dict
if not is_refresh and self.key in dict_:
return
# if column elements aren't loaded, skip.
# __get__() will initiate a load for those
# columns
for k in self._attribute_keys:
if k not in dict_:
return
dict_[self.key] = self.composite_class(
*[state.dict[key] for key in self._attribute_keys]
)
def expire_handler(state, keys):
if keys is None or set(self._attribute_keys).intersection(keys):
state.dict.pop(self.key, None)
def insert_update_handler(mapper, connection, state):
"""After an insert or update, some columns may be expired due
to server side defaults, or re-populated due to client side
            defaults.  Pop out the composite value here so that it is
            recreated on next access.
"""
state.dict.pop(self.key, None)
event.listen(
self.parent, "after_insert", insert_update_handler, raw=True
)
event.listen(
self.parent, "after_update", insert_update_handler, raw=True
)
event.listen(
self.parent, "load", load_handler, raw=True, propagate=True
)
event.listen(
self.parent, "refresh", refresh_handler, raw=True, propagate=True
)
event.listen(
self.parent, "expire", expire_handler, raw=True, propagate=True
)
# TODO: need a deserialize hook here
@util.memoized_property
def _attribute_keys(self):
return [prop.key for prop in self.props]
def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF):
"""Provided for userland code that uses attributes.get_history()."""
added = []
deleted = []
has_history = False
for prop in self.props:
key = prop.key
hist = state.manager[key].impl.get_history(state, dict_)
if hist.has_changes():
has_history = True
non_deleted = hist.non_deleted()
if non_deleted:
added.extend(non_deleted)
else:
added.append(None)
if hist.deleted:
deleted.extend(hist.deleted)
else:
deleted.append(None)
if has_history:
return attributes.History(
[self.composite_class(*added)],
(),
[self.composite_class(*deleted)],
)
else:
return attributes.History((), [self.composite_class(*added)], ())
def _comparator_factory(self, mapper):
return self.comparator_factory(self, mapper)
class CompositeBundle(query.Bundle):
def __init__(self, property_, expr):
self.property = property_
super(CompositeProperty.CompositeBundle, self).__init__(
property_.key, *expr
)
def create_row_processor(self, query, procs, labels):
def proc(row):
return self.property.composite_class(
*[proc(row) for proc in procs]
)
return proc
class Comparator(PropComparator):
"""Produce boolean, comparison, and other operators for
:class:`.CompositeProperty` attributes.
See the example in :ref:`composite_operations` for an overview
        of usage, as well as the documentation for :class:`.PropComparator`.
.. seealso::
:class:`.PropComparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
__hash__ = None
@property
def clauses(self):
return self.__clause_element__()
def __clause_element__(self):
return expression.ClauseList(
group=False, *self._comparable_elements
)
def _query_clause_element(self):
return CompositeProperty.CompositeBundle(
self.prop, self.__clause_element__()
)
def _bulk_update_tuples(self, value):
if value is None:
values = [None for key in self.prop._attribute_keys]
elif isinstance(value, self.prop.composite_class):
values = value.__composite_values__()
else:
raise sa_exc.ArgumentError(
"Can't UPDATE composite attribute %s to %r"
% (self.prop, value)
)
return zip(self._comparable_elements, values)
@util.memoized_property
def _comparable_elements(self):
if self._adapt_to_entity:
return [
getattr(self._adapt_to_entity.entity, prop.key)
for prop in self.prop._comparable_elements
]
else:
return self.prop._comparable_elements
def __eq__(self, other):
if other is None:
values = [None] * len(self.prop._comparable_elements)
else:
values = other.__composite_values__()
comparisons = [
a == b for a, b in zip(self.prop._comparable_elements, values)
]
if self._adapt_to_entity:
comparisons = [self.adapter(x) for x in comparisons]
return sql.and_(*comparisons)
def __ne__(self, other):
return sql.not_(self.__eq__(other))
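        # illustrative rendering: ``Vertex.start == Point(3, 4)`` produces
        # roughly ``x1 = :x1_1 AND y1 = :y1_1``, while ``!=`` wraps the
        # same conjunction in ``NOT (...)``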
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
@util.langhelpers.dependency_for("sqlalchemy.orm.properties", add_to_all=True)
class ConcreteInheritedProperty(DescriptorProperty):
"""A 'do nothing' :class:`.MapperProperty` that disables
an attribute on a concrete subclass that is only present
on the inherited mapper, not the concrete classes' mapper.
Cases where this occurs include:
* When the superclass mapper is mapped against a
"polymorphic union", which includes all attributes from
all subclasses.
* When a relationship() is configured on an inherited mapper,
but not on the subclass mapper. Concrete mappers require
that relationship() is configured explicitly on each
subclass.
"""
def _comparator_factory(self, mapper):
comparator_callable = None
for m in self.parent.iterate_to_root():
p = m._props[self.key]
if not isinstance(p, ConcreteInheritedProperty):
comparator_callable = p.comparator_factory
break
return comparator_callable
def __init__(self):
super(ConcreteInheritedProperty, self).__init__()
def warn():
raise AttributeError(
"Concrete %s does not implement "
"attribute %r at the instance level. Add "
"this property explicitly to %s."
% (self.parent, self.key, self.parent)
)
class NoninheritedConcreteProp(object):
def __set__(s, obj, value):
warn()
def __delete__(s, obj):
warn()
def __get__(s, obj, owner):
if obj is None:
return self.descriptor
warn()
self.descriptor = NoninheritedConcreteProp()
@util.langhelpers.dependency_for("sqlalchemy.orm.properties", add_to_all=True)
class SynonymProperty(DescriptorProperty):
def __init__(
self,
name,
map_column=None,
descriptor=None,
comparator_factory=None,
doc=None,
info=None,
):
"""Denote an attribute name as a synonym to a mapped property,
in that the attribute will mirror the value and expression behavior
of another attribute.
e.g.::
class MyClass(Base):
__tablename__ = 'my_table'
id = Column(Integer, primary_key=True)
job_status = Column(String(50))
status = synonym("job_status")
:param name: the name of the existing mapped property. This
can refer to the string name ORM-mapped attribute
configured on the class, including column-bound attributes
and relationships.
:param descriptor: a Python :term:`descriptor` that will be used
as a getter (and potentially a setter) when this attribute is
accessed at the instance level.
:param map_column: **For classical mappings and mappings against
an existing Table object only**. if ``True``, the :func:`.synonym`
construct will locate the :class:`_schema.Column`
object upon the mapped
table that would normally be associated with the attribute name of
this synonym, and produce a new :class:`.ColumnProperty` that instead
maps this :class:`_schema.Column`
to the alternate name given as the "name"
argument of the synonym; in this way, the usual step of redefining
the mapping of the :class:`_schema.Column`
to be under a different name is
unnecessary. This is usually intended to be used when a
:class:`_schema.Column`
is to be replaced with an attribute that also uses a
descriptor, that is, in conjunction with the
:paramref:`.synonym.descriptor` parameter::
my_table = Table(
"my_table", metadata,
Column('id', Integer, primary_key=True),
Column('job_status', String(50))
)
class MyClass(object):
@property
def _job_status_descriptor(self):
return "Status: %s" % self._job_status
mapper(
MyClass, my_table, properties={
"job_status": synonym(
"_job_status", map_column=True,
descriptor=MyClass._job_status_descriptor)
}
)
Above, the attribute named ``_job_status`` is automatically
mapped to the ``job_status`` column::
>>> j1 = MyClass()
>>> j1._job_status = "employed"
>>> j1.job_status
Status: employed
When using Declarative, in order to provide a descriptor in
conjunction with a synonym, use the
:func:`sqlalchemy.ext.declarative.synonym_for` helper. However,
note that the :ref:`hybrid properties <mapper_hybrids>` feature
should usually be preferred, particularly when redefining attribute
behavior.
:param info: Optional data dictionary which will be populated into the
:attr:`.InspectionAttr.info` attribute of this object.
.. versionadded:: 1.0.0
:param comparator_factory: A subclass of :class:`.PropComparator`
that will provide custom comparison behavior at the SQL expression
level.
.. note::
For the use case of providing an attribute which redefines both
Python-level and SQL-expression level behavior of an attribute,
please refer to the Hybrid attribute introduced at
:ref:`mapper_hybrids` for a more effective technique.
.. seealso::
:ref:`synonyms` - Overview of synonyms
:func:`.synonym_for` - a helper oriented towards Declarative
:ref:`mapper_hybrids` - The Hybrid Attribute extension provides an
updated approach to augmenting attribute behavior more flexibly
than can be achieved with synonyms.
"""
super(SynonymProperty, self).__init__()
self.name = name
self.map_column = map_column
self.descriptor = descriptor
self.comparator_factory = comparator_factory
self.doc = doc or (descriptor and descriptor.__doc__) or None
if info:
self.info = info
util.set_creation_order(self)
@property
def uses_objects(self):
return getattr(self.parent.class_, self.name).impl.uses_objects
    # TODO: when initialized, check _proxied_property,
    # emit a warning if it's not a column-based property
@util.memoized_property
def _proxied_property(self):
attr = getattr(self.parent.class_, self.name)
if not hasattr(attr, "property") or not isinstance(
attr.property, MapperProperty
):
raise sa_exc.InvalidRequestError(
"""synonym() attribute "%s.%s" only supports """
"""ORM mapped attributes, got %r"""
% (self.parent.class_.__name__, self.name, attr)
)
return attr.property
def _comparator_factory(self, mapper):
prop = self._proxied_property
if self.comparator_factory:
comp = self.comparator_factory(prop, mapper)
else:
comp = prop.comparator_factory(prop, mapper)
return comp
def get_history(self, *arg, **kw):
attr = getattr(self.parent.class_, self.name)
return attr.impl.get_history(*arg, **kw)
def set_parent(self, parent, init):
if self.map_column:
# implement the 'map_column' option.
if self.key not in parent.persist_selectable.c:
raise sa_exc.ArgumentError(
"Can't compile synonym '%s': no column on table "
"'%s' named '%s'"
% (
self.name,
parent.persist_selectable.description,
self.key,
)
)
elif (
parent.persist_selectable.c[self.key]
in parent._columntoproperty
and parent._columntoproperty[
parent.persist_selectable.c[self.key]
].key
== self.name
):
raise sa_exc.ArgumentError(
"Can't call map_column=True for synonym %r=%r, "
"a ColumnProperty already exists keyed to the name "
"%r for column %r"
% (self.key, self.name, self.name, self.key)
)
p = properties.ColumnProperty(
parent.persist_selectable.c[self.key]
)
parent._configure_property(self.name, p, init=init, setparent=True)
p._mapped_by_synonym = self.key
self.parent = parent
@util.langhelpers.dependency_for("sqlalchemy.orm.properties", add_to_all=True)
@util.deprecated_cls(
"0.7",
":func:`.comparable_property` is deprecated and will be removed in a "
"future release. Please refer to the :mod:`~sqlalchemy.ext.hybrid` "
"extension.",
)
class ComparableProperty(DescriptorProperty):
"""Instruments a Python property for use in query expressions."""
def __init__(
self, comparator_factory, descriptor=None, doc=None, info=None
):
"""Provides a method of applying a :class:`.PropComparator`
to any Python descriptor attribute.
Allows any Python descriptor to behave like a SQL-enabled
attribute when used at the class level in queries, allowing
redefinition of expression operator behavior.
In the example below we redefine :meth:`.PropComparator.operate`
to wrap both sides of an expression in ``func.lower()`` to produce
case-insensitive comparison::
from sqlalchemy.orm import comparable_property
from sqlalchemy.orm.interfaces import PropComparator
from sqlalchemy.sql import func
from sqlalchemy import Integer, String, Column
from sqlalchemy.ext.declarative import declarative_base
class CaseInsensitiveComparator(PropComparator):
def __clause_element__(self):
return self.prop
def operate(self, op, other):
return op(
func.lower(self.__clause_element__()),
func.lower(other)
)
Base = declarative_base()
class SearchWord(Base):
__tablename__ = 'search_word'
id = Column(Integer, primary_key=True)
word = Column(String)
word_insensitive = comparable_property(lambda prop, mapper:
CaseInsensitiveComparator(
mapper.c.word, mapper)
)
A mapping like the above allows the ``word_insensitive`` attribute
to render an expression like::
>>> print(SearchWord.word_insensitive == "Trucks")
lower(search_word.word) = lower(:lower_1)
:param comparator_factory:
A PropComparator subclass or factory that defines operator behavior
for this property.
:param descriptor:
Optional when used in a ``properties={}`` declaration. The Python
descriptor or property to layer comparison behavior on top of.
The like-named descriptor will be automatically retrieved from the
mapped class if left blank in a ``properties`` declaration.
:param info: Optional data dictionary which will be populated into the
:attr:`.InspectionAttr.info` attribute of this object.
.. versionadded:: 1.0.0
"""
super(ComparableProperty, self).__init__()
self.descriptor = descriptor
self.comparator_factory = comparator_factory
self.doc = doc or (descriptor and descriptor.__doc__) or None
if info:
self.info = info
util.set_creation_order(self)
def _comparator_factory(self, mapper):
return self.comparator_factory(self, mapper)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/identity.py
|
# orm/identity.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import weakref
from . import attributes
from . import util as orm_util
from .. import exc as sa_exc
from .. import util
class IdentityMap(object):
def __init__(self):
self._dict = {}
self._modified = set()
self._wr = weakref.ref(self)
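        # states receive this weakref via _manage_incoming_state() as
        # state._instance_dict, so a state can locate its owning map
        # without keeping the map (and its Session) alive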
def keys(self):
return self._dict.keys()
def replace(self, state):
raise NotImplementedError()
def add(self, state):
raise NotImplementedError()
def _add_unpresent(self, state, key):
"""optional inlined form of add() which can assume item isn't present
in the map"""
self.add(state)
def update(self, dict_):
raise NotImplementedError("IdentityMap uses add() to insert data")
def clear(self):
raise NotImplementedError("IdentityMap uses remove() to remove data")
def _manage_incoming_state(self, state):
state._instance_dict = self._wr
if state.modified:
self._modified.add(state)
def _manage_removed_state(self, state):
del state._instance_dict
if state.modified:
self._modified.discard(state)
def _dirty_states(self):
return self._modified
def check_modified(self):
"""return True if any InstanceStates present have been marked
as 'modified'.
"""
return bool(self._modified)
def has_key(self, key):
return key in self
def popitem(self):
raise NotImplementedError("IdentityMap uses remove() to remove data")
def pop(self, key, *args):
raise NotImplementedError("IdentityMap uses remove() to remove data")
def setdefault(self, key, default=None):
raise NotImplementedError("IdentityMap uses add() to insert data")
def __len__(self):
return len(self._dict)
def copy(self):
raise NotImplementedError()
def __setitem__(self, key, value):
raise NotImplementedError("IdentityMap uses add() to insert data")
def __delitem__(self, key):
raise NotImplementedError("IdentityMap uses remove() to remove data")
class WeakInstanceDict(IdentityMap):
def __getitem__(self, key):
state = self._dict[key]
o = state.obj()
if o is None:
raise KeyError(key)
return o
def __contains__(self, key):
try:
if key in self._dict:
state = self._dict[key]
o = state.obj()
else:
return False
except KeyError:
return False
else:
return o is not None
def contains_state(self, state):
if state.key in self._dict:
try:
return self._dict[state.key] is state
except KeyError:
return False
else:
return False
def replace(self, state):
if state.key in self._dict:
try:
existing = self._dict[state.key]
except KeyError:
                # gc may have removed the key just after we checked for it
pass
else:
if existing is not state:
self._manage_removed_state(existing)
else:
return None
else:
existing = None
self._dict[state.key] = state
self._manage_incoming_state(state)
return existing
def add(self, state):
key = state.key
# inline of self.__contains__
if key in self._dict:
try:
existing_state = self._dict[key]
except KeyError:
                # gc may have removed the key just after we checked for it
pass
else:
if existing_state is not state:
o = existing_state.obj()
if o is not None:
raise sa_exc.InvalidRequestError(
"Can't attach instance "
"%s; another instance with key %s is already "
"present in this session."
% (orm_util.state_str(state), state.key)
)
else:
return False
self._dict[key] = state
self._manage_incoming_state(state)
return True
def _add_unpresent(self, state, key):
# inlined form of add() called by loading.py
self._dict[key] = state
state._instance_dict = self._wr
def get(self, key, default=None):
if key not in self._dict:
return default
try:
state = self._dict[key]
except KeyError:
            # gc may have removed the key just after we checked for it
return default
else:
o = state.obj()
if o is None:
return default
return o
def items(self):
values = self.all_states()
result = []
for state in values:
value = state.obj()
if value is not None:
result.append((state.key, value))
return result
def values(self):
values = self.all_states()
result = []
for state in values:
value = state.obj()
if value is not None:
result.append(value)
return result
def __iter__(self):
return iter(self.keys())
if util.py2k:
def iteritems(self):
return iter(self.items())
def itervalues(self):
return iter(self.values())
def all_states(self):
if util.py2k:
return self._dict.values()
else:
return list(self._dict.values())
def _fast_discard(self, state):
# used by InstanceState for state being
        # GC'ed, inlines _manage_removed_state
try:
st = self._dict[state.key]
except KeyError:
            # gc may have removed the key just after we checked for it
pass
else:
if st is state:
self._dict.pop(state.key, None)
def discard(self, state):
self.safe_discard(state)
def safe_discard(self, state):
if state.key in self._dict:
try:
st = self._dict[state.key]
except KeyError:
                # gc may have removed the key just after we checked for it
pass
else:
if st is state:
self._dict.pop(state.key, None)
self._manage_removed_state(state)
def prune(self):
return 0
class StrongInstanceDict(IdentityMap):
"""A 'strong-referencing' version of the identity map.
    .. deprecated:: 1.1

        The strong reference identity map is legacy.  See the
        recipe at :ref:`session_referencing_behavior` for
        an event-based approach to maintaining strong identity
        references.
"""
if util.py2k:
def itervalues(self):
return self._dict.itervalues()
def iteritems(self):
return self._dict.iteritems()
    def __iter__(self):
        return iter(self._dict)
def __getitem__(self, key):
return self._dict[key]
def __contains__(self, key):
return key in self._dict
def get(self, key, default=None):
return self._dict.get(key, default)
def values(self):
return self._dict.values()
def items(self):
return self._dict.items()
def all_states(self):
return [attributes.instance_state(o) for o in self.values()]
def contains_state(self, state):
return (
state.key in self
and attributes.instance_state(self[state.key]) is state
)
def replace(self, state):
if state.key in self._dict:
existing = self._dict[state.key]
existing = attributes.instance_state(existing)
if existing is not state:
self._manage_removed_state(existing)
else:
return
else:
existing = None
self._dict[state.key] = state.obj()
self._manage_incoming_state(state)
return existing
def add(self, state):
if state.key in self:
if attributes.instance_state(self._dict[state.key]) is not state:
raise sa_exc.InvalidRequestError(
"Can't attach instance "
"%s; another instance with key %s is already "
"present in this session."
% (orm_util.state_str(state), state.key)
)
return False
else:
self._dict[state.key] = state.obj()
self._manage_incoming_state(state)
return True
def _add_unpresent(self, state, key):
# inlined form of add() called by loading.py
self._dict[key] = state.obj()
state._instance_dict = self._wr
def _fast_discard(self, state):
# used by InstanceState for state being
        # GC'ed, inlines _manage_removed_state
try:
obj = self._dict[state.key]
except KeyError:
            # gc may have removed the key just after we checked for it
pass
else:
if attributes.instance_state(obj) is state:
self._dict.pop(state.key, None)
def discard(self, state):
self.safe_discard(state)
def safe_discard(self, state):
if state.key in self._dict:
obj = self._dict[state.key]
st = attributes.instance_state(obj)
if st is state:
self._dict.pop(state.key, None)
self._manage_removed_state(state)
def prune(self):
"""prune unreferenced, non-dirty states."""
ref_count = len(self)
dirty = [s.obj() for s in self.all_states() if s.modified]
# work around http://bugs.python.org/issue6149
keepers = weakref.WeakValueDictionary()
keepers.update(self)
self._dict.clear()
self._dict.update(keepers)
self.modified = bool(dirty)
return ref_count - len(self)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/evaluator.py
|
# orm/evaluator.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import operator
from .. import inspect
from .. import util
from ..sql import operators
class UnevaluatableError(Exception):
pass
_straight_ops = set(
getattr(operators, op)
for op in (
"add",
"mul",
"sub",
"div",
"mod",
"truediv",
"lt",
"le",
"ne",
"gt",
"ge",
"eq",
)
)
_notimplemented_ops = set(
getattr(operators, op)
for op in (
"like_op",
"notlike_op",
"ilike_op",
"notilike_op",
"between_op",
"in_op",
"notin_op",
"endswith_op",
"concat_op",
)
)
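# A minimal usage sketch (``User`` is a hypothetical mapped class); this is
# the mechanism behind Query.update()/delete() with
# synchronize_session="evaluate", applying criteria to in-memory objects:
#
#     compiler = EvaluatorCompiler(User)
#     fn = compiler.process(User.name == "ed")
#     fn(some_user)   # -> True / False / None (None models SQL NULL)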
class EvaluatorCompiler(object):
def __init__(self, target_cls=None):
self.target_cls = target_cls
def process(self, clause):
meth = getattr(self, "visit_%s" % clause.__visit_name__, None)
if not meth:
raise UnevaluatableError(
"Cannot evaluate %s" % type(clause).__name__
)
return meth(clause)
def visit_grouping(self, clause):
return self.process(clause.element)
def visit_null(self, clause):
return lambda obj: None
def visit_false(self, clause):
return lambda obj: False
def visit_true(self, clause):
return lambda obj: True
def visit_column(self, clause):
if "parentmapper" in clause._annotations:
parentmapper = clause._annotations["parentmapper"]
if self.target_cls and not issubclass(
self.target_cls, parentmapper.class_
):
raise UnevaluatableError(
"Can't evaluate criteria against alternate class %s"
% parentmapper.class_
)
key = parentmapper._columntoproperty[clause].key
else:
key = clause.key
if (
self.target_cls
and key in inspect(self.target_cls).column_attrs
):
util.warn(
"Evaluating non-mapped column expression '%s' onto "
"ORM instances; this is a deprecated use case. Please "
"make use of the actual mapped columns in ORM-evaluated "
"UPDATE / DELETE expressions." % clause
)
else:
raise UnevaluatableError("Cannot evaluate column: %s" % clause)
get_corresponding_attr = operator.attrgetter(key)
return lambda obj: get_corresponding_attr(obj)
def visit_clauselist(self, clause):
evaluators = list(map(self.process, clause.clauses))
if clause.operator is operators.or_:
def evaluate(obj):
has_null = False
for sub_evaluate in evaluators:
value = sub_evaluate(obj)
if value:
return True
has_null = has_null or value is None
if has_null:
return None
return False
elif clause.operator is operators.and_:
def evaluate(obj):
for sub_evaluate in evaluators:
value = sub_evaluate(obj)
if not value:
if value is None:
return None
return False
return True
else:
raise UnevaluatableError(
"Cannot evaluate clauselist with operator %s" % clause.operator
)
return evaluate
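    # editor's note (hedged): the None handling above approximates SQL
    # three-valued logic, with None standing in for NULL -- OR yields
    # True if any operand is true, and None when no operand is true but
    # a NULL was seen; AND short-circuits on the first falsy operand,
    # yielding None if that operand was NULL, else False.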
def visit_binary(self, clause):
eval_left, eval_right = list(
map(self.process, [clause.left, clause.right])
)
operator = clause.operator
if operator is operators.is_:
def evaluate(obj):
return eval_left(obj) == eval_right(obj)
elif operator is operators.isnot:
def evaluate(obj):
return eval_left(obj) != eval_right(obj)
elif operator in _straight_ops:
def evaluate(obj):
left_val = eval_left(obj)
right_val = eval_right(obj)
if left_val is None or right_val is None:
return None
                # reuse the already-computed values rather than invoking
                # the evaluators a second time
                return operator(left_val, right_val)
else:
raise UnevaluatableError(
"Cannot evaluate %s with operator %s"
% (type(clause).__name__, clause.operator)
)
return evaluate
def visit_unary(self, clause):
eval_inner = self.process(clause.element)
if clause.operator is operators.inv:
def evaluate(obj):
value = eval_inner(obj)
if value is None:
return None
return not value
return evaluate
raise UnevaluatableError(
"Cannot evaluate %s with operator %s"
% (type(clause).__name__, clause.operator)
)
def visit_bindparam(self, clause):
if clause.callable:
val = clause.callable()
else:
val = clause.value
return lambda obj: val
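# --- editor's note: a hedged usage sketch appended for illustration; it
# is not part of the original module. EvaluatorCompiler turns a SQL
# criterion into a plain Python callable applied to ORM instances in
# memory -- the mechanism behind Query.update()/delete() with
# synchronize_session="evaluate". The mapped class below is illustrative
# only.
if __name__ == "__main__":
    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class User(Base):
        __tablename__ = "user"
        id = Column(Integer, primary_key=True)
        name = Column(String)

    # compile the criterion once, then apply it to instances in memory
    is_ed = EvaluatorCompiler(User).process(User.name == "ed")
    assert is_ed(User(name="ed")) is True
    assert is_ed(User(name="mary")) is False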
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/unitofwork.py
|
# orm/unitofwork.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The internals for the unit of work system.
The session's flush() process passes objects to a contextual object
here, which assembles flush tasks based on mappers and their properties,
organizes them in order of dependency, and executes.
"""
from . import attributes
from . import exc as orm_exc
from . import persistence
from . import util as orm_util
from .. import event
from .. import util
from ..util import topological
def track_cascade_events(descriptor, prop):
"""Establish event listeners on object attributes which handle
cascade-on-set/append.
"""
key = prop.key
def append(state, item, initiator):
# process "save_update" cascade rules for when
# an instance is appended to the list of another instance
if item is None:
return
sess = state.session
if sess:
if sess._warn_on_events:
sess._flush_warning("collection append")
prop = state.manager.mapper._props[key]
item_state = attributes.instance_state(item)
if (
prop._cascade.save_update
and (prop.cascade_backrefs or key == initiator.key)
and not sess._contains_state(item_state)
):
sess._save_or_update_state(item_state)
return item
def remove(state, item, initiator):
if item is None:
return
sess = state.session
prop = state.manager.mapper._props[key]
if sess and sess._warn_on_events:
sess._flush_warning(
"collection remove"
if prop.uselist
else "related attribute delete"
)
if (
item is not None
and item is not attributes.NEVER_SET
and item is not attributes.PASSIVE_NO_RESULT
and prop._cascade.delete_orphan
):
# expunge pending orphans
item_state = attributes.instance_state(item)
if prop.mapper._is_orphan(item_state):
if sess and item_state in sess._new:
sess.expunge(item)
else:
# the related item may or may not itself be in a
# Session, however the parent for which we are catching
# the event is not in a session, so memoize this on the
# item
item_state._orphaned_outside_of_session = True
def set_(state, newvalue, oldvalue, initiator):
# process "save_update" cascade rules for when an instance
# is attached to another instance
if oldvalue is newvalue:
return newvalue
sess = state.session
if sess:
if sess._warn_on_events:
sess._flush_warning("related attribute set")
prop = state.manager.mapper._props[key]
if newvalue is not None:
newvalue_state = attributes.instance_state(newvalue)
if (
prop._cascade.save_update
and (prop.cascade_backrefs or key == initiator.key)
and not sess._contains_state(newvalue_state)
):
sess._save_or_update_state(newvalue_state)
if (
oldvalue is not None
and oldvalue is not attributes.NEVER_SET
and oldvalue is not attributes.PASSIVE_NO_RESULT
and prop._cascade.delete_orphan
):
            # is it possible to reach here with attributes.NEVER_SET?
oldvalue_state = attributes.instance_state(oldvalue)
if oldvalue_state in sess._new and prop.mapper._is_orphan(
oldvalue_state
):
sess.expunge(oldvalue)
return newvalue
event.listen(descriptor, "append", append, raw=True, retval=True)
event.listen(descriptor, "remove", remove, raw=True, retval=True)
event.listen(descriptor, "set", set_, raw=True, retval=True)
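# --- editor's note: a hedged sketch (comments only, appended for
# illustration) of the user-facing behavior the listeners above produce;
# the class names are illustrative:
#
#     session = Session()
#     parent = Parent()        # Parent has children = relationship(Child)
#     session.add(parent)
#
#     child = Child()                 # transient
#     parent.children.append(child)   # fires the "append" listener
#     assert child in session         # "save-update" cascaded it in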
class UOWTransaction(object):
def __init__(self, session):
self.session = session
# dictionary used by external actors to
# store arbitrary state information.
self.attributes = {}
# dictionary of mappers to sets of
# DependencyProcessors, which are also
# set to be part of the sorted flush actions,
# which have that mapper as a parent.
self.deps = util.defaultdict(set)
# dictionary of mappers to sets of InstanceState
# items pending for flush which have that mapper
# as a parent.
self.mappers = util.defaultdict(set)
# a dictionary of Preprocess objects, which gather
# additional states impacted by the flush
# and determine if a flush action is needed
self.presort_actions = {}
# dictionary of PostSortRec objects, each
# one issues work during the flush within
# a certain ordering.
self.postsort_actions = {}
# a set of 2-tuples, each containing two
# PostSortRec objects where the second
# is dependent on the first being executed
# first
self.dependencies = set()
# dictionary of InstanceState-> (isdelete, listonly)
# tuples, indicating if this state is to be deleted
# or insert/updated, or just refreshed
self.states = {}
# tracks InstanceStates which will be receiving
# a "post update" call. Keys are mappers,
# values are a set of states and a set of the
# columns which should be included in the update.
self.post_update_states = util.defaultdict(lambda: (set(), set()))
@property
def has_work(self):
return bool(self.states)
def was_already_deleted(self, state):
"""return true if the given state is expired and was deleted
previously.
"""
if state.expired:
try:
state._load_expired(state, attributes.PASSIVE_OFF)
except orm_exc.ObjectDeletedError:
self.session._remove_newly_deleted([state])
return True
return False
def is_deleted(self, state):
"""return true if the given state is marked as deleted
within this uowtransaction."""
return state in self.states and self.states[state][0]
def memo(self, key, callable_):
if key in self.attributes:
return self.attributes[key]
else:
self.attributes[key] = ret = callable_()
return ret
def remove_state_actions(self, state):
"""remove pending actions for a state from the uowtransaction."""
isdelete = self.states[state][0]
self.states[state] = (isdelete, True)
def get_attribute_history(
self, state, key, passive=attributes.PASSIVE_NO_INITIALIZE
):
"""facade to attributes.get_state_history(), including
caching of results."""
hashkey = ("history", state, key)
# cache the objects, not the states; the strong reference here
# prevents newly loaded objects from being dereferenced during the
# flush process
if hashkey in self.attributes:
history, state_history, cached_passive = self.attributes[hashkey]
# if the cached lookup was "passive" and now
# we want non-passive, do a non-passive lookup and re-cache
if (
not cached_passive & attributes.SQL_OK
and passive & attributes.SQL_OK
):
impl = state.manager[key].impl
history = impl.get_history(
state,
state.dict,
attributes.PASSIVE_OFF | attributes.LOAD_AGAINST_COMMITTED,
)
if history and impl.uses_objects:
state_history = history.as_state()
else:
state_history = history
self.attributes[hashkey] = (history, state_history, passive)
else:
impl = state.manager[key].impl
# TODO: store the history as (state, object) tuples
# so we don't have to keep converting here
history = impl.get_history(
state, state.dict, passive | attributes.LOAD_AGAINST_COMMITTED
)
if history and impl.uses_objects:
state_history = history.as_state()
else:
state_history = history
self.attributes[hashkey] = (history, state_history, passive)
return state_history
def has_dep(self, processor):
return (processor, True) in self.presort_actions
def register_preprocessor(self, processor, fromparent):
key = (processor, fromparent)
if key not in self.presort_actions:
self.presort_actions[key] = Preprocess(processor, fromparent)
def register_object(
self,
state,
isdelete=False,
listonly=False,
cancel_delete=False,
operation=None,
prop=None,
):
if not self.session._contains_state(state):
# this condition is normal when objects are registered
# as part of a relationship cascade operation. it should
# not occur for the top-level register from Session.flush().
if not state.deleted and operation is not None:
util.warn(
"Object of type %s not in session, %s operation "
"along '%s' will not proceed"
% (orm_util.state_class_str(state), operation, prop)
)
return False
if state not in self.states:
mapper = state.manager.mapper
if mapper not in self.mappers:
self._per_mapper_flush_actions(mapper)
self.mappers[mapper].add(state)
self.states[state] = (isdelete, listonly)
else:
if not listonly and (isdelete or cancel_delete):
self.states[state] = (isdelete, False)
return True
def register_post_update(self, state, post_update_cols):
mapper = state.manager.mapper.base_mapper
states, cols = self.post_update_states[mapper]
states.add(state)
cols.update(post_update_cols)
def _per_mapper_flush_actions(self, mapper):
saves = SaveUpdateAll(self, mapper.base_mapper)
deletes = DeleteAll(self, mapper.base_mapper)
self.dependencies.add((saves, deletes))
for dep in mapper._dependency_processors:
dep.per_property_preprocessors(self)
for prop in mapper.relationships:
if prop.viewonly:
continue
dep = prop._dependency_processor
dep.per_property_preprocessors(self)
@util.memoized_property
def _mapper_for_dep(self):
"""return a dynamic mapping of (Mapper, DependencyProcessor) to
True or False, indicating if the DependencyProcessor operates
on objects of that Mapper.
The result is stored in the dictionary persistently once
calculated.
"""
return util.PopulateDict(
lambda tup: tup[0]._props.get(tup[1].key) is tup[1].prop
)
def filter_states_for_dep(self, dep, states):
"""Filter the given list of InstanceStates to those relevant to the
given DependencyProcessor.
"""
mapper_for_dep = self._mapper_for_dep
return [s for s in states if mapper_for_dep[(s.manager.mapper, dep)]]
def states_for_mapper_hierarchy(self, mapper, isdelete, listonly):
checktup = (isdelete, listonly)
for mapper in mapper.base_mapper.self_and_descendants:
for state in self.mappers[mapper]:
if self.states[state] == checktup:
yield state
def _generate_actions(self):
"""Generate the full, unsorted collection of PostSortRecs as
well as dependency pairs for this UOWTransaction.
"""
# execute presort_actions, until all states
# have been processed. a presort_action might
# add new states to the uow.
while True:
ret = False
for action in list(self.presort_actions.values()):
if action.execute(self):
ret = True
if not ret:
break
# see if the graph of mapper dependencies has cycles.
self.cycles = cycles = topological.find_cycles(
self.dependencies, list(self.postsort_actions.values())
)
if cycles:
# if yes, break the per-mapper actions into
# per-state actions
convert = dict(
(rec, set(rec.per_state_flush_actions(self))) for rec in cycles
)
# rewrite the existing dependencies to point to
# the per-state actions for those per-mapper actions
# that were broken up.
for edge in list(self.dependencies):
if (
None in edge
or edge[0].disabled
or edge[1].disabled
or cycles.issuperset(edge)
):
self.dependencies.remove(edge)
elif edge[0] in cycles:
self.dependencies.remove(edge)
for dep in convert[edge[0]]:
self.dependencies.add((dep, edge[1]))
elif edge[1] in cycles:
self.dependencies.remove(edge)
for dep in convert[edge[1]]:
self.dependencies.add((edge[0], dep))
return set(
[a for a in self.postsort_actions.values() if not a.disabled]
).difference(cycles)
def execute(self):
postsort_actions = self._generate_actions()
# sort = topological.sort(self.dependencies, postsort_actions)
# print "--------------"
# print "\ndependencies:", self.dependencies
# print "\ncycles:", self.cycles
# print "\nsort:", list(sort)
# print "\nCOUNT OF POSTSORT ACTIONS", len(postsort_actions)
# execute
if self.cycles:
for set_ in topological.sort_as_subsets(
self.dependencies, postsort_actions
):
while set_:
n = set_.pop()
n.execute_aggregate(self, set_)
else:
for rec in topological.sort(self.dependencies, postsort_actions):
rec.execute(self)
def finalize_flush_changes(self):
"""mark processed objects as clean / deleted after a successful
flush().
this method is called within the flush() method after the
execute() method has succeeded and the transaction has been committed.
"""
if not self.states:
return
states = set(self.states)
isdel = set(
s for (s, (isdelete, listonly)) in self.states.items() if isdelete
)
other = states.difference(isdel)
if isdel:
self.session._remove_newly_deleted(isdel)
if other:
self.session._register_persistent(other)
class IterateMappersMixin(object):
def _mappers(self, uow):
if self.fromparent:
return iter(
m
for m in self.dependency_processor.parent.self_and_descendants
if uow._mapper_for_dep[(m, self.dependency_processor)]
)
else:
return self.dependency_processor.mapper.self_and_descendants
class Preprocess(IterateMappersMixin):
__slots__ = (
"dependency_processor",
"fromparent",
"processed",
"setup_flush_actions",
)
def __init__(self, dependency_processor, fromparent):
self.dependency_processor = dependency_processor
self.fromparent = fromparent
self.processed = set()
self.setup_flush_actions = False
def execute(self, uow):
delete_states = set()
save_states = set()
for mapper in self._mappers(uow):
for state in uow.mappers[mapper].difference(self.processed):
(isdelete, listonly) = uow.states[state]
if not listonly:
if isdelete:
delete_states.add(state)
else:
save_states.add(state)
if delete_states:
self.dependency_processor.presort_deletes(uow, delete_states)
self.processed.update(delete_states)
if save_states:
self.dependency_processor.presort_saves(uow, save_states)
self.processed.update(save_states)
if delete_states or save_states:
if not self.setup_flush_actions and (
self.dependency_processor.prop_has_changes(
uow, delete_states, True
)
or self.dependency_processor.prop_has_changes(
uow, save_states, False
)
):
self.dependency_processor.per_property_flush_actions(uow)
self.setup_flush_actions = True
return True
else:
return False
class PostSortRec(object):
__slots__ = ("disabled",)
def __new__(cls, uow, *args):
key = (cls,) + args
if key in uow.postsort_actions:
return uow.postsort_actions[key]
else:
uow.postsort_actions[key] = ret = object.__new__(cls)
ret.disabled = False
return ret
def execute_aggregate(self, uow, recs):
self.execute(uow)
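# editor's note (hedged): PostSortRec.__new__ above interns records in
# uow.postsort_actions keyed by (class,) + args, so constructing the
# "same" action twice yields the identical object; the dependency pairs
# handed to the topological sort can therefore compare records by
# identity.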
class ProcessAll(IterateMappersMixin, PostSortRec):
__slots__ = "dependency_processor", "isdelete", "fromparent"
def __init__(self, uow, dependency_processor, isdelete, fromparent):
self.dependency_processor = dependency_processor
self.isdelete = isdelete
self.fromparent = fromparent
uow.deps[dependency_processor.parent.base_mapper].add(
dependency_processor
)
def execute(self, uow):
states = self._elements(uow)
if self.isdelete:
self.dependency_processor.process_deletes(uow, states)
else:
self.dependency_processor.process_saves(uow, states)
def per_state_flush_actions(self, uow):
# this is handled by SaveUpdateAll and DeleteAll,
# since a ProcessAll should unconditionally be pulled
# into per-state if either the parent/child mappers
# are part of a cycle
return iter([])
def __repr__(self):
return "%s(%s, isdelete=%s)" % (
self.__class__.__name__,
self.dependency_processor,
self.isdelete,
)
def _elements(self, uow):
for mapper in self._mappers(uow):
for state in uow.mappers[mapper]:
(isdelete, listonly) = uow.states[state]
if isdelete == self.isdelete and not listonly:
yield state
class PostUpdateAll(PostSortRec):
__slots__ = "mapper", "isdelete"
def __init__(self, uow, mapper, isdelete):
self.mapper = mapper
self.isdelete = isdelete
def execute(self, uow):
states, cols = uow.post_update_states[self.mapper]
states = [s for s in states if uow.states[s][0] == self.isdelete]
persistence.post_update(self.mapper, states, uow, cols)
class SaveUpdateAll(PostSortRec):
__slots__ = ("mapper",)
def __init__(self, uow, mapper):
self.mapper = mapper
assert mapper is mapper.base_mapper
def execute(self, uow):
persistence.save_obj(
self.mapper,
uow.states_for_mapper_hierarchy(self.mapper, False, False),
uow,
)
def per_state_flush_actions(self, uow):
states = list(
uow.states_for_mapper_hierarchy(self.mapper, False, False)
)
base_mapper = self.mapper.base_mapper
delete_all = DeleteAll(uow, base_mapper)
for state in states:
# keep saves before deletes -
# this ensures 'row switch' operations work
action = SaveUpdateState(uow, state)
uow.dependencies.add((action, delete_all))
yield action
for dep in uow.deps[self.mapper]:
states_for_prop = uow.filter_states_for_dep(dep, states)
dep.per_state_flush_actions(uow, states_for_prop, False)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.mapper)
class DeleteAll(PostSortRec):
__slots__ = ("mapper",)
def __init__(self, uow, mapper):
self.mapper = mapper
assert mapper is mapper.base_mapper
def execute(self, uow):
persistence.delete_obj(
self.mapper,
uow.states_for_mapper_hierarchy(self.mapper, True, False),
uow,
)
def per_state_flush_actions(self, uow):
states = list(
uow.states_for_mapper_hierarchy(self.mapper, True, False)
)
base_mapper = self.mapper.base_mapper
save_all = SaveUpdateAll(uow, base_mapper)
for state in states:
# keep saves before deletes -
# this ensures 'row switch' operations work
action = DeleteState(uow, state)
uow.dependencies.add((save_all, action))
yield action
for dep in uow.deps[self.mapper]:
states_for_prop = uow.filter_states_for_dep(dep, states)
dep.per_state_flush_actions(uow, states_for_prop, True)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.mapper)
class ProcessState(PostSortRec):
__slots__ = "dependency_processor", "isdelete", "state"
def __init__(self, uow, dependency_processor, isdelete, state):
self.dependency_processor = dependency_processor
self.isdelete = isdelete
self.state = state
def execute_aggregate(self, uow, recs):
cls_ = self.__class__
dependency_processor = self.dependency_processor
isdelete = self.isdelete
our_recs = [
r
for r in recs
if r.__class__ is cls_
and r.dependency_processor is dependency_processor
and r.isdelete is isdelete
]
recs.difference_update(our_recs)
states = [self.state] + [r.state for r in our_recs]
if isdelete:
dependency_processor.process_deletes(uow, states)
else:
dependency_processor.process_saves(uow, states)
def __repr__(self):
return "%s(%s, %s, delete=%s)" % (
self.__class__.__name__,
self.dependency_processor,
orm_util.state_str(self.state),
self.isdelete,
)
class SaveUpdateState(PostSortRec):
__slots__ = "state", "mapper"
def __init__(self, uow, state):
self.state = state
self.mapper = state.mapper.base_mapper
def execute_aggregate(self, uow, recs):
cls_ = self.__class__
mapper = self.mapper
our_recs = [
r for r in recs if r.__class__ is cls_ and r.mapper is mapper
]
recs.difference_update(our_recs)
persistence.save_obj(
mapper, [self.state] + [r.state for r in our_recs], uow
)
def __repr__(self):
return "%s(%s)" % (
self.__class__.__name__,
orm_util.state_str(self.state),
)
class DeleteState(PostSortRec):
__slots__ = "state", "mapper"
def __init__(self, uow, state):
self.state = state
self.mapper = state.mapper.base_mapper
def execute_aggregate(self, uow, recs):
cls_ = self.__class__
mapper = self.mapper
our_recs = [
r for r in recs if r.__class__ is cls_ and r.mapper is mapper
]
recs.difference_update(our_recs)
states = [self.state] + [r.state for r in our_recs]
persistence.delete_obj(
mapper, [s for s in states if uow.states[s][0]], uow
)
def __repr__(self):
return "%s(%s)" % (
self.__class__.__name__,
orm_util.state_str(self.state),
)
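# --- editor's note: a hedged, self-contained sketch (appended for
# illustration) of the topological utilities that _generate_actions()
# and UOWTransaction.execute() drive above; each (a, b) dependency pair
# means "a must execute before b", and find_cycles() reports the nodes
# that must be broken into per-state actions. The node names here are
# illustrative only.
if __name__ == "__main__":
    nodes = ["save Parent", "save Child", "process fks"]
    deps = set(
        [("save Parent", "save Child"), ("save Child", "process fks")]
    )
    order = list(topological.sort(deps, nodes))
    assert order.index("save Parent") < order.index("save Child")
    assert order.index("save Child") < order.index("process fks")

    # a mutual dependency forms a cycle; both nodes are reported
    assert topological.find_cycles(
        set([("a", "b"), ("b", "a")]), ["a", "b"]
    ) == set(["a", "b"])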
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/base.py
|
# orm/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Constants and rudimental functions used throughout the ORM.
"""
import operator
from . import exc
from .. import exc as sa_exc
from .. import inspection
from .. import util
from ..sql import expression
PASSIVE_NO_RESULT = util.symbol(
"PASSIVE_NO_RESULT",
"""Symbol returned by a loader callable or other attribute/history
retrieval operation when a value could not be determined, based
on loader callable flags.
""",
)
PASSIVE_CLASS_MISMATCH = util.symbol(
"PASSIVE_CLASS_MISMATCH",
"""Symbol indicating that an object is locally present for a given
primary key identity but it is not of the requested class. The
return value is therefore None and no SQL should be emitted.""",
)
ATTR_WAS_SET = util.symbol(
"ATTR_WAS_SET",
"""Symbol returned by a loader callable to indicate the
retrieved value, or values, were assigned to their attributes
on the target object.
""",
)
ATTR_EMPTY = util.symbol(
"ATTR_EMPTY",
"""Symbol used internally to indicate an attribute had no callable.""",
)
NO_VALUE = util.symbol(
"NO_VALUE",
"""Symbol which may be placed as the 'previous' value of an attribute,
indicating no value was loaded for an attribute when it was modified,
and flags indicated we were not to load it.
""",
)
NEVER_SET = util.symbol(
"NEVER_SET",
"""Symbol which may be placed as the 'previous' value of an attribute
indicating that the attribute had not been assigned to previously.
""",
)
NO_CHANGE = util.symbol(
"NO_CHANGE",
"""No callables or SQL should be emitted on attribute access
and no state should change
""",
canonical=0,
)
CALLABLES_OK = util.symbol(
"CALLABLES_OK",
"""Loader callables can be fired off if a value
is not present.
""",
canonical=1,
)
SQL_OK = util.symbol(
"SQL_OK",
"""Loader callables can emit SQL at least on scalar value attributes.""",
canonical=2,
)
RELATED_OBJECT_OK = util.symbol(
"RELATED_OBJECT_OK",
"""Callables can use SQL to load related objects as well
as scalar value attributes.
""",
canonical=4,
)
INIT_OK = util.symbol(
"INIT_OK",
"""Attributes should be initialized with a blank
value (None or an empty collection) upon get, if no other
value can be obtained.
""",
canonical=8,
)
NON_PERSISTENT_OK = util.symbol(
"NON_PERSISTENT_OK",
"""Callables can be emitted if the parent is not persistent.""",
canonical=16,
)
LOAD_AGAINST_COMMITTED = util.symbol(
"LOAD_AGAINST_COMMITTED",
"""Callables should use committed values as primary/foreign keys during a
load.
""",
canonical=32,
)
NO_AUTOFLUSH = util.symbol(
"NO_AUTOFLUSH",
"""Loader callables should disable autoflush.""",
canonical=64,
)
NO_RAISE = util.symbol(
"NO_RAISE",
"""Loader callables should not raise any assertions""",
canonical=128,
)
# pre-packaged sets of flags used as inputs
PASSIVE_OFF = util.symbol(
"PASSIVE_OFF",
"Callables can be emitted in all cases.",
canonical=(
RELATED_OBJECT_OK | NON_PERSISTENT_OK | INIT_OK | CALLABLES_OK | SQL_OK
),
)
PASSIVE_RETURN_NEVER_SET = util.symbol(
"PASSIVE_RETURN_NEVER_SET",
"""PASSIVE_OFF ^ INIT_OK""",
canonical=PASSIVE_OFF ^ INIT_OK,
)
PASSIVE_NO_INITIALIZE = util.symbol(
"PASSIVE_NO_INITIALIZE",
"PASSIVE_RETURN_NEVER_SET ^ CALLABLES_OK",
canonical=PASSIVE_RETURN_NEVER_SET ^ CALLABLES_OK,
)
PASSIVE_NO_FETCH = util.symbol(
"PASSIVE_NO_FETCH", "PASSIVE_OFF ^ SQL_OK", canonical=PASSIVE_OFF ^ SQL_OK
)
PASSIVE_NO_FETCH_RELATED = util.symbol(
"PASSIVE_NO_FETCH_RELATED",
"PASSIVE_OFF ^ RELATED_OBJECT_OK",
canonical=PASSIVE_OFF ^ RELATED_OBJECT_OK,
)
PASSIVE_ONLY_PERSISTENT = util.symbol(
"PASSIVE_ONLY_PERSISTENT",
"PASSIVE_OFF ^ NON_PERSISTENT_OK",
canonical=PASSIVE_OFF ^ NON_PERSISTENT_OK,
)
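# editor's note (hedged): the symbols above behave as integer bit flags
# (util.symbol subclasses int); each pre-packaged variant strips one
# capability from PASSIVE_OFF via XOR, and consumers test capabilities
# with "&", for example:
#
#     PASSIVE_NO_FETCH & SQL_OK    # -> 0, no SQL may be emitted
#     PASSIVE_OFF & SQL_OK         # -> SQL_OK, SQL is allowed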
DEFAULT_MANAGER_ATTR = "_sa_class_manager"
DEFAULT_STATE_ATTR = "_sa_instance_state"
_INSTRUMENTOR = ("mapper", "instrumentor")
EXT_CONTINUE = util.symbol("EXT_CONTINUE")
EXT_STOP = util.symbol("EXT_STOP")
EXT_SKIP = util.symbol("EXT_SKIP")
ONETOMANY = util.symbol(
"ONETOMANY",
"""Indicates the one-to-many direction for a :func:`_orm.relationship`.
This symbol is typically used by the internals but may be exposed within
certain API features.
""",
)
MANYTOONE = util.symbol(
"MANYTOONE",
"""Indicates the many-to-one direction for a :func:`_orm.relationship`.
This symbol is typically used by the internals but may be exposed within
certain API features.
""",
)
MANYTOMANY = util.symbol(
"MANYTOMANY",
"""Indicates the many-to-many direction for a :func:`_orm.relationship`.
This symbol is typically used by the internals but may be exposed within
certain API features.
""",
)
NOT_EXTENSION = util.symbol(
"NOT_EXTENSION",
"""Symbol indicating an :class:`InspectionAttr` that's
not part of sqlalchemy.ext.
Is assigned to the :attr:`.InspectionAttr.extension_type`
attribute.
""",
)
_never_set = frozenset([NEVER_SET])
_none_set = frozenset([None, NEVER_SET, PASSIVE_NO_RESULT])
_SET_DEFERRED_EXPIRED = util.symbol("SET_DEFERRED_EXPIRED")
_DEFER_FOR_STATE = util.symbol("DEFER_FOR_STATE")
def _generative(*assertions):
"""Mark a method as generative, e.g. method-chained."""
@util.decorator
def generate(fn, *args, **kw):
self = args[0]._clone()
for assertion in assertions:
assertion(self, fn.__name__)
fn(self, *args[1:], **kw)
return self
return generate
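# editor's note: a hedged sketch (comments only) of the pattern that
# _generative() supports -- each chained call mutates a copy produced by
# _clone(), leaving the original untouched. The class below is
# hypothetical, for illustration only:
#
#     class Options(object):
#         def _clone(self):
#             import copy
#             return copy.copy(self)
#
#         @_generative()
#         def flagged(self, value):
#             self.flag = value
#
#     o1 = Options()
#     o2 = o1.flagged(True)   # o2 is a modified copy; o1 is unchanged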
# these can be replaced by sqlalchemy.ext.instrumentation
# if augmented class instrumentation is enabled.
def manager_of_class(cls):
return cls.__dict__.get(DEFAULT_MANAGER_ATTR, None)
instance_state = operator.attrgetter(DEFAULT_STATE_ATTR)
instance_dict = operator.attrgetter("__dict__")
def instance_str(instance):
"""Return a string describing an instance."""
return state_str(instance_state(instance))
def state_str(state):
"""Return a string describing an instance via its InstanceState."""
if state is None:
return "None"
else:
return "<%s at 0x%x>" % (state.class_.__name__, id(state.obj()))
def state_class_str(state):
"""Return a string describing an instance's class via its
InstanceState.
"""
if state is None:
return "None"
else:
return "<%s>" % (state.class_.__name__,)
def attribute_str(instance, attribute):
return instance_str(instance) + "." + attribute
def state_attribute_str(state, attribute):
return state_str(state) + "." + attribute
def object_mapper(instance):
"""Given an object, return the primary Mapper associated with the object
instance.
Raises :class:`sqlalchemy.orm.exc.UnmappedInstanceError`
if no mapping is configured.
This function is available via the inspection system as::
inspect(instance).mapper
Using the inspection system will raise
:class:`sqlalchemy.exc.NoInspectionAvailable` if the instance is
not part of a mapping.
"""
return object_state(instance).mapper
def object_state(instance):
"""Given an object, return the :class:`.InstanceState`
associated with the object.
Raises :class:`sqlalchemy.orm.exc.UnmappedInstanceError`
if no mapping is configured.
Equivalent functionality is available via the :func:`_sa.inspect`
function as::
inspect(instance)
Using the inspection system will raise
:class:`sqlalchemy.exc.NoInspectionAvailable` if the instance is
not part of a mapping.
"""
state = _inspect_mapped_object(instance)
if state is None:
raise exc.UnmappedInstanceError(instance)
else:
return state
@inspection._inspects(object)
def _inspect_mapped_object(instance):
try:
return instance_state(instance)
    except (exc.UnmappedClassError,) + exc.NO_STATE:
        # exc.NO_STATE is itself a tuple of exception classes, so both
        # kinds of exceptions can be caught in a single clause
        # (py2/py3 compatible)
        return None
def _class_to_mapper(class_or_mapper):
insp = inspection.inspect(class_or_mapper, False)
if insp is not None:
return insp.mapper
else:
raise exc.UnmappedClassError(class_or_mapper)
def _mapper_or_none(entity):
"""Return the :class:`_orm.Mapper` for the given class or None if the
class is not mapped.
"""
insp = inspection.inspect(entity, False)
if insp is not None:
return insp.mapper
else:
return None
def _is_mapped_class(entity):
"""Return True if the given object is a mapped class,
:class:`_orm.Mapper`, or :class:`.AliasedClass`.
"""
insp = inspection.inspect(entity, False)
return (
insp is not None
and not insp.is_clause_element
and (insp.is_mapper or insp.is_aliased_class)
)
def _attr_as_key(attr):
if hasattr(attr, "key"):
return attr.key
else:
return expression._column_as_key(attr)
def _orm_columns(entity):
insp = inspection.inspect(entity, False)
if hasattr(insp, "selectable") and hasattr(insp.selectable, "c"):
return [c for c in insp.selectable.c]
else:
return [entity]
def _is_aliased_class(entity):
insp = inspection.inspect(entity, False)
return insp is not None and getattr(insp, "is_aliased_class", False)
def _entity_descriptor(entity, key):
"""Return a class attribute given an entity and string name.
May return :class:`.InstrumentedAttribute` or user-defined
attribute.
"""
insp = inspection.inspect(entity)
if insp.is_selectable:
description = entity
entity = insp.c
elif insp.is_aliased_class:
entity = insp.entity
description = entity
elif hasattr(insp, "mapper"):
description = entity = insp.mapper.class_
else:
description = entity
try:
return getattr(entity, key)
except AttributeError as err:
util.raise_(
sa_exc.InvalidRequestError(
"Entity '%s' has no property '%s'" % (description, key)
),
replace_context=err,
)
_state_mapper = util.dottedgetter("manager.mapper")
@inspection._inspects(type)
def _inspect_mapped_class(class_, configure=False):
try:
class_manager = manager_of_class(class_)
if not class_manager.is_mapped:
return None
mapper = class_manager.mapper
except exc.NO_STATE:
return None
else:
if configure and mapper._new_mappers:
mapper._configure_all()
return mapper
def class_mapper(class_, configure=True):
"""Given a class, return the primary :class:`_orm.Mapper` associated
    with the given class.
Raises :exc:`.UnmappedClassError` if no mapping is configured
on the given class, or :exc:`.ArgumentError` if a non-class
object is passed.
Equivalent functionality is available via the :func:`_sa.inspect`
function as::
inspect(some_mapped_class)
Using the inspection system will raise
:class:`sqlalchemy.exc.NoInspectionAvailable` if the class is not mapped.
"""
mapper = _inspect_mapped_class(class_, configure=configure)
if mapper is None:
if not isinstance(class_, type):
raise sa_exc.ArgumentError(
"Class object expected, got '%r'." % (class_,)
)
raise exc.UnmappedClassError(class_)
else:
return mapper
class InspectionAttr(object):
"""A base class applied to all ORM objects that can be returned
by the :func:`_sa.inspect` function.
The attributes defined here allow the usage of simple boolean
checks to test basic facts about the object returned.
While the boolean checks here are basically the same as using
the Python isinstance() function, the flags here can be used without
the need to import all of these classes, and also such that
the SQLAlchemy class system can change while leaving the flags
here intact for forwards-compatibility.
"""
__slots__ = ()
is_selectable = False
"""Return True if this object is an instance of """
""":class:`expression.Selectable`."""
is_aliased_class = False
"""True if this object is an instance of :class:`.AliasedClass`."""
is_instance = False
"""True if this object is an instance of :class:`.InstanceState`."""
is_mapper = False
"""True if this object is an instance of :class:`_orm.Mapper`."""
is_property = False
"""True if this object is an instance of :class:`.MapperProperty`."""
is_attribute = False
"""True if this object is a Python :term:`descriptor`.
This can refer to one of many types. Usually a
:class:`.QueryableAttribute` which handles attributes events on behalf
of a :class:`.MapperProperty`. But can also be an extension type
such as :class:`.AssociationProxy` or :class:`.hybrid_property`.
The :attr:`.InspectionAttr.extension_type` will refer to a constant
identifying the specific subtype.
.. seealso::
:attr:`_orm.Mapper.all_orm_descriptors`
"""
_is_internal_proxy = False
"""True if this object is an internal proxy object.
.. versionadded:: 1.2.12
"""
is_clause_element = False
"""True if this object is an instance of """
""":class:`_expression.ClauseElement`."""
extension_type = NOT_EXTENSION
"""The extension type, if any.
Defaults to :data:`.interfaces.NOT_EXTENSION`
.. seealso::
:data:`.HYBRID_METHOD`
:data:`.HYBRID_PROPERTY`
:data:`.ASSOCIATION_PROXY`
"""
class InspectionAttrInfo(InspectionAttr):
"""Adds the ``.info`` attribute to :class:`.InspectionAttr`.
The rationale for :class:`.InspectionAttr` vs. :class:`.InspectionAttrInfo`
is that the former is compatible as a mixin for classes that specify
``__slots__``; this is essentially an implementation artifact.
"""
@util.memoized_property
def info(self):
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.InspectionAttr`.
The dictionary is generated when first accessed. Alternatively,
it can be specified as a constructor argument to the
:func:`.column_property`, :func:`_orm.relationship`, or
:func:`.composite`
functions.
.. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also
available on extension types via the
:attr:`.InspectionAttrInfo.info` attribute, so that it can apply
to a wider variety of ORM and extension constructs.
.. seealso::
:attr:`.QueryableAttribute.info`
:attr:`.SchemaItem.info`
"""
return {}
class _MappedAttribute(object):
"""Mixin for attributes which should be replaced by mapper-assigned
attributes.
"""
__slots__ = ()
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/relationships.py
|
# orm/relationships.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Heuristics related to join conditions as used in
:func:`_orm.relationship`.
Provides the :class:`.JoinCondition` object, which encapsulates
SQL annotation and aliasing behavior focused on the `primaryjoin`
and `secondaryjoin` aspects of :func:`_orm.relationship`.
"""
from __future__ import absolute_import
import collections
import weakref
from . import attributes
from . import dependency
from . import mapper as mapperlib
from .base import state_str
from .interfaces import MANYTOMANY
from .interfaces import MANYTOONE
from .interfaces import ONETOMANY
from .interfaces import PropComparator
from .interfaces import StrategizedProperty
from .util import _orm_annotate
from .util import _orm_deannotate
from .util import CascadeOptions
from .. import exc as sa_exc
from .. import log
from .. import schema
from .. import sql
from .. import util
from ..inspection import inspect
from ..sql import expression
from ..sql import operators
from ..sql import visitors
from ..sql.util import _deep_deannotate
from ..sql.util import _shallow_annotate
from ..sql.util import adapt_criterion_to_null
from ..sql.util import ClauseAdapter
from ..sql.util import join_condition
from ..sql.util import selectables_overlap
from ..sql.util import visit_binary_product
def remote(expr):
"""Annotate a portion of a primaryjoin expression
with a 'remote' annotation.
See the section :ref:`relationship_custom_foreign` for a
description of use.
.. seealso::
:ref:`relationship_custom_foreign`
:func:`.foreign`
"""
return _annotate_columns(
expression._clause_element_as_expr(expr), {"remote": True}
)
def foreign(expr):
"""Annotate a portion of a primaryjoin expression
with a 'foreign' annotation.
See the section :ref:`relationship_custom_foreign` for a
description of use.
.. seealso::
:ref:`relationship_custom_foreign`
:func:`.remote`
"""
return _annotate_columns(
expression._clause_element_as_expr(expr), {"foreign": True}
)
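# --- editor's note: a hedged sketch (comments only, appended for
# illustration) of how remote() and foreign() annotate a custom
# primaryjoin; the class names are illustrative:
#
#     class Node(Base):
#         __tablename__ = "node"
#         id = Column(Integer, primary_key=True)
#         parent_id = Column(Integer, ForeignKey("node.id"))
#
#         # many-to-one to the same table: the parent row is the
#         # "remote" side, and parent_id is the "foreign" side
#         parent = relationship(
#             "Node",
#             primaryjoin=remote(id) == foreign(parent_id),
#         )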
@log.class_logger
@util.langhelpers.dependency_for("sqlalchemy.orm.properties", add_to_all=True)
class RelationshipProperty(StrategizedProperty):
"""Describes an object property that holds a single item or list
of items that correspond to a related database table.
Public constructor is the :func:`_orm.relationship` function.
.. seealso::
:ref:`relationship_config_toplevel`
"""
strategy_wildcard_key = "relationship"
_persistence_only = dict(
passive_deletes=False,
passive_updates=True,
enable_typechecks=True,
active_history=False,
cascade_backrefs=True,
)
_dependency_processor = None
@util.deprecated_params(
extension=(
"0.7",
":class:`.AttributeExtension` is deprecated in favor of the "
":class:`.AttributeEvents` listener interface. The "
":paramref:`_orm.relationship.extension` parameter will be "
"removed in a future release.",
)
)
def __init__(
self,
argument,
secondary=None,
primaryjoin=None,
secondaryjoin=None,
foreign_keys=None,
uselist=None,
order_by=False,
backref=None,
back_populates=None,
post_update=False,
cascade=False,
extension=None,
viewonly=False,
lazy="select",
collection_class=None,
passive_deletes=_persistence_only["passive_deletes"],
passive_updates=_persistence_only["passive_updates"],
remote_side=None,
enable_typechecks=_persistence_only["enable_typechecks"],
join_depth=None,
comparator_factory=None,
single_parent=False,
innerjoin=False,
distinct_target_key=None,
doc=None,
active_history=_persistence_only["active_history"],
cascade_backrefs=_persistence_only["cascade_backrefs"],
load_on_pending=False,
bake_queries=True,
_local_remote_pairs=None,
query_class=None,
info=None,
omit_join=None,
sync_backref=None,
):
"""Provide a relationship between two mapped classes.
This corresponds to a parent-child or associative table relationship.
The constructed class is an instance of
:class:`.RelationshipProperty`.
A typical :func:`_orm.relationship`, used in a classical mapping::
mapper(Parent, properties={
'children': relationship(Child)
})
Some arguments accepted by :func:`_orm.relationship`
optionally accept a
callable function, which when called produces the desired value.
The callable is invoked by the parent :class:`_orm.Mapper` at "mapper
initialization" time, which happens only when mappers are first used,
and is assumed to be after all mappings have been constructed. This
can be used to resolve order-of-declaration and other dependency
issues, such as if ``Child`` is declared below ``Parent`` in the same
file::
mapper(Parent, properties={
"children":relationship(lambda: Child,
order_by=lambda: Child.id)
})
When using the :ref:`declarative_toplevel` extension, the Declarative
initializer allows string arguments to be passed to
:func:`_orm.relationship`. These string arguments are converted into
callables that evaluate the string as Python code, using the
Declarative class-registry as a namespace. This allows the lookup of
related classes to be automatic via their string name, and removes the
need for related classes to be imported into the local module space
before the dependent classes have been declared. It is still required
that the modules in which these related classes appear are imported
anywhere in the application at some point before the related mappings
are actually used, else a lookup error will be raised when the
:func:`_orm.relationship`
attempts to resolve the string reference to the
        related class. An example of a string-resolved class is as
follows::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
children = relationship("Child", order_by="Child.id")
.. seealso::
:ref:`relationship_config_toplevel` - Full introductory and
reference documentation for :func:`_orm.relationship`.
:ref:`orm_tutorial_relationship` - ORM tutorial introduction.
:param argument:
A mapped class, or actual :class:`_orm.Mapper` instance,
representing
the target of the relationship.
:paramref:`_orm.relationship.argument`
may also be passed as a callable
function which is evaluated at mapper initialization time, and may
be passed as a string name when using Declarative.
.. warning:: Prior to SQLAlchemy 1.3.16, this value is interpreted
using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
          .. versionchanged:: 1.3.16
              The string evaluation of the main "argument" no longer
              accepts an open-ended Python expression, instead only
              accepting a string class name or dotted package-qualified
              name.
.. seealso::
:ref:`declarative_configuring_relationships` - further detail
on relationship configuration when using Declarative.
:param secondary:
For a many-to-many relationship, specifies the intermediary
table, and is typically an instance of :class:`_schema.Table`.
In less common circumstances, the argument may also be specified
as an :class:`_expression.Alias` construct, or even a
:class:`_expression.Join` construct.
:paramref:`_orm.relationship.secondary` may
also be passed as a callable function which is evaluated at
mapper initialization time. When using Declarative, it may also
be a string argument noting the name of a :class:`_schema.Table`
that is
present in the :class:`_schema.MetaData`
collection associated with the
parent-mapped :class:`_schema.Table`.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
The :paramref:`_orm.relationship.secondary` keyword argument is
typically applied in the case where the intermediary
:class:`_schema.Table`
is not otherwise expressed in any direct class mapping. If the
"secondary" table is also explicitly mapped elsewhere (e.g. as in
:ref:`association_pattern`), one should consider applying the
:paramref:`_orm.relationship.viewonly` flag so that this
:func:`_orm.relationship`
is not used for persistence operations which
may conflict with those of the association object pattern.
.. seealso::
:ref:`relationships_many_to_many` - Reference example of "many
to many".
:ref:`orm_tutorial_many_to_many` - ORM tutorial introduction to
many-to-many relationships.
:ref:`self_referential_many_to_many` - Specifics on using
many-to-many in a self-referential case.
:ref:`declarative_many_to_many` - Additional options when using
Declarative.
:ref:`association_pattern` - an alternative to
:paramref:`_orm.relationship.secondary`
when composing association
table relationships, allowing additional attributes to be
specified on the association table.
:ref:`composite_secondary_join` - a lesser-used pattern which
in some cases can enable complex :func:`_orm.relationship` SQL
conditions to be used.
.. versionadded:: 0.9.2 :paramref:`_orm.relationship.secondary`
works
more effectively when referring to a :class:`_expression.Join`
instance.
:param active_history=False:
When ``True``, indicates that the "previous" value for a
many-to-one reference should be loaded when replaced, if
not already loaded. Normally, history tracking logic for
simple many-to-ones only needs to be aware of the "new"
value in order to perform a flush. This flag is available
for applications that make use of
:func:`.attributes.get_history` which also need to know
the "previous" value of the attribute.
:param backref:
Indicates the string name of a property to be placed on the related
mapper's class that will handle this relationship in the other
direction. The other property will be created automatically
when the mappers are configured. Can also be passed as a
:func:`.backref` object to control the configuration of the
new relationship.
.. seealso::
:ref:`relationships_backref` - Introductory documentation and
examples.
:paramref:`_orm.relationship.back_populates` - alternative form
of backref specification.
:func:`.backref` - allows control over :func:`_orm.relationship`
configuration when using :paramref:`_orm.relationship.backref`.
:param back_populates:
Takes a string name and has the same meaning as
:paramref:`_orm.relationship.backref`, except the complementing
property is **not** created automatically, and instead must be
configured explicitly on the other mapper. The complementing
property should also indicate
:paramref:`_orm.relationship.back_populates` to this relationship to
ensure proper functioning.
.. seealso::
:ref:`relationships_backref` - Introductory documentation and
examples.
:paramref:`_orm.relationship.backref` - alternative form
of backref specification.
:param bake_queries=True:
Use the :class:`.BakedQuery` cache to cache the construction of SQL
used in lazy loads. True by default. Set to False if the
join condition of the relationship has unusual features that
might not respond well to statement caching.
.. versionchanged:: 1.2
"Baked" loading is the default implementation for the "select",
a.k.a. "lazy" loading strategy for relationships.
.. versionadded:: 1.0.0
.. seealso::
:ref:`baked_toplevel`
:param cascade:
A comma-separated list of cascade rules which determines how
Session operations should be "cascaded" from parent to child.
This defaults to ``False``, which means the default cascade
should be used - this default cascade is ``"save-update, merge"``.
The available cascades are ``save-update``, ``merge``,
``expunge``, ``delete``, ``delete-orphan``, and ``refresh-expire``.
An additional option, ``all`` indicates shorthand for
``"save-update, merge, refresh-expire,
expunge, delete"``, and is often used as in ``"all, delete-orphan"``
to indicate that related objects should follow along with the
parent object in all cases, and be deleted when de-associated.
.. seealso::
:ref:`unitofwork_cascades` - Full detail on each of the available
cascade options.
:ref:`tutorial_delete_cascade` - Tutorial example describing
a delete cascade.
:param cascade_backrefs=True:
A boolean value indicating if the ``save-update`` cascade should
operate along an assignment event intercepted by a backref.
When set to ``False``, the attribute managed by this relationship
will not cascade an incoming transient object into the session of a
persistent parent, if the event is received via backref.
.. seealso::
:ref:`backref_cascade` - Full discussion and examples on how
the :paramref:`_orm.relationship.cascade_backrefs` option is used.
:param collection_class:
A class or callable that returns a new list-holding object. will
be used in place of a plain list for storing elements.
.. seealso::
:ref:`custom_collections` - Introductory documentation and
examples.
:param comparator_factory:
A class which extends :class:`.RelationshipProperty.Comparator`
which provides custom SQL clause generation for comparison
operations.
.. seealso::
:class:`.PropComparator` - some detail on redefining comparators
at this level.
:ref:`custom_comparators` - Brief intro to this feature.
:param distinct_target_key=None:
Indicate if a "subquery" eager load should apply the DISTINCT
keyword to the innermost SELECT statement. When left as ``None``,
the DISTINCT keyword will be applied in those cases when the target
columns do not comprise the full primary key of the target table.
When set to ``True``, the DISTINCT keyword is applied to the
innermost SELECT unconditionally.
          It may be desirable to set this flag to False when the DISTINCT
          degrades performance of the innermost subquery more than the
          duplicate innermost rows it would eliminate.
.. versionchanged:: 0.9.0 -
:paramref:`_orm.relationship.distinct_target_key` now defaults to
``None``, so that the feature enables itself automatically for
those cases where the innermost query targets a non-unique
key.
.. seealso::
:ref:`loading_toplevel` - includes an introduction to subquery
eager loading.
:param doc:
Docstring which will be applied to the resulting descriptor.
:param extension:
an :class:`.AttributeExtension` instance, or list of extensions,
which will be prepended to the list of attribute listeners for
the resulting descriptor placed on the class.
:param foreign_keys:
A list of columns which are to be used as "foreign key"
columns, or columns which refer to the value in a remote
column, within the context of this :func:`_orm.relationship`
object's :paramref:`_orm.relationship.primaryjoin` condition.
That is, if the :paramref:`_orm.relationship.primaryjoin`
condition of this :func:`_orm.relationship` is ``a.id ==
b.a_id``, and the values in ``b.a_id`` are required to be
present in ``a.id``, then the "foreign key" column of this
:func:`_orm.relationship` is ``b.a_id``.
In normal cases, the :paramref:`_orm.relationship.foreign_keys`
parameter is **not required.** :func:`_orm.relationship` will
automatically determine which columns in the
:paramref:`_orm.relationship.primaryjoin` condition are to be
considered "foreign key" columns based on those
:class:`_schema.Column` objects that specify
:class:`_schema.ForeignKey`,
or are otherwise listed as referencing columns in a
:class:`_schema.ForeignKeyConstraint` construct.
:paramref:`_orm.relationship.foreign_keys` is only needed when:
1. There is more than one way to construct a join from the local
table to the remote table, as there are multiple foreign key
references present. Setting ``foreign_keys`` will limit the
:func:`_orm.relationship`
to consider just those columns specified
here as "foreign".
2. The :class:`_schema.Table` being mapped does not actually have
:class:`_schema.ForeignKey` or
:class:`_schema.ForeignKeyConstraint`
constructs present, often because the table
was reflected from a database that does not support foreign key
reflection (MySQL MyISAM).
3. The :paramref:`_orm.relationship.primaryjoin`
argument is used to
construct a non-standard join condition, which makes use of
columns or expressions that do not normally refer to their
"parent" column, such as a join condition expressed by a
complex comparison using a SQL function.
The :func:`_orm.relationship` construct will raise informative
error messages that suggest the use of the
:paramref:`_orm.relationship.foreign_keys` parameter when
presented with an ambiguous condition. In typical cases,
if :func:`_orm.relationship` doesn't raise any exceptions, the
:paramref:`_orm.relationship.foreign_keys` parameter is usually
not needed.
:paramref:`_orm.relationship.foreign_keys` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
.. seealso::
:ref:`relationship_foreign_keys`
:ref:`relationship_custom_foreign`
:func:`.foreign` - allows direct annotation of the "foreign"
columns within a :paramref:`_orm.relationship.primaryjoin`
condition.
:param info: Optional data dictionary which will be populated into the
:attr:`.MapperProperty.info` attribute of this object.
:param innerjoin=False:
When ``True``, joined eager loads will use an inner join to join
against related tables instead of an outer join. The purpose
of this option is generally one of performance, as inner joins
generally perform better than outer joins.
This flag can be set to ``True`` when the relationship references an
object via many-to-one using local foreign keys that are not
nullable, or when the reference is one-to-one or a collection that
          is guaranteed to have at least one entry.
The option supports the same "nested" and "unnested" options as
that of :paramref:`_orm.joinedload.innerjoin`. See that flag
for details on nested / unnested behaviors.
.. seealso::
:paramref:`_orm.joinedload.innerjoin` - the option as specified by
loader option, including detail on nesting behavior.
:ref:`what_kind_of_loading` - Discussion of some details of
various loader options.
:param join_depth:
When non-``None``, an integer value indicating how many levels
deep "eager" loaders should join on a self-referring or cyclical
relationship. The number counts how many times the same Mapper
shall be present in the loading condition along a particular join
branch. When left at its default of ``None``, eager loaders
          will stop chaining when they encounter the same target mapper
which is already higher up in the chain. This option applies
both to joined- and subquery- eager loaders.
.. seealso::
:ref:`self_referential_eager_loading` - Introductory documentation
and examples.
:param lazy='select': specifies
          how the related items should be loaded. Default value is
``select``. Values include:
* ``select`` - items should be loaded lazily when the property is
first accessed, using a separate SELECT statement, or identity map
fetch for simple many-to-one references.
* ``immediate`` - items should be loaded as the parents are loaded,
using a separate SELECT statement, or identity map fetch for
simple many-to-one references.
* ``joined`` - items should be loaded "eagerly" in the same query as
that of the parent, using a JOIN or LEFT OUTER JOIN. Whether
the join is "outer" or not is determined by the
:paramref:`_orm.relationship.innerjoin` parameter.
* ``subquery`` - items should be loaded "eagerly" as the parents are
loaded, using one additional SQL statement, which issues a JOIN to
a subquery of the original statement, for each collection
requested.
* ``selectin`` - items should be loaded "eagerly" as the parents
are loaded, using one or more additional SQL statements, which
issues a JOIN to the immediate parent object, specifying primary
key identifiers using an IN clause.
.. versionadded:: 1.2
* ``noload`` - no loading should occur at any time. This is to
support "write-only" attributes, or attributes which are
populated in some manner specific to the application.
* ``raise`` - lazy loading is disallowed; accessing
the attribute, if its value were not already loaded via eager
loading, will raise an :exc:`~sqlalchemy.exc.InvalidRequestError`.
This strategy can be used when objects are to be detached from
their attached :class:`.Session` after they are loaded.
.. versionadded:: 1.1
* ``raise_on_sql`` - lazy loading that emits SQL is disallowed;
accessing the attribute, if its value were not already loaded via
eager loading, will raise an
:exc:`~sqlalchemy.exc.InvalidRequestError`, **if the lazy load
needs to emit SQL**. If the lazy load can pull the related value
from the identity map or determine that it should be None, the
value is loaded. This strategy can be used when objects will
remain associated with the attached :class:`.Session`, however
additional SELECT statements should be blocked.
.. versionadded:: 1.1
* ``dynamic`` - the attribute will return a pre-configured
:class:`_query.Query` object for all read
operations, onto which further filtering operations can be
applied before iterating the results. See
the section :ref:`dynamic_relationship` for more details.
* True - a synonym for 'select'
* False - a synonym for 'joined'
* None - a synonym for 'noload'
.. seealso::
:doc:`/orm/loading_relationships` - Full documentation on
relationship loader configuration.
:ref:`dynamic_relationship` - detail on the ``dynamic`` option.
:ref:`collections_noload_raiseload` - notes on "noload" and "raise"
:param load_on_pending=False:
Indicates loading behavior for transient or pending parent objects.
When set to ``True``, causes the lazy-loader to
issue a query for a parent object that is not persistent, meaning it
has never been flushed. This may take effect for a pending object
when autoflush is disabled, or for a transient object that has been
"attached" to a :class:`.Session` but is not part of its pending
collection.
The :paramref:`_orm.relationship.load_on_pending`
flag does not improve
behavior when the ORM is used normally - object references should be
constructed at the object level, not at the foreign key level, so
that they are present in an ordinary way before a flush proceeds.
This flag is not intended for general use.
.. seealso::
:meth:`.Session.enable_relationship_loading` - this method
establishes "load on pending" behavior for the whole object, and
also allows loading on objects that remain transient or
detached.
:param order_by:
Indicates the ordering that should be applied when loading these
items. :paramref:`_orm.relationship.order_by`
is expected to refer to
one of the :class:`_schema.Column`
objects to which the target class is
mapped, or the attribute itself bound to the target class which
refers to the column.
:paramref:`_orm.relationship.order_by`
may also be passed as a callable
function which is evaluated at mapper initialization time, and may
be passed as a Python-evaluable string when using Declarative.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
:param passive_deletes=False:
Indicates loading behavior during delete operations.
A value of True indicates that unloaded child items should not
be loaded during a delete operation on the parent. Normally,
when a parent item is deleted, all child items are loaded so
that they can either be marked as deleted, or have their
foreign key to the parent set to NULL. Marking this flag as
True usually implies an ON DELETE <CASCADE|SET NULL> rule is in
place which will handle updating/deleting child rows on the
database side.
Additionally, setting the flag to the string value 'all' will
disable the "nulling out" of the child foreign keys, when the parent
object is deleted and there is no delete or delete-orphan cascade
enabled. This is typically used when a triggering or error raise
scenario is in place on the database side. Note that the foreign
key attributes on in-session child objects will not be changed after
a flush occurs, so this is a very special use-case setting.
Additionally, the "nulling out" will still occur if the child
object is de-associated with the parent.
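A minimal sketch, assuming the child table's foreign key is
created with ``ON DELETE CASCADE`` on the database side
(``Child`` is an illustrative name)::
    children = relationship(
        "Child",
        cascade="all, delete",
        passive_deletes=True,
    )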
.. seealso::
:ref:`passive_deletes` - Introductory documentation
and examples.
:param passive_updates=True:
Indicates the persistence behavior to take when a referenced
primary key value changes in place, indicating that the referencing
foreign key columns will also need their value changed.
When True, it is assumed that ``ON UPDATE CASCADE`` is configured on
the foreign key in the database, and that the database will
handle propagation of an UPDATE from a source column to
dependent rows. When False, the SQLAlchemy
:func:`_orm.relationship`
construct will attempt to emit its own UPDATE statements to
modify related targets. However note that SQLAlchemy **cannot**
emit an UPDATE for more than one level of cascade. Also,
setting this flag to False is not compatible in the case where
the database is in fact enforcing referential integrity, unless
those constraints are explicitly "deferred", if the target backend
supports it.
It is highly advised that an application which is employing
mutable primary keys keeps ``passive_updates`` set to True,
and instead uses the referential integrity features of the database
itself in order to handle the change efficiently and fully.
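As a sketch, assuming a backend that does not support
``ON UPDATE CASCADE`` and is not enforcing the foreign key
constraint, in-Python propagation of primary key changes may be
selected with (names illustrative)::
    children = relationship("Child", passive_updates=False)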
.. seealso::
:ref:`passive_updates` - Introductory documentation and
examples.
:paramref:`.mapper.passive_updates` - a similar flag which
takes effect for joined-table inheritance mappings.
:param post_update:
This indicates that the relationship should be handled by a
second UPDATE statement after an INSERT or before a
DELETE. Currently, it also will issue an UPDATE after the
instance was UPDATEd as well, although this technically should
be improved. This flag is used to handle saving bi-directional
dependencies between two individual rows (i.e. each row
references the other), where it would otherwise be impossible to
INSERT or DELETE both rows fully since one row exists before the
other. Use this flag when a particular mapping arrangement will
incur two rows that are dependent on each other, such as a table
that has a one-to-many relationship to a set of child rows, and
also has a column that references a single child row within that
list (i.e. both tables contain a foreign key to each other). If
a flush operation returns an error that a "cyclical
dependency" was detected, this is a cue that you might want to
use :paramref:`_orm.relationship.post_update` to "break" the cycle.
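A sketch of the mutually-dependent row pattern described above,
using illustrative ``Widget`` / ``Entry`` names, where each table
contains a foreign key to the other::
    class Widget(Base):
        __tablename__ = "widget"
        id = Column(Integer, primary_key=True)
        favorite_entry_id = Column(Integer, ForeignKey("entry.id"))
        # break the INSERT / DELETE cycle by emitting a second
        # UPDATE statement for favorite_entry_id
        favorite_entry = relationship(
            "Entry",
            primaryjoin="Widget.favorite_entry_id == Entry.id",
            post_update=True,
        )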
.. seealso::
:ref:`post_update` - Introductory documentation and examples.
:param primaryjoin:
A SQL expression that will be used as the primary
join of the child object against the parent object, or in a
many-to-many relationship the join of the parent object to the
association table. By default, this value is computed based on the
foreign key relationships of the parent and child tables (or
association table).
:paramref:`_orm.relationship.primaryjoin` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
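A sketch of a custom primary join that narrows the collection to
a subset of rows, using illustrative names; the string form is
evaluated at mapper configuration time::
    class User(Base):
        __tablename__ = "user"
        id = Column(Integer, primary_key=True)
        # only addresses in Boston are part of this collection
        boston_addresses = relationship(
            "Address",
            primaryjoin="and_(User.id == Address.user_id, "
            "Address.city == 'Boston')",
        )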
.. seealso::
:ref:`relationship_primaryjoin`
:param remote_side:
Used for self-referential relationships, indicates the column or
list of columns that form the "remote side" of the relationship.
:paramref:`_orm.relationship.remote_side` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
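The canonical use is the adjacency list pattern; a sketch with
illustrative names::
    class Node(Base):
        __tablename__ = "node"
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey("node.id"))
        # the "remote" side of this many-to-one is the
        # primary key column of the parent row
        parent = relationship("Node", remote_side=[id])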
.. seealso::
:ref:`self_referential` - in-depth explanation of how
:paramref:`_orm.relationship.remote_side`
is used to configure self-referential relationships.
:func:`.remote` - an annotation function that accomplishes the
same purpose as :paramref:`_orm.relationship.remote_side`,
typically
when a custom :paramref:`_orm.relationship.primaryjoin` condition
is used.
:param query_class:
A :class:`_query.Query`
subclass that will be used as the base of the
"appender query" returned by a "dynamic" relationship, that
is, a relationship that specifies ``lazy="dynamic"`` or was
otherwise constructed using the :func:`_orm.dynamic_loader`
function.
.. seealso::
:ref:`dynamic_relationship` - Introduction to "dynamic"
relationship loaders.
:param secondaryjoin:
A SQL expression that will be used as the join of
an association table to the child object. By default, this value is
computed based on the foreign key relationships of the association
and child tables.
:paramref:`_orm.relationship.secondaryjoin` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
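A sketch spelling out both join conditions for a many-to-many,
where ``association`` is an illustrative :class:`_schema.Table`
defined beforehand; the lambda defers evaluation until the
``Group`` class is available::
    class User(Base):
        __tablename__ = "user"
        id = Column(Integer, primary_key=True)
        groups = relationship(
            "Group",
            secondary=association,
            primaryjoin=id == association.c.user_id,
            secondaryjoin=lambda: Group.id == association.c.group_id,
        )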
.. seealso::
:ref:`relationship_primaryjoin`
:param single_parent:
When True, installs a validator which will prevent objects
from being associated with more than one parent at a time.
This is used for many-to-one or many-to-many relationships that
should be treated either as one-to-one or one-to-many. Its usage
is optional, except for :func:`_orm.relationship` constructs which
are many-to-one or many-to-many and also
specify the ``delete-orphan`` cascade option. The
:func:`_orm.relationship` construct itself will raise an error
indicating when this option is required.
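A sketch of the many-to-one case that requires the flag, using
illustrative names::
    class Child(Base):
        __tablename__ = "child"
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey("parent.id"))
        # delete-orphan on the "many" side requires that each
        # Parent be owned by at most one Child at a time
        parent = relationship(
            "Parent",
            single_parent=True,
            cascade="all, delete-orphan",
        )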
.. seealso::
:ref:`unitofwork_cascades` - includes detail on when the
:paramref:`_orm.relationship.single_parent`
flag may be appropriate.
:param uselist:
A boolean that indicates if this property should be loaded as a
list or a scalar. In most cases, this value is determined
automatically by :func:`_orm.relationship` at mapper configuration
time, based on the type and direction
of the relationship - one to many forms a list, many to one
forms a scalar, many to many is a list. If a scalar is desired
where normally a list would be present, such as a bi-directional
one-to-one relationship, set :paramref:`_orm.relationship.uselist`
to
False.
The :paramref:`_orm.relationship.uselist`
flag is also available on an
existing :func:`_orm.relationship`
construct as a read-only attribute,
which can be used to determine if this :func:`_orm.relationship`
deals
with collections or scalar attributes::
>>> User.addresses.property.uselist
True
.. seealso::
:ref:`relationships_one_to_one` - Introduction to the "one to
one" relationship pattern, which is typically when the
:paramref:`_orm.relationship.uselist` flag is needed.
:param viewonly=False:
When set to ``True``, the relationship is used only for loading
objects, and not for any persistence operation. A
:func:`_orm.relationship` which specifies
:paramref:`_orm.relationship.viewonly` can work
with a wider range of SQL operations within the
:paramref:`_orm.relationship.primaryjoin` condition, including
operations that feature the use of a variety of comparison operators
as well as SQL functions such as :func:`_expression.cast`. The
:paramref:`_orm.relationship.viewonly`
flag is also of general use when defining any kind of
:func:`_orm.relationship` that doesn't represent
the full set of related objects, to prevent modifications of the
collection from resulting in persistence operations.
When using the :paramref:`_orm.relationship.viewonly` flag in
conjunction with backrefs, the
:paramref:`_orm.relationship.sync_backref` should be set to False;
this indicates that the backref should not actually populate this
relationship with data when changes occur on the other side; as this
is a viewonly relationship, it cannot accommodate changes in state
correctly as these will not be persisted.
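A sketch combining the two flags, with illustrative names; the
generated backref inherits both settings::
    all_widgets = relationship(
        "Widget",
        viewonly=True,
        sync_backref=False,
        backref="owner",
    )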
.. versionadded:: 1.3.17 - the
:paramref:`_orm.relationship.sync_backref`
flag set to False is required when using viewonly in conjunction
with backrefs. A warning is emitted when this flag is not set.
.. seealso::
:paramref:`_orm.relationship.sync_backref`
:param sync_backref:
A boolean that enables the events used to synchronize the in-Python
attributes when this relationship is target of either
:paramref:`_orm.relationship.backref` or
:paramref:`_orm.relationship.back_populates`.
Defaults to ``None``, which indicates that an automatic value should
be selected based on the value of the
:paramref:`_orm.relationship.viewonly` flag. When left at its
default, changes in state for writable relationships will be
back-populated normally. For viewonly relationships, a warning is
emitted unless the flag is set to ``False``.
.. versionadded:: 1.3.17
.. seealso::
:paramref:`_orm.relationship.viewonly`
:param omit_join:
Allows manual control over the "selectin" automatic join
optimization. Set to ``False`` to disable the "omit join" feature
added in SQLAlchemy 1.3; or leave as ``None`` to leave automatic
optimization in place.
.. note:: This flag may only be set to ``False``. It is not
necessary to set it to ``True`` as the "omit_join" optimization is
automatically detected; if it is not detected, then the
optimization is not supported.
.. versionchanged:: 1.3.11 setting ``omit_join`` to True will now
emit a warning as this was not the intended use of this flag.
.. versionadded:: 1.3
"""
super(RelationshipProperty, self).__init__()
self.uselist = uselist
self.argument = argument
self.secondary = secondary
self.primaryjoin = primaryjoin
self.secondaryjoin = secondaryjoin
self.post_update = post_update
self.direction = None
self.viewonly = viewonly
if viewonly:
self._warn_for_persistence_only_flags(
passive_deletes=passive_deletes,
passive_updates=passive_updates,
enable_typechecks=enable_typechecks,
active_history=active_history,
cascade_backrefs=cascade_backrefs,
)
if viewonly and sync_backref:
raise sa_exc.ArgumentError(
"sync_backref and viewonly cannot both be True"
)
self.sync_backref = sync_backref
self.lazy = lazy
self.single_parent = single_parent
self._user_defined_foreign_keys = foreign_keys
self.collection_class = collection_class
self.passive_deletes = passive_deletes
self.cascade_backrefs = cascade_backrefs
self.passive_updates = passive_updates
self.remote_side = remote_side
self.enable_typechecks = enable_typechecks
self.query_class = query_class
self.innerjoin = innerjoin
self.distinct_target_key = distinct_target_key
self.doc = doc
self.active_history = active_history
self.join_depth = join_depth
if omit_join:
util.warn(
"setting omit_join to True is not supported; selectin "
"loading of this relationship may not work correctly if this "
"flag is set explicitly. omit_join optimization is "
"automatically detected for conditions under which it is "
"supported."
)
self.omit_join = omit_join
self.local_remote_pairs = _local_remote_pairs
self.extension = extension
self.bake_queries = bake_queries
self.load_on_pending = load_on_pending
self.comparator_factory = (
comparator_factory or RelationshipProperty.Comparator
)
self.comparator = self.comparator_factory(self, None)
util.set_creation_order(self)
if info is not None:
self.info = info
self.strategy_key = (("lazy", self.lazy),)
self._reverse_property = set()
if cascade is not False:
self.cascade = cascade
else:
self._set_cascade("save-update, merge", warn=False)
self.order_by = order_by
self.back_populates = back_populates
if self.back_populates:
if backref:
raise sa_exc.ArgumentError(
"backref and back_populates keyword arguments "
"are mutually exclusive"
)
self.backref = None
else:
self.backref = backref
def _warn_for_persistence_only_flags(self, **kw):
for k, v in kw.items():
if v != self._persistence_only[k]:
# we are warning here rather than warn deprecated as this is a
# configuration mistake, and Python shows regular warnings more
# aggressively than deprecation warnings by default. Unlike the
# case of setting viewonly with cascade, the settings being
# warned about here are not actively doing the wrong thing
# against viewonly=True, so it is not as urgent to have these
# raise an error.
util.warn(
"Setting %s on relationship() while also "
"setting viewonly=True does not make sense, as a "
"viewonly=True relationship does not perform persistence "
"operations. This configuration may raise an error "
"in a future release." % (k,)
)
def instrument_class(self, mapper):
attributes.register_descriptor(
mapper.class_,
self.key,
comparator=self.comparator_factory(self, mapper),
parententity=mapper,
doc=self.doc,
)
class Comparator(PropComparator):
"""Produce boolean, comparison, and other operators for
:class:`.RelationshipProperty` attributes.
See the documentation for :class:`.PropComparator` for a brief
overview of ORM level operator definition.
.. seealso::
:class:`.PropComparator`
:class:`.ColumnProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
_of_type = None
def __init__(
self, prop, parentmapper, adapt_to_entity=None, of_type=None
):
"""Construction of :class:`.RelationshipProperty.Comparator`
is internal to the ORM's attribute mechanics.
"""
self.prop = prop
self._parententity = parentmapper
self._adapt_to_entity = adapt_to_entity
if of_type:
self._of_type = of_type
def adapt_to_entity(self, adapt_to_entity):
return self.__class__(
self.property,
self._parententity,
adapt_to_entity=adapt_to_entity,
of_type=self._of_type,
)
@util.memoized_property
def entity(self):
"""The target entity referred to by this
:class:`.RelationshipProperty.Comparator`.
This is either a :class:`_orm.Mapper` or :class:`.AliasedInsp`
object.
This is the "target" or "remote" side of the
:func:`_orm.relationship`.
"""
return self.property.entity
@util.memoized_property
def mapper(self):
"""The target :class:`_orm.Mapper` referred to by this
:class:`.RelationshipProperty.Comparator`.
This is the "target" or "remote" side of the
:func:`_orm.relationship`.
"""
return self.property.mapper
@util.memoized_property
def _parententity(self):
return self.property.parent
def _source_selectable(self):
if self._adapt_to_entity:
return self._adapt_to_entity.selectable
else:
return self.property.parent._with_polymorphic_selectable
def __clause_element__(self):
adapt_from = self._source_selectable()
if self._of_type:
of_type_mapper = inspect(self._of_type).mapper
else:
of_type_mapper = None
(
pj,
sj,
source,
dest,
secondary,
target_adapter,
) = self.property._create_joins(
source_selectable=adapt_from,
source_polymorphic=True,
of_type_mapper=of_type_mapper,
alias_secondary=True,
)
if sj is not None:
return pj & sj
else:
return pj
def of_type(self, cls):
r"""Redefine this object in terms of a polymorphic subclass.
See :meth:`.PropComparator.of_type` for an example.
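A brief sketch, where ``Engineer`` is an illustrative subclass of
the related ``Employee`` entity::
    session.query(Company).join(
        Company.employees.of_type(Engineer)
    ).filter(Engineer.primary_language == "python")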
"""
return RelationshipProperty.Comparator(
self.property,
self._parententity,
adapt_to_entity=self._adapt_to_entity,
of_type=cls,
)
def in_(self, other):
"""Produce an IN clause - this is not implemented
for :func:`_orm.relationship`-based attributes at this time.
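E.g., as a sketch for a simple many-to-one, filter on the foreign
key column itself (names illustrative)::
    session.query(Address).filter(Address.user_id.in_([1, 2, 3]))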
"""
raise NotImplementedError(
"in_() not yet supported for "
"relationships. For a simple "
"many-to-one, use in_() against "
"the set of foreign key values."
)
__hash__ = None
def __eq__(self, other):
"""Implement the ``==`` operator.
In a many-to-one context, such as::
MyClass.some_prop == <some object>
this will typically produce a
clause such as::
mytable.related_id == <some id>
Where ``<some id>`` is the primary key of the given
object.
The ``==`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use :meth:`~.RelationshipProperty.Comparator.contains`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.RelationshipProperty.Comparator.has` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce a NOT EXISTS clause.
"""
if isinstance(other, (util.NoneType, expression.Null)):
if self.property.direction in [ONETOMANY, MANYTOMANY]:
return ~self._criterion_exists()
else:
return _orm_annotate(
self.property._optimized_compare(
None, adapt_source=self.adapter
)
)
elif self.property.uselist:
raise sa_exc.InvalidRequestError(
"Can't compare a collection to an object or collection; "
"use contains() to test for membership."
)
else:
return _orm_annotate(
self.property._optimized_compare(
other, adapt_source=self.adapter
)
)
def _criterion_exists(self, criterion=None, **kwargs):
if getattr(self, "_of_type", None):
info = inspect(self._of_type)
target_mapper, to_selectable, is_aliased_class = (
info.mapper,
info.selectable,
info.is_aliased_class,
)
if self.property._is_self_referential and not is_aliased_class:
to_selectable = to_selectable.alias()
single_crit = target_mapper._single_table_criterion
if single_crit is not None:
if criterion is not None:
criterion = single_crit & criterion
else:
criterion = single_crit
else:
is_aliased_class = False
to_selectable = None
if self.adapter:
source_selectable = self._source_selectable()
else:
source_selectable = None
(
pj,
sj,
source,
dest,
secondary,
target_adapter,
) = self.property._create_joins(
dest_polymorphic=True,
dest_selectable=to_selectable,
source_selectable=source_selectable,
)
for k in kwargs:
crit = getattr(self.property.mapper.class_, k) == kwargs[k]
if criterion is None:
criterion = crit
else:
criterion = criterion & crit
# annotate the *local* side of the join condition, in the case
# of pj + sj this is the full primaryjoin, in the case of just
# pj it's the local side of the primaryjoin.
if sj is not None:
j = _orm_annotate(pj) & sj
else:
j = _orm_annotate(pj, exclude=self.property.remote_side)
if (
criterion is not None
and target_adapter
and not is_aliased_class
):
# limit this adapter to annotated only?
criterion = target_adapter.traverse(criterion)
# only have the "joined left side" of what we
# return be subject to Query adaption. The right
# side of it is used for an exists() subquery and
# should not correlate or otherwise reach out
# to anything in the enclosing query.
if criterion is not None:
criterion = criterion._annotate(
{"no_replacement_traverse": True}
)
crit = j & sql.True_._ifnone(criterion)
if secondary is not None:
ex = sql.exists(
[1], crit, from_obj=[dest, secondary]
).correlate_except(dest, secondary)
else:
ex = sql.exists([1], crit, from_obj=dest).correlate_except(
dest
)
return ex
def any(self, criterion=None, **kwargs):
"""Produce an expression that tests a collection against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.any(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id
AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.any` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.any` is particularly
useful for testing for empty collections::
session.query(MyClass).filter(
~MyClass.somereference.any()
)
will produce::
SELECT * FROM my_table WHERE
NOT EXISTS (SELECT 1 FROM related WHERE
related.my_id=my_table.id)
:meth:`~.RelationshipProperty.Comparator.any` is only
valid for collections, i.e. a :func:`_orm.relationship`
that has ``uselist=True``. For scalar references,
use :meth:`~.RelationshipProperty.Comparator.has`.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
"'any()' not implemented for scalar "
"attributes. Use has()."
)
return self._criterion_exists(criterion, **kwargs)
def has(self, criterion=None, **kwargs):
"""Produce an expression that tests a scalar reference against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.has(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE
related.id==my_table.related_id AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.has` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.has` is only
valid for scalar references, i.e. a :func:`_orm.relationship`
that has ``uselist=False``. For collection references,
use :meth:`~.RelationshipProperty.Comparator.any`.
"""
if self.property.uselist:
raise sa_exc.InvalidRequestError(
"'has()' not implemented for collections. " "Use any()."
)
return self._criterion_exists(criterion, **kwargs)
def contains(self, other, **kwargs):
"""Return a simple expression that tests a collection for
containment of a particular item.
:meth:`~.RelationshipProperty.Comparator.contains` is
only valid for a collection, i.e. a
:func:`_orm.relationship` that implements
one-to-many or many-to-many with ``uselist=True``.
When used in a simple one-to-many context, an
expression like::
MyClass.contains(other)
Produces a clause like::
mytable.id == <some id>
Where ``<some id>`` is the value of the foreign key
attribute on ``other`` which refers to the primary
key of its parent object. From this it follows that
:meth:`~.RelationshipProperty.Comparator.contains` is
very useful when used with simple one-to-many
operations.
For many-to-many operations, the behavior of
:meth:`~.RelationshipProperty.Comparator.contains`
has more caveats. The association table will be
rendered in the statement, producing an "implicit"
join, that is, includes multiple tables in the FROM
clause which are equated in the WHERE clause::
query(MyClass).filter(MyClass.contains(other))
Produces a query like::
SELECT * FROM my_table, my_association_table AS
my_association_table_1 WHERE
my_table.id = my_association_table_1.parent_id
AND my_association_table_1.child_id = <some id>
Where ``<some id>`` would be the primary key of
``other``. From the above, it is clear that
:meth:`~.RelationshipProperty.Comparator.contains`
will **not** work with many-to-many collections when
used in queries that move beyond simple AND
conjunctions, such as multiple
:meth:`~.RelationshipProperty.Comparator.contains`
expressions joined by OR. In such cases subqueries or
explicit "outer joins" will need to be used instead.
See :meth:`~.RelationshipProperty.Comparator.any` for
a less-performant alternative using EXISTS, or refer
to :meth:`_query.Query.outerjoin`
as well as :ref:`ormtutorial_joins`
for more details on constructing outer joins.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
"'contains' not implemented for scalar "
"attributes. Use =="
)
clause = self.property._optimized_compare(
other, adapt_source=self.adapter
)
if self.property.secondaryjoin is not None:
clause.negation_clause = self.__negated_contains_or_equals(
other
)
return clause
def __negated_contains_or_equals(self, other):
if self.property.direction == MANYTOONE:
state = attributes.instance_state(other)
def state_bindparam(x, state, col):
dict_ = state.dict
return sql.bindparam(
x,
unique=True,
callable_=self.property._get_attr_w_warn_on_none(
self.property.mapper, state, dict_, col
),
)
def adapt(col):
if self.adapter:
return self.adapter(col)
else:
return col
if self.property._use_get:
return sql.and_(
*[
sql.or_(
adapt(x)
!= state_bindparam(adapt(x), state, y),
adapt(x) == None,
)
for (x, y) in self.property.local_remote_pairs
]
)
criterion = sql.and_(
*[
x == y
for (x, y) in zip(
self.property.mapper.primary_key,
self.property.mapper.primary_key_from_instance(other),
)
]
)
return ~self._criterion_exists(criterion)
def __ne__(self, other):
"""Implement the ``!=`` operator.
In a many-to-one context, such as::
MyClass.some_prop != <some object>
This will typically produce a clause such as::
mytable.related_id != <some id>
Where ``<some id>`` is the primary key of the
given object.
The ``!=`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use
:meth:`~.RelationshipProperty.Comparator.contains`
in conjunction with :func:`_expression.not_`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.RelationshipProperty.Comparator.has` in
conjunction with :func:`_expression.not_` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce an EXISTS clause.
"""
if isinstance(other, (util.NoneType, expression.Null)):
if self.property.direction == MANYTOONE:
return _orm_annotate(
~self.property._optimized_compare(
None, adapt_source=self.adapter
)
)
else:
return self._criterion_exists()
elif self.property.uselist:
raise sa_exc.InvalidRequestError(
"Can't compare a collection"
" to an object or collection; use "
"contains() to test for membership."
)
else:
return _orm_annotate(self.__negated_contains_or_equals(other))
@util.memoized_property
def property(self):
if mapperlib.Mapper._new_mappers:
mapperlib.Mapper._configure_all()
return self.prop
def _with_parent(self, instance, alias_secondary=True, from_entity=None):
assert instance is not None
adapt_source = None
if from_entity is not None:
insp = inspect(from_entity)
if insp.is_aliased_class:
adapt_source = insp._adapter.adapt_clause
return self._optimized_compare(
instance,
value_is_parent=True,
adapt_source=adapt_source,
alias_secondary=alias_secondary,
)
def _optimized_compare(
self,
state,
value_is_parent=False,
adapt_source=None,
alias_secondary=True,
):
if state is not None:
try:
state = inspect(state)
except sa_exc.NoInspectionAvailable:
state = None
if state is None or not getattr(state, "is_instance", False):
raise sa_exc.ArgumentError(
"Mapped instance expected for relationship "
"comparison to object. Classes, queries and other "
"SQL elements are not accepted in this context; for "
"comparison with a subquery, "
"use %s.has(**criteria)." % self
)
reverse_direction = not value_is_parent
if state is None:
return self._lazy_none_clause(
reverse_direction, adapt_source=adapt_source
)
if not reverse_direction:
criterion, bind_to_col = (
self._lazy_strategy._lazywhere,
self._lazy_strategy._bind_to_col,
)
else:
criterion, bind_to_col = (
self._lazy_strategy._rev_lazywhere,
self._lazy_strategy._rev_bind_to_col,
)
if reverse_direction:
mapper = self.mapper
else:
mapper = self.parent
dict_ = attributes.instance_dict(state.obj())
def visit_bindparam(bindparam):
if bindparam._identifying_key in bind_to_col:
bindparam.callable = self._get_attr_w_warn_on_none(
mapper,
state,
dict_,
bind_to_col[bindparam._identifying_key],
)
if self.secondary is not None and alias_secondary:
criterion = ClauseAdapter(self.secondary.alias()).traverse(
criterion
)
criterion = visitors.cloned_traverse(
criterion, {}, {"bindparam": visit_bindparam}
)
if adapt_source:
criterion = adapt_source(criterion)
return criterion
def _get_attr_w_warn_on_none(self, mapper, state, dict_, column):
"""Create the callable that is used in a many-to-one expression.
E.g.::
u1 = s.query(User).get(5)
expr = Address.user == u1
Above, the SQL should be "address.user_id = 5". The callable
returned by this method produces the value "5" based on the identity
of ``u1``.
"""
# in this callable, we're trying to thread the needle through
# a wide variety of scenarios, including:
#
# * the object hasn't been flushed yet and there's no value for
# the attribute as of yet
#
# * the object hasn't been flushed yet but it has a user-defined
# value
#
# * the object has a value but it's expired and not locally present
#
# * the object has a value but it's expired and not locally present,
# and the object is also detached
#
# * The object hadn't been flushed yet, there was no value, but
# later, the object has been expired and detached, and *now*
# they're trying to evaluate it
#
# * the object had a value, but it was changed to a new value, and
# then expired
#
# * the object had a value, but it was changed to a new value, and
# then expired, then the object was detached
#
# * the object has a user-set value, but it's None and we don't do
# the comparison correctly for that so warn
#
prop = mapper.get_property_by_column(column)
# by invoking this method, InstanceState will track the last known
# value for this key each time the attribute is to be expired.
# this feature was added explicitly for use in this method.
state._track_last_known_value(prop.key)
def _go():
last_known = to_return = state._last_known_values[prop.key]
existing_is_available = last_known is not attributes.NO_VALUE
# we support that the value may have changed. so here we
# try to get the most recent value including re-fetching.
# only if we can't get a value now due to detachment do we return
# the last known value
current_value = mapper._get_state_attr_by_column(
state,
dict_,
column,
passive=attributes.PASSIVE_OFF
if state.persistent
else attributes.PASSIVE_NO_FETCH ^ attributes.INIT_OK,
)
if current_value is attributes.NEVER_SET:
if not existing_is_available:
raise sa_exc.InvalidRequestError(
"Can't resolve value for column %s on object "
"%s; no value has been set for this column"
% (column, state_str(state))
)
elif current_value is attributes.PASSIVE_NO_RESULT:
if not existing_is_available:
raise sa_exc.InvalidRequestError(
"Can't resolve value for column %s on object "
"%s; the object is detached and the value was "
"expired" % (column, state_str(state))
)
else:
to_return = current_value
if to_return is None:
util.warn(
"Got None for value of column %s; this is unsupported "
"for a relationship comparison and will not "
"currently produce an IS comparison "
"(but may in a future release)" % column
)
return to_return
return _go
def _lazy_none_clause(self, reverse_direction=False, adapt_source=None):
if not reverse_direction:
criterion, bind_to_col = (
self._lazy_strategy._lazywhere,
self._lazy_strategy._bind_to_col,
)
else:
criterion, bind_to_col = (
self._lazy_strategy._rev_lazywhere,
self._lazy_strategy._rev_bind_to_col,
)
criterion = adapt_criterion_to_null(criterion, bind_to_col)
if adapt_source:
criterion = adapt_source(criterion)
return criterion
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
def merge(
self,
session,
source_state,
source_dict,
dest_state,
dest_dict,
load,
_recursive,
_resolve_conflict_map,
):
if load:
for r in self._reverse_property:
if (source_state, r) in _recursive:
return
if "merge" not in self._cascade:
return
if self.key not in source_dict:
return
if self.uselist:
instances = source_state.get_impl(self.key).get(
source_state, source_dict
)
if hasattr(instances, "_sa_adapter"):
# convert collections to adapters to get a true iterator
instances = instances._sa_adapter
if load:
# for a full merge, pre-load the destination collection,
# so that individual _merge of each item pulls from identity
# map for those already present.
# also assumes CollectionAttributeImpl behavior of loading
# "old" list in any case
dest_state.get_impl(self.key).get(dest_state, dest_dict)
dest_list = []
for current in instances:
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
obj = session._merge(
current_state,
current_dict,
load=load,
_recursive=_recursive,
_resolve_conflict_map=_resolve_conflict_map,
)
if obj is not None:
dest_list.append(obj)
if not load:
coll = attributes.init_state_collection(
dest_state, dest_dict, self.key
)
for c in dest_list:
coll.append_without_event(c)
else:
dest_state.get_impl(self.key).set(
dest_state, dest_dict, dest_list, _adapt=False
)
else:
current = source_dict[self.key]
if current is not None:
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
obj = session._merge(
current_state,
current_dict,
load=load,
_recursive=_recursive,
_resolve_conflict_map=_resolve_conflict_map,
)
else:
obj = None
if not load:
dest_dict[self.key] = obj
else:
dest_state.get_impl(self.key).set(
dest_state, dest_dict, obj, None
)
def _value_as_iterable(
self, state, dict_, key, passive=attributes.PASSIVE_OFF
):
"""Return a list of tuples (state, obj) for the given
key.
Returns an empty list if the value is None/empty/PASSIVE_NO_RESULT.
"""
impl = state.manager[key].impl
x = impl.get(state, dict_, passive=passive)
if x is attributes.PASSIVE_NO_RESULT or x is None:
return []
elif hasattr(impl, "get_collection"):
return [
(attributes.instance_state(o), o)
for o in impl.get_collection(state, dict_, x, passive=passive)
]
else:
return [(attributes.instance_state(x), x)]
def cascade_iterator(
self, type_, state, dict_, visited_states, halt_on=None
):
# assert type_ in self._cascade
# only actively lazy load on the 'delete' cascade
if type_ != "delete" or self.passive_deletes:
passive = attributes.PASSIVE_NO_INITIALIZE
else:
passive = attributes.PASSIVE_OFF
if type_ == "save-update":
tuples = state.manager[self.key].impl.get_all_pending(state, dict_)
else:
tuples = self._value_as_iterable(
state, dict_, self.key, passive=passive
)
skip_pending = (
type_ == "refresh-expire" and "delete-orphan" not in self._cascade
)
for instance_state, c in tuples:
if instance_state in visited_states:
continue
if c is None:
# would like to emit a warning here, but
# would not be consistent with collection.append(None)
# current behavior of silently skipping.
# see [ticket:2229]
continue
instance_dict = attributes.instance_dict(c)
if halt_on and halt_on(instance_state):
continue
if skip_pending and not instance_state.key:
continue
instance_mapper = instance_state.manager.mapper
if not instance_mapper.isa(self.mapper.class_manager.mapper):
raise AssertionError(
"Attribute '%s' on class '%s' "
"doesn't handle objects "
"of type '%s'"
% (self.key, self.parent.class_, c.__class__)
)
visited_states.add(instance_state)
yield c, instance_mapper, instance_state, instance_dict
@property
def _effective_sync_backref(self):
return self.sync_backref is not False
@staticmethod
def _check_sync_backref(rel_a, rel_b):
if rel_a.viewonly and rel_b.sync_backref:
raise sa_exc.InvalidRequestError(
"Relationship %s cannot specify sync_backref=True since %s "
"includes viewonly=True." % (rel_b, rel_a)
)
if rel_a.viewonly and rel_b.sync_backref is not False:
util.warn_limited(
"Setting backref / back_populates on relationship %s to refer "
"to viewonly relationship %s should include "
"sync_backref=False set on the %s relationship. ",
(rel_b, rel_a, rel_b),
)
def _add_reverse_property(self, key):
other = self.mapper.get_property(key, _configure_mappers=False)
# viewonly and sync_backref cases
# 1. self.viewonly==True and other.sync_backref==True -> error
# 2. self.viewonly==True and other.viewonly==False and
# other.sync_backref==None -> warn sync_backref=False, set to False
self._check_sync_backref(self, other)
# 3. other.viewonly==True and self.sync_backref==True -> error
# 4. other.viewonly==True and self.viewonly==False and
# self.sync_backref==None -> warn sync_backref=False, set to False
self._check_sync_backref(other, self)
self._reverse_property.add(other)
other._reverse_property.add(self)
if not other.mapper.common_parent(self.parent):
raise sa_exc.ArgumentError(
"reverse_property %r on "
"relationship %s references relationship %s, which "
"does not reference mapper %s"
% (key, self, other, self.parent)
)
if (
self.direction in (ONETOMANY, MANYTOONE)
and self.direction == other.direction
):
raise sa_exc.ArgumentError(
"%s and back-reference %s are "
"both of the same direction %r. Did you mean to "
"set remote_side on the many-to-one side ?"
% (other, self, self.direction)
)
@util.memoized_property
def entity(self): # type: () -> Union[AliasedInsp, Mapper]
"""Return the target mapped entity, which is an inspect() of the
class or aliased class that is referred towards.
"""
if util.callable(self.argument) and not isinstance(
self.argument, (type, mapperlib.Mapper)
):
argument = self.argument()
else:
argument = self.argument
if isinstance(argument, type):
return mapperlib.class_mapper(argument, configure=False)
try:
entity = inspect(argument)
except sa_exc.NoInspectionAvailable:
pass
else:
if hasattr(entity, "mapper"):
return entity
raise sa_exc.ArgumentError(
"relationship '%s' expects "
"a class or a mapper argument (received: %s)"
% (self.key, type(argument))
)
@util.memoized_property
def mapper(self):
"""Return the targeted :class:`_orm.Mapper` for this
:class:`.RelationshipProperty`.
This is a lazy-initializing static attribute.
"""
return self.entity.mapper
def do_init(self):
self._check_conflicts()
self._process_dependent_arguments()
self._setup_join_conditions()
self._check_cascade_settings(self._cascade)
self._post_init()
self._generate_backref()
self._join_condition._warn_for_conflicting_sync_targets()
super(RelationshipProperty, self).do_init()
self._lazy_strategy = self._get_strategy((("lazy", "select"),))
def _process_dependent_arguments(self):
"""Convert incoming configuration arguments to their
proper form.
Callables are resolved, ORM annotations removed.
"""
# accept callables for other attributes which may require
# deferred initialization. This technique is used
# by declarative "string configs" and some recipes.
for attr in (
"order_by",
"primaryjoin",
"secondaryjoin",
"secondary",
"_user_defined_foreign_keys",
"remote_side",
):
attr_value = getattr(self, attr)
if util.callable(attr_value):
setattr(self, attr, attr_value())
# remove "annotations" which are present if mapped class
# descriptors are used to create the join expression.
for attr in "primaryjoin", "secondaryjoin":
val = getattr(self, attr)
if val is not None:
setattr(
self,
attr,
_orm_deannotate(
expression._only_column_elements(val, attr)
),
)
# ensure expressions in self.order_by, foreign_keys,
# remote_side are all columns, not strings.
if self.order_by is not False and self.order_by is not None:
self.order_by = [
expression._only_column_elements(x, "order_by")
for x in util.to_list(self.order_by)
]
self._user_defined_foreign_keys = util.column_set(
expression._only_column_elements(x, "foreign_keys")
for x in util.to_column_set(self._user_defined_foreign_keys)
)
self.remote_side = util.column_set(
expression._only_column_elements(x, "remote_side")
for x in util.to_column_set(self.remote_side)
)
self.target = self.entity.persist_selectable
def _setup_join_conditions(self):
self._join_condition = jc = JoinCondition(
parent_persist_selectable=self.parent.persist_selectable,
child_persist_selectable=self.entity.persist_selectable,
parent_local_selectable=self.parent.local_table,
child_local_selectable=self.entity.local_table,
primaryjoin=self.primaryjoin,
secondary=self.secondary,
secondaryjoin=self.secondaryjoin,
parent_equivalents=self.parent._equivalent_columns,
child_equivalents=self.mapper._equivalent_columns,
consider_as_foreign_keys=self._user_defined_foreign_keys,
local_remote_pairs=self.local_remote_pairs,
remote_side=self.remote_side,
self_referential=self._is_self_referential,
prop=self,
support_sync=not self.viewonly,
can_be_synced_fn=self._columns_are_mapped,
)
self.primaryjoin = jc.primaryjoin
self.secondaryjoin = jc.secondaryjoin
self.direction = jc.direction
self.local_remote_pairs = jc.local_remote_pairs
self.remote_side = jc.remote_columns
self.local_columns = jc.local_columns
self.synchronize_pairs = jc.synchronize_pairs
self._calculated_foreign_keys = jc.foreign_key_columns
self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs
def _check_conflicts(self):
"""Test that this relationship is legal, warn about
inheritance conflicts."""
if self.parent.non_primary and not mapperlib.class_mapper(
self.parent.class_, configure=False
).has_property(self.key):
raise sa_exc.ArgumentError(
"Attempting to assign a new "
"relationship '%s' to a non-primary mapper on "
"class '%s'. New relationships can only be added "
"to the primary mapper, i.e. the very first mapper "
"created for class '%s' "
% (
self.key,
self.parent.class_.__name__,
self.parent.class_.__name__,
)
)
@property
def cascade(self):
"""Return the current cascade setting for this
:class:`.RelationshipProperty`.
"""
return self._cascade
@cascade.setter
def cascade(self, cascade):
self._set_cascade(cascade)
def _set_cascade(self, cascade, warn=True):
cascade = CascadeOptions(cascade)
if warn and self.viewonly:
non_viewonly = set(cascade).difference(
CascadeOptions._viewonly_cascades
)
if non_viewonly:
# we are warning here rather than warn deprecated as this
# setting actively does the wrong thing and Python shows
# regular warnings more aggressively than deprecation warnings
# by default. There's no other guard against setting active
# persistence cascades under viewonly=True so this will raise
# in 1.4.
util.warn(
'Cascade settings "%s" should not be combined with a '
"viewonly=True relationship. This configuration will "
"raise an error in version 1.4. Note that in versions "
"prior to 1.4, "
"these cascade settings may still produce a mutating "
"effect even though this relationship is marked as "
"viewonly=True." % (", ".join(sorted(non_viewonly)))
)
if "mapper" in self.__dict__:
self._check_cascade_settings(cascade)
self._cascade = cascade
if self._dependency_processor:
self._dependency_processor.cascade = cascade
def _check_cascade_settings(self, cascade):
if (
cascade.delete_orphan
and not self.single_parent
and (self.direction is MANYTOMANY or self.direction is MANYTOONE)
):
raise sa_exc.ArgumentError(
"For %(direction)s relationship %(rel)s, delete-orphan "
"cascade is normally "
'configured only on the "one" side of a one-to-many '
"relationship, "
'and not on the "many" side of a many-to-one or many-to-many '
"relationship. "
"To force this relationship to allow a particular "
'"%(relatedcls)s" object to be referred towards by only '
'a single "%(clsname)s" object at a time via the '
"%(rel)s relationship, which "
"would allow "
"delete-orphan cascade to take place in this direction, set "
"the single_parent=True flag."
% {
"rel": self,
"direction": "many-to-one"
if self.direction is MANYTOONE
else "many-to-many",
"clsname": self.parent.class_.__name__,
"relatedcls": self.mapper.class_.__name__,
},
code="bbf0",
)
if self.direction is MANYTOONE and self.passive_deletes:
util.warn(
"On %s, 'passive_deletes' is normally configured "
"on one-to-many, one-to-one, many-to-many "
"relationships only." % self
)
if self.passive_deletes == "all" and (
"delete" in cascade or "delete-orphan" in cascade
):
raise sa_exc.ArgumentError(
"On %s, can't set passive_deletes='all' in conjunction "
"with 'delete' or 'delete-orphan' cascade" % self
)
if cascade.delete_orphan:
self.mapper.primary_mapper()._delete_orphans.append(
(self.key, self.parent.class_)
)
def _persists_for(self, mapper):
"""Return True if this property will persist values on behalf
of the given mapper.
"""
return (
self.key in mapper.relationships
and mapper.relationships[self.key] is self
)
def _columns_are_mapped(self, *cols):
"""Return True if all columns in the given collection are
mapped by the tables referenced by this :class:`.Relationship`.
"""
for c in cols:
if (
self.secondary is not None
and self.secondary.c.contains_column(c)
):
continue
if not self.parent.persist_selectable.c.contains_column(
c
) and not self.target.c.contains_column(c):
return False
return True
def _generate_backref(self):
"""Interpret the 'backref' instruction to create a
:func:`_orm.relationship` complementary to this one."""
if self.parent.non_primary:
return
if self.backref is not None and not self.back_populates:
if isinstance(self.backref, util.string_types):
backref_key, kwargs = self.backref, {}
else:
backref_key, kwargs = self.backref
mapper = self.mapper.primary_mapper()
if not mapper.concrete:
check = set(mapper.iterate_to_root()).union(
mapper.self_and_descendants
)
for m in check:
if m.has_property(backref_key) and not m.concrete:
raise sa_exc.ArgumentError(
"Error creating backref "
"'%s' on relationship '%s': property of that "
"name exists on mapper '%s'"
% (backref_key, self, m)
)
# determine primaryjoin/secondaryjoin for the
# backref. Use the one we had, so that
# a custom join doesn't have to be specified in
# both directions.
if self.secondary is not None:
# for many to many, just switch primaryjoin/
# secondaryjoin. use the annotated
# pj/sj on the _join_condition.
pj = kwargs.pop(
"primaryjoin",
self._join_condition.secondaryjoin_minus_local,
)
sj = kwargs.pop(
"secondaryjoin",
self._join_condition.primaryjoin_minus_local,
)
else:
pj = kwargs.pop(
"primaryjoin",
self._join_condition.primaryjoin_reverse_remote,
)
sj = kwargs.pop("secondaryjoin", None)
if sj:
raise sa_exc.InvalidRequestError(
"Can't assign 'secondaryjoin' on a backref "
"against a non-secondary relationship."
)
foreign_keys = kwargs.pop(
"foreign_keys", self._user_defined_foreign_keys
)
parent = self.parent.primary_mapper()
kwargs.setdefault("viewonly", self.viewonly)
kwargs.setdefault("post_update", self.post_update)
kwargs.setdefault("passive_updates", self.passive_updates)
kwargs.setdefault("sync_backref", self.sync_backref)
self.back_populates = backref_key
relationship = RelationshipProperty(
parent,
self.secondary,
pj,
sj,
foreign_keys=foreign_keys,
back_populates=self.key,
**kwargs
)
mapper._configure_property(backref_key, relationship)
if self.back_populates:
self._add_reverse_property(self.back_populates)
def _post_init(self):
if self.uselist is None:
self.uselist = self.direction is not MANYTOONE
if not self.viewonly:
self._dependency_processor = (
dependency.DependencyProcessor.from_relationship
)(self)
@util.memoized_property
def _use_get(self):
"""memoize the 'use_get' attribute of this RelationshipLoader's
lazyloader."""
strategy = self._lazy_strategy
return strategy.use_get
@util.memoized_property
def _is_self_referential(self):
return self.mapper.common_parent(self.parent)
def _create_joins(
self,
source_polymorphic=False,
source_selectable=None,
dest_polymorphic=False,
dest_selectable=None,
of_type_mapper=None,
alias_secondary=False,
):
aliased = False
if alias_secondary and self.secondary is not None:
aliased = True
if source_selectable is None:
if source_polymorphic and self.parent.with_polymorphic:
source_selectable = self.parent._with_polymorphic_selectable
if dest_selectable is None:
dest_selectable = self.entity.selectable
if dest_polymorphic and self.mapper.with_polymorphic:
aliased = True
if self._is_self_referential and source_selectable is None:
dest_selectable = dest_selectable.alias()
aliased = True
elif (
dest_selectable is not self.mapper._with_polymorphic_selectable
or self.mapper.with_polymorphic
):
aliased = True
dest_mapper = of_type_mapper or self.mapper
single_crit = dest_mapper._single_table_criterion
aliased = aliased or (
source_selectable is not None
and (
source_selectable
is not self.parent._with_polymorphic_selectable
or source_selectable._is_from_container  # e.g. an alias
)
)
(
primaryjoin,
secondaryjoin,
secondary,
target_adapter,
dest_selectable,
) = self._join_condition.join_targets(
source_selectable, dest_selectable, aliased, single_crit
)
if source_selectable is None:
source_selectable = self.parent.local_table
if dest_selectable is None:
dest_selectable = self.entity.local_table
return (
primaryjoin,
secondaryjoin,
source_selectable,
dest_selectable,
secondary,
target_adapter,
)
def _annotate_columns(element, annotations):
def clone(elem):
if isinstance(elem, expression.ColumnClause):
elem = elem._annotate(annotations.copy())
elem._copy_internals(clone=clone)
return elem
if element is not None:
element = clone(element)
clone = None # remove gc cycles
return element
class JoinCondition(object):
def __init__(
self,
parent_persist_selectable,
child_persist_selectable,
parent_local_selectable,
child_local_selectable,
primaryjoin=None,
secondary=None,
secondaryjoin=None,
parent_equivalents=None,
child_equivalents=None,
consider_as_foreign_keys=None,
local_remote_pairs=None,
remote_side=None,
self_referential=False,
prop=None,
support_sync=True,
can_be_synced_fn=lambda *c: True,
):
self.parent_persist_selectable = parent_persist_selectable
self.parent_local_selectable = parent_local_selectable
self.child_persist_selectable = child_persist_selectable
self.child_local_selectable = child_local_selectable
self.parent_equivalents = parent_equivalents
self.child_equivalents = child_equivalents
self.primaryjoin = primaryjoin
self.secondaryjoin = secondaryjoin
self.secondary = secondary
self.consider_as_foreign_keys = consider_as_foreign_keys
self._local_remote_pairs = local_remote_pairs
self._remote_side = remote_side
self.prop = prop
self.self_referential = self_referential
self.support_sync = support_sync
self.can_be_synced_fn = can_be_synced_fn
self._determine_joins()
self._sanitize_joins()
self._annotate_fks()
self._annotate_remote()
self._annotate_local()
self._annotate_parentmapper()
self._setup_pairs()
self._check_foreign_cols(self.primaryjoin, True)
if self.secondaryjoin is not None:
self._check_foreign_cols(self.secondaryjoin, False)
self._determine_direction()
self._check_remote_side()
self._log_joins()
def _log_joins(self):
if self.prop is None:
return
log = self.prop.logger
log.info("%s setup primary join %s", self.prop, self.primaryjoin)
log.info("%s setup secondary join %s", self.prop, self.secondaryjoin)
log.info(
"%s synchronize pairs [%s]",
self.prop,
",".join(
"(%s => %s)" % (l, r) for (l, r) in self.synchronize_pairs
),
)
log.info(
"%s secondary synchronize pairs [%s]",
self.prop,
",".join(
"(%s => %s)" % (l, r)
for (l, r) in self.secondary_synchronize_pairs or []
),
)
log.info(
"%s local/remote pairs [%s]",
self.prop,
",".join(
"(%s / %s)" % (l, r) for (l, r) in self.local_remote_pairs
),
)
log.info(
"%s remote columns [%s]",
self.prop,
",".join("%s" % col for col in self.remote_columns),
)
log.info(
"%s local columns [%s]",
self.prop,
",".join("%s" % col for col in self.local_columns),
)
log.info("%s relationship direction %s", self.prop, self.direction)
def _sanitize_joins(self):
"""remove the parententity annotation from our join conditions which
can leak in here based on some declarative patterns and maybe others.
We'd want to remove "parentmapper" also, but apparently there's
an exotic use case in _join_fixture_inh_selfref_w_entity
that relies upon it being present, see :ticket:`3364`.
"""
self.primaryjoin = _deep_deannotate(
self.primaryjoin, values=("parententity",)
)
if self.secondaryjoin is not None:
self.secondaryjoin = _deep_deannotate(
self.secondaryjoin, values=("parententity",)
)
def _determine_joins(self):
"""Determine the 'primaryjoin' and 'secondaryjoin' attributes,
if not passed to the constructor already.
This is based on analysis of the foreign key relationships
between the parent and target mapped selectables.
"""
if self.secondaryjoin is not None and self.secondary is None:
raise sa_exc.ArgumentError(
"Property %s specified with secondary "
"join condition but "
"no secondary argument" % self.prop
)
# find a join between the given mapper's mapped table and
# the given table. will try the mapper's local table first
# for more specificity, then if not found will try the more
# general mapped table, which in the case of inheritance is
# a join.
try:
consider_as_foreign_keys = self.consider_as_foreign_keys or None
if self.secondary is not None:
if self.secondaryjoin is None:
self.secondaryjoin = join_condition(
self.child_persist_selectable,
self.secondary,
a_subset=self.child_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys,
)
if self.primaryjoin is None:
self.primaryjoin = join_condition(
self.parent_persist_selectable,
self.secondary,
a_subset=self.parent_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys,
)
else:
if self.primaryjoin is None:
self.primaryjoin = join_condition(
self.parent_persist_selectable,
self.child_persist_selectable,
a_subset=self.parent_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys,
)
except sa_exc.NoForeignKeysError as nfe:
if self.secondary is not None:
util.raise_(
sa_exc.NoForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are no foreign keys "
"linking these tables via secondary table '%s'. "
"Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or "
"specify 'primaryjoin' and 'secondaryjoin' "
"expressions." % (self.prop, self.secondary)
),
from_=nfe,
)
else:
util.raise_(
sa_exc.NoForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are no foreign keys "
"linking these tables. "
"Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or "
"specify a 'primaryjoin' expression." % self.prop
),
from_=nfe,
)
except sa_exc.AmbiguousForeignKeysError as afe:
if self.secondary is not None:
util.raise_(
sa_exc.AmbiguousForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are multiple foreign key "
"paths linking the tables via secondary table '%s'. "
"Specify the 'foreign_keys' "
"argument, providing a list of those columns which "
"should be counted as containing a foreign key "
"reference from the secondary table to each of the "
"parent and child tables."
% (self.prop, self.secondary)
),
from_=afe,
)
else:
util.raise_(
sa_exc.AmbiguousForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are multiple foreign key "
"paths linking the tables. Specify the "
"'foreign_keys' argument, providing a list of those "
"columns which should be counted as containing a "
"foreign key reference to the parent table."
% self.prop
),
from_=afe,
)
@property
def primaryjoin_minus_local(self):
return _deep_deannotate(self.primaryjoin, values=("local", "remote"))
@property
def secondaryjoin_minus_local(self):
return _deep_deannotate(self.secondaryjoin, values=("local", "remote"))
@util.memoized_property
def primaryjoin_reverse_remote(self):
"""Return the primaryjoin condition suitable for the
"reverse" direction.
If the primaryjoin was delivered here with pre-existing
"remote" annotations, the local/remote annotations
are reversed. Otherwise, the local/remote annotations
are removed.
"""
if self._has_remote_annotations:
def replace(element):
if "remote" in element._annotations:
v = element._annotations.copy()
del v["remote"]
v["local"] = True
return element._with_annotations(v)
elif "local" in element._annotations:
v = element._annotations.copy()
del v["local"]
v["remote"] = True
return element._with_annotations(v)
return visitors.replacement_traverse(self.primaryjoin, {}, replace)
else:
if self._has_foreign_annotations:
# TODO: coverage
return _deep_deannotate(
self.primaryjoin, values=("local", "remote")
)
else:
return _deep_deannotate(self.primaryjoin)
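    # Illustrative sketch (not part of the original source): given a
    # primaryjoin such as
    #
    #     parent.c.id == child.c.parent_id
    #
    # arriving with pre-existing annotations, the traversal above flips
    # each column's role: any column annotated "remote" is re-annotated
    # "local" and vice versa, yielding the same condition as seen from
    # the reversed direction of the relationship.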
def _has_annotation(self, clause, annotation):
for col in visitors.iterate(clause, {}):
if annotation in col._annotations:
return True
else:
return False
@util.memoized_property
def _has_foreign_annotations(self):
return self._has_annotation(self.primaryjoin, "foreign")
@util.memoized_property
def _has_remote_annotations(self):
return self._has_annotation(self.primaryjoin, "remote")
def _annotate_fks(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'foreign' annotations marking columns
considered as foreign.
"""
if self._has_foreign_annotations:
return
if self.consider_as_foreign_keys:
self._annotate_from_fk_list()
else:
self._annotate_present_fks()
def _annotate_from_fk_list(self):
def check_fk(col):
if col in self.consider_as_foreign_keys:
return col._annotate({"foreign": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, check_fk
)
if self.secondaryjoin is not None:
self.secondaryjoin = visitors.replacement_traverse(
self.secondaryjoin, {}, check_fk
)
def _annotate_present_fks(self):
if self.secondary is not None:
secondarycols = util.column_set(self.secondary.c)
else:
secondarycols = set()
def is_foreign(a, b):
if isinstance(a, schema.Column) and isinstance(b, schema.Column):
if a.references(b):
return a
elif b.references(a):
return b
if secondarycols:
if a in secondarycols and b not in secondarycols:
return a
elif b in secondarycols and a not in secondarycols:
return b
def visit_binary(binary):
if not isinstance(
binary.left, sql.ColumnElement
) or not isinstance(binary.right, sql.ColumnElement):
return
if (
"foreign" not in binary.left._annotations
and "foreign" not in binary.right._annotations
):
col = is_foreign(binary.left, binary.right)
if col is not None:
if col.compare(binary.left):
binary.left = binary.left._annotate({"foreign": True})
elif col.compare(binary.right):
binary.right = binary.right._annotate(
{"foreign": True}
)
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {}, {"binary": visit_binary}
)
if self.secondaryjoin is not None:
self.secondaryjoin = visitors.cloned_traverse(
self.secondaryjoin, {}, {"binary": visit_binary}
)
def _refers_to_parent_table(self):
"""Return True if the join condition contains column
comparisons where both columns are in both tables.
"""
pt = self.parent_persist_selectable
mt = self.child_persist_selectable
result = [False]
def visit_binary(binary):
c, f = binary.left, binary.right
if (
isinstance(c, expression.ColumnClause)
and isinstance(f, expression.ColumnClause)
and pt.is_derived_from(c.table)
and pt.is_derived_from(f.table)
and mt.is_derived_from(c.table)
and mt.is_derived_from(f.table)
):
result[0] = True
visitors.traverse(self.primaryjoin, {}, {"binary": visit_binary})
return result[0]
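    # Illustrative sketch (not part of the original source): for a
    # self-referential join such as
    #
    #     nodes.c.parent_id == nodes.c.id
    #
    # both columns derive from the same table, so pt.is_derived_from()
    # and mt.is_derived_from() succeed for both sides of the comparison
    # and this method returns True.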
def _tables_overlap(self):
"""Return True if parent/child tables have some overlap."""
return selectables_overlap(
self.parent_persist_selectable, self.child_persist_selectable
)
def _annotate_remote(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'remote' annotations marking columns
considered as part of the 'remote' side.
"""
if self._has_remote_annotations:
return
if self.secondary is not None:
self._annotate_remote_secondary()
elif self._local_remote_pairs or self._remote_side:
self._annotate_remote_from_args()
elif self._refers_to_parent_table():
self._annotate_selfref(
lambda col: "foreign" in col._annotations, False
)
elif self._tables_overlap():
self._annotate_remote_with_overlap()
else:
self._annotate_remote_distinct_selectables()
def _annotate_remote_secondary(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when 'secondary' is present.
"""
def repl(element):
if self.secondary.c.contains_column(element):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl
)
self.secondaryjoin = visitors.replacement_traverse(
self.secondaryjoin, {}, repl
)
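    # Illustrative sketch (not part of the original source): with a
    # hypothetical association table "keyword_assoc" as the secondary,
    # every column of keyword_assoc that appears in either join
    # condition, e.g. in parent.c.id == keyword_assoc.c.parent_id, is
    # annotated "remote", since the middle table is always considered
    # the remote side of both joins.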
def _annotate_selfref(self, fn, remote_side_given):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the relationship is detected as self-referential.
"""
def visit_binary(binary):
equated = binary.left.compare(binary.right)
if isinstance(binary.left, expression.ColumnClause) and isinstance(
binary.right, expression.ColumnClause
):
# assume one to many - FKs are "remote"
if fn(binary.left):
binary.left = binary.left._annotate({"remote": True})
if fn(binary.right) and not equated:
binary.right = binary.right._annotate({"remote": True})
elif not remote_side_given:
self._warn_non_column_elements()
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {}, {"binary": visit_binary}
)
def _annotate_remote_from_args(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the 'remote_side' or '_local_remote_pairs'
arguments are used.
"""
if self._local_remote_pairs:
if self._remote_side:
raise sa_exc.ArgumentError(
"remote_side argument is redundant "
"against more detailed _local_remote_side "
"argument."
)
remote_side = [r for (l, r) in self._local_remote_pairs]
else:
remote_side = self._remote_side
if self._refers_to_parent_table():
self._annotate_selfref(lambda col: col in remote_side, True)
else:
def repl(element):
if element in remote_side:
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl
)
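    # Illustrative sketch (not part of the original source): a typical
    # way these arguments arrive is a self-referential many-to-one on a
    # hypothetical mapped class Node, e.g.
    #
    #     parent = relationship("Node", remote_side=[Node.id])
    #
    # here remote_side resolves to [nodes.c.id], and _annotate_selfref()
    # marks nodes.c.id as "remote" within the primaryjoin
    # nodes.c.parent_id == nodes.c.id.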
def _annotate_remote_with_overlap(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables have some set of
tables in common, though it is not a fully self-referential
relationship.
"""
def visit_binary(binary):
binary.left, binary.right = proc_left_right(
binary.left, binary.right
)
binary.right, binary.left = proc_left_right(
binary.right, binary.left
)
check_entities = (
self.prop is not None and self.prop.mapper is not self.prop.parent
)
def proc_left_right(left, right):
if isinstance(left, expression.ColumnClause) and isinstance(
right, expression.ColumnClause
):
if self.child_persist_selectable.c.contains_column(
right
) and self.parent_persist_selectable.c.contains_column(left):
right = right._annotate({"remote": True})
elif (
check_entities
and right._annotations.get("parentmapper") is self.prop.mapper
):
right = right._annotate({"remote": True})
elif (
check_entities
and left._annotations.get("parentmapper") is self.prop.mapper
):
left = left._annotate({"remote": True})
else:
self._warn_non_column_elements()
return left, right
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {}, {"binary": visit_binary}
)
def _annotate_remote_distinct_selectables(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables are entirely
separate.
"""
def repl(element):
if self.child_persist_selectable.c.contains_column(element) and (
not self.parent_local_selectable.c.contains_column(element)
or self.child_local_selectable.c.contains_column(element)
):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl
)
def _warn_non_column_elements(self):
util.warn(
"Non-simple column elements in primary "
"join condition for property %s - consider using "
"remote() annotations to mark the remote side." % self.prop
)
def _annotate_local(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'local' annotations.
This annotates all column elements found
simultaneously in the parent table
and the join condition that don't have a
'remote' annotation set up from
_annotate_remote() or user-defined.
"""
if self._has_annotation(self.primaryjoin, "local"):
return
if self._local_remote_pairs:
local_side = util.column_set(
[l for (l, r) in self._local_remote_pairs]
)
else:
local_side = util.column_set(self.parent_persist_selectable.c)
def locals_(elem):
if "remote" not in elem._annotations and elem in local_side:
return elem._annotate({"local": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, locals_
)
def _annotate_parentmapper(self):
if self.prop is None:
return
def parentmappers_(elem):
if "remote" in elem._annotations:
return elem._annotate({"parentmapper": self.prop.mapper})
elif "local" in elem._annotations:
return elem._annotate({"parentmapper": self.prop.parent})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, parentmappers_
)
def _check_remote_side(self):
if not self.local_remote_pairs:
raise sa_exc.ArgumentError(
"Relationship %s could "
"not determine any unambiguous local/remote column "
"pairs based on join condition and remote_side "
"arguments. "
"Consider using the remote() annotation to "
"accurately mark those elements of the join "
"condition that are on the remote side of "
"the relationship." % (self.prop,)
)
def _check_foreign_cols(self, join_condition, primary):
"""Check the foreign key columns collected and emit error
messages."""
can_sync = False
foreign_cols = self._gather_columns_with_annotation(
join_condition, "foreign"
)
has_foreign = bool(foreign_cols)
if primary:
can_sync = bool(self.synchronize_pairs)
else:
can_sync = bool(self.secondary_synchronize_pairs)
if (
self.support_sync
and can_sync
or (not self.support_sync and has_foreign)
):
return
# from here below is just determining the best error message
# to report. Check for a join condition using any operator
# (not just ==), perhaps they need to turn on "viewonly=True".
if self.support_sync and has_foreign and not can_sync:
err = (
"Could not locate any simple equality expressions "
"involving locally mapped foreign key columns for "
"%s join condition "
"'%s' on relationship %s."
% (
primary and "primary" or "secondary",
join_condition,
self.prop,
)
)
err += (
" Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or are "
"annotated in the join condition with the foreign() "
"annotation. To allow comparison operators other than "
"'==', the relationship can be marked as viewonly=True."
)
raise sa_exc.ArgumentError(err)
else:
err = (
"Could not locate any relevant foreign key columns "
"for %s join condition '%s' on relationship %s."
% (
primary and "primary" or "secondary",
join_condition,
self.prop,
)
)
err += (
" Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or are "
"annotated in the join condition with the foreign() "
"annotation."
)
raise sa_exc.ArgumentError(err)
def _determine_direction(self):
"""Determine if this relationship is one to many, many to one,
many to many.
"""
if self.secondaryjoin is not None:
self.direction = MANYTOMANY
else:
parentcols = util.column_set(self.parent_persist_selectable.c)
targetcols = util.column_set(self.child_persist_selectable.c)
# fk collection which suggests ONETOMANY.
onetomany_fk = targetcols.intersection(self.foreign_key_columns)
# fk collection which suggests MANYTOONE.
manytoone_fk = parentcols.intersection(self.foreign_key_columns)
if onetomany_fk and manytoone_fk:
# fks on both sides. test for overlap of local/remote
# with foreign key.
# we will gather columns directly from their annotations
# without deannotating, so that we can distinguish on a column
# that refers to itself.
# 1. columns that are both remote and FK suggest
# onetomany.
onetomany_local = self._gather_columns_with_annotation(
self.primaryjoin, "remote", "foreign"
)
# 2. columns that are FK but are not remote (e.g. local)
# suggest manytoone.
manytoone_local = set(
[
c
for c in self._gather_columns_with_annotation(
self.primaryjoin, "foreign"
)
if "remote" not in c._annotations
]
)
# 3. if both collections are present, remove columns that
# refer to themselves. This is for the case of
# and_(Me.id == Me.remote_id, Me.version == Me.version)
if onetomany_local and manytoone_local:
self_equated = self.remote_columns.intersection(
self.local_columns
)
onetomany_local = onetomany_local.difference(self_equated)
manytoone_local = manytoone_local.difference(self_equated)
# at this point, if only one or the other collection is
# present, we know the direction, otherwise it's still
# ambiguous.
if onetomany_local and not manytoone_local:
self.direction = ONETOMANY
elif manytoone_local and not onetomany_local:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError(
"Can't determine relationship"
" direction for relationship '%s' - foreign "
"key columns within the join condition are present "
"in both the parent and the child's mapped tables. "
"Ensure that only those columns referring "
"to a parent column are marked as foreign, "
"either via the foreign() annotation or "
"via the foreign_keys argument." % self.prop
)
elif onetomany_fk:
self.direction = ONETOMANY
elif manytoone_fk:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError(
"Can't determine relationship "
"direction for relationship '%s' - foreign "
"key columns are present in neither the parent "
"nor the child's mapped tables" % self.prop
)
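    # Illustrative sketch (not part of the original source): for a plain
    # one-to-many, e.g. primaryjoin parent.c.id == child.c.parent_id with
    # "foreign" on child.c.parent_id, the foreign key column appears only
    # in the child (target) table, so onetomany_fk is non-empty,
    # manytoone_fk is empty, and the direction resolves to ONETOMANY.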
def _deannotate_pairs(self, collection):
"""provide deannotation for the various lists of
pairs, so that using them in hashes doesn't incur
high-overhead __eq__() comparisons against
original columns mapped.
"""
return [(x._deannotate(), y._deannotate()) for x, y in collection]
def _setup_pairs(self):
sync_pairs = []
lrp = util.OrderedSet([])
secondary_sync_pairs = []
def go(joincond, collection):
def visit_binary(binary, left, right):
if (
"remote" in right._annotations
and "remote" not in left._annotations
and self.can_be_synced_fn(left)
):
lrp.add((left, right))
elif (
"remote" in left._annotations
and "remote" not in right._annotations
and self.can_be_synced_fn(right)
):
lrp.add((right, left))
if binary.operator is operators.eq and self.can_be_synced_fn(
left, right
):
if "foreign" in right._annotations:
collection.append((left, right))
elif "foreign" in left._annotations:
collection.append((right, left))
visit_binary_product(visit_binary, joincond)
for joincond, collection in [
(self.primaryjoin, sync_pairs),
(self.secondaryjoin, secondary_sync_pairs),
]:
if joincond is None:
continue
go(joincond, collection)
self.local_remote_pairs = self._deannotate_pairs(lrp)
self.synchronize_pairs = self._deannotate_pairs(sync_pairs)
self.secondary_synchronize_pairs = self._deannotate_pairs(
secondary_sync_pairs
)
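    # Illustrative sketch (not part of the original source): with a
    # primaryjoin of parent.c.id == child.c.parent_id, where
    # child.c.parent_id is annotated "foreign" and "remote", the
    # traversal above would produce roughly
    #
    #     synchronize_pairs  -> [(parent.c.id, child.c.parent_id)]
    #     local_remote_pairs -> [(parent.c.id, child.c.parent_id)]
    #
    # i.e. (source, destination) pairs used to copy values during flush
    # and to relate parent rows to child rows during lazy loads.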
_track_overlapping_sync_targets = weakref.WeakKeyDictionary()
def _warn_for_conflicting_sync_targets(self):
if not self.support_sync:
return
# we would like to detect if we are synchronizing any column
# pairs in conflict with another relationship that wishes to sync
# an entirely different column to the same target. This is a
# very rare edge case so we will try to minimize the memory/overhead
# impact of this check
for from_, to_ in [
(from_, to_) for (from_, to_) in self.synchronize_pairs
] + [
(from_, to_) for (from_, to_) in self.secondary_synchronize_pairs
]:
# save ourselves a ton of memory and overhead by only
# considering columns that are subject to overlapping
# FK constraints at the core level. This condition can arise
# if multiple relationships overlap foreign() directly, but
# we're going to assume it's typically a ForeignKeyConstraint-
# level configuration that benefits from this warning.
if len(to_.foreign_keys) < 2:
continue
if to_ not in self._track_overlapping_sync_targets:
self._track_overlapping_sync_targets[
to_
] = weakref.WeakKeyDictionary({self.prop: from_})
else:
other_props = []
prop_to_from = self._track_overlapping_sync_targets[to_]
for pr, fr_ in prop_to_from.items():
if (
pr.mapper in mapperlib._mapper_registry
and (
self.prop._persists_for(pr.parent)
or pr._persists_for(self.prop.parent)
)
and fr_ is not from_
and pr not in self.prop._reverse_property
):
other_props.append((pr, fr_))
if other_props:
util.warn(
"relationship '%s' will copy column %s to column %s, "
"which conflicts with relationship(s): %s. "
"Consider applying "
"viewonly=True to read-only relationships, or provide "
"a primaryjoin condition marking writable columns "
"with the foreign() annotation."
% (
self.prop,
from_,
to_,
", ".join(
"'%s' (copies %s to %s)" % (pr, fr_, to_)
for (pr, fr_) in other_props
),
)
)
self._track_overlapping_sync_targets[to_][self.prop] = from_
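    # Illustrative sketch (not part of the original source): the warning
    # above targets configurations where two relationships both write to
    # the same target column, e.g. hypothetical relationships r1 copying
    # a.id -> assoc.ref_id and r2 copying b.id -> assoc.ref_id; unless
    # one is marked viewonly=True, a flush may overwrite assoc.ref_id
    # with conflicting values.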
@util.memoized_property
def remote_columns(self):
return self._gather_join_annotations("remote")
@util.memoized_property
def local_columns(self):
return self._gather_join_annotations("local")
@util.memoized_property
def foreign_key_columns(self):
return self._gather_join_annotations("foreign")
def _gather_join_annotations(self, annotation):
s = set(
self._gather_columns_with_annotation(self.primaryjoin, annotation)
)
if self.secondaryjoin is not None:
s.update(
self._gather_columns_with_annotation(
self.secondaryjoin, annotation
)
)
return {x._deannotate() for x in s}
def _gather_columns_with_annotation(self, clause, *annotation):
annotation = set(annotation)
return set(
[
col
for col in visitors.iterate(clause, {})
if annotation.issubset(col._annotations)
]
)
def join_targets(
self, source_selectable, dest_selectable, aliased, single_crit=None
):
"""Given a source and destination selectable, create a
join between them.
This takes into account aliasing the join clause
to reference the appropriate corresponding columns
in the target objects, as well as the extra child
criterion, equivalent column sets, etc.
"""
# place a barrier on the destination such that
# replacement traversals won't ever dig into it.
# its internal structure remains fixed
# regardless of context.
dest_selectable = _shallow_annotate(
dest_selectable, {"no_replacement_traverse": True}
)
primaryjoin, secondaryjoin, secondary = (
self.primaryjoin,
self.secondaryjoin,
self.secondary,
)
# adjust the join condition for single table inheritance,
# in the case that the join is to a subclass
# this is analogous to the
# "_adjust_for_single_table_inheritance()" method in Query.
if single_crit is not None:
if secondaryjoin is not None:
secondaryjoin = secondaryjoin & single_crit
else:
primaryjoin = primaryjoin & single_crit
if aliased:
if secondary is not None:
secondary = secondary.alias(flat=True)
primary_aliasizer = ClauseAdapter(
secondary, exclude_fn=_ColInAnnotations("local")
)
secondary_aliasizer = ClauseAdapter(
dest_selectable, equivalents=self.child_equivalents
).chain(primary_aliasizer)
if source_selectable is not None:
primary_aliasizer = ClauseAdapter(
secondary, exclude_fn=_ColInAnnotations("local")
).chain(
ClauseAdapter(
source_selectable,
equivalents=self.parent_equivalents,
)
)
secondaryjoin = secondary_aliasizer.traverse(secondaryjoin)
else:
primary_aliasizer = ClauseAdapter(
dest_selectable,
exclude_fn=_ColInAnnotations("local"),
equivalents=self.child_equivalents,
)
if source_selectable is not None:
primary_aliasizer.chain(
ClauseAdapter(
source_selectable,
exclude_fn=_ColInAnnotations("remote"),
equivalents=self.parent_equivalents,
)
)
secondary_aliasizer = None
primaryjoin = primary_aliasizer.traverse(primaryjoin)
target_adapter = secondary_aliasizer or primary_aliasizer
target_adapter.exclude_fn = None
else:
target_adapter = None
return (
primaryjoin,
secondaryjoin,
secondary,
target_adapter,
dest_selectable,
)
def create_lazy_clause(self, reverse_direction=False):
binds = util.column_dict()
equated_columns = util.column_dict()
has_secondary = self.secondaryjoin is not None
if has_secondary:
lookup = collections.defaultdict(list)
for l, r in self.local_remote_pairs:
lookup[l].append((l, r))
equated_columns[r] = l
elif not reverse_direction:
for l, r in self.local_remote_pairs:
equated_columns[r] = l
else:
for l, r in self.local_remote_pairs:
equated_columns[l] = r
def col_to_bind(col):
if (
(not reverse_direction and "local" in col._annotations)
or reverse_direction
and (
(has_secondary and col in lookup)
or (not has_secondary and "remote" in col._annotations)
)
):
if col not in binds:
binds[col] = sql.bindparam(
None, None, type_=col.type, unique=True
)
return binds[col]
return None
lazywhere = self.primaryjoin
if self.secondaryjoin is None or not reverse_direction:
lazywhere = visitors.replacement_traverse(
lazywhere, {}, col_to_bind
)
if self.secondaryjoin is not None:
secondaryjoin = self.secondaryjoin
if reverse_direction:
secondaryjoin = visitors.replacement_traverse(
secondaryjoin, {}, col_to_bind
)
lazywhere = sql.and_(lazywhere, secondaryjoin)
bind_to_col = {binds[col].key: col for col in binds}
return lazywhere, bind_to_col, equated_columns
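# Illustrative sketch (not part of the original source): for a primaryjoin
# of parent.c.id == child.c.parent_id, col_to_bind() replaces the "local"
# column parent.c.id with an anonymous bindparam, so the returned
# lazywhere is roughly
#
#     :param_1 = child.parent_id
#
# and bind_to_col maps the bind's key back to parent.c.id, allowing the
# lazy loader to populate the parameter from the parent row at load time.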
class _ColInAnnotations(object):
"""Seralizable equivalent to:
lambda c: "name" in c._annotations
"""
def __init__(self, name):
self.name = name
def __call__(self, c):
return self.name in c._annotations
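# Illustrative sketch (not part of the original source): instances of this
# class serve as picklable predicates for ClauseAdapter's exclude_fn, as
# seen in join_targets() above, e.g.
#
#     exclude_fn = _ColInAnnotations("local")
#     exclude_fn(some_column)  # True if "local" in some_column._annotations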
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/orm/state.py
|
# orm/state.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines instrumentation of instances.
This module is usually not directly visible to user applications, but
defines a large part of the ORM's interactivity.
"""
import weakref
from . import base
from . import exc as orm_exc
from . import interfaces
from .base import ATTR_WAS_SET
from .base import INIT_OK
from .base import NEVER_SET
from .base import NO_VALUE
from .base import PASSIVE_NO_INITIALIZE
from .base import PASSIVE_NO_RESULT
from .base import PASSIVE_OFF
from .base import SQL_OK
from .path_registry import PathRegistry
from .. import exc as sa_exc
from .. import inspection
from .. import util
@inspection._self_inspects
class InstanceState(interfaces.InspectionAttrInfo):
"""tracks state information at the instance level.
The :class:`.InstanceState` is a key object used by the
SQLAlchemy ORM in order to track the state of an object;
it is created the moment an object is instantiated, typically
as a result of :term:`instrumentation` which SQLAlchemy applies
to the ``__init__()`` method of the class.
:class:`.InstanceState` is also a semi-public object,
available for runtime inspection as to the state of a
mapped instance, including information such as its current
status within a particular :class:`.Session` and details
about data on individual attributes. The public API
in order to acquire a :class:`.InstanceState` object
is to use the :func:`_sa.inspect` system::
>>> from sqlalchemy import inspect
>>> insp = inspect(some_mapped_object)
.. seealso::
:ref:`core_inspection_toplevel`
"""
session_id = None
key = None
runid = None
load_options = util.EMPTY_SET
load_path = PathRegistry.root
insert_order = None
_strong_obj = None
modified = False
expired = False
_deleted = False
_load_pending = False
_orphaned_outside_of_session = False
is_instance = True
identity_token = None
_last_known_values = ()
callables = ()
"""A namespace where a per-state loader callable can be associated.
In SQLAlchemy 1.0, this is only used for lazy loaders / deferred
loaders that were set up via query option.
Previously, callables was used also to indicate expired attributes
by storing a link to the InstanceState itself in this dictionary.
This role is now handled by the expired_attributes set.
"""
def __init__(self, obj, manager):
self.class_ = obj.__class__
self.manager = manager
self.obj = weakref.ref(obj, self._cleanup)
self.committed_state = {}
self.expired_attributes = set()
expired_attributes = None
"""The set of keys which are 'expired' to be loaded by
the manager's deferred scalar loader, assuming no pending
changes.
see also the ``unmodified`` collection which is intersected
against this set when a refresh operation occurs."""
@util.memoized_property
def attrs(self):
"""Return a namespace representing each attribute on
the mapped object, including its current value
and history.
The returned object is an instance of :class:`.AttributeState`.
This object allows inspection of the current data
within an attribute as well as attribute history
since the last flush.
"""
return util.ImmutableProperties(
dict((key, AttributeState(self, key)) for key in self.manager)
)
@property
def transient(self):
"""Return true if the object is :term:`transient`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is None and not self._attached
@property
def pending(self):
"""Return true if the object is :term:`pending`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is None and self._attached
@property
def deleted(self):
"""Return true if the object is :term:`deleted`.
An object that is in the deleted state is guaranteed to
not be within the :attr:`.Session.identity_map` of its parent
:class:`.Session`; however if the session's transaction is rolled
back, the object will be restored to the persistent state and
the identity map.
.. note::
The :attr:`.InstanceState.deleted` attribute refers to a specific
state of the object that occurs between the "persistent" and
"detached" states; once the object is :term:`detached`, the
:attr:`.InstanceState.deleted` attribute **no longer returns
True**; in order to detect that a state was deleted, regardless
of whether or not the object is associated with a
:class:`.Session`, use the :attr:`.InstanceState.was_deleted`
accessor.
.. versionadded:: 1.1
.. seealso::
:ref:`session_object_states`
"""
return self.key is not None and self._attached and self._deleted
@property
def was_deleted(self):
"""Return True if this object is or was previously in the
"deleted" state and has not been reverted to persistent.
This flag returns True once the object was deleted in flush.
When the object is expunged from the session either explicitly
or via transaction commit and enters the "detached" state,
this flag will continue to report True.
.. versionadded:: 1.1 - added a local method form of
:func:`.orm.util.was_deleted`.
.. seealso::
:attr:`.InstanceState.deleted` - refers to the "deleted" state
:func:`.orm.util.was_deleted` - standalone function
:ref:`session_object_states`
"""
return self._deleted
@property
def persistent(self):
"""Return true if the object is :term:`persistent`.
An object that is in the persistent state is guaranteed to
be within the :attr:`.Session.identity_map` of its parent
:class:`.Session`.
.. versionchanged:: 1.1 The :attr:`.InstanceState.persistent`
accessor no longer returns True for an object that was
"deleted" within a flush; use the :attr:`.InstanceState.deleted`
accessor to detect this state. This allows the "persistent"
state to guarantee membership in the identity map.
.. seealso::
:ref:`session_object_states`
"""
return self.key is not None and self._attached and not self._deleted
@property
def detached(self):
"""Return true if the object is :term:`detached`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is not None and not self._attached
@property
@util.dependencies("sqlalchemy.orm.session")
def _attached(self, sessionlib):
return (
self.session_id is not None
and self.session_id in sessionlib._sessions
)
def _track_last_known_value(self, key):
"""Track the last known value of a particular key after expiration
operations.
.. versionadded:: 1.3
"""
if key not in self._last_known_values:
self._last_known_values = dict(self._last_known_values)
self._last_known_values[key] = NO_VALUE
@property
@util.dependencies("sqlalchemy.orm.session")
def session(self, sessionlib):
"""Return the owning :class:`.Session` for this instance,
or ``None`` if none available.
Note that the result here can in some cases be *different*
from that of ``obj in session``; an object that's been deleted
will report as not ``in session``, however if the transaction is
still in progress, this attribute will still refer to that session.
Only when the transaction is completed does the object become
fully detached under normal circumstances.
"""
return sessionlib._state_session(self)
@property
def object(self):
"""Return the mapped object represented by this
:class:`.InstanceState`."""
return self.obj()
@property
def identity(self):
"""Return the mapped identity of the mapped object.
This is the primary key identity as persisted by the ORM
which can always be passed directly to
:meth:`_query.Query.get`.
Returns ``None`` if the object has no primary key identity.
.. note::
An object which is :term:`transient` or :term:`pending`
does **not** have a mapped identity until it is flushed,
even if its attributes include primary key values.
"""
if self.key is None:
return None
else:
return self.key[1]
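    # Illustrative sketch (not part of the original source): assuming a
    # hypothetical mapped class User with a single integer primary key,
    # self.key is a tuple of the form
    #
    #     (User, (5,), None)
    #
    # i.e. (class, primary key tuple, identity_token), so .identity here
    # returns the primary key tuple (5,).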
@property
def identity_key(self):
"""Return the identity key for the mapped object.
This is the key used to locate the object within
the :attr:`.Session.identity_map` mapping. It contains
the identity as returned by :attr:`.identity` within it.
"""
# TODO: just change .key to .identity_key across
# the board ? probably
return self.key
@util.memoized_property
def parents(self):
return {}
@util.memoized_property
def _pending_mutations(self):
return {}
@util.memoized_property
def mapper(self):
"""Return the :class:`_orm.Mapper` used for this mapped object."""
return self.manager.mapper
@property
def has_identity(self):
"""Return ``True`` if this object has an identity key.
This should always have the same value as the
expression ``state.persistent or state.detached``.
"""
return bool(self.key)
@classmethod
def _detach_states(self, states, session, to_transient=False):
persistent_to_detached = (
session.dispatch.persistent_to_detached or None
)
deleted_to_detached = session.dispatch.deleted_to_detached or None
pending_to_transient = session.dispatch.pending_to_transient or None
persistent_to_transient = (
session.dispatch.persistent_to_transient or None
)
for state in states:
deleted = state._deleted
pending = state.key is None
persistent = not pending and not deleted
state.session_id = None
if to_transient and state.key:
del state.key
if persistent:
if to_transient:
if persistent_to_transient is not None:
persistent_to_transient(session, state)
elif persistent_to_detached is not None:
persistent_to_detached(session, state)
elif deleted and deleted_to_detached is not None:
deleted_to_detached(session, state)
elif pending and pending_to_transient is not None:
pending_to_transient(session, state)
state._strong_obj = None
def _detach(self, session=None):
if session:
InstanceState._detach_states([self], session)
else:
self.session_id = self._strong_obj = None
def _dispose(self):
self._detach()
del self.obj
def _cleanup(self, ref):
"""Weakref callback cleanup.
This callable cleans out the state when it is being garbage
collected.
this _cleanup **assumes** that there are no strong refs to us!
Will not work otherwise!
"""
# Python builtins become undefined during interpreter shutdown.
# Guard against exceptions during this phase, as the method cannot
# proceed in any case if builtins have been undefined.
if dict is None:
return
instance_dict = self._instance_dict()
if instance_dict is not None:
instance_dict._fast_discard(self)
del self._instance_dict
# we can't possibly be in instance_dict._modified
# b.c. this is weakref cleanup only, that set
# is strong referencing!
# assert self not in instance_dict._modified
self.session_id = self._strong_obj = None
del self.obj
def obj(self):
return None
@property
def dict(self):
"""Return the instance dict used by the object.
Under normal circumstances, this is always synonymous
with the ``__dict__`` attribute of the mapped object,
unless an alternative instrumentation system has been
configured.
In the case that the actual object has been garbage
collected, this accessor returns a blank dictionary.
"""
o = self.obj()
if o is not None:
return base.instance_dict(o)
else:
return {}
def _initialize_instance(*mixed, **kwargs):
self, instance, args = mixed[0], mixed[1], mixed[2:] # noqa
manager = self.manager
manager.dispatch.init(self, args, kwargs)
try:
return manager.original_init(*mixed[1:], **kwargs)
except:
with util.safe_reraise():
manager.dispatch.init_failure(self, args, kwargs)
def get_history(self, key, passive):
return self.manager[key].impl.get_history(self, self.dict, passive)
def get_impl(self, key):
return self.manager[key].impl
def _get_pending_mutation(self, key):
if key not in self._pending_mutations:
self._pending_mutations[key] = PendingCollection()
return self._pending_mutations[key]
def __getstate__(self):
state_dict = {"instance": self.obj()}
state_dict.update(
(k, self.__dict__[k])
for k in (
"committed_state",
"_pending_mutations",
"modified",
"expired",
"callables",
"key",
"parents",
"load_options",
"class_",
"expired_attributes",
"info",
)
if k in self.__dict__
)
if self.load_path:
state_dict["load_path"] = self.load_path.serialize()
state_dict["manager"] = self.manager._serialize(self, state_dict)
return state_dict
def __setstate__(self, state_dict):
inst = state_dict["instance"]
if inst is not None:
self.obj = weakref.ref(inst, self._cleanup)
self.class_ = inst.__class__
else:
# None being possible here generally new as of 0.7.4
# due to storage of state in "parents". "class_"
# also new.
self.obj = None
self.class_ = state_dict["class_"]
self.committed_state = state_dict.get("committed_state", {})
self._pending_mutations = state_dict.get("_pending_mutations", {})
self.parents = state_dict.get("parents", {})
self.modified = state_dict.get("modified", False)
self.expired = state_dict.get("expired", False)
if "info" in state_dict:
self.info.update(state_dict["info"])
if "callables" in state_dict:
self.callables = state_dict["callables"]
try:
self.expired_attributes = state_dict["expired_attributes"]
except KeyError:
self.expired_attributes = set()
# 0.9 and earlier compat
for k in list(self.callables):
if self.callables[k] is self:
self.expired_attributes.add(k)
del self.callables[k]
else:
if "expired_attributes" in state_dict:
self.expired_attributes = state_dict["expired_attributes"]
else:
self.expired_attributes = set()
self.__dict__.update(
[
(k, state_dict[k])
for k in ("key", "load_options")
if k in state_dict
]
)
if self.key:
try:
self.identity_token = self.key[2]
except IndexError:
# 1.1 and earlier compat before identity_token
assert len(self.key) == 2
self.key = self.key + (None,)
self.identity_token = None
if "load_path" in state_dict:
self.load_path = PathRegistry.deserialize(state_dict["load_path"])
state_dict["manager"](self, inst, state_dict)
def _reset(self, dict_, key):
"""Remove the given attribute and any
callables associated with it."""
old = dict_.pop(key, None)
if old is not None and self.manager[key].impl.collection:
self.manager[key].impl._invalidate_collection(old)
self.expired_attributes.discard(key)
if self.callables:
self.callables.pop(key, None)
def _copy_callables(self, from_):
if "callables" in from_.__dict__:
self.callables = dict(from_.callables)
@classmethod
def _instance_level_callable_processor(cls, manager, fn, key):
impl = manager[key].impl
if impl.collection:
def _set_callable(state, dict_, row):
if "callables" not in state.__dict__:
state.callables = {}
old = dict_.pop(key, None)
if old is not None:
impl._invalidate_collection(old)
state.callables[key] = fn
else:
def _set_callable(state, dict_, row):
if "callables" not in state.__dict__:
state.callables = {}
state.callables[key] = fn
return _set_callable
def _expire(self, dict_, modified_set):
self.expired = True
if self.modified:
modified_set.discard(self)
self.committed_state.clear()
self.modified = False
self._strong_obj = None
if "_pending_mutations" in self.__dict__:
del self.__dict__["_pending_mutations"]
if "parents" in self.__dict__:
del self.__dict__["parents"]
self.expired_attributes.update(
[
impl.key
for impl in self.manager._scalar_loader_impls
if impl.expire_missing or impl.key in dict_
]
)
if self.callables:
for k in self.expired_attributes.intersection(self.callables):
del self.callables[k]
for k in self.manager._collection_impl_keys.intersection(dict_):
collection = dict_.pop(k)
collection._sa_adapter.invalidated = True
if self._last_known_values:
self._last_known_values.update(
(k, dict_[k]) for k in self._last_known_values if k in dict_
)
for key in self.manager._all_key_set.intersection(dict_):
del dict_[key]
self.manager.dispatch.expire(self, None)
def _expire_attributes(self, dict_, attribute_names, no_loader=False):
pending = self.__dict__.get("_pending_mutations", None)
callables = self.callables
for key in attribute_names:
impl = self.manager[key].impl
if impl.accepts_scalar_loader:
if no_loader and (impl.callable_ or key in callables):
continue
self.expired_attributes.add(key)
if callables and key in callables:
del callables[key]
old = dict_.pop(key, NO_VALUE)
if impl.collection and old is not NO_VALUE:
impl._invalidate_collection(old)
if (
self._last_known_values
and key in self._last_known_values
and old is not NO_VALUE
):
self._last_known_values[key] = old
self.committed_state.pop(key, None)
if pending:
pending.pop(key, None)
self.manager.dispatch.expire(self, attribute_names)
def _load_expired(self, state, passive):
"""__call__ allows the InstanceState to act as a deferred
callable for loading expired attributes, which is also
serializable (picklable).
"""
if not passive & SQL_OK:
return PASSIVE_NO_RESULT
toload = self.expired_attributes.intersection(self.unmodified)
self.manager.deferred_scalar_loader(self, toload)
# if the loader failed, or this
# instance state didn't have an identity,
# the attributes still might be in the callables
# dict. ensure they are removed.
self.expired_attributes.clear()
return ATTR_WAS_SET
@property
def unmodified(self):
"""Return the set of keys which have no uncommitted changes"""
return set(self.manager).difference(self.committed_state)
def unmodified_intersection(self, keys):
"""Return self.unmodified.intersection(keys)."""
return (
set(keys)
.intersection(self.manager)
.difference(self.committed_state)
)
@property
def unloaded(self):
"""Return the set of keys which do not have a loaded value.
This includes expired attributes and any other attribute that
was never populated or modified.
"""
return (
set(self.manager)
.difference(self.committed_state)
.difference(self.dict)
)
@property
def unloaded_expirable(self):
"""Return the set of keys which do not have a loaded value.
This includes expired attributes and any other attribute that
was never populated or modified.
"""
return self.unloaded.intersection(
attr
for attr in self.manager
if self.manager[attr].impl.expire_missing
)
@property
def _unloaded_non_object(self):
return self.unloaded.intersection(
attr
for attr in self.manager
if self.manager[attr].impl.accepts_scalar_loader
)
def _instance_dict(self):
return None
def _modified_event(
self, dict_, attr, previous, collection=False, is_userland=False
):
if attr:
if not attr.send_modified_events:
return
if is_userland and attr.key not in dict_:
raise sa_exc.InvalidRequestError(
"Can't flag attribute '%s' modified; it's not present in "
"the object state" % attr.key
)
if attr.key not in self.committed_state or is_userland:
if collection:
if previous is NEVER_SET:
if attr.key in dict_:
previous = dict_[attr.key]
if previous not in (None, NO_VALUE, NEVER_SET):
previous = attr.copy(previous)
self.committed_state[attr.key] = previous
if attr.key in self._last_known_values:
self._last_known_values[attr.key] = NO_VALUE
# assert self._strong_obj is None or self.modified
if (self.session_id and self._strong_obj is None) or not self.modified:
self.modified = True
instance_dict = self._instance_dict()
if instance_dict:
instance_dict._modified.add(self)
# only create _strong_obj link if attached
# to a session
inst = self.obj()
if self.session_id:
self._strong_obj = inst
if inst is None and attr:
raise orm_exc.ObjectDereferencedError(
"Can't emit change event for attribute '%s' - "
"parent object of type %s has been garbage "
"collected."
% (self.manager[attr.key], base.state_class_str(self))
)
def _commit(self, dict_, keys):
"""Commit attributes.
This is used by a partial-attribute load operation to mark committed
those attributes which were refreshed from the database.
Attributes marked as "expired" can potentially remain "expired" after
this step if a value was not populated in state.dict.
"""
for key in keys:
self.committed_state.pop(key, None)
self.expired = False
self.expired_attributes.difference_update(
set(keys).intersection(dict_)
)
# the per-keys commit removes object-level callables,
# while that of commit_all does not. it's not clear
# if this behavior has a clear rationale, however tests do
# ensure this is what it does.
if self.callables:
for key in (
set(self.callables).intersection(keys).intersection(dict_)
):
del self.callables[key]
def _commit_all(self, dict_, instance_dict=None):
"""commit all attributes unconditionally.
This is used after a flush() or a full load/refresh
to remove all pending state from the instance.
- all attributes are marked as "committed"
- the "strong dirty reference" is removed
- the "modified" flag is set to False
- any "expired" markers for scalar attributes loaded are removed.
- lazy load callables for objects / collections *stay*
Attributes marked as "expired" can potentially remain
"expired" after this step if a value was not populated in state.dict.
"""
self._commit_all_states([(self, dict_)], instance_dict)
@classmethod
def _commit_all_states(self, iter_, instance_dict=None):
"""Mass / highly inlined version of commit_all()."""
for state, dict_ in iter_:
state_dict = state.__dict__
state.committed_state.clear()
if "_pending_mutations" in state_dict:
del state_dict["_pending_mutations"]
state.expired_attributes.difference_update(dict_)
if instance_dict and state.modified:
instance_dict._modified.discard(state)
state.modified = state.expired = False
state._strong_obj = None
class AttributeState(object):
"""Provide an inspection interface corresponding
to a particular attribute on a particular mapped object.
The :class:`.AttributeState` object is accessed
via the :attr:`.InstanceState.attrs` collection
of a particular :class:`.InstanceState`::
from sqlalchemy import inspect
insp = inspect(some_mapped_object)
attr_state = insp.attrs.some_attribute
"""
def __init__(self, state, key):
self.state = state
self.key = key
@property
def loaded_value(self):
"""The current value of this attribute as loaded from the database.
If the value has not been loaded, or is otherwise not present
in the object's dictionary, returns NO_VALUE.
"""
return self.state.dict.get(self.key, NO_VALUE)
@property
def value(self):
"""Return the value of this attribute.
This operation is equivalent to accessing the object's
attribute directly or via ``getattr()``, and will fire
off any pending loader callables if needed.
"""
return self.state.manager[self.key].__get__(
self.state.obj(), self.state.class_
)
@property
def history(self):
"""Return the current **pre-flush** change history for
this attribute, via the :class:`.History` interface.
This method will **not** emit loader callables if the value of the
attribute is unloaded.
.. note::
The attribute history system tracks changes on a **per flush
basis**. Each time the :class:`.Session` is flushed, the history
of each attribute is reset to empty. The :class:`.Session` by
default autoflushes each time a :class:`_query.Query` is invoked.
For
options on how to control this, see :ref:`session_flushing`.
.. seealso::
:meth:`.AttributeState.load_history` - retrieve history
using loader callables if the value is not locally present.
:func:`.attributes.get_history` - underlying function
"""
return self.state.get_history(self.key, PASSIVE_NO_INITIALIZE)
def load_history(self):
"""Return the current **pre-flush** change history for
this attribute, via the :class:`.History` interface.
This method **will** emit loader callables if the value of the
attribute is unloaded.
.. note::
The attribute history system tracks changes on a **per flush
basis**. Each time the :class:`.Session` is flushed, the history
of each attribute is reset to empty. The :class:`.Session` by
default autoflushes each time a :class:`_query.Query` is invoked.
For
options on how to control this, see :ref:`session_flushing`.
.. seealso::
:attr:`.AttributeState.history`
:func:`.attributes.get_history` - underlying function
.. versionadded:: 0.9.0
"""
return self.state.get_history(self.key, PASSIVE_OFF ^ INIT_OK)
class PendingCollection(object):
"""A writable placeholder for an unloaded collection.
Stores items appended to and removed from a collection that has not yet
been loaded. When the collection is loaded, the changes stored in
PendingCollection are applied to it to produce the final result.
"""
def __init__(self):
self.deleted_items = util.IdentitySet()
self.added_items = util.OrderedIdentitySet()
def append(self, value):
if value in self.deleted_items:
self.deleted_items.remove(value)
else:
self.added_items.add(value)
def remove(self, value):
if value in self.added_items:
self.added_items.remove(value)
else:
self.deleted_items.add(value)
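# Illustrative sketch (not part of the original source): basic semantics
# of the pending buffer, assuming hypothetical objects a and b:
#
#     pc = PendingCollection()
#     pc.append(a)   # a recorded in added_items
#     pc.remove(b)   # b recorded in deleted_items
#     pc.remove(a)   # a simply dropped from added_items again
#
# when the real collection later loads, added_items are merged in and
# deleted_items filtered out to produce the final result.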
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/engine/interfaces.py
|
# engine/interfaces.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Define core interfaces used by the engine system."""
from .. import util
from ..sql.compiler import Compiled # noqa
from ..sql.compiler import TypeCompiler # noqa
class Dialect(object):
"""Define the behavior of a specific database and DB-API combination.
Any aspect of metadata definition, SQL query generation,
execution, result-set handling, or anything else which varies
between databases is defined under the general category of the
Dialect. The Dialect acts as a factory for other
database-specific object implementations including
ExecutionContext, Compiled, DefaultGenerator, and TypeEngine.
.. note:: Third party dialects should not subclass :class:`.Dialect`
directly. Instead, subclass :class:`.default.DefaultDialect` or
descendant class.
All dialects include the following attributes. There are many other
attributes that may be supported as well:
``name``
identifying name for the dialect from a DBAPI-neutral point of view
(i.e. 'sqlite')
``driver``
identifying name for the dialect's DBAPI
``positional``
True if the paramstyle for this Dialect is positional.
``paramstyle``
the paramstyle to be used (some DB-APIs support multiple
paramstyles).
``encoding``
type of encoding to use for unicode, usually defaults to
'utf-8'.
``statement_compiler``
a :class:`.Compiled` class used to compile SQL statements
``ddl_compiler``
a :class:`.Compiled` class used to compile DDL statements
``server_version_info``
a tuple containing a version number for the DB backend in use.
This value is only available for supporting dialects, and is
typically populated during the initial connection to the database.
``default_schema_name``
the name of the default schema. This value is only available for
supporting dialects, and is typically populated during the
initial connection to the database.
``execution_ctx_cls``
a :class:`.ExecutionContext` class used to handle statement execution
``execute_sequence_format``
either the 'tuple' or 'list' type, depending on what cursor.execute()
accepts for the second argument (they vary).
``preparer``
a :class:`~sqlalchemy.sql.compiler.IdentifierPreparer` class used to
quote identifiers.
``supports_alter``
``True`` if the database supports ``ALTER TABLE`` - used only for
generating foreign key constraints in certain circumstances
``max_identifier_length``
The maximum length of identifier names.
``supports_sane_rowcount``
Indicate whether the dialect properly implements rowcount for
``UPDATE`` and ``DELETE`` statements.
``supports_sane_multi_rowcount``
Indicate whether the dialect properly implements rowcount for
``UPDATE`` and ``DELETE`` statements when executed via
executemany.
``preexecute_autoincrement_sequences``
True if 'implicit' primary key functions must be executed separately
in order to get their value. This is currently oriented towards
PostgreSQL.
``implicit_returning``
use RETURNING or equivalent during INSERT execution in order to load
newly generated primary keys and other column defaults in one execution,
which are then available via inserted_primary_key.
If an insert statement has returning() specified explicitly,
the "implicit" functionality is not used and inserted_primary_key
will not be available.
``colspecs``
A dictionary of TypeEngine classes from sqlalchemy.types mapped
to subclasses that are specific to the dialect class. This
dictionary is class-level only and is not accessed from the
dialect instance itself.
``supports_default_values``
Indicates if the construct ``INSERT INTO tablename DEFAULT
VALUES`` is supported
``supports_sequences``
Indicates if the dialect supports CREATE SEQUENCE or similar.
``sequences_optional``
If True, indicates if the "optional" flag on the Sequence() construct
should signal to not generate a CREATE SEQUENCE. Applies only to
dialects that support sequences. Currently used only to allow PostgreSQL
SERIAL to be used on a column that specifies Sequence() for usage on
other backends.
``supports_native_enum``
Indicates if the dialect supports a native ENUM construct.
This will prevent types.Enum from generating a CHECK
constraint when that type is used.
``supports_native_boolean``
Indicates if the dialect supports a native boolean construct.
This will prevent types.Boolean from generating a CHECK
constraint when that type is used.
``dbapi_exception_translation_map``
A dictionary of names that will contain as values the names of
pep-249 exceptions ("IntegrityError", "OperationalError", etc)
keyed to alternate class names, to support the case where a
DBAPI has exception classes that aren't named as they are
referred to (e.g. IntegrityError = MyException). In the vast
majority of cases this dictionary is empty.
.. versionadded:: 1.0.5
"""
_has_events = False
def create_connect_args(self, url):
"""Build DB-API compatible connection arguments.
Given a :class:`.URL` object, returns a tuple
consisting of a ``(*args, **kwargs)`` suitable to send directly
to the dbapi's connect function. The arguments are sent to the
:meth:`.Dialect.connect` method which then runs the DBAPI-level
``connect()`` function.
The method typically makes use of the
:meth:`.URL.translate_connect_args`
method in order to generate a dictionary of options.
The default implementation is::
def create_connect_args(self, url):
opts = url.translate_connect_args()
opts.update(url.query)
return [[], opts]
:param url: a :class:`.URL` object
:return: a tuple of ``(*args, **kwargs)`` which will be passed to the
:meth:`.Dialect.connect` method.
.. seealso::
:meth:`.URL.translate_connect_args`
"""
raise NotImplementedError()
@classmethod
def type_descriptor(cls, typeobj):
"""Transform a generic type to a dialect-specific type.
Dialect classes will usually use the
:func:`_types.adapt_type` function in the types module to
accomplish this.
The returned result is cached *per dialect class* so can
contain no dialect-instance state.
"""
raise NotImplementedError()
def initialize(self, connection):
"""Called during strategized creation of the dialect with a
connection.
Allows dialects to configure options based on server version info or
other properties.
The connection passed here is a SQLAlchemy Connection object,
with full capabilities.
The initialize() method of the base dialect should be called via
super().
"""
pass
def reflecttable(
self, connection, table, include_columns, exclude_columns, resolve_fks
):
"""Load table description from the database.
Given a :class:`_engine.Connection` and a
:class:`~sqlalchemy.schema.Table` object, reflect its columns and
properties from the database.
The implementation of this method is provided by
:meth:`.DefaultDialect.reflecttable`, which makes use of
:class:`_reflection.Inspector` to retrieve column information.
Dialects should **not** seek to implement this method, and should
instead implement individual schema inspection operations such as
:meth:`.Dialect.get_columns`, :meth:`.Dialect.get_pk_constraint`,
etc.
"""
raise NotImplementedError()
def get_columns(self, connection, table_name, schema=None, **kw):
"""Return information about columns in `table_name`.
Given a :class:`_engine.Connection`, a string
`table_name`, and an optional string `schema`, return column
information as a list of dictionaries with these keys:
name
the column's name
type
[sqlalchemy.types#TypeEngine]
nullable
boolean
default
the column's default value
autoincrement
boolean
sequence
a dictionary of the form
{'name' : str, 'start' :int, 'increment': int, 'minvalue': int,
'maxvalue': int, 'nominvalue': bool, 'nomaxvalue': bool,
'cycle': bool, 'cache': int, 'order': bool}
Additional column attributes may be present.
"""
raise NotImplementedError()
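    # Illustrative sketch (not part of the original source): a single
    # element of the list a dialect would return here might look like
    #
    #     {
    #         "name": "id",
    #         "type": INTEGER(),
    #         "nullable": False,
    #         "default": None,
    #         "autoincrement": True,
    #     }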
@util.deprecated(
"0.8",
"The :meth:`.Dialect.get_primary_keys` method is deprecated and "
"will be removed in a future release. Please refer to the "
":meth:`.Dialect.get_pk_constraint` method. ",
)
def get_primary_keys(self, connection, table_name, schema=None, **kw):
"""Return information about primary keys in `table_name`.
"""
raise NotImplementedError()
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
"""Return information about the primary key constraint on
`table_name`.
Given a :class:`_engine.Connection`, a string
`table_name`, and an optional string `schema`, return primary
key information as a dictionary with these keys:
constrained_columns
a list of column names that make up the primary key
name
optional name of the primary key constraint.
"""
raise NotImplementedError()
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
"""Return information about foreign_keys in `table_name`.
Given a :class:`_engine.Connection`, a string
`table_name`, and an optional string `schema`, return foreign
key information as a list of dicts with these keys:
name
the constraint's name
constrained_columns
a list of column names that make up the foreign key
referred_schema
the name of the referred schema
referred_table
the name of the referred table
referred_columns
a list of column names in the referred table that correspond to
constrained_columns
"""
raise NotImplementedError()
def get_table_names(self, connection, schema=None, **kw):
"""Return a list of table names for `schema`."""
raise NotImplementedError()
def get_temp_table_names(self, connection, schema=None, **kw):
"""Return a list of temporary table names on the given connection,
if supported by the underlying backend.
"""
raise NotImplementedError()
def get_view_names(self, connection, schema=None, **kw):
"""Return a list of all view names available in the database.
schema:
Optional, retrieve names from a non-default schema.
"""
raise NotImplementedError()
def get_temp_view_names(self, connection, schema=None, **kw):
"""Return a list of temporary view names on the given connection,
if supported by the underlying backend.
"""
raise NotImplementedError()
def get_view_definition(self, connection, view_name, schema=None, **kw):
"""Return view definition.
Given a :class:`_engine.Connection`, a string
`view_name`, and an optional string `schema`, return the view
definition.
"""
raise NotImplementedError()
def get_indexes(self, connection, table_name, schema=None, **kw):
"""Return information about indexes in `table_name`.
Given a :class:`_engine.Connection`, a string
`table_name` and an optional string `schema`, return index
information as a list of dictionaries with these keys:
name
the index's name
column_names
list of column names in order
unique
boolean
"""
raise NotImplementedError()
def get_unique_constraints(
self, connection, table_name, schema=None, **kw
):
r"""Return information about unique constraints in `table_name`.
Given a string `table_name` and an optional string `schema`, return
unique constraint information as a list of dicts with these keys:
name
the unique constraint's name
column_names
list of column names in order
\**kw
other options passed to the dialect's get_unique_constraints()
method.
.. versionadded:: 0.9.0
"""
raise NotImplementedError()
def get_check_constraints(self, connection, table_name, schema=None, **kw):
r"""Return information about check constraints in `table_name`.
Given a string `table_name` and an optional string `schema`, return
check constraint information as a list of dicts with these keys:
name
the check constraint's name
sqltext
the check constraint's SQL expression
\**kw
other options passed to the dialect's get_check_constraints()
method.
.. versionadded:: 1.1.0
"""
raise NotImplementedError()
def get_table_comment(self, connection, table_name, schema=None, **kw):
r"""Return the "comment" for the table identified by `table_name`.
Given a string `table_name` and an optional string `schema`, return
table comment information as a dictionary with this key:
text
text of the comment
Raises ``NotImplementedError`` for dialects that don't support
comments.
.. versionadded:: 1.2
"""
raise NotImplementedError()
def normalize_name(self, name):
"""convert the given name to lowercase if it is detected as
case insensitive.
this method is only used if the dialect defines
requires_name_normalize=True.
"""
raise NotImplementedError()
def denormalize_name(self, name):
"""convert the given name to a case insensitive identifier
for the backend if it is an all-lowercase name.
this method is only used if the dialect defines
requires_name_normalize=True.
"""
raise NotImplementedError()
def has_table(self, connection, table_name, schema=None):
"""Check the existence of a particular table in the database.
Given a :class:`_engine.Connection` object and a string
`table_name`, return True if the given table (possibly within
the specified `schema`) exists in the database, False
otherwise.
"""
raise NotImplementedError()
def has_sequence(self, connection, sequence_name, schema=None):
"""Check the existence of a particular sequence in the database.
Given a :class:`_engine.Connection` object and a string
`sequence_name`, return True if the given sequence exists in
the database, False otherwise.
"""
raise NotImplementedError()
def _get_server_version_info(self, connection):
"""Retrieve the server version info from the given connection.
This is used by the default implementation to populate the
"server_version_info" attribute and is called exactly
once upon first connect.
"""
raise NotImplementedError()
def _get_default_schema_name(self, connection):
"""Return the string name of the currently selected schema from
the given connection.
This is used by the default implementation to populate the
"default_schema_name" attribute and is called exactly
once upon first connect.
"""
raise NotImplementedError()
def do_begin(self, dbapi_connection):
"""Provide an implementation of ``connection.begin()``, given a
DB-API connection.
The DBAPI has no dedicated "begin" method and it is expected
that transactions are implicit. This hook is provided for those
DBAPIs that might need additional help in this area.
Note that :meth:`.Dialect.do_begin` is not called unless a
:class:`.Transaction` object is in use. The
:meth:`.Dialect.do_autocommit`
hook is provided for DBAPIs that need some extra commands emitted
after a commit in order to enter the next transaction, when the
SQLAlchemy :class:`_engine.Connection`
is used in its default "autocommit"
mode.
:param dbapi_connection: a DBAPI connection, typically
proxied within a :class:`.ConnectionFairy`.
"""
raise NotImplementedError()
def do_rollback(self, dbapi_connection):
"""Provide an implementation of ``connection.rollback()``, given
a DB-API connection.
:param dbapi_connection: a DBAPI connection, typically
proxied within a :class:`.ConnectionFairy`.
"""
raise NotImplementedError()
def do_commit(self, dbapi_connection):
"""Provide an implementation of ``connection.commit()``, given a
DB-API connection.
:param dbapi_connection: a DBAPI connection, typically
proxied within a :class:`.ConnectionFairy`.
"""
raise NotImplementedError()
def do_close(self, dbapi_connection):
"""Provide an implementation of ``connection.close()``, given a DBAPI
connection.
This hook is called by the :class:`_pool.Pool`
when a connection has been
detached from the pool, or is being returned beyond the normal
capacity of the pool.
"""
raise NotImplementedError()
def create_xid(self):
"""Create a two-phase transaction ID.
This id will be passed to do_begin_twophase(),
do_rollback_twophase(), do_commit_twophase(). Its format is
unspecified.
"""
raise NotImplementedError()
def do_savepoint(self, connection, name):
"""Create a savepoint with the given name.
:param connection: a :class:`_engine.Connection`.
:param name: savepoint name.
"""
raise NotImplementedError()
def do_rollback_to_savepoint(self, connection, name):
"""Rollback a connection to the named savepoint.
:param connection: a :class:`_engine.Connection`.
:param name: savepoint name.
"""
raise NotImplementedError()
def do_release_savepoint(self, connection, name):
"""Release the named savepoint on a connection.
:param connection: a :class:`_engine.Connection`.
:param name: savepoint name.
"""
raise NotImplementedError()
def do_begin_twophase(self, connection, xid):
"""Begin a two phase transaction on the given connection.
:param connection: a :class:`_engine.Connection`.
:param xid: xid
"""
raise NotImplementedError()
def do_prepare_twophase(self, connection, xid):
"""Prepare a two phase transaction on the given connection.
:param connection: a :class:`_engine.Connection`.
:param xid: xid
"""
raise NotImplementedError()
def do_rollback_twophase(
self, connection, xid, is_prepared=True, recover=False
):
"""Rollback a two phase transaction on the given connection.
:param connection: a :class:`_engine.Connection`.
:param xid: xid
:param is_prepared: whether or not
:meth:`.TwoPhaseTransaction.prepare` was called.
:param recover: if the recover flag was passed.
"""
raise NotImplementedError()
def do_commit_twophase(
self, connection, xid, is_prepared=True, recover=False
):
"""Commit a two phase transaction on the given connection.
:param connection: a :class:`_engine.Connection`.
:param xid: xid
:param is_prepared: whether or not
:meth:`.TwoPhaseTransaction.prepare` was called.
:param recover: if the recover flag was passed.
"""
raise NotImplementedError()
def do_recover_twophase(self, connection):
"""Recover list of uncommitted prepared two phase transaction
identifiers on the given connection.
:param connection: a :class:`_engine.Connection`.
"""
raise NotImplementedError()
def do_executemany(self, cursor, statement, parameters, context=None):
"""Provide an implementation of ``cursor.executemany(statement,
parameters)``."""
raise NotImplementedError()
def do_execute(self, cursor, statement, parameters, context=None):
"""Provide an implementation of ``cursor.execute(statement,
parameters)``."""
raise NotImplementedError()
def do_execute_no_params(
self, cursor, statement, parameters, context=None
):
"""Provide an implementation of ``cursor.execute(statement)``.
The parameter collection should not be sent.
"""
raise NotImplementedError()
def is_disconnect(self, e, connection, cursor):
"""Return True if the given DB-API error indicates an invalid
connection"""
raise NotImplementedError()
def connect(self, *cargs, **cparams):
r"""Establish a connection using this dialect's DBAPI.
The default implementation of this method is::
def connect(self, *cargs, **cparams):
return self.dbapi.connect(*cargs, **cparams)
The ``*cargs, **cparams`` parameters are generated directly
from this dialect's :meth:`.Dialect.create_connect_args` method.
This method may be used for dialects that need to perform programmatic
per-connection steps when a new connection is procured from the
DBAPI.
:param \*cargs: positional parameters returned from the
:meth:`.Dialect.create_connect_args` method
:param \*\*cparams: keyword parameters returned from the
:meth:`.Dialect.create_connect_args` method.
:return: a DBAPI connection, typically from the :pep:`249` module
level ``.connect()`` function.
.. seealso::
:meth:`.Dialect.create_connect_args`
:meth:`.Dialect.on_connect`
"""
def on_connect(self):
"""return a callable which sets up a newly created DBAPI connection.
The callable should accept a single argument "conn" which is the
DBAPI connection itself. The inner callable has no
return value.
E.g.::
class MyDialect(default.DefaultDialect):
# ...
def on_connect(self):
def do_on_connect(connection):
connection.execute("SET SPECIAL FLAGS etc")
return do_on_connect
This is used to set dialect-wide per-connection options such as
isolation modes, Unicode modes, etc.
The "do_on_connect" callable is invoked by using the
:meth:`_events.PoolEvents.first_connect` and
:meth:`_events.PoolEvents.connect` event
hooks, then unwrapping the DBAPI connection and passing it into the
callable. The reason it is invoked for both events is so that any
dialect-level initialization that occurs upon first connection, which
also makes use of the :meth:`_events.PoolEvents.first_connect` method,
will
        proceed after this hook has been called. This currently means the
        hook is in fact called twice for the very first connection that a
        dialect creates, and once per connection afterwards.
If None is returned, no event listener is generated.
:return: a callable that accepts a single DBAPI connection as an
argument, or None.
.. seealso::
:meth:`.Dialect.connect` - allows the DBAPI ``connect()`` sequence
itself to be controlled.
"""
return None
def reset_isolation_level(self, dbapi_conn):
"""Given a DBAPI connection, revert its isolation to the default.
Note that this is a dialect-level method which is used as part
of the implementation of the :class:`_engine.Connection` and
:class:`_engine.Engine`
isolation level facilities; these APIs should be preferred for
most typical use cases.
.. seealso::
:meth:`_engine.Connection.get_isolation_level`
- view current level
:attr:`_engine.Connection.default_isolation_level`
- view default level
:paramref:`.Connection.execution_options.isolation_level` -
set per :class:`_engine.Connection` isolation level
:paramref:`_sa.create_engine.isolation_level` -
set per :class:`_engine.Engine` isolation level
"""
raise NotImplementedError()
def set_isolation_level(self, dbapi_conn, level):
"""Given a DBAPI connection, set its isolation level.
Note that this is a dialect-level method which is used as part
of the implementation of the :class:`_engine.Connection` and
:class:`_engine.Engine`
isolation level facilities; these APIs should be preferred for
most typical use cases.
.. seealso::
:meth:`_engine.Connection.get_isolation_level`
- view current level
:attr:`_engine.Connection.default_isolation_level`
- view default level
:paramref:`.Connection.execution_options.isolation_level` -
set per :class:`_engine.Connection` isolation level
:paramref:`_sa.create_engine.isolation_level` -
set per :class:`_engine.Engine` isolation level
"""
raise NotImplementedError()
def get_isolation_level(self, dbapi_conn):
"""Given a DBAPI connection, return its isolation level.
When working with a :class:`_engine.Connection` object,
the corresponding
DBAPI connection may be procured using the
:attr:`_engine.Connection.connection` accessor.
Note that this is a dialect-level method which is used as part
of the implementation of the :class:`_engine.Connection` and
:class:`_engine.Engine` isolation level facilities;
these APIs should be preferred for most typical use cases.
.. seealso::
:meth:`_engine.Connection.get_isolation_level`
- view current level
:attr:`_engine.Connection.default_isolation_level`
- view default level
:paramref:`.Connection.execution_options.isolation_level` -
set per :class:`_engine.Connection` isolation level
:paramref:`_sa.create_engine.isolation_level` -
set per :class:`_engine.Engine` isolation level
"""
raise NotImplementedError()
@classmethod
def get_dialect_cls(cls, url):
"""Given a URL, return the :class:`.Dialect` that will be used.
This is a hook that allows an external plugin to provide functionality
around an existing dialect, by allowing the plugin to be loaded
from the url based on an entrypoint, and then the plugin returns
the actual dialect to be used.
By default this just returns the cls.
.. versionadded:: 1.0.3
"""
return cls
@classmethod
def load_provisioning(cls):
"""set up the provision.py module for this dialect.
For dialects that include a provision.py module that sets up
provisioning followers, this method should initiate that process.
A typical implementation would be::
@classmethod
def load_provisioning(cls):
__import__("mydialect.provision")
The default method assumes a module named ``provision.py`` inside
the owning package of the current dialect, based on the ``__module__``
attribute::
@classmethod
def load_provisioning(cls):
package = ".".join(cls.__module__.split(".")[0:-1])
try:
__import__(package + ".provision")
except ImportError:
pass
.. versionadded:: 1.3.14
"""
@classmethod
def engine_created(cls, engine):
"""A convenience hook called before returning the final
:class:`_engine.Engine`.
If the dialect returned a different class from the
:meth:`.get_dialect_cls`
method, then the hook is called on both classes, first on
the dialect class returned by the :meth:`.get_dialect_cls` method and
then on the class on which the method was called.
The hook should be used by dialects and/or wrappers to apply special
events to the engine or its components. In particular, it allows
a dialect-wrapping class to apply dialect-level events.
.. versionadded:: 1.0.3
"""
pass
class CreateEnginePlugin(object):
"""A set of hooks intended to augment the construction of an
:class:`_engine.Engine` object based on entrypoint names in a URL.
The purpose of :class:`.CreateEnginePlugin` is to allow third-party
systems to apply engine, pool and dialect level event listeners without
the need for the target application to be modified; instead, the plugin
names can be added to the database URL. Target applications for
:class:`.CreateEnginePlugin` include:
* connection and SQL performance tools, e.g. which use events to track
number of checkouts and/or time spent with statements
* connectivity plugins such as proxies
Plugins are registered using entry points in a similar way as that
of dialects::
entry_points={
'sqlalchemy.plugins': [
'myplugin = myapp.plugins:MyPlugin'
            ]
        }
A plugin that uses the above names would be invoked from a database
URL as in::
from sqlalchemy import create_engine
engine = create_engine(
"mysql+pymysql://scott:tiger@localhost/test?plugin=myplugin")
    Alternatively, the :paramref:`_sa.create_engine.plugins` argument may be
passed as a list to :func:`_sa.create_engine`::
engine = create_engine(
"mysql+pymysql://scott:tiger@localhost/test",
plugins=["myplugin"])
.. versionadded:: 1.2.3 plugin names can also be specified
to :func:`_sa.create_engine` as a list
The ``plugin`` argument supports multiple instances, so that a URL
may specify multiple plugins; they are loaded in the order stated
in the URL::
engine = create_engine(
"mysql+pymysql://scott:tiger@localhost/"
"test?plugin=plugin_one&plugin=plugin_twp&plugin=plugin_three")
A plugin can receive additional arguments from the URL string as
well as from the keyword arguments passed to :func:`_sa.create_engine`.
The :class:`.URL` object and the keyword dictionary are passed to the
constructor so that these arguments can be extracted from the url's
:attr:`.URL.query` collection as well as from the dictionary::
class MyPlugin(CreateEnginePlugin):
def __init__(self, url, kwargs):
self.my_argument_one = url.query.pop('my_argument_one')
self.my_argument_two = url.query.pop('my_argument_two')
self.my_argument_three = kwargs.pop('my_argument_three', None)
Arguments like those illustrated above would be consumed from the
following::
from sqlalchemy import create_engine
engine = create_engine(
"mysql+pymysql://scott:tiger@localhost/"
"test?plugin=myplugin&my_argument_one=foo&my_argument_two=bar",
my_argument_three='bat')
The URL and dictionary are used for subsequent setup of the engine
as they are, so the plugin can modify their arguments in-place.
Arguments that are only understood by the plugin should be popped
or otherwise removed so that they aren't interpreted as erroneous
arguments afterwards.
When the engine creation process completes and produces the
:class:`_engine.Engine` object, it is again passed to the plugin via the
:meth:`.CreateEnginePlugin.engine_created` hook. In this hook, additional
changes can be made to the engine, most typically involving setup of
events (e.g. those defined in :ref:`core_event_toplevel`).
.. versionadded:: 1.1
"""
def __init__(self, url, kwargs):
"""Construct a new :class:`.CreateEnginePlugin`.
The plugin object is instantiated individually for each call
        to :func:`_sa.create_engine`.  A single :class:`_engine.Engine` will be
passed to the :meth:`.CreateEnginePlugin.engine_created` method
corresponding to this URL.
:param url: the :class:`.URL` object. The plugin should inspect
what it needs here as well as remove its custom arguments from the
:attr:`.URL.query` collection. The URL can be modified in-place
in any other way as well.
        :param kwargs: The keyword arguments passed to :func:`_sa.create_engine`.
The plugin can read and modify this dictionary in-place, to affect
the ultimate arguments used to create the engine. It should
remove its custom arguments from the dictionary as well.
"""
self.url = url
def handle_dialect_kwargs(self, dialect_cls, dialect_args):
"""parse and modify dialect kwargs"""
def handle_pool_kwargs(self, pool_cls, pool_args):
"""parse and modify pool kwargs"""
def engine_created(self, engine):
"""Receive the :class:`_engine.Engine`
object when it is fully constructed.
The plugin may make additional changes to the engine, such as
registering engine or connection pool events.
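        E.g., a minimal sketch of registering an event from this hook;
        the handler body and ``log`` object are illustrative only, and
        ``event`` is assumed to be imported from ``sqlalchemy``::
            def engine_created(self, engine):
                @event.listens_for(engine, "before_execute")
                def before_execute(conn, clauseelement, multiparams, params):
                    log.debug("executing: %s", clauseelement)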
"""
class ExecutionContext(object):
"""A messenger object for a Dialect that corresponds to a single
execution.
ExecutionContext should have these data members:
connection
Connection object which can be freely used by default value
generators to execute SQL. This Connection should reference the
same underlying connection/transactional resources of
root_connection.
root_connection
Connection object which is the source of this ExecutionContext. This
Connection may have close_with_result=True set, in which case it can
only be used once.
dialect
dialect which created this ExecutionContext.
cursor
DB-API cursor procured from the connection,
compiled
if passed to constructor, sqlalchemy.engine.base.Compiled object
being executed,
statement
string version of the statement to be executed. Is either
passed to the constructor, or must be created from the
sql.Compiled object by the time pre_exec() has completed.
parameters
bind parameters passed to the execute() method. For compiled
statements, this is a dictionary or list of dictionaries. For
textual statements, it should be in a format suitable for the
dialect's paramstyle (i.e. dict or list of dicts for non
positional, list or list of lists/tuples for positional).
isinsert
True if the statement is an INSERT.
isupdate
True if the statement is an UPDATE.
should_autocommit
True if the statement is a "committable" statement.
prefetch_cols
a list of Column objects for which a client-side default
was fired off. Applies to inserts and updates.
postfetch_cols
a list of Column objects for which a server-side default or
inline SQL expression value was fired off. Applies to inserts
and updates.
"""
exception = None
"""A DBAPI-level exception that was caught when this ExecutionContext
attempted to execute a statement.
This attribute is meaningful only within the
:meth:`_events.ConnectionEvents.dbapi_error` event.
.. versionadded:: 0.9.7
.. seealso::
:attr:`.ExecutionContext.is_disconnect`
:meth:`_events.ConnectionEvents.dbapi_error`
"""
is_disconnect = None
"""Boolean flag set to True or False when a DBAPI-level exception
is caught when this ExecutionContext attempted to execute a statement.
This attribute is meaningful only within the
:meth:`_events.ConnectionEvents.dbapi_error` event.
.. versionadded:: 0.9.7
.. seealso::
:attr:`.ExecutionContext.exception`
:meth:`_events.ConnectionEvents.dbapi_error`
"""
def create_cursor(self):
"""Return a new cursor generated from this ExecutionContext's
connection.
Some dialects may wish to change the behavior of
connection.cursor(), such as postgresql which may return a PG
"server side" cursor.
"""
raise NotImplementedError()
def pre_exec(self):
"""Called before an execution of a compiled statement.
If a compiled statement was passed to this ExecutionContext,
the `statement` and `parameters` datamembers must be
initialized after this statement is complete.
"""
raise NotImplementedError()
def post_exec(self):
"""Called after the execution of a compiled statement.
If a compiled statement was passed to this ExecutionContext,
the `last_insert_ids`, `last_inserted_params`, etc.
datamembers should be available after this method completes.
"""
raise NotImplementedError()
def result(self):
"""Return a result object corresponding to this ExecutionContext.
Returns a ResultProxy.
"""
raise NotImplementedError()
def handle_dbapi_exception(self, e):
"""Receive a DBAPI exception which occurred upon execute, result
fetch, etc."""
raise NotImplementedError()
def should_autocommit_text(self, statement):
"""Parse the given textual statement and return True if it refers to
a "committable" statement"""
raise NotImplementedError()
def lastrow_has_defaults(self):
"""Return True if the last INSERT or UPDATE row contained
inlined or database-side defaults.
"""
raise NotImplementedError()
def get_rowcount(self):
"""Return the DBAPI ``cursor.rowcount`` value, or in some
cases an interpreted value.
See :attr:`_engine.ResultProxy.rowcount` for details on this.
"""
raise NotImplementedError()
class Connectable(object):
"""Interface for an object which supports execution of SQL constructs.
The two implementations of :class:`.Connectable` are
:class:`_engine.Connection` and :class:`_engine.Engine`.
Connectable must also implement the 'dialect' member which references a
:class:`.Dialect` instance.
"""
def connect(self, **kwargs):
"""Return a :class:`_engine.Connection` object.
Depending on context, this may be ``self`` if this object
is already an instance of :class:`_engine.Connection`, or a newly
procured :class:`_engine.Connection` if this object is an instance
of :class:`_engine.Engine`.
"""
engine = None
"""The :class:`_engine.Engine` instance referred to by this
:class:`.Connectable`.
May be ``self`` if this is already an :class:`_engine.Engine`.
"""
@util.deprecated(
"1.3",
"The :meth:`_engine.Engine.contextual_connect` and "
":meth:`_engine.Connection.contextual_connect` methods are "
"deprecated. This "
"method is an artifact of the threadlocal engine strategy which is "
"also to be deprecated. For explicit connections from an "
":class:`_engine.Engine`, use the :meth:`_engine.Engine.connect` "
"method.",
)
def contextual_connect(self, *arg, **kw):
"""Return a :class:`_engine.Connection`
object which may be part of an ongoing
context.
Depending on context, this may be ``self`` if this object
is already an instance of :class:`_engine.Connection`, or a newly
procured :class:`_engine.Connection` if this object is an instance
of :class:`_engine.Engine`.
"""
return self._contextual_connect(*arg, **kw)
def _contextual_connect(self):
raise NotImplementedError()
@util.deprecated(
"0.7",
"The :meth:`.Connectable.create` method is deprecated and will be "
"removed in a future release. Please use the ``.create()`` method "
"on specific schema objects to emit DDL sequences, including "
":meth:`_schema.Table.create`, :meth:`.Index.create`, and "
":meth:`_schema.MetaData.create_all`.",
)
def create(self, entity, **kwargs):
"""Emit CREATE statements for the given schema entity.
"""
raise NotImplementedError()
@util.deprecated(
"0.7",
"The :meth:`.Connectable.drop` method is deprecated and will be "
"removed in a future release. Please use the ``.drop()`` method "
"on specific schema objects to emit DDL sequences, including "
":meth:`_schema.Table.drop`, :meth:`.Index.drop`, and "
":meth:`_schema.MetaData.drop_all`.",
)
def drop(self, entity, **kwargs):
"""Emit DROP statements for the given schema entity.
"""
raise NotImplementedError()
    def execute(self, object_, *multiparams, **params):
        """Executes the given construct and returns a
        :class:`_engine.ResultProxy`."""
raise NotImplementedError()
def scalar(self, object_, *multiparams, **params):
"""Executes and returns the first column of the first row.
The underlying cursor is closed after execution.
"""
raise NotImplementedError()
def _run_visitor(self, visitorcallable, element, **kwargs):
raise NotImplementedError()
def _execute_clauseelement(self, elem, multiparams=None, params=None):
raise NotImplementedError()
class ExceptionContext(object):
"""Encapsulate information about an error condition in progress.
This object exists solely to be passed to the
:meth:`_events.ConnectionEvents.handle_error` event,
supporting an interface that
can be extended without backwards-incompatibility.
.. versionadded:: 0.9.7
"""
connection = None
"""The :class:`_engine.Connection` in use during the exception.
This member is present, except in the case of a failure when
first connecting.
.. seealso::
:attr:`.ExceptionContext.engine`
"""
engine = None
"""The :class:`_engine.Engine` in use during the exception.
This member should always be present, even in the case of a failure
when first connecting.
.. versionadded:: 1.0.0
"""
cursor = None
"""The DBAPI cursor object.
May be None.
"""
statement = None
"""String SQL statement that was emitted directly to the DBAPI.
May be None.
"""
parameters = None
"""Parameter collection that was emitted directly to the DBAPI.
May be None.
"""
original_exception = None
"""The exception object which was caught.
This member is always present.
"""
sqlalchemy_exception = None
"""The :class:`sqlalchemy.exc.StatementError` which wraps the original,
and will be raised if exception handling is not circumvented by the event.
May be None, as not all exception types are wrapped by SQLAlchemy.
For DBAPI-level exceptions that subclass the dbapi's Error class, this
field will always be present.
"""
chained_exception = None
"""The exception that was returned by the previous handler in the
exception chain, if any.
If present, this exception will be the one ultimately raised by
SQLAlchemy unless a subsequent handler replaces it.
May be None.
"""
execution_context = None
"""The :class:`.ExecutionContext` corresponding to the execution
operation in progress.
This is present for statement execution operations, but not for
operations such as transaction begin/end. It also is not present when
the exception was raised before the :class:`.ExecutionContext`
could be constructed.
Note that the :attr:`.ExceptionContext.statement` and
:attr:`.ExceptionContext.parameters` members may represent a
different value than that of the :class:`.ExecutionContext`,
potentially in the case where a
:meth:`_events.ConnectionEvents.before_cursor_execute` event or similar
modified the statement/parameters to be sent.
May be None.
"""
is_disconnect = None
"""Represent whether the exception as occurred represents a "disconnect"
condition.
This flag will always be True or False within the scope of the
:meth:`_events.ConnectionEvents.handle_error` handler.
SQLAlchemy will defer to this flag in order to determine whether or not
the connection should be invalidated subsequently. That is, by
assigning to this flag, a "disconnect" event which then results in
a connection and pool invalidation can be invoked or prevented by
changing this flag.
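    E.g., a sketch of a custom disconnect check within a
    :meth:`_events.ConnectionEvents.handle_error` handler; the error
    message tested for here is hypothetical::
        @event.listens_for(engine, "handle_error")
        def handle_error(context):
            if "MY BACKEND WENT AWAY" in str(context.original_exception):
                context.is_disconnect = True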
"""
invalidate_pool_on_disconnect = True
"""Represent whether all connections in the pool should be invalidated
when a "disconnect" condition is in effect.
Setting this flag to False within the scope of the
:meth:`_events.ConnectionEvents.handle_error`
event will have the effect such
that the full collection of connections in the pool will not be
invalidated during a disconnect; only the current connection that is the
subject of the error will actually be invalidated.
The purpose of this flag is for custom disconnect-handling schemes where
the invalidation of other connections in the pool is to be performed
based on other conditions, or even on a per-connection basis.
.. versionadded:: 1.0.3
"""
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/engine/threadlocal.py
|
# engine/threadlocal.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides a thread-local transactional wrapper around the root Engine class.
The ``threadlocal`` module is invoked when using the
``strategy="threadlocal"`` flag with :func:`~sqlalchemy.engine.create_engine`.
This module is semi-private and is invoked automatically when the threadlocal
engine strategy is used.
"""
import weakref
from . import base
from .. import util
class TLConnection(base.Connection):
def __init__(self, *arg, **kw):
super(TLConnection, self).__init__(*arg, **kw)
self.__opencount = 0
def _increment_connect(self):
self.__opencount += 1
return self
def close(self):
if self.__opencount == 1:
base.Connection.close(self)
self.__opencount -= 1
def _force_close(self):
self.__opencount = 0
base.Connection.close(self)
class TLEngine(base.Engine):
"""An Engine that includes support for thread-local managed
transactions.
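    A usage sketch mirroring the historical documented pattern (the URL
    and statement are illustrative)::
        engine = create_engine('sqlite://', strategy='threadlocal')
        engine.begin()
        try:
            engine.execute(some_statement)
            engine.commit()
        except:
            engine.rollback()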
"""
_tl_connection_cls = TLConnection
@util.deprecated(
"1.3",
"The 'threadlocal' engine strategy is deprecated, and will be "
"removed in a future release. The strategy is no longer relevant "
"to modern usage patterns (including that of the ORM "
":class:`.Session` object) which make use of a "
":class:`_engine.Connection` "
"object in order to invoke statements.",
)
def __init__(self, *args, **kwargs):
super(TLEngine, self).__init__(*args, **kwargs)
self._connections = util.threading.local()
def contextual_connect(self, **kw):
return self._contextual_connect(**kw)
def _contextual_connect(self, **kw):
if not hasattr(self._connections, "conn"):
connection = None
else:
connection = self._connections.conn()
if connection is None or connection.closed:
# guards against pool-level reapers, if desired.
# or not connection.connection.is_valid:
connection = self._tl_connection_cls(
self,
self._wrap_pool_connect(self.pool.connect, connection),
**kw
)
self._connections.conn = weakref.ref(connection)
return connection._increment_connect()
def begin_twophase(self, xid=None):
if not hasattr(self._connections, "trans"):
self._connections.trans = []
self._connections.trans.append(
self._contextual_connect().begin_twophase(xid=xid)
)
return self
def begin_nested(self):
if not hasattr(self._connections, "trans"):
self._connections.trans = []
self._connections.trans.append(
self._contextual_connect().begin_nested()
)
return self
def begin(self):
if not hasattr(self._connections, "trans"):
self._connections.trans = []
self._connections.trans.append(self._contextual_connect().begin())
return self
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
if type_ is None:
self.commit()
else:
self.rollback()
def prepare(self):
if (
not hasattr(self._connections, "trans")
or not self._connections.trans
):
return
self._connections.trans[-1].prepare()
def commit(self):
if (
not hasattr(self._connections, "trans")
or not self._connections.trans
):
return
trans = self._connections.trans.pop(-1)
trans.commit()
def rollback(self):
if (
not hasattr(self._connections, "trans")
or not self._connections.trans
):
return
trans = self._connections.trans.pop(-1)
trans.rollback()
def dispose(self):
self._connections = util.threading.local()
super(TLEngine, self).dispose()
@property
def closed(self):
return (
not hasattr(self._connections, "conn")
or self._connections.conn() is None
or self._connections.conn().closed
)
def close(self):
if not self.closed:
self._contextual_connect().close()
connection = self._connections.conn()
connection._force_close()
del self._connections.conn
self._connections.trans = []
def __repr__(self):
return "TLEngine(%r)" % self.url
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/engine/strategies.py
|
# engine/strategies.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Strategies for creating new instances of Engine types.
These are semi-private implementation classes which provide the
underlying behavior for the "strategy" keyword argument available on
:func:`~sqlalchemy.engine.create_engine`. Currently available options are
``plain``, ``threadlocal``, and ``mock``.
New strategies can be added via new ``EngineStrategy`` classes.
"""
from operator import attrgetter
from . import base
from . import threadlocal
from . import url
from .. import event
from .. import pool as poollib
from .. import util
from ..sql import schema
strategies = {}
class EngineStrategy(object):
"""An adaptor that processes input arguments and produces an Engine.
Provides a ``create`` method that receives input arguments and
produces an instance of base.Engine or a subclass.
"""
def __init__(self):
strategies[self.name] = self
def create(self, *args, **kwargs):
"""Given arguments, returns a new Engine instance."""
raise NotImplementedError()
class DefaultEngineStrategy(EngineStrategy):
"""Base class for built-in strategies."""
def create(self, name_or_url, **kwargs):
# create url.URL object
u = url.make_url(name_or_url)
plugins = u._instantiate_plugins(kwargs)
u.query.pop("plugin", None)
kwargs.pop("plugins", None)
entrypoint = u._get_entrypoint()
dialect_cls = entrypoint.get_dialect_cls(u)
if kwargs.pop("_coerce_config", False):
def pop_kwarg(key, default=None):
value = kwargs.pop(key, default)
if key in dialect_cls.engine_config_types:
value = dialect_cls.engine_config_types[key](value)
return value
else:
pop_kwarg = kwargs.pop
dialect_args = {}
# consume dialect arguments from kwargs
for k in util.get_cls_kwargs(dialect_cls):
if k in kwargs:
dialect_args[k] = pop_kwarg(k)
dbapi = kwargs.pop("module", None)
if dbapi is None:
dbapi_args = {}
for k in util.get_func_kwargs(dialect_cls.dbapi):
if k in kwargs:
dbapi_args[k] = pop_kwarg(k)
dbapi = dialect_cls.dbapi(**dbapi_args)
dialect_args["dbapi"] = dbapi
for plugin in plugins:
plugin.handle_dialect_kwargs(dialect_cls, dialect_args)
# create dialect
dialect = dialect_cls(**dialect_args)
# assemble connection arguments
(cargs, cparams) = dialect.create_connect_args(u)
cparams.update(pop_kwarg("connect_args", {}))
cargs = list(cargs) # allow mutability
# look for existing pool or create
pool = pop_kwarg("pool", None)
if pool is None:
def connect(connection_record=None):
if dialect._has_events:
for fn in dialect.dispatch.do_connect:
connection = fn(
dialect, connection_record, cargs, cparams
)
if connection is not None:
return connection
return dialect.connect(*cargs, **cparams)
creator = pop_kwarg("creator", connect)
poolclass = pop_kwarg("poolclass", None)
if poolclass is None:
poolclass = dialect_cls.get_pool_class(u)
pool_args = {"dialect": dialect}
# consume pool arguments from kwargs, translating a few of
# the arguments
translate = {
"logging_name": "pool_logging_name",
"echo": "echo_pool",
"timeout": "pool_timeout",
"recycle": "pool_recycle",
"events": "pool_events",
"use_threadlocal": "pool_threadlocal",
"reset_on_return": "pool_reset_on_return",
"pre_ping": "pool_pre_ping",
"use_lifo": "pool_use_lifo",
}
for k in util.get_cls_kwargs(poolclass):
tk = translate.get(k, k)
if tk in kwargs:
pool_args[k] = pop_kwarg(tk)
for plugin in plugins:
plugin.handle_pool_kwargs(poolclass, pool_args)
pool = poolclass(creator, **pool_args)
        else:
            if isinstance(pool, poollib.dbapi_proxy._DBProxy):
                pool = pool.get_pool(*cargs, **cparams)
pool._dialect = dialect
# create engine.
engineclass = self.engine_cls
engine_args = {}
for k in util.get_cls_kwargs(engineclass):
if k in kwargs:
engine_args[k] = pop_kwarg(k)
_initialize = kwargs.pop("_initialize", True)
# all kwargs should be consumed
if kwargs:
raise TypeError(
"Invalid argument(s) %s sent to create_engine(), "
"using configuration %s/%s/%s. Please check that the "
"keyword arguments are appropriate for this combination "
"of components."
% (
",".join("'%s'" % k for k in kwargs),
dialect.__class__.__name__,
pool.__class__.__name__,
engineclass.__name__,
)
)
engine = engineclass(pool, dialect, u, **engine_args)
if _initialize:
do_on_connect = dialect.on_connect()
if do_on_connect:
def on_connect(dbapi_connection, connection_record):
conn = getattr(
dbapi_connection, "_sqla_unwrap", dbapi_connection
)
if conn is None:
return
do_on_connect(conn)
event.listen(pool, "first_connect", on_connect)
event.listen(pool, "connect", on_connect)
def first_connect(dbapi_connection, connection_record):
c = base.Connection(
engine, connection=dbapi_connection, _has_events=False
)
c._execution_options = util.immutabledict()
dialect.initialize(c)
dialect.do_rollback(c.connection)
event.listen(
pool,
"first_connect",
first_connect,
_once_unless_exception=True,
)
dialect_cls.engine_created(engine)
if entrypoint is not dialect_cls:
entrypoint.engine_created(engine)
for plugin in plugins:
plugin.engine_created(engine)
return engine
class PlainEngineStrategy(DefaultEngineStrategy):
"""Strategy for configuring a regular Engine."""
name = "plain"
engine_cls = base.Engine
PlainEngineStrategy()
class ThreadLocalEngineStrategy(DefaultEngineStrategy):
"""Strategy for configuring an Engine with threadlocal behavior."""
name = "threadlocal"
engine_cls = threadlocal.TLEngine
ThreadLocalEngineStrategy()
class MockEngineStrategy(EngineStrategy):
"""Strategy for configuring an Engine-like object with mocked execution.
Produces a single mock Connectable object which dispatches
statement execution to a passed-in function.
"""
name = "mock"
def create(self, name_or_url, executor, **kwargs):
# create url.URL object
u = url.make_url(name_or_url)
dialect_cls = u.get_dialect()
dialect_args = {}
# consume dialect arguments from kwargs
for k in util.get_cls_kwargs(dialect_cls):
if k in kwargs:
dialect_args[k] = kwargs.pop(k)
# create dialect
dialect = dialect_cls(**dialect_args)
return MockEngineStrategy.MockConnection(dialect, executor)
class MockConnection(base.Connectable):
def __init__(self, dialect, execute):
self._dialect = dialect
self.execute = execute
engine = property(lambda s: s)
dialect = property(attrgetter("_dialect"))
name = property(lambda s: s._dialect.name)
schema_for_object = schema._schema_getter(None)
def contextual_connect(self, **kwargs):
return self
def connect(self, **kwargs):
return self
def execution_options(self, **kw):
return self
def compiler(self, statement, parameters, **kwargs):
return self._dialect.compiler(
statement, parameters, engine=self, **kwargs
)
def create(self, entity, **kwargs):
kwargs["checkfirst"] = False
from sqlalchemy.engine import ddl
ddl.SchemaGenerator(self.dialect, self, **kwargs).traverse_single(
entity
)
def drop(self, entity, **kwargs):
kwargs["checkfirst"] = False
from sqlalchemy.engine import ddl
ddl.SchemaDropper(self.dialect, self, **kwargs).traverse_single(
entity
)
def _run_visitor(
self, visitorcallable, element, connection=None, **kwargs
):
kwargs["checkfirst"] = False
visitorcallable(self.dialect, self, **kwargs).traverse_single(
element
)
def execute(self, object_, *multiparams, **params):
raise NotImplementedError()
MockEngineStrategy()
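# Example: the "mock" strategy is historically used to dump DDL to a stream
# without a real database connection.  A minimal sketch; the ``dump``
# callable and ``metadata`` object are illustrative:
#
#     def dump(sql, *multiparams, **params):
#         print(sql.compile(dialect=engine.dialect))
#
#     engine = create_engine("postgresql://", strategy="mock", executor=dump)
#     metadata.create_all(engine, checkfirst=False)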
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/engine/util.py
|
# engine/util.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .. import util
def connection_memoize(key):
"""Decorator, memoize a function in a connection.info stash.
Only applicable to functions which take no arguments other than a
connection. The memo will be stored in ``connection.info[key]``.
"""
@util.decorator
def decorated(fn, self, connection):
connection = connection.connect()
try:
return connection.info[key]
except KeyError:
connection.info[key] = val = fn(self, connection)
return val
return decorated
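# A usage sketch for ``connection_memoize``; the dialect method and SQL
# shown are hypothetical, only the decorator itself is real:
#
#     class MyDialect(default.DefaultDialect):
#         @connection_memoize("_mydialect_version_string")
#         def _get_version_string(self, connection):
#             return connection.execute("select version()").scalar()
#
# Subsequent calls with the same connection return the value stored in
# ``connection.info["_mydialect_version_string"]`` without re-querying.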
def py_fallback():
def _distill_params(multiparams, params): # noqa
r"""Given arguments from the calling form \*multiparams, \**params,
return a list of bind parameter structures, usually a list of
dictionaries.
In the case of 'raw' execution which accepts positional parameters,
it may be a list of tuples or lists.
"""
if not multiparams:
if params:
return [params]
else:
return []
elif len(multiparams) == 1:
zero = multiparams[0]
if isinstance(zero, (list, tuple)):
if (
not zero
or hasattr(zero[0], "__iter__")
and not hasattr(zero[0], "strip")
):
# execute(stmt, [{}, {}, {}, ...])
# execute(stmt, [(), (), (), ...])
return zero
else:
# execute(stmt, ("value", "value"))
return [zero]
elif hasattr(zero, "keys"):
# execute(stmt, {"key":"value"})
return [zero]
else:
# execute(stmt, "value")
return [[zero]]
else:
if hasattr(multiparams[0], "__iter__") and not hasattr(
multiparams[0], "strip"
):
return multiparams
else:
return [multiparams]
return locals()
try:
from sqlalchemy.cutils import _distill_params # noqa
except ImportError:
globals().update(py_fallback())
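# Behavior sketch for ``_distill_params`` across the common calling forms,
# based on the branches above:
#
#     _distill_params(({"x": 1},), {})             -> [{"x": 1}]
#     _distill_params(([{"x": 1}, {"x": 2}],), {}) -> [{"x": 1}, {"x": 2}]
#     _distill_params(("a", "b"), {})              -> [("a", "b")]
#     _distill_params((), {"x": 1})                -> [{"x": 1}]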
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/engine/reflection.py
|
# engine/reflection.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides an abstraction for obtaining database schema information.
Usage Notes:
Here are some general conventions when accessing the low level inspector
methods such as get_table_names, get_columns, etc.
1. Inspector methods return lists of dicts in most cases for the following
reasons:
* They're both standard types that can be serialized.
* Using a dict instead of a tuple allows easy expansion of attributes.
* Using a list for the outer structure maintains order and is easy to work
with (e.g. list comprehension [d['name'] for d in cols]).
2. Records that contain a name, such as the column name in a column record
use the key 'name'. So for most return values, each record will have a
'name' attribute.
"""
from .base import Connectable
from .. import exc
from .. import inspection
from .. import sql
from .. import util
from ..sql import operators
from ..sql import schema as sa_schema
from ..sql.type_api import TypeEngine
from ..util import deprecated
from ..util import topological
@util.decorator
def cache(fn, self, con, *args, **kw):
info_cache = kw.get("info_cache", None)
if info_cache is None:
return fn(self, con, *args, **kw)
key = (
fn.__name__,
tuple(a for a in args if isinstance(a, util.string_types)),
tuple((k, v) for k, v in kw.items() if k != "info_cache"),
)
ret = info_cache.get(key)
if ret is None:
ret = fn(self, con, *args, **kw)
info_cache[key] = ret
return ret
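# Dialect-level reflection methods are typically decorated with ``@cache``
# so that they consult the ``info_cache`` dictionary the Inspector passes
# along; a sketch, with ``get_table_names`` standing in for any decorated
# method:
#
#     @cache
#     def get_table_names(self, connection, schema=None, **kw):
#         ...
#
# Repeated calls with the same arguments and the same ``info_cache`` then
# return the cached result rather than re-querying the database.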
class Inspector(object):
"""Performs database schema inspection.
The Inspector acts as a proxy to the reflection methods of the
:class:`~sqlalchemy.engine.interfaces.Dialect`, providing a
consistent interface as well as caching support for previously
fetched metadata.
A :class:`_reflection.Inspector` object is usually created via the
:func:`_sa.inspect` function::
from sqlalchemy import inspect, create_engine
engine = create_engine('...')
insp = inspect(engine)
The inspection method above is equivalent to using the
:meth:`_reflection.Inspector.from_engine` method, i.e.::
engine = create_engine('...')
insp = Inspector.from_engine(engine)
Where above, the :class:`~sqlalchemy.engine.interfaces.Dialect` may opt
to return an :class:`_reflection.Inspector`
subclass that provides additional
methods specific to the dialect's target database.
"""
def __init__(self, bind):
"""Initialize a new :class:`_reflection.Inspector`.
:param bind: a :class:`~sqlalchemy.engine.Connectable`,
which is typically an instance of
:class:`~sqlalchemy.engine.Engine` or
:class:`~sqlalchemy.engine.Connection`.
For a dialect-specific instance of :class:`_reflection.Inspector`, see
:meth:`_reflection.Inspector.from_engine`
"""
# this might not be a connection, it could be an engine.
self.bind = bind
# set the engine
if hasattr(bind, "engine"):
self.engine = bind.engine
else:
self.engine = bind
if self.engine is bind:
# if engine, ensure initialized
bind.connect().close()
self.dialect = self.engine.dialect
self.info_cache = {}
@classmethod
def from_engine(cls, bind):
"""Construct a new dialect-specific Inspector object from the given
engine or connection.
:param bind: a :class:`~sqlalchemy.engine.Connectable`,
which is typically an instance of
:class:`~sqlalchemy.engine.Engine` or
:class:`~sqlalchemy.engine.Connection`.
        This method differs from a direct constructor call of
:class:`_reflection.Inspector` in that the
:class:`~sqlalchemy.engine.interfaces.Dialect` is given a chance to
provide a dialect-specific :class:`_reflection.Inspector` instance,
which may
provide additional methods.
See the example at :class:`_reflection.Inspector`.
"""
if hasattr(bind.dialect, "inspector"):
return bind.dialect.inspector(bind)
return Inspector(bind)
@inspection._inspects(Connectable)
def _insp(bind):
return Inspector.from_engine(bind)
@property
def default_schema_name(self):
"""Return the default schema name presented by the dialect
for the current engine's database user.
E.g. this is typically ``public`` for PostgreSQL and ``dbo``
for SQL Server.
"""
return self.dialect.default_schema_name
def get_schema_names(self):
"""Return all schema names.
"""
if hasattr(self.dialect, "get_schema_names"):
return self.dialect.get_schema_names(
self.bind, info_cache=self.info_cache
)
return []
@util.deprecated_params(
order_by=(
"1.0",
"The :paramref:`get_table_names.order_by` parameter is deprecated "
"and will be removed in a future release. Please refer to "
":meth:`_reflection.Inspector.get_sorted_table_and_fkc_names` "
"for a "
"more comprehensive solution to resolving foreign key cycles "
"between tables.",
)
)
def get_table_names(self, schema=None, order_by=None):
"""Return all table names in referred to within a particular schema.
The names are expected to be real tables only, not views.
Views are instead returned using the
:meth:`_reflection.Inspector.get_view_names`
method.
:param schema: Schema name. If ``schema`` is left at ``None``, the
database's default schema is
used, else the named schema is searched. If the database does not
support named schemas, behavior is undefined if ``schema`` is not
passed as ``None``. For special quoting, use :class:`.quoted_name`.
:param order_by: Optional, may be the string "foreign_key" to sort
the result on foreign key dependencies. Does not automatically
resolve cycles, and will raise :class:`.CircularDependencyError`
if cycles exist.
.. seealso::
:meth:`_reflection.Inspector.get_sorted_table_and_fkc_names`
:attr:`_schema.MetaData.sorted_tables`
"""
if hasattr(self.dialect, "get_table_names"):
tnames = self.dialect.get_table_names(
self.bind, schema, info_cache=self.info_cache
)
else:
tnames = self.engine.table_names(schema)
if order_by == "foreign_key":
tuples = []
for tname in tnames:
for fkey in self.get_foreign_keys(tname, schema):
if tname != fkey["referred_table"]:
tuples.append((fkey["referred_table"], tname))
tnames = list(topological.sort(tuples, tnames))
return tnames
def get_sorted_table_and_fkc_names(self, schema=None):
"""Return dependency-sorted table and foreign key constraint names in
referred to within a particular schema.
This will yield 2-tuples of
``(tablename, [(tname, fkname), (tname, fkname), ...])``
consisting of table names in CREATE order grouped with the foreign key
constraint names that are not detected as belonging to a cycle.
The final element
will be ``(None, [(tname, fkname), (tname, fkname), ..])``
which will consist of remaining
foreign key constraint names that would require a separate CREATE
step after-the-fact, based on dependencies between tables.
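        A minimal consumption sketch (the engine is illustrative)::
            insp = inspect(engine)
            for table_name, fk_names in insp.get_sorted_table_and_fkc_names():
                print(table_name, fk_names)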
        .. versionadded:: 1.0.0
.. seealso::
:meth:`_reflection.Inspector.get_table_names`
:func:`.sort_tables_and_constraints` - similar method which works
with an already-given :class:`_schema.MetaData`.
"""
if hasattr(self.dialect, "get_table_names"):
tnames = self.dialect.get_table_names(
self.bind, schema, info_cache=self.info_cache
)
else:
tnames = self.engine.table_names(schema)
tuples = set()
remaining_fkcs = set()
fknames_for_table = {}
for tname in tnames:
fkeys = self.get_foreign_keys(tname, schema)
fknames_for_table[tname] = set([fk["name"] for fk in fkeys])
for fkey in fkeys:
if tname != fkey["referred_table"]:
tuples.add((fkey["referred_table"], tname))
try:
candidate_sort = list(topological.sort(tuples, tnames))
except exc.CircularDependencyError as err:
for edge in err.edges:
tuples.remove(edge)
remaining_fkcs.update(
(edge[1], fkc) for fkc in fknames_for_table[edge[1]]
)
candidate_sort = list(topological.sort(tuples, tnames))
return [
(tname, fknames_for_table[tname].difference(remaining_fkcs))
for tname in candidate_sort
] + [(None, list(remaining_fkcs))]
def get_temp_table_names(self):
"""return a list of temporary table names for the current bind.
This method is unsupported by most dialects; currently
only SQLite implements it.
.. versionadded:: 1.0.0
"""
return self.dialect.get_temp_table_names(
self.bind, info_cache=self.info_cache
)
def get_temp_view_names(self):
"""return a list of temporary view names for the current bind.
This method is unsupported by most dialects; currently
only SQLite implements it.
.. versionadded:: 1.0.0
"""
return self.dialect.get_temp_view_names(
self.bind, info_cache=self.info_cache
)
def get_table_options(self, table_name, schema=None, **kw):
"""Return a dictionary of options specified when the table of the
given name was created.
This currently includes some options that apply to MySQL tables.
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
if hasattr(self.dialect, "get_table_options"):
return self.dialect.get_table_options(
self.bind, table_name, schema, info_cache=self.info_cache, **kw
)
return {}
def get_view_names(self, schema=None):
"""Return all view names in `schema`.
:param schema: Optional, retrieve names from a non-default schema.
For special quoting, use :class:`.quoted_name`.
"""
return self.dialect.get_view_names(
self.bind, schema, info_cache=self.info_cache
)
def get_view_definition(self, view_name, schema=None):
"""Return definition for `view_name`.
:param schema: Optional, retrieve names from a non-default schema.
For special quoting, use :class:`.quoted_name`.
"""
return self.dialect.get_view_definition(
self.bind, view_name, schema, info_cache=self.info_cache
)
def get_columns(self, table_name, schema=None, **kw):
"""Return information about columns in `table_name`.
Given a string `table_name` and an optional string `schema`, return
column information as a list of dicts with these keys:
* ``name`` - the column's name
* ``type`` - the type of this column; an instance of
:class:`~sqlalchemy.types.TypeEngine`
* ``nullable`` - boolean flag if the column is NULL or NOT NULL
* ``default`` - the column's server default value - this is returned
as a string SQL expression.
* ``autoincrement`` - indicates that the column is auto incremented -
this is returned as a boolean or 'auto'
        * ``comment`` - (optional) the comment on the column. Only some
dialects return this key
* ``computed`` - (optional) when present it indicates that this column
is computed by the database. Only some dialects return this key.
Returned as a dict with the keys:
* ``sqltext`` - the expression used to generate this column returned
as a string SQL expression
* ``persisted`` - (optional) boolean that indicates if the column is
stored in the table
.. versionadded:: 1.3.16 - added support for computed reflection.
* ``dialect_options`` - (optional) a dict with dialect specific options
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
:return: list of dictionaries, each representing the definition of
a database column.
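        E.g., an iteration sketch (the engine and table name are
        illustrative)::
            insp = inspect(engine)
            for col in insp.get_columns("user"):
                print(col["name"], col["type"], col["nullable"])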
"""
col_defs = self.dialect.get_columns(
self.bind, table_name, schema, info_cache=self.info_cache, **kw
)
for col_def in col_defs:
# make this easy and only return instances for coltype
coltype = col_def["type"]
if not isinstance(coltype, TypeEngine):
col_def["type"] = coltype()
return col_defs
@deprecated(
"0.7",
"The :meth:`_reflection.Inspector.get_primary_keys` "
"method is deprecated and "
"will be removed in a future release. Please refer to the "
":meth:`_reflection.Inspector.get_pk_constraint` method.",
)
def get_primary_keys(self, table_name, schema=None, **kw):
"""Return information about primary keys in `table_name`.
Given a string `table_name`, and an optional string `schema`, return
primary key information as a list of column names.
"""
return self.dialect.get_pk_constraint(
self.bind, table_name, schema, info_cache=self.info_cache, **kw
)["constrained_columns"]
def get_pk_constraint(self, table_name, schema=None, **kw):
"""Return information about primary key constraint on `table_name`.
Given a string `table_name`, and an optional string `schema`, return
primary key information as a dictionary with these keys:
constrained_columns
a list of column names that make up the primary key
name
optional name of the primary key constraint.
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
return self.dialect.get_pk_constraint(
self.bind, table_name, schema, info_cache=self.info_cache, **kw
)
def get_foreign_keys(self, table_name, schema=None, **kw):
"""Return information about foreign_keys in `table_name`.
Given a string `table_name`, and an optional string `schema`, return
foreign key information as a list of dicts with these keys:
constrained_columns
a list of column names that make up the foreign key
referred_schema
the name of the referred schema
referred_table
the name of the referred table
referred_columns
a list of column names in the referred table that correspond to
constrained_columns
name
optional name of the foreign key constraint.
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
return self.dialect.get_foreign_keys(
self.bind, table_name, schema, info_cache=self.info_cache, **kw
)
def get_indexes(self, table_name, schema=None, **kw):
"""Return information about indexes in `table_name`.
Given a string `table_name` and an optional string `schema`, return
index information as a list of dicts with these keys:
name
the index's name
column_names
list of column names in order
unique
boolean
column_sorting
optional dict mapping column names to tuple of sort keywords,
which may include ``asc``, ``desc``, ``nullsfirst``, ``nullslast``.
.. versionadded:: 1.3.5
dialect_options
dict of dialect-specific index options. May not be present
for all dialects.
.. versionadded:: 1.0.0
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
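For illustration, a minimal sketch; the table name here is assumed::
    insp = Inspector.from_engine(engine)
    for ix in insp.get_indexes("user"):
        print(ix["name"], ix["column_names"], ix["unique"])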
"""
return self.dialect.get_indexes(
self.bind, table_name, schema, info_cache=self.info_cache, **kw
)
def get_unique_constraints(self, table_name, schema=None, **kw):
"""Return information about unique constraints in `table_name`.
Given a string `table_name` and an optional string `schema`, return
unique constraint information as a list of dicts with these keys:
name
the unique constraint's name
column_names
list of column names in order
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
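For illustration, a minimal sketch; the table name is assumed, and not
every dialect implements this method::
    insp = Inspector.from_engine(engine)
    for uc in insp.get_unique_constraints("user"):
        print(uc["name"], uc["column_names"])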
"""
return self.dialect.get_unique_constraints(
self.bind, table_name, schema, info_cache=self.info_cache, **kw
)
def get_table_comment(self, table_name, schema=None, **kw):
"""Return information about the table comment for ``table_name``.
Given a string ``table_name`` and an optional string ``schema``,
return table comment information as a dictionary with these keys:
text
text of the comment.
Raises ``NotImplementedError`` for a dialect that does not support
comments.
.. versionadded:: 1.2
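For illustration, a minimal sketch, assuming a backend that supports
table comments, such as PostgreSQL or MySQL::
    insp = Inspector.from_engine(engine)
    comment = insp.get_table_comment("user")
    # e.g. {'text': 'application users'}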
"""
return self.dialect.get_table_comment(
self.bind, table_name, schema, info_cache=self.info_cache, **kw
)
def get_check_constraints(self, table_name, schema=None, **kw):
"""Return information about check constraints in `table_name`.
Given a string `table_name` and an optional string `schema`, return
check constraint information as a list of dicts with these keys:
name
the check constraint's name
sqltext
the check constraint's SQL expression
dialect_options
may or may not be present; a dictionary with additional
dialect-specific options for this CHECK constraint
.. versionadded:: 1.3.8
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
.. versionadded:: 1.1.0
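For illustration, a minimal sketch; the table name is assumed, and not
every dialect implements this method::
    insp = Inspector.from_engine(engine)
    for cc in insp.get_check_constraints("user"):
        print(cc["name"], cc["sqltext"])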
"""
return self.dialect.get_check_constraints(
self.bind, table_name, schema, info_cache=self.info_cache, **kw
)
def reflecttable(
self,
table,
include_columns,
exclude_columns=(),
resolve_fks=True,
_extend_on=None,
):
"""Given a Table object, load its internal constructs based on
introspection.
This is the underlying method used by most dialects to produce
table reflection. Direct usage is like::
from sqlalchemy import create_engine, MetaData, Table
from sqlalchemy.engine.reflection import Inspector
engine = create_engine('...')
meta = MetaData()
user_table = Table('user', meta)
insp = Inspector.from_engine(engine)
insp.reflecttable(user_table, None)
:param table: a :class:`~sqlalchemy.schema.Table` instance.
:param include_columns: a list of string column names to include
in the reflection process. If ``None``, all columns are reflected.
"""
if _extend_on is not None:
if table in _extend_on:
return
else:
_extend_on.add(table)
dialect = self.bind.dialect
schema = self.bind.schema_for_object(table)
table_name = table.name
# get table-level arguments that are specifically
# intended for reflection, e.g. oracle_resolve_synonyms.
# these are unconditionally passed to related Table
# objects
reflection_options = dict(
(k, table.dialect_kwargs.get(k))
for k in dialect.reflection_options
if k in table.dialect_kwargs
)
# reflect table options, like mysql_engine
tbl_opts = self.get_table_options(
table_name, schema, **table.dialect_kwargs
)
if tbl_opts:
# add additional kwargs to the Table if the dialect
# returned them
table._validate_dialect_kwargs(tbl_opts)
if util.py2k:
if isinstance(schema, str):
schema = schema.decode(dialect.encoding)
if isinstance(table_name, str):
table_name = table_name.decode(dialect.encoding)
found_table = False
cols_by_orig_name = {}
for col_d in self.get_columns(
table_name, schema, **table.dialect_kwargs
):
found_table = True
self._reflect_column(
table,
col_d,
include_columns,
exclude_columns,
cols_by_orig_name,
)
if not found_table:
raise exc.NoSuchTableError(table.name)
self._reflect_pk(
table_name, schema, table, cols_by_orig_name, exclude_columns
)
self._reflect_fk(
table_name,
schema,
table,
cols_by_orig_name,
exclude_columns,
resolve_fks,
_extend_on,
reflection_options,
)
self._reflect_indexes(
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
)
self._reflect_unique_constraints(
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
)
self._reflect_check_constraints(
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
)
self._reflect_table_comment(
table_name, schema, table, reflection_options
)
def _reflect_column(
self, table, col_d, include_columns, exclude_columns, cols_by_orig_name
):
orig_name = col_d["name"]
table.dispatch.column_reflect(self, table, col_d)
# fetch name again as column_reflect is allowed to
# change it
name = col_d["name"]
if (include_columns and name not in include_columns) or (
exclude_columns and name in exclude_columns
):
return
coltype = col_d["type"]
col_kw = dict(
(k, col_d[k])
for k in [
"nullable",
"autoincrement",
"quote",
"info",
"key",
"comment",
]
if k in col_d
)
if "dialect_options" in col_d:
col_kw.update(col_d["dialect_options"])
colargs = []
if col_d.get("default") is not None:
default = col_d["default"]
if isinstance(default, sql.elements.TextClause):
default = sa_schema.DefaultClause(default, _reflected=True)
elif not isinstance(default, sa_schema.FetchedValue):
default = sa_schema.DefaultClause(
sql.text(col_d["default"]), _reflected=True
)
colargs.append(default)
if "computed" in col_d:
computed = sa_schema.Computed(**col_d["computed"])
colargs.append(computed)
if "sequence" in col_d:
self._reflect_col_sequence(col_d, colargs)
cols_by_orig_name[orig_name] = col = sa_schema.Column(
name, coltype, *colargs, **col_kw
)
if col.key in table.primary_key:
col.primary_key = True
table.append_column(col)
def _reflect_col_sequence(self, col_d, colargs):
if "sequence" in col_d:
# TODO: mssql and sybase are using this.
seq = col_d["sequence"]
sequence = sa_schema.Sequence(seq["name"], 1, 1)
if "start" in seq:
sequence.start = seq["start"]
if "increment" in seq:
sequence.increment = seq["increment"]
colargs.append(sequence)
def _reflect_pk(
self, table_name, schema, table, cols_by_orig_name, exclude_columns
):
pk_cons = self.get_pk_constraint(
table_name, schema, **table.dialect_kwargs
)
if pk_cons:
pk_cols = [
cols_by_orig_name[pk]
for pk in pk_cons["constrained_columns"]
if pk in cols_by_orig_name and pk not in exclude_columns
]
# update pk constraint name
table.primary_key.name = pk_cons.get("name")
# tell the PKConstraint to re-initialize
# its column collection
table.primary_key._reload(pk_cols)
def _reflect_fk(
self,
table_name,
schema,
table,
cols_by_orig_name,
exclude_columns,
resolve_fks,
_extend_on,
reflection_options,
):
fkeys = self.get_foreign_keys(
table_name, schema, **table.dialect_kwargs
)
for fkey_d in fkeys:
conname = fkey_d["name"]
# look for columns by orig name in cols_by_orig_name,
# but support columns that are in-Python only as fallback
constrained_columns = [
cols_by_orig_name[c].key if c in cols_by_orig_name else c
for c in fkey_d["constrained_columns"]
]
if exclude_columns and set(constrained_columns).intersection(
exclude_columns
):
continue
referred_schema = fkey_d["referred_schema"]
referred_table = fkey_d["referred_table"]
referred_columns = fkey_d["referred_columns"]
refspec = []
if referred_schema is not None:
if resolve_fks:
sa_schema.Table(
referred_table,
table.metadata,
autoload=True,
schema=referred_schema,
autoload_with=self.bind,
_extend_on=_extend_on,
**reflection_options
)
for column in referred_columns:
refspec.append(
".".join([referred_schema, referred_table, column])
)
else:
if resolve_fks:
sa_schema.Table(
referred_table,
table.metadata,
autoload=True,
autoload_with=self.bind,
schema=sa_schema.BLANK_SCHEMA,
_extend_on=_extend_on,
**reflection_options
)
for column in referred_columns:
refspec.append(".".join([referred_table, column]))
if "options" in fkey_d:
options = fkey_d["options"]
else:
options = {}
table.append_constraint(
sa_schema.ForeignKeyConstraint(
constrained_columns,
refspec,
conname,
link_to_name=True,
**options
)
)
_index_sort_exprs = [
("asc", operators.asc_op),
("desc", operators.desc_op),
("nullsfirst", operators.nullsfirst_op),
("nullslast", operators.nullslast_op),
]
def _reflect_indexes(
self,
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
):
# Indexes
indexes = self.get_indexes(table_name, schema)
for index_d in indexes:
name = index_d["name"]
columns = index_d["column_names"]
column_sorting = index_d.get("column_sorting", {})
unique = index_d["unique"]
flavor = index_d.get("type", "index")
dialect_options = index_d.get("dialect_options", {})
duplicates = index_d.get("duplicates_constraint")
if include_columns and not set(columns).issubset(include_columns):
util.warn(
"Omitting %s key for (%s), key covers omitted columns."
% (flavor, ", ".join(columns))
)
continue
if duplicates:
continue
# look for columns by orig name in cols_by_orig_name,
# but support columns that are in-Python only as fallback
idx_cols = []
for c in columns:
try:
idx_col = (
cols_by_orig_name[c]
if c in cols_by_orig_name
else table.c[c]
)
except KeyError:
util.warn(
"%s key '%s' was not located in "
"columns for table '%s'" % (flavor, c, table_name)
)
continue
c_sorting = column_sorting.get(c, ())
for k, op in self._index_sort_exprs:
if k in c_sorting:
idx_col = op(idx_col)
idx_cols.append(idx_col)
sa_schema.Index(
name,
*idx_cols,
_table=table,
**dict(list(dialect_options.items()) + [("unique", unique)])
)
def _reflect_unique_constraints(
self,
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
):
# Unique Constraints
try:
constraints = self.get_unique_constraints(table_name, schema)
except NotImplementedError:
# optional dialect feature
return
for const_d in constraints:
conname = const_d["name"]
columns = const_d["column_names"]
duplicates = const_d.get("duplicates_index")
if include_columns and not set(columns).issubset(include_columns):
util.warn(
"Omitting unique constraint key for (%s), "
"key covers omitted columns." % ", ".join(columns)
)
continue
if duplicates:
continue
# look for columns by orig name in cols_by_orig_name,
# but support columns that are in-Python only as fallback
constrained_cols = []
for c in columns:
try:
constrained_col = (
cols_by_orig_name[c]
if c in cols_by_orig_name
else table.c[c]
)
except KeyError:
util.warn(
"unique constraint key '%s' was not located in "
"columns for table '%s'" % (c, table_name)
)
else:
constrained_cols.append(constrained_col)
table.append_constraint(
sa_schema.UniqueConstraint(*constrained_cols, name=conname)
)
def _reflect_check_constraints(
self,
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
):
try:
constraints = self.get_check_constraints(table_name, schema)
except NotImplementedError:
# optional dialect feature
return
for const_d in constraints:
table.append_constraint(sa_schema.CheckConstraint(**const_d))
def _reflect_table_comment(
self, table_name, schema, table, reflection_options
):
try:
comment_dict = self.get_table_comment(table_name, schema)
except NotImplementedError:
return
else:
table.comment = comment_dict.get("text", None)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/engine/__init__.py
|
# engine/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQL connections, SQL execution and high-level DB-API interface.
The engine package defines the basic components used to interface
DB-API modules with higher-level statement construction,
connection-management, execution and result contexts. The primary
"entry point" class into this package is the Engine and its public
constructor ``create_engine()``.
This package includes:
base.py
Defines interface classes and some implementation classes which
comprise the basic components used to interface between a DB-API,
constructed and plain-text statements, connections, transactions,
and results.
default.py
Contains default implementations of some of the components defined
in base.py. All current database dialects use the classes in
default.py as base classes for their own database-specific
implementations.
strategies.py
The mechanics of constructing ``Engine`` objects are represented
here. Defines the ``EngineStrategy`` class which represents how
to go from arguments specified to the ``create_engine()``
function, to a fully constructed ``Engine``, including
initialization of connection pooling, dialects, and specific
subclasses of ``Engine``.
threadlocal.py
The ``TLEngine`` class is defined here, which is a subclass of
the generic ``Engine`` and tracks ``Connection`` and
``Transaction`` objects against the identity of the current
thread. This allows certain programming patterns based around
the concept of a "thread-local connection" to be possible.
The ``TLEngine`` is created by using the "threadlocal" engine
strategy in conjunction with the ``create_engine()`` function.
url.py
Defines the ``URL`` class which represents the individual
components of a string URL passed to ``create_engine()``. Also
defines a basic module-loading strategy for the dialect specifier
within a URL.
"""
from . import strategies
from . import util # noqa
from .base import Connection # noqa
from .base import Engine # noqa
from .base import NestedTransaction # noqa
from .base import RootTransaction # noqa
from .base import Transaction # noqa
from .base import TwoPhaseTransaction # noqa
from .interfaces import Compiled # noqa
from .interfaces import Connectable # noqa
from .interfaces import CreateEnginePlugin # noqa
from .interfaces import Dialect # noqa
from .interfaces import ExceptionContext # noqa
from .interfaces import ExecutionContext # noqa
from .interfaces import TypeCompiler # noqa
from .result import BaseRowProxy # noqa
from .result import BufferedColumnResultProxy # noqa
from .result import BufferedColumnRow # noqa
from .result import BufferedRowResultProxy # noqa
from .result import FullyBufferedResultProxy # noqa
from .result import ResultProxy # noqa
from .result import RowProxy # noqa
from .util import connection_memoize # noqa
from ..sql import ddl # noqa
# backwards compat
default_strategy = "plain"
def create_engine(*args, **kwargs):
"""Create a new :class:`_engine.Engine` instance.
The standard calling form is to send the URL as the
first positional argument, usually a string
that indicates database dialect and connection arguments::
engine = create_engine("postgresql://scott:tiger@localhost/test")
Additional keyword arguments may then follow it which
establish various options on the resulting :class:`_engine.Engine`
and its underlying :class:`.Dialect` and :class:`_pool.Pool`
constructs::
engine = create_engine("mysql://scott:tiger@hostname/dbname",
encoding='latin1', echo=True)
The string form of the URL is
``dialect[+driver]://user:password@host/dbname[?key=value..]``, where
``dialect`` is a database name such as ``mysql``, ``oracle``,
``postgresql``, etc., and ``driver`` the name of a DBAPI, such as
``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively,
the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`.
``**kwargs`` takes a wide variety of options which are routed
towards their appropriate components. Arguments may be specific to
the :class:`_engine.Engine`, the underlying :class:`.Dialect`,
as well as the
:class:`_pool.Pool`. Specific dialects also accept keyword arguments that
are unique to that dialect. Here, we describe the parameters
that are common to most :func:`_sa.create_engine()` usage.
Once established, the newly resulting :class:`_engine.Engine` will
request a connection from the underlying :class:`_pool.Pool` once
:meth:`_engine.Engine.connect` is called, or a method which depends on it
such as :meth:`_engine.Engine.execute` is invoked. The
:class:`_pool.Pool` in turn
will establish the first actual DBAPI connection when this request
is received. The :func:`_sa.create_engine` call itself does **not**
establish any actual DBAPI connections directly.
.. seealso::
:doc:`/core/engines`
:doc:`/dialects/index`
:ref:`connections_toplevel`
:param case_sensitive=True: if False, result column names
will match in a case-insensitive fashion, that is,
``row['SomeColumn']``.
:param connect_args: a dictionary of options which will be
passed directly to the DBAPI's ``connect()`` method as
additional keyword arguments. See the example
at :ref:`custom_dbapi_args`.
:param convert_unicode=False: if set to True, causes
all :class:`.String` datatypes to act as though the
:paramref:`.String.convert_unicode` flag has been set to ``True``,
regardless of a setting of ``False`` on an individual :class:`.String`
type. This has the effect of causing all :class:`.String` -based
columns to accommodate Python Unicode objects directly as though the
datatype were the :class:`.Unicode` type.
.. deprecated:: 1.3
The :paramref:`_sa.create_engine.convert_unicode` parameter
is deprecated and will be removed in a future release.
All modern DBAPIs now support Python Unicode directly and this
parameter is unnecessary.
:param creator: a callable which returns a DBAPI connection.
This creation function will be passed to the underlying
connection pool and will be used to create all new database
connections. Usage of this function causes connection
parameters specified in the URL argument to be bypassed.
This hook is not as flexible as the newer
:class:`_events.DialectEvents.do_connect` hook which allows complete
control over how a connection is made to the database, given the full
set of URL arguments and state beforehand.
.. seealso::
:class:`_events.DialectEvents.do_connect` - event hook that allows
full control over DBAPI connection mechanics.
:ref:`custom_dbapi_args`
:param echo=False: if True, the Engine will log all statements
as well as a ``repr()`` of their parameter lists to the default log
handler, which defaults to ``sys.stdout`` for output. If set to the
string ``"debug"``, result rows will be printed to the standard output
as well. The ``echo`` attribute of ``Engine`` can be modified at any
time to turn logging on and off; direct control of logging is also
available using the standard Python ``logging`` module.
.. seealso::
:ref:`dbengine_logging` - further detail on how to configure
logging.
:param echo_pool=False: if True, the connection pool will log
informational output such as when connections are invalidated
as well as when connections are recycled to the default log handler,
which defaults to ``sys.stdout`` for output. If set to the string
``"debug"``, the logging will include pool checkouts and checkins.
Direct control of logging is also available using the standard Python
``logging`` module.
.. seealso::
:ref:`dbengine_logging` - further detail on how to configure
logging.
:param empty_in_strategy: The SQL compilation strategy to use when
rendering an IN or NOT IN expression for :meth:`.ColumnOperators.in_`
where the right-hand side
is an empty set. This is a string value that may be one of
``static``, ``dynamic``, or ``dynamic_warn``. The ``static``
strategy is the default, and an IN comparison to an empty set
will generate a simple false expression "1 != 1". The ``dynamic``
strategy behaves like that of SQLAlchemy 1.1 and earlier, emitting
a false expression of the form "expr != expr", which has the effect
of evaluating to NULL in the case of a null expression.
``dynamic_warn`` is the same as ``dynamic``, however also emits a
warning when an empty set is encountered; this because the "dynamic"
comparison is typically poorly performing on most databases.
.. versionadded:: 1.2 Added the ``empty_in_strategy`` setting and
additionally defaulted the behavior for empty-set IN comparisons
to a static boolean expression.
:param encoding: Defaults to ``utf-8``. This is the string
encoding used by SQLAlchemy for string encode/decode
operations which occur within SQLAlchemy, **outside of
the DBAPIs own encoding facilities.**
.. note:: The ``encoding`` parameter deals only with in-Python
encoding issues that were prevalent with many DBAPIs under Python
2. Under Python 3 it is mostly unused. For DBAPIs that require
client encoding configurations, such as those of MySQL and Oracle,
please consult specific :ref:`dialect documentation
<dialect_toplevel>` for details.
All modern DBAPIs that work in Python 3 necessarily feature direct
support for Python unicode strings. Under Python 2, this was not
always the case. For those scenarios where the DBAPI is detected as
not supporting a Python ``unicode`` object under Python 2, this
encoding is used to determine the source/destination encoding. It is
**not used** for those cases where the DBAPI handles unicode directly.
To properly configure a system to accommodate Python ``unicode``
objects, the DBAPI should be configured to handle unicode to the
greatest degree as is appropriate - see the notes on unicode pertaining
to the specific target database in use at :ref:`dialect_toplevel`.
Areas where string encoding may need to be accommodated
outside of the DBAPI, nearly always under **Python 2 only**,
include zero or more of:
* the values passed to bound parameters, corresponding to
the :class:`.Unicode` type or the :class:`.String` type
when ``convert_unicode`` is ``True``;
* the values returned in result set columns corresponding
to the :class:`.Unicode` type or the :class:`.String`
type when ``convert_unicode`` is ``True``;
* the string SQL statement passed to the DBAPI's
``cursor.execute()`` method;
* the string names of the keys in the bound parameter
dictionary passed to the DBAPI's ``cursor.execute()``
as well as ``cursor.setinputsizes()`` methods;
* the string column names retrieved from the DBAPI's
``cursor.description`` attribute.
When using Python 3, the DBAPI is required to support all of the above
values as Python ``unicode`` objects, which in Python 3 are just known
as ``str``. In Python 2, the DBAPI does not specify unicode behavior
at all, so SQLAlchemy must make decisions for each of the above values
on a per-DBAPI basis - implementations are completely inconsistent in
their behavior.
:param execution_options: Dictionary execution options which will
be applied to all connections. See
:meth:`~sqlalchemy.engine.Connection.execution_options`
:param hide_parameters: Boolean, when set to True, SQL statement parameters
will not be displayed in INFO logging nor will they be formatted into
the string representation of :class:`.StatementError` objects.
.. versionadded:: 1.3.8
:param implicit_returning=True: When ``True``, a RETURNING-
compatible construct, if available, will be used to
fetch newly generated primary key values when a single row
INSERT statement is emitted with no existing returning()
clause. This applies to those backends which support RETURNING
or a compatible construct, including PostgreSQL, Firebird, Oracle,
Microsoft SQL Server. Set this to ``False`` to disable
the automatic usage of RETURNING.
:param isolation_level: this string parameter is interpreted by various
dialects in order to affect the transaction isolation level of the
database connection. The parameter essentially accepts some subset of
these string arguments: ``"SERIALIZABLE"``, ``"REPEATABLE_READ"``,
``"READ_COMMITTED"``, ``"READ_UNCOMMITTED"`` and ``"AUTOCOMMIT"``.
Behavior here varies per backend, and
individual dialects should be consulted directly.
Note that the isolation level can also be set on a
per-:class:`_engine.Connection` basis as well, using the
:paramref:`.Connection.execution_options.isolation_level`
feature.
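For illustration, a minimal sketch; the URL is illustrative and level
support varies per backend::
    engine = create_engine(
        "postgresql://scott:tiger@localhost/test",
        isolation_level="READ_COMMITTED"
    )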
.. seealso::
:attr:`_engine.Connection.default_isolation_level`
- view default level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`_engine.Connection` isolation level
:ref:`SQLite Transaction Isolation <sqlite_isolation_level>`
:ref:`PostgreSQL Transaction Isolation <postgresql_isolation_level>`
:ref:`MySQL Transaction Isolation <mysql_isolation_level>`
:ref:`session_transaction_isolation` - for the ORM
:param json_deserializer: for dialects that support the
:class:`_types.JSON`
datatype, this is a Python callable that will convert a JSON string
to a Python object. By default, the Python ``json.loads`` function is
used.
.. versionchanged:: 1.3.7 The SQLite dialect renamed this from
``_json_deserializer``.
:param json_serializer: for dialects that support the :class:`_types.JSON`
datatype, this is a Python callable that will render a given object
as JSON. By default, the Python ``json.dumps`` function is used.
.. versionchanged:: 1.3.7 The SQLite dialect renamed this from
``_json_serializer``.
:param label_length=None: optional integer value which limits
the size of dynamically generated column labels to that many
characters. If less than 6, labels are generated as
"_(counter)". If ``None``, the value of
``dialect.max_identifier_length``, which may be affected via the
:paramref:`_sa.create_engine.max_identifier_length` parameter,
is used instead. The value of
:paramref:`_sa.create_engine.label_length`
may not be larger than that of
:paramref:`_sa.create_engine.max_identifier_length`.
.. seealso::
:paramref:`_sa.create_engine.max_identifier_length`
:param listeners: A list of one or more
:class:`~sqlalchemy.interfaces.PoolListener` objects which will
receive connection pool events.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.engine" logger. Defaults to a hexstring of the
object's id.
:param max_identifier_length: integer; override the max_identifier_length
determined by the dialect. If ``None`` or zero, has no effect. This
is the database's configured maximum number of characters that may be
used in a SQL identifier such as a table name, column name, or label
name. All dialects determine this value automatically, however in the
case of a new database version for which this value has changed but
SQLAlchemy's dialect has not been adjusted, the value may be passed
here.
.. versionadded:: 1.3.9
.. seealso::
:paramref:`_sa.create_engine.label_length`
:param max_overflow=10: the number of connections to allow in
connection pool "overflow", that is connections that can be
opened above and beyond the pool_size setting, which defaults
to five. This is only used with :class:`~sqlalchemy.pool.QueuePool`.
:param module=None: reference to a Python module object (the module
itself, not its string name). Specifies an alternate DBAPI module to
be used by the engine's dialect. Each sub-dialect references a
specific DBAPI which will be imported before first connect. This
parameter causes the import to be bypassed, and the given module to
be used instead. Can be used for testing of DBAPIs as well as to
inject "mock" DBAPI implementations into the :class:`_engine.Engine`.
:param paramstyle=None: The `paramstyle <http://legacy.python.org/dev/peps/pep-0249/#paramstyle>`_
to use when rendering bound parameters. This style defaults to the
one recommended by the DBAPI itself, which is retrieved from the
``.paramstyle`` attribute of the DBAPI. However, most DBAPIs accept
more than one paramstyle, and in particular it may be desirable
to change a "named" paramstyle into a "positional" one, or vice versa.
When this attribute is passed, it should be one of the values
``"qmark"``, ``"numeric"``, ``"named"``, ``"format"`` or
``"pyformat"``, and should correspond to a parameter style known
to be supported by the DBAPI in use.
:param pool=None: an already-constructed instance of
:class:`~sqlalchemy.pool.Pool`, such as a
:class:`~sqlalchemy.pool.QueuePool` instance. If non-None, this
pool will be used directly as the underlying connection pool
for the engine, bypassing whatever connection parameters are
present in the URL argument. For information on constructing
connection pools manually, see :ref:`pooling_toplevel`.
:param poolclass=None: a :class:`~sqlalchemy.pool.Pool`
subclass, which will be used to create a connection pool
instance using the connection parameters given in the URL. Note
this differs from ``pool`` in that you don't actually
instantiate the pool in this case, you just indicate what type
of pool to be used.
:param pool_logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param pool_pre_ping: boolean, if True will enable the connection pool
"pre-ping" feature that tests connections for liveness upon
each checkout.
.. versionadded:: 1.2
.. seealso::
:ref:`pool_disconnects_pessimistic`
:param pool_size=5: the number of connections to keep open
inside the connection pool. This used with
:class:`~sqlalchemy.pool.QueuePool` as
well as :class:`~sqlalchemy.pool.SingletonThreadPool`. With
:class:`~sqlalchemy.pool.QueuePool`, a ``pool_size`` setting
of 0 indicates no limit; to disable pooling, set ``poolclass`` to
:class:`~sqlalchemy.pool.NullPool` instead.
:param pool_recycle=-1: this setting causes the pool to recycle
connections after the given number of seconds has passed. It
defaults to -1, or no timeout. For example, setting to 3600
means connections will be recycled after one hour. Note that
MySQL in particular will disconnect automatically if no
activity is detected on a connection for eight hours (although
this is configurable with the MySQLDB connection itself and the
server configuration as well).
.. seealso::
:ref:`pool_setting_recycle`
:param pool_reset_on_return='rollback': set the
:paramref:`_pool.Pool.reset_on_return` parameter of the underlying
:class:`_pool.Pool` object, which can be set to the values
``"rollback"``, ``"commit"``, or ``None``.
.. seealso::
:paramref:`_pool.Pool.reset_on_return`
:param pool_timeout=30: number of seconds to wait before giving
up on getting a connection from the pool. This is only used
with :class:`~sqlalchemy.pool.QueuePool`.
:param pool_use_lifo=False: use LIFO (last-in-first-out) when retrieving
connections from :class:`.QueuePool` instead of FIFO
(first-in-first-out). Using LIFO, a server-side timeout scheme can
reduce the number of connections used during non-peak periods of
use. When planning for server-side timeouts, ensure that a recycle or
pre-ping strategy is in use to gracefully handle stale connections.
.. versionadded:: 1.3
.. seealso::
:ref:`pool_use_lifo`
:ref:`pool_disconnects`
:param plugins: string list of plugin names to load. See
:class:`.CreateEnginePlugin` for background.
.. versionadded:: 1.2.3
:param strategy='plain': selects alternate engine implementations.
Currently available are:
* the ``threadlocal`` strategy, which is described in
:ref:`threadlocal_strategy`;
* the ``mock`` strategy, which dispatches all statement
execution to a function passed as the argument ``executor``.
See `example in the FAQ
<http://docs.sqlalchemy.org/en/latest/faq/metadata_schema.html#how-can-i-get-the-create-table-drop-table-output-as-a-string>`_.
:param executor=None: a function taking arguments
``(sql, *multiparams, **params)``, to which the ``mock`` strategy will
dispatch all statement execution. Used only by ``strategy='mock'``.
""" # noqa
strategy = kwargs.pop("strategy", default_strategy)
strategy = strategies.strategies[strategy]
return strategy.create(*args, **kwargs)
def engine_from_config(configuration, prefix="sqlalchemy.", **kwargs):
"""Create a new Engine instance using a configuration dictionary.
The dictionary is typically produced from a config file.
The keys of interest to ``engine_from_config()`` should be prefixed, e.g.
``sqlalchemy.url``, ``sqlalchemy.echo``, etc. The 'prefix' argument
indicates the prefix to be searched for. Each matching key (after the
prefix is stripped) is treated as though it were the corresponding keyword
argument to a :func:`_sa.create_engine` call.
The only required key is (assuming the default prefix) ``sqlalchemy.url``,
which provides the :ref:`database URL <database_urls>`.
A select set of keyword arguments will be "coerced" to their
expected type based on string values. The set of arguments
is extensible per-dialect using the ``engine_config_types`` accessor.
:param configuration: A dictionary (typically produced from a config file,
but this is not a requirement). Items whose keys start with the value
of 'prefix' will have that prefix stripped, and will then be passed to
:func:`_sa.create_engine`.
:param prefix: Prefix to match and then strip from keys
in 'configuration'.
:param kwargs: Each keyword argument to ``engine_from_config()`` itself
overrides the corresponding item taken from the 'configuration'
dictionary. Keyword arguments should *not* be prefixed.
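For illustration, a minimal sketch; the configuration values shown here
are illustrative::
    config = {
        "sqlalchemy.url": "postgresql://scott:tiger@localhost/test",
        "sqlalchemy.echo": "true",
        "sqlalchemy.pool_recycle": "3600",
    }
    engine = engine_from_config(config)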
"""
options = dict(
(key[len(prefix) :], configuration[key])
for key in configuration
if key.startswith(prefix)
)
options["_coerce_config"] = True
options.update(kwargs)
url = options.pop("url")
return create_engine(url, **options)
__all__ = ("create_engine", "engine_from_config")
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/engine/url.py
|
# engine/url.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides the :class:`~sqlalchemy.engine.url.URL` class which encapsulates
information about a database connection specification.
The URL object is created automatically when
:func:`~sqlalchemy.engine.create_engine` is called with a string
argument; alternatively, the URL is a public-facing construct which can
be used directly and is also accepted directly by ``create_engine()``.
"""
import re
from .interfaces import Dialect
from .. import exc
from .. import util
from ..dialects import plugins
from ..dialects import registry
class URL(object):
"""
Represent the components of a URL used to connect to a database.
This object is suitable to be passed directly to a
:func:`~sqlalchemy.create_engine` call. The fields of the URL are parsed
from a string by the :func:`.make_url` function. The string
format of the URL is an RFC-1738-style string.
All initialization parameters are available as public attributes.
:param drivername: the name of the database backend.
This name will correspond to a module in sqlalchemy/databases
or a third party plug-in.
:param username: The user name.
:param password: database password.
:param host: The name of the host.
:param port: The port number.
:param database: The database name.
:param query: A dictionary of options to be passed to the
dialect and/or the DBAPI upon connect.
"""
def __init__(
self,
drivername,
username=None,
password=None,
host=None,
port=None,
database=None,
query=None,
):
self.drivername = drivername
self.username = username
self.password_original = password
self.host = host
if port is not None:
self.port = int(port)
else:
self.port = None
self.database = database
self.query = query or {}
def __to_string__(self, hide_password=True):
s = self.drivername + "://"
if self.username is not None:
s += _rfc_1738_quote(self.username)
if self.password is not None:
s += ":" + (
"***" if hide_password else _rfc_1738_quote(self.password)
)
s += "@"
if self.host is not None:
if ":" in self.host:
s += "[%s]" % self.host
else:
s += self.host
if self.port is not None:
s += ":" + str(self.port)
if self.database is not None:
s += "/" + self.database
if self.query:
keys = list(self.query)
keys.sort()
s += "?" + "&".join(
"%s=%s" % (util.quote_plus(k), util.quote_plus(element))
for k in keys
for element in util.to_list(self.query[k])
)
return s
def __str__(self):
return self.__to_string__(hide_password=False)
def __repr__(self):
return self.__to_string__()
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return (
isinstance(other, URL)
and self.drivername == other.drivername
and self.username == other.username
and self.password == other.password
and self.host == other.host
and self.database == other.database
and self.query == other.query
and self.port == other.port
)
def __ne__(self, other):
return not self == other
@property
def password(self):
if self.password_original is None:
return None
else:
return util.text_type(self.password_original)
@password.setter
def password(self, password):
self.password_original = password
def get_backend_name(self):
if "+" not in self.drivername:
return self.drivername
else:
return self.drivername.split("+")[0]
def get_driver_name(self):
if "+" not in self.drivername:
return self.get_dialect().driver
else:
return self.drivername.split("+")[1]
def _instantiate_plugins(self, kwargs):
plugin_names = util.to_list(self.query.get("plugin", ()))
plugin_names += kwargs.get("plugins", [])
return [
plugins.load(plugin_name)(self, kwargs)
for plugin_name in plugin_names
]
def _get_entrypoint(self):
"""Return the "entry point" dialect class.
This is normally the dialect itself except in the case when the
returned class implements the get_dialect_cls() method.
"""
if "+" not in self.drivername:
name = self.drivername
else:
name = self.drivername.replace("+", ".")
cls = registry.load(name)
# check for legacy dialects that
# would return a module with 'dialect' as the
# actual class
if (
hasattr(cls, "dialect")
and isinstance(cls.dialect, type)
and issubclass(cls.dialect, Dialect)
):
return cls.dialect
else:
return cls
def get_dialect(self):
"""Return the SQLAlchemy database dialect class corresponding
to this URL's driver name.
"""
entrypoint = self._get_entrypoint()
dialect_cls = entrypoint.get_dialect_cls(self)
return dialect_cls
def translate_connect_args(self, names=[], **kw):
r"""Translate url attributes into a dictionary of connection arguments.
Returns attributes of this url (`host`, `database`, `username`,
`password`, `port`) as a plain dictionary. The attribute names are
used as the keys by default. Unset or false attributes are omitted
from the final dictionary.
:param \**kw: Optional, alternate key names for url attributes.
:param names: Deprecated. Same purpose as the keyword-based alternate
names, but correlates the name to the original positionally.
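For illustration, a minimal sketch; the URL is illustrative::
    url = make_url("postgresql://scott:tiger@localhost:5432/test")
    url.translate_connect_args(username="user")
    # e.g. {'user': 'scott', 'password': 'tiger',
    #       'host': 'localhost', 'port': 5432, 'database': 'test'}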
"""
translated = {}
attribute_names = ["host", "database", "username", "password", "port"]
for sname in attribute_names:
if names:
name = names.pop(0)
elif sname in kw:
name = kw[sname]
else:
name = sname
if name is not None and getattr(self, sname, False):
translated[name] = getattr(self, sname)
return translated
def make_url(name_or_url):
"""Given a string or unicode instance, produce a new URL instance.
The given string is parsed according to the RFC 1738 spec. If an
existing URL object is passed, just returns the object.
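For illustration, a minimal sketch; the URL string is illustrative::
    url = make_url("mysql+pymysql://scott:tiger@localhost/test")
    url.get_backend_name()   # 'mysql'
    url.get_driver_name()    # 'pymysql'
    url.database             # 'test'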
"""
if isinstance(name_or_url, util.string_types):
return _parse_rfc1738_args(name_or_url)
else:
return name_or_url
def _parse_rfc1738_args(name):
pattern = re.compile(
r"""
(?P<name>[\w\+]+)://
(?:
(?P<username>[^:/]*)
(?::(?P<password>.*))?
@)?
(?:
(?:
\[(?P<ipv6host>[^/]+)\] |
(?P<ipv4host>[^/:]+)
)?
(?::(?P<port>[^/]*))?
)?
(?:/(?P<database>.*))?
""",
re.X,
)
m = pattern.match(name)
if m is not None:
components = m.groupdict()
if components["database"] is not None:
tokens = components["database"].split("?", 2)
components["database"] = tokens[0]
if len(tokens) > 1:
query = {}
for key, value in util.parse_qsl(tokens[1]):
if util.py2k:
key = key.encode("ascii")
if key in query:
query[key] = util.to_list(query[key])
query[key].append(value)
else:
query[key] = value
else:
query = None
else:
query = None
components["query"] = query
if components["username"] is not None:
components["username"] = _rfc_1738_unquote(components["username"])
if components["password"] is not None:
components["password"] = _rfc_1738_unquote(components["password"])
ipv4host = components.pop("ipv4host")
ipv6host = components.pop("ipv6host")
components["host"] = ipv4host or ipv6host
name = components.pop("name")
return URL(name, **components)
else:
raise exc.ArgumentError(
"Could not parse rfc1738 URL from string '%s'" % name
)
def _rfc_1738_quote(text):
return re.sub(r"[:@/]", lambda m: "%%%X" % ord(m.group(0)), text)
def _rfc_1738_unquote(text):
return util.unquote(text)
def _parse_keyvalue_args(name):
m = re.match(r"(\w+)://(.*)", name)
if m is not None:
(name, args) = m.group(1, 2)
opts = dict(util.parse_qsl(args))
return URL(name, **opts)  # pass parsed key/value pairs as keyword arguments
else:
return None
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/engine/result.py
|
# engine/result.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Define result set constructs including :class:`_engine.ResultProxy`
and :class:`.RowProxy`."""
import collections
import operator
from .. import exc
from .. import util
from ..sql import expression
from ..sql import sqltypes
from ..sql import util as sql_util
# This reconstructor is necessary so that pickles with the C extension or
# without use the same Binary format.
try:
# We need a different reconstructor on the C extension so that we can
# add extra checks that fields have correctly been initialized by
# __setstate__.
from sqlalchemy.cresultproxy import safe_rowproxy_reconstructor
# The extra function embedding is needed so that the
# reconstructor function has the same signature whether or not
# the extension is present.
def rowproxy_reconstructor(cls, state):
return safe_rowproxy_reconstructor(cls, state)
except ImportError:
def rowproxy_reconstructor(cls, state):
obj = cls.__new__(cls)
obj.__setstate__(state)
return obj
try:
from sqlalchemy.cresultproxy import BaseRowProxy
_baserowproxy_usecext = True
except ImportError:
_baserowproxy_usecext = False
class BaseRowProxy(object):
__slots__ = ("_parent", "_row", "_processors", "_keymap")
def __init__(self, parent, row, processors, keymap):
"""RowProxy objects are constructed by ResultProxy objects."""
self._parent = parent
self._row = row
self._processors = processors
self._keymap = keymap
def __reduce__(self):
return (
rowproxy_reconstructor,
(self.__class__, self.__getstate__()),
)
def values(self):
"""Return the values represented by this RowProxy as a list."""
return list(self)
def __iter__(self):
for processor, value in zip(self._processors, self._row):
if processor is None:
yield value
else:
yield processor(value)
def __len__(self):
return len(self._row)
def __getitem__(self, key):
try:
processor, obj, index = self._keymap[key]
except KeyError as err:
processor, obj, index = self._parent._key_fallback(key, err)
except TypeError:
if isinstance(key, slice):
l = []
for processor, value in zip(
self._processors[key], self._row[key]
):
if processor is None:
l.append(value)
else:
l.append(processor(value))
return tuple(l)
else:
raise
if index is None:
raise exc.InvalidRequestError(
"Ambiguous column name '%s' in "
"result set column descriptions" % obj
)
if processor is not None:
return processor(self._row[index])
else:
return self._row[index]
def __getattr__(self, name):
try:
return self[name]
except KeyError as e:
util.raise_(AttributeError(e.args[0]), replace_context=e)
class RowProxy(BaseRowProxy):
"""Represent a single result row.
The :class:`.RowProxy` object is retrieved from a database result by the
:class:`_engine.ResultProxy` object, using methods such as
:meth:`_engine.ResultProxy.fetchall`.
The :class:`.RowProxy` object seeks to act mostly like a Python named
tuple, but also provides some Python dictionary behaviors at the same time.
.. seealso::
:ref:`coretutorial_selecting` - includes examples of selecting
rows from SELECT statements.
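For illustration, a minimal sketch, assuming a ``result`` obtained from
``connection.execute()`` against a table with a ``name`` column::
    row = result.fetchone()
    row[0]        # access by positional index
    row["name"]   # access by string key
    row.name      # access by attribute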
"""
__slots__ = ()
def __contains__(self, key):
return self._parent._has_key(key)
def __getstate__(self):
return {"_parent": self._parent, "_row": tuple(self)}
def __setstate__(self, state):
self._parent = parent = state["_parent"]
self._row = state["_row"]
self._processors = parent._processors
self._keymap = parent._keymap
__hash__ = None
def _op(self, other, op):
return (
op(tuple(self), tuple(other))
if isinstance(other, RowProxy)
else op(tuple(self), other)
)
def __lt__(self, other):
return self._op(other, operator.lt)
def __le__(self, other):
return self._op(other, operator.le)
def __ge__(self, other):
return self._op(other, operator.ge)
def __gt__(self, other):
return self._op(other, operator.gt)
def __eq__(self, other):
return self._op(other, operator.eq)
def __ne__(self, other):
return self._op(other, operator.ne)
def __repr__(self):
return repr(sql_util._repr_row(self))
def has_key(self, key):
"""Return True if this :class:`.RowProxy` contains the given key.
Through the SQLAlchemy 1.x series, the ``__contains__()`` method
of :class:`.RowProxy` also links to :meth:`.RowProxy.has_key`, in that
an expression such as ::
"some_col" in row
will return True if the row contains a column named ``"some_col"``,
in the way that a Python mapping works.
However, it is planned that the 2.0 series of SQLAlchemy will reverse
this behavior so that ``__contains__()`` will refer to a value being
present in the row, in the way that a Python tuple works.
"""
return self._parent._has_key(key)
def items(self):
"""Return a list of tuples, each tuple containing a key/value pair.
This method is analogous to the Python dictionary ``.items()`` method,
except that it returns a list, not an iterator.
"""
return [(key, self[key]) for key in self.keys()]
def keys(self):
"""Return the list of keys as strings represented by this
:class:`.RowProxy`.
This method is analogous to the Python dictionary ``.keys()`` method,
except that it returns a list, not an iterator.
"""
return self._parent.keys
def iterkeys(self):
"""Return a an iterator against the :meth:`.RowProxy.keys` method.
This method is analogous to the Python-2-only dictionary
``.iterkeys()`` method.
"""
return iter(self._parent.keys)
def itervalues(self):
"""Return a an iterator against the :meth:`.RowProxy.values` method.
This method is analogous to the Python-2-only dictionary
``.itervalues()`` method.
"""
return iter(self)
def values(self):
"""Return the values represented by this :class:`.RowProxy` as a list.
This method is analogous to the Python dictionary ``.values()`` method,
except that it returns a list, not an iterator.
"""
return super(RowProxy, self).values()
try:
# Register RowProxy with Sequence,
# so sequence protocol is implemented
util.collections_abc.Sequence.register(RowProxy)
except ImportError:
pass
class ResultMetaData(object):
"""Handle cursor.description, applying additional info from an execution
context."""
__slots__ = (
"_keymap",
"case_sensitive",
"matched_on_name",
"_processors",
"keys",
"_orig_processors",
)
def __init__(self, parent, cursor_description):
context = parent.context
dialect = context.dialect
self.case_sensitive = dialect.case_sensitive
self.matched_on_name = False
self._orig_processors = None
if context.result_column_struct:
(
result_columns,
cols_are_ordered,
textual_ordered,
) = context.result_column_struct
num_ctx_cols = len(result_columns)
else:
result_columns = (
cols_are_ordered
) = num_ctx_cols = textual_ordered = False
# merge cursor.description with the column info
# present in the compiled structure, if any
raw = self._merge_cursor_description(
context,
cursor_description,
result_columns,
num_ctx_cols,
cols_are_ordered,
textual_ordered,
)
self._keymap = {}
if not _baserowproxy_usecext:
# keymap indexes by integer index: this is only used
# in the pure Python BaseRowProxy.__getitem__
# implementation to avoid an expensive
# isinstance(key, util.int_types) in the most common
# case path
len_raw = len(raw)
self._keymap.update(
[(elem[0], (elem[3], elem[4], elem[0])) for elem in raw]
+ [
(elem[0] - len_raw, (elem[3], elem[4], elem[0]))
for elem in raw
]
)
# processors in key order for certain per-row
# views like __iter__ and slices
self._processors = [elem[3] for elem in raw]
# keymap by primary string...
by_key = dict([(elem[2], (elem[3], elem[4], elem[0])) for elem in raw])
# for compiled SQL constructs, copy additional lookup keys into
# the key lookup map, such as Column objects, labels,
# column keys and other names
if num_ctx_cols:
# if by-primary-string dictionary smaller (or bigger?!) than
# number of columns, assume we have dupes, rewrite
# dupe records with "None" for index which results in
# ambiguous column exception when accessed.
if len(by_key) != num_ctx_cols:
seen = set()
for rec in raw:
key = rec[1]
if key in seen:
# this is an "ambiguous" element, replacing
# the full record in the map
key = key.lower() if not self.case_sensitive else key
by_key[key] = (None, key, None)
seen.add(key)
# copy secondary elements from compiled columns
# into self._keymap, write in the potentially "ambiguous"
# element
self._keymap.update(
[
(obj_elem, by_key[elem[2]])
for elem in raw
if elem[4]
for obj_elem in elem[4]
]
)
# if we did a pure positional match, then reset the
# original "expression element" back to the "unambiguous"
# entry. This is a new behavior in 1.1 which impacts
# TextAsFrom but also straight compiled SQL constructs.
if not self.matched_on_name:
self._keymap.update(
[
(elem[4][0], (elem[3], elem[4], elem[0]))
for elem in raw
if elem[4]
]
)
else:
# no dupes - copy secondary elements from compiled
# columns into self._keymap
self._keymap.update(
[
(obj_elem, (elem[3], elem[4], elem[0]))
for elem in raw
if elem[4]
for obj_elem in elem[4]
]
)
# update keymap with primary string names taking
# precedence
self._keymap.update(by_key)
# update keymap with "translated" names (sqlite-only thing)
if not num_ctx_cols and context._translate_colname:
self._keymap.update(
[(elem[5], self._keymap[elem[2]]) for elem in raw if elem[5]]
)
def _merge_cursor_description(
self,
context,
cursor_description,
result_columns,
num_ctx_cols,
cols_are_ordered,
textual_ordered,
):
"""Merge a cursor.description with compiled result column information.
There are at least four separate strategies used here, selected
depending on the type of SQL construct used to start with.
The most common case is that of the compiled SQL expression construct,
which generated the column names present in the raw SQL string and
which has the identical number of columns as were reported by
cursor.description. In this case, we assume a 1-1 positional mapping
between the entries in cursor.description and the compiled object.
This is also the most performant case as we disregard extracting /
decoding the column names present in cursor.description since we
already have the desired name we generated in the compiled SQL
construct.
The next common case is that of the completely raw string SQL,
such as passed to connection.execute(). In this case we have no
compiled construct to work with, so we extract and decode the
names from cursor.description and index those as the primary
result row target keys.
The remaining fairly common case is that of the textual SQL
that includes at least partial column information; this is when
we use a :class:`.TextAsFrom` construct. This construct may have
unordered or ordered column information. In the ordered case, we
merge the cursor.description and the compiled construct's information
positionally, and warn if there are additional description names
present, however we still decode the names in cursor.description
as we don't have a guarantee that the names in the columns match
on these. In the unordered case, we match names in cursor.description
to that of the compiled construct based on name matching.
In both of these cases, the cursor.description names and the column
expression objects and names are indexed as result row target keys.
The final case is much less common, where we have a compiled
non-textual SQL expression construct, but the number of columns
in cursor.description doesn't match what's in the compiled
construct. We make the guess here that there might be textual
column expressions in the compiled construct that themselves include
a comma in them causing them to split. We do the same name-matching
as with textual non-ordered columns.
The name-matched system of merging is the same as that used by
SQLAlchemy for all cases up through the 0.9 series. Positional
matching for compiled SQL expressions was introduced in 1.0 as a
major performance feature, and positional matching for textual
:class:`.TextAsFrom` objects in 1.1. As name matching is no longer
a common case, it was acceptable to factor it into smaller generator-
oriented methods that are easier to understand, but incur slightly
more performance overhead.
"""
case_sensitive = context.dialect.case_sensitive
if (
num_ctx_cols
and cols_are_ordered
and not textual_ordered
and num_ctx_cols == len(cursor_description)
):
self.keys = [elem[0] for elem in result_columns]
# pure positional 1-1 case; doesn't need to read
# the names from cursor.description
return [
(
idx,
key,
name.lower() if not case_sensitive else name,
context.get_result_processor(
type_, key, cursor_description[idx][1]
),
obj,
None,
)
for idx, (key, name, obj, type_) in enumerate(result_columns)
]
else:
# name-based or text-positional cases, where we need
# to read cursor.description names
if textual_ordered:
# textual positional case
raw_iterator = self._merge_textual_cols_by_position(
context, cursor_description, result_columns
)
elif num_ctx_cols:
# compiled SQL with a mismatch of description cols
# vs. compiled cols, or textual w/ unordered columns
raw_iterator = self._merge_cols_by_name(
context, cursor_description, result_columns
)
else:
# no compiled SQL, just a raw string
raw_iterator = self._merge_cols_by_none(
context, cursor_description
)
return [
(
idx,
colname,
colname,
context.get_result_processor(
mapped_type, colname, coltype
),
obj,
untranslated,
)
for (
idx,
colname,
mapped_type,
coltype,
obj,
untranslated,
) in raw_iterator
]
def _colnames_from_description(self, context, cursor_description):
"""Extract column names and data types from a cursor.description.
Applies unicode decoding, column translation, "normalization",
and case sensitivity rules to the names based on the dialect.
"""
dialect = context.dialect
case_sensitive = dialect.case_sensitive
translate_colname = context._translate_colname
description_decoder = (
dialect._description_decoder
if dialect.description_encoding
else None
)
normalize_name = (
dialect.normalize_name if dialect.requires_name_normalize else None
)
untranslated = None
self.keys = []
for idx, rec in enumerate(cursor_description):
colname = rec[0]
coltype = rec[1]
if description_decoder:
colname = description_decoder(colname)
if translate_colname:
colname, untranslated = translate_colname(colname)
if normalize_name:
colname = normalize_name(colname)
self.keys.append(colname)
if not case_sensitive:
colname = colname.lower()
yield idx, colname, untranslated, coltype
def _merge_textual_cols_by_position(
self, context, cursor_description, result_columns
):
num_ctx_cols = len(result_columns) if result_columns else None
if num_ctx_cols > len(cursor_description):
util.warn(
"Number of columns in textual SQL (%d) is "
"smaller than number of columns requested (%d)"
% (num_ctx_cols, len(cursor_description))
)
seen = set()
for (
idx,
colname,
untranslated,
coltype,
) in self._colnames_from_description(context, cursor_description):
if idx < num_ctx_cols:
ctx_rec = result_columns[idx]
obj = ctx_rec[2]
mapped_type = ctx_rec[3]
if obj[0] in seen:
raise exc.InvalidRequestError(
"Duplicate column expression requested "
"in textual SQL: %r" % obj[0]
)
seen.add(obj[0])
else:
mapped_type = sqltypes.NULLTYPE
obj = None
yield idx, colname, mapped_type, coltype, obj, untranslated
def _merge_cols_by_name(self, context, cursor_description, result_columns):
dialect = context.dialect
case_sensitive = dialect.case_sensitive
result_map = self._create_result_map(result_columns, case_sensitive)
self.matched_on_name = True
for (
idx,
colname,
untranslated,
coltype,
) in self._colnames_from_description(context, cursor_description):
try:
ctx_rec = result_map[colname]
except KeyError:
mapped_type = sqltypes.NULLTYPE
obj = None
else:
obj = ctx_rec[1]
mapped_type = ctx_rec[2]
yield idx, colname, mapped_type, coltype, obj, untranslated
def _merge_cols_by_none(self, context, cursor_description):
for (
idx,
colname,
untranslated,
coltype,
) in self._colnames_from_description(context, cursor_description):
yield idx, colname, sqltypes.NULLTYPE, coltype, None, untranslated
@classmethod
def _create_result_map(cls, result_columns, case_sensitive=True):
d = {}
for elem in result_columns:
key, rec = elem[0], elem[1:]
if not case_sensitive:
key = key.lower()
if key in d:
# conflicting keyname, just double up the list
# of objects. this will cause an "ambiguous name"
# error if an attempt is made by the result set to
                # access it.
e_name, e_obj, e_type = d[key]
d[key] = e_name, e_obj + rec[1], e_type
else:
d[key] = rec
return d
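    # Illustrative worked example (hypothetical column objects col_a/col_b;
    # not part of this module): two result columns that share the key "id"
    # are merged so the second entry extends the first entry's tuple of
    # objects rather than replacing it:
    #
    #     _create_result_map([("id", "id", (col_a,), INTEGER),
    #                         ("id", "id", (col_b,), INTEGER)])
    #     # -> {"id": ("id", (col_a, col_b), INTEGER)}
    #
    # which is what lets a later row["id"] access detect the ambiguity.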
def _key_fallback(self, key, err, raiseerr=True):
map_ = self._keymap
result = None
if isinstance(key, util.string_types):
result = map_.get(key if self.case_sensitive else key.lower())
# fallback for targeting a ColumnElement to a textual expression
# this is a rare use case which only occurs when matching text()
        # or column('name') constructs to ColumnElements, or after a
# pickle/unpickle roundtrip
elif isinstance(key, expression.ColumnElement):
if (
key._label
and (key._label if self.case_sensitive else key._label.lower())
in map_
):
result = map_[
key._label if self.case_sensitive else key._label.lower()
]
elif (
hasattr(key, "name")
and (key.name if self.case_sensitive else key.name.lower())
in map_
):
# match is only on name.
result = map_[
key.name if self.case_sensitive else key.name.lower()
]
# search extra hard to make sure this
# isn't a column/label name overlap.
# this check isn't currently available if the row
# was unpickled.
if result is not None and result[1] is not None:
for obj in result[1]:
if key._compare_name_for_result(obj):
break
else:
result = None
if result is None:
if raiseerr:
util.raise_(
exc.NoSuchColumnError(
"Could not locate column in row for column '%s'"
% expression._string_or_unprintable(key)
),
replace_context=err,
)
else:
return None
else:
map_[key] = result
return result
def _has_key(self, key):
if key in self._keymap:
return True
else:
return self._key_fallback(key, None, False) is not None
def _getter(self, key, raiseerr=True):
if key in self._keymap:
processor, obj, index = self._keymap[key]
else:
ret = self._key_fallback(key, None, raiseerr)
if ret is None:
return None
processor, obj, index = ret
if index is None:
util.raise_(
exc.InvalidRequestError(
"Ambiguous column name '%s' in "
"result set column descriptions" % obj
),
from_=None,
)
return operator.itemgetter(index)
def __getstate__(self):
return {
"_pickled_keymap": dict(
(key, index)
for key, (processor, obj, index) in self._keymap.items()
if isinstance(key, util.string_types + util.int_types)
),
"keys": self.keys,
"case_sensitive": self.case_sensitive,
"matched_on_name": self.matched_on_name,
}
def __setstate__(self, state):
# the row has been processed at pickling time so we don't need any
# processor anymore
self._processors = [None for _ in range(len(state["keys"]))]
self._keymap = keymap = {}
for key, index in state["_pickled_keymap"].items():
# not preserving "obj" here, unfortunately our
# proxy comparison fails with the unpickle
keymap[key] = (None, None, index)
self.keys = state["keys"]
self.case_sensitive = state["case_sensitive"]
self.matched_on_name = state["matched_on_name"]
class ResultProxy(object):
"""A facade around a DBAPI cursor object.
Returns database rows via the :class:`.RowProxy` class, which provides
additional API features and behaviors on top of the raw data returned
by the DBAPI.
.. seealso::
:ref:`coretutorial_selecting` - introductory material for accessing
:class:`_engine.ResultProxy` and :class:`.RowProxy` objects.
"""
_process_row = RowProxy
out_parameters = None
_autoclose_connection = False
_metadata = None
_soft_closed = False
closed = False
def __init__(self, context):
self.context = context
self.dialect = context.dialect
self.cursor = self._saved_cursor = context.cursor
self.connection = context.root_connection
self._echo = (
self.connection._echo and context.engine._should_log_debug()
)
self._init_metadata()
def _getter(self, key, raiseerr=True):
try:
getter = self._metadata._getter
except AttributeError as err:
return self._non_result(None, err)
else:
return getter(key, raiseerr)
def _has_key(self, key):
try:
has_key = self._metadata._has_key
except AttributeError as err:
return self._non_result(None, err)
else:
return has_key(key)
def _init_metadata(self):
cursor_description = self._cursor_description()
if cursor_description is not None:
if (
self.context.compiled
and "compiled_cache" in self.context.execution_options
):
if self.context.compiled._cached_metadata:
self._metadata = self.context.compiled._cached_metadata
else:
self._metadata = (
self.context.compiled._cached_metadata
) = ResultMetaData(self, cursor_description)
else:
self._metadata = ResultMetaData(self, cursor_description)
if self._echo:
self.context.engine.logger.debug(
"Col %r", tuple(x[0] for x in cursor_description)
)
def keys(self):
"""Return the list of string keys that would represented by each
:class:`.RowProxy`."""
if self._metadata:
return self._metadata.keys
else:
return []
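    # Usage sketch (hypothetical statement; illustrative only):
    #
    #     result = conn.execute("SELECT id, name FROM users")
    #     result.keys()  # ['id', 'name']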
@util.memoized_property
def rowcount(self):
"""Return the 'rowcount' for this result.
The 'rowcount' reports the number of rows *matched*
by the WHERE criterion of an UPDATE or DELETE statement.
.. note::
Notes regarding :attr:`_engine.ResultProxy.rowcount`:
* This attribute returns the number of rows *matched*,
which is not necessarily the same as the number of rows
that were actually *modified* - an UPDATE statement, for example,
may have no net change on a given row if the SET values
given are the same as those present in the row already.
Such a row would be matched but not modified.
On backends that feature both styles, such as MySQL,
rowcount is configured by default to return the match
count in all cases.
* :attr:`_engine.ResultProxy.rowcount`
is *only* useful in conjunction
with an UPDATE or DELETE statement. Contrary to what the Python
DBAPI says, it does *not* return the
number of rows available from the results of a SELECT statement
as DBAPIs cannot support this functionality when rows are
unbuffered.
* :attr:`_engine.ResultProxy.rowcount`
may not be fully implemented by
all dialects. In particular, most DBAPIs do not support an
aggregate rowcount result from an executemany call.
The :meth:`_engine.ResultProxy.supports_sane_rowcount` and
:meth:`_engine.ResultProxy.supports_sane_multi_rowcount` methods
will report from the dialect if each usage is known to be
supported.
* Statements that use RETURNING may not return a correct
rowcount.
"""
try:
return self.context.rowcount
except BaseException as e:
self.connection._handle_dbapi_exception(
e, None, None, self.cursor, self.context
)
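    # Illustrative sketch (hypothetical "users" table): an UPDATE whose SET
    # values already match the row still counts that row as *matched*, so
    #
    #     result = conn.execute(
    #         "UPDATE users SET status = 'active' WHERE status = 'active'"
    #     )
    #     result.rowcount  # rows matched, even though nothing changed
    #
    # reflects the match count, which is the behavior configured by default
    # on backends such as MySQL as noted in the docstring above.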
@property
def lastrowid(self):
"""return the 'lastrowid' accessor on the DBAPI cursor.
This is a DBAPI specific method and is only functional
for those backends which support it, for statements
        where it is appropriate. Its behavior is not
consistent across backends.
Usage of this method is normally unnecessary when
using insert() expression constructs; the
:attr:`~ResultProxy.inserted_primary_key` attribute provides a
tuple of primary key values for a newly inserted row,
regardless of database backend.
"""
try:
return self._saved_cursor.lastrowid
except BaseException as e:
self.connection._handle_dbapi_exception(
e, None, None, self._saved_cursor, self.context
)
@property
def returns_rows(self):
"""True if this :class:`_engine.ResultProxy` returns rows.
I.e. if it is legal to call the methods
:meth:`_engine.ResultProxy.fetchone`,
        :meth:`_engine.ResultProxy.fetchmany`, and
:meth:`_engine.ResultProxy.fetchall`.
"""
return self._metadata is not None
@property
def is_insert(self):
"""True if this :class:`_engine.ResultProxy` is the result
        of executing an expression language compiled
:func:`_expression.insert` construct.
When True, this implies that the
:attr:`inserted_primary_key` attribute is accessible,
assuming the statement did not include
a user defined "returning" construct.
"""
return self.context.isinsert
def _cursor_description(self):
"""May be overridden by subclasses."""
return self._saved_cursor.description
def _soft_close(self):
"""Soft close this :class:`_engine.ResultProxy`.
This releases all DBAPI cursor resources, but leaves the
ResultProxy "open" from a semantic perspective, meaning the
fetchXXX() methods will continue to return empty results.
This method is called automatically when:
* all result rows are exhausted using the fetchXXX() methods.
* cursor.description is None.
This method is **not public**, but is documented in order to clarify
the "autoclose" process used.
.. versionadded:: 1.0.0
.. seealso::
:meth:`_engine.ResultProxy.close`
"""
if self._soft_closed:
return
self._soft_closed = True
cursor = self.cursor
self.connection._safe_close_cursor(cursor)
if self._autoclose_connection:
self.connection.close()
self.cursor = None
def close(self):
"""Close this ResultProxy.
This closes out the underlying DBAPI cursor corresponding
to the statement execution, if one is still present. Note that the
DBAPI cursor is automatically released when the
:class:`_engine.ResultProxy`
exhausts all available rows. :meth:`_engine.ResultProxy.close`
is generally
an optional method except in the case when discarding a
:class:`_engine.ResultProxy`
that still has additional rows pending for fetch.
In the case of a result that is the product of
:ref:`connectionless execution <dbengine_implicit>`,
the underlying :class:`_engine.Connection` object is also closed,
which
:term:`releases` DBAPI connection resources.
After this method is called, it is no longer valid to call upon
the fetch methods, which will raise a :class:`.ResourceClosedError`
on subsequent use.
.. versionchanged:: 1.0.0 - the :meth:`_engine.ResultProxy.close`
method
has been separated out from the process that releases the underlying
DBAPI cursor resource. The "auto close" feature of the
:class:`_engine.Connection` now performs a so-called "soft close",
which
releases the underlying DBAPI cursor, but allows the
:class:`_engine.ResultProxy`
to still behave as an open-but-exhausted
result set; the actual :meth:`_engine.ResultProxy.close`
method is never
called. It is still safe to discard a
:class:`_engine.ResultProxy`
that has been fully exhausted without calling this method.
.. seealso::
:ref:`connections_toplevel`
"""
if not self.closed:
self._soft_close()
self.closed = True
def __iter__(self):
"""Implement iteration protocol."""
while True:
row = self.fetchone()
if row is None:
return
else:
yield row
def __next__(self):
"""Implement the Python next() protocol.
This method, mirrored as both ``.next()`` and ``.__next__()``, is part
of Python's API for producing iterator-like behavior.
.. versionadded:: 1.2
"""
row = self.fetchone()
if row is None:
raise StopIteration()
else:
return row
next = __next__
@util.memoized_property
def inserted_primary_key(self):
"""Return the primary key for the row just inserted.
The return value is a list of scalar values
corresponding to the list of primary key columns
in the target table.
This only applies to single row :func:`_expression.insert`
constructs which did not explicitly specify
:meth:`_expression.Insert.returning`.
Note that primary key columns which specify a
server_default clause,
or otherwise do not qualify as "autoincrement"
columns (see the notes at :class:`_schema.Column`), and were
generated using the database-side default, will
appear in this list as ``None`` unless the backend
supports "returning" and the insert statement executed
with the "implicit returning" enabled.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isinsert:
raise exc.InvalidRequestError(
"Statement is not an insert() " "expression construct."
)
elif self.context._is_explicit_returning:
raise exc.InvalidRequestError(
"Can't call inserted_primary_key "
"when returning() "
"is used."
)
return self.context.inserted_primary_key
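    # Minimal usage sketch (hypothetical "users" table with a single
    # autoincrement primary key column; illustrative only):
    #
    #     result = conn.execute(users.insert().values(name="wendy"))
    #     result.inserted_primary_key  # e.g. [1]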
def last_updated_params(self):
"""Return the collection of updated parameters from this
execution.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an update() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isupdate:
raise exc.InvalidRequestError(
"Statement is not an update() " "expression construct."
)
elif self.context.executemany:
return self.context.compiled_parameters
else:
return self.context.compiled_parameters[0]
def last_inserted_params(self):
"""Return the collection of inserted parameters from this
execution.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isinsert:
raise exc.InvalidRequestError(
"Statement is not an insert() " "expression construct."
)
elif self.context.executemany:
return self.context.compiled_parameters
else:
return self.context.compiled_parameters[0]
@property
def returned_defaults(self):
"""Return the values of default columns that were fetched using
the :meth:`.ValuesBase.return_defaults` feature.
The value is an instance of :class:`.RowProxy`, or ``None``
if :meth:`.ValuesBase.return_defaults` was not used or if the
backend does not support RETURNING.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.ValuesBase.return_defaults`
"""
return self.context.returned_defaults
def lastrow_has_defaults(self):
"""Return ``lastrow_has_defaults()`` from the underlying
:class:`.ExecutionContext`.
See :class:`.ExecutionContext` for details.
"""
return self.context.lastrow_has_defaults()
def postfetch_cols(self):
"""Return ``postfetch_cols()`` from the underlying
:class:`.ExecutionContext`.
See :class:`.ExecutionContext` for details.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() or update() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isinsert and not self.context.isupdate:
raise exc.InvalidRequestError(
"Statement is not an insert() or update() "
"expression construct."
)
return self.context.postfetch_cols
def prefetch_cols(self):
"""Return ``prefetch_cols()`` from the underlying
:class:`.ExecutionContext`.
See :class:`.ExecutionContext` for details.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() or update() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isinsert and not self.context.isupdate:
raise exc.InvalidRequestError(
"Statement is not an insert() or update() "
"expression construct."
)
return self.context.prefetch_cols
def supports_sane_rowcount(self):
"""Return ``supports_sane_rowcount`` from the dialect.
See :attr:`_engine.ResultProxy.rowcount` for background.
"""
return self.dialect.supports_sane_rowcount
def supports_sane_multi_rowcount(self):
"""Return ``supports_sane_multi_rowcount`` from the dialect.
See :attr:`_engine.ResultProxy.rowcount` for background.
"""
return self.dialect.supports_sane_multi_rowcount
def _fetchone_impl(self):
try:
return self.cursor.fetchone()
except AttributeError as err:
return self._non_result(None, err)
def _fetchmany_impl(self, size=None):
try:
if size is None:
return self.cursor.fetchmany()
else:
return self.cursor.fetchmany(size)
except AttributeError as err:
return self._non_result([], err)
def _fetchall_impl(self):
try:
return self.cursor.fetchall()
except AttributeError as err:
return self._non_result([], err)
def _non_result(self, default, err=None):
if self._metadata is None:
util.raise_(
exc.ResourceClosedError(
"This result object does not return rows. "
"It has been closed automatically."
),
replace_context=err,
)
elif self.closed:
util.raise_(
exc.ResourceClosedError("This result object is closed."),
replace_context=err,
)
else:
return default
def process_rows(self, rows):
process_row = self._process_row
metadata = self._metadata
keymap = metadata._keymap
processors = metadata._processors
if self._echo:
log = self.context.engine.logger.debug
l = []
for row in rows:
log("Row %r", sql_util._repr_row(row))
l.append(process_row(metadata, row, processors, keymap))
return l
else:
return [
process_row(metadata, row, processors, keymap) for row in rows
]
def fetchall(self):
"""Fetch all rows, just like DB-API ``cursor.fetchall()``.
After all rows have been exhausted, the underlying DBAPI
cursor resource is released, and the object may be safely
discarded.
Subsequent calls to :meth:`_engine.ResultProxy.fetchall` will return
an empty list. After the :meth:`_engine.ResultProxy.close` method is
called, the method will raise :class:`.ResourceClosedError`.
:return: a list of :class:`.RowProxy` objects
"""
try:
l = self.process_rows(self._fetchall_impl())
self._soft_close()
return l
except BaseException as e:
self.connection._handle_dbapi_exception(
e, None, None, self.cursor, self.context
)
def fetchmany(self, size=None):
"""Fetch many rows, just like DB-API
``cursor.fetchmany(size=cursor.arraysize)``.
After all rows have been exhausted, the underlying DBAPI
cursor resource is released, and the object may be safely
discarded.
Calls to :meth:`_engine.ResultProxy.fetchmany`
after all rows have been
exhausted will return
an empty list. After the :meth:`_engine.ResultProxy.close` method is
called, the method will raise :class:`.ResourceClosedError`.
:return: a list of :class:`.RowProxy` objects
"""
try:
l = self.process_rows(self._fetchmany_impl(size))
if len(l) == 0:
self._soft_close()
return l
except BaseException as e:
self.connection._handle_dbapi_exception(
e, None, None, self.cursor, self.context
)
def fetchone(self):
"""Fetch one row, just like DB-API ``cursor.fetchone()``.
After all rows have been exhausted, the underlying DBAPI
cursor resource is released, and the object may be safely
discarded.
Calls to :meth:`_engine.ResultProxy.fetchone` after all rows have
been exhausted will return ``None``.
After the :meth:`_engine.ResultProxy.close` method is
called, the method will raise :class:`.ResourceClosedError`.
:return: a :class:`.RowProxy` object, or None if no rows remain
"""
try:
row = self._fetchone_impl()
if row is not None:
return self.process_rows([row])[0]
else:
self._soft_close()
return None
except BaseException as e:
self.connection._handle_dbapi_exception(
e, None, None, self.cursor, self.context
)
def first(self):
"""Fetch the first row and then close the result set unconditionally.
After calling this method, the object is fully closed,
e.g. the :meth:`_engine.ResultProxy.close`
method will have been called.
:return: a :class:`.RowProxy` object, or None if no rows remain
"""
if self._metadata is None:
return self._non_result(None)
try:
row = self._fetchone_impl()
except BaseException as e:
self.connection._handle_dbapi_exception(
e, None, None, self.cursor, self.context
)
try:
if row is not None:
return self.process_rows([row])[0]
else:
return None
finally:
self.close()
def scalar(self):
"""Fetch the first column of the first row, and close the result set.
After calling this method, the object is fully closed,
e.g. the :meth:`_engine.ResultProxy.close`
method will have been called.
        :return: a Python scalar value, or None if no rows remain
"""
row = self.first()
if row is not None:
return row[0]
else:
return None
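    # Usage sketch (hypothetical query; illustrative only):
    #
    #     count = conn.execute("SELECT COUNT(*) FROM users").scalar()
    #
    # equivalent to calling first() and taking column zero, with the result
    # set fully closed in either case.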
class BufferedRowResultProxy(ResultProxy):
"""A ResultProxy with row buffering behavior.
``ResultProxy`` that buffers the contents of a selection of rows
before ``fetchone()`` is called. This is to allow the results of
``cursor.description`` to be available immediately, when
interfacing with a DB-API that requires rows to be consumed before
this information is available (currently psycopg2, when used with
server-side cursors).
The pre-fetching behavior fetches only one row initially, and then
grows its buffer size by a fixed amount with each successive need
for additional rows up to a size of 1000.
The size argument is configurable using the ``max_row_buffer``
execution option::
with psycopg2_engine.connect() as conn:
result = conn.execution_options(
stream_results=True, max_row_buffer=50
).execute("select * from table")
.. versionadded:: 1.0.6 Added the ``max_row_buffer`` option.
.. seealso::
:ref:`psycopg2_execution_options`
"""
def _init_metadata(self):
self._max_row_buffer = self.context.execution_options.get(
"max_row_buffer", None
)
self.__buffer_rows()
super(BufferedRowResultProxy, self)._init_metadata()
# this is a "growth chart" for the buffering of rows.
# each successive __buffer_rows call will use the next
# value in the list for the buffer size until the max
# is reached
size_growth = {
1: 5,
5: 10,
10: 20,
20: 50,
50: 100,
100: 250,
250: 500,
500: 1000,
}
def __buffer_rows(self):
if self.cursor is None:
return
size = getattr(self, "_bufsize", 1)
self.__rowbuffer = collections.deque(self.cursor.fetchmany(size))
self._bufsize = self.size_growth.get(size, size)
if self._max_row_buffer is not None:
self._bufsize = min(self._max_row_buffer, self._bufsize)
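    # worked example of the growth chart above: successive __buffer_rows()
    # calls fetch 1, 5, 10, 20, 50, 100, 250, 500 and then 1000 rows per
    # call, staying at 1000 thereafter (size_growth has no entry for 1000),
    # always capped by max_row_buffer when that option is set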
def _soft_close(self, **kw):
self.__rowbuffer.clear()
super(BufferedRowResultProxy, self)._soft_close(**kw)
def _fetchone_impl(self):
if self.cursor is None:
return self._non_result(None)
if not self.__rowbuffer:
self.__buffer_rows()
if not self.__rowbuffer:
return None
return self.__rowbuffer.popleft()
def _fetchmany_impl(self, size=None):
if size is None:
return self._fetchall_impl()
result = []
for x in range(0, size):
row = self._fetchone_impl()
if row is None:
break
result.append(row)
return result
def _fetchall_impl(self):
if self.cursor is None:
return self._non_result([])
self.__rowbuffer.extend(self.cursor.fetchall())
ret = self.__rowbuffer
self.__rowbuffer = collections.deque()
return ret
class FullyBufferedResultProxy(ResultProxy):
"""A result proxy that buffers rows fully upon creation.
Used for operations where a result is to be delivered
    after the database conversation cannot be continued,
such as MSSQL INSERT...OUTPUT after an autocommit.
"""
def _init_metadata(self):
super(FullyBufferedResultProxy, self)._init_metadata()
self.__rowbuffer = self._buffer_rows()
def _buffer_rows(self):
return collections.deque(self.cursor.fetchall())
def _soft_close(self, **kw):
self.__rowbuffer.clear()
super(FullyBufferedResultProxy, self)._soft_close(**kw)
def _fetchone_impl(self):
if self.__rowbuffer:
return self.__rowbuffer.popleft()
else:
return self._non_result(None)
def _fetchmany_impl(self, size=None):
if size is None:
return self._fetchall_impl()
result = []
for x in range(0, size):
row = self._fetchone_impl()
if row is None:
break
result.append(row)
return result
def _fetchall_impl(self):
if not self.cursor:
return self._non_result([])
ret = self.__rowbuffer
self.__rowbuffer = collections.deque()
return ret
class BufferedColumnRow(RowProxy):
def __init__(self, parent, row, processors, keymap):
# preprocess row
row = list(row)
# this is a tad faster than using enumerate
index = 0
for processor in parent._orig_processors:
if processor is not None:
row[index] = processor(row[index])
index += 1
row = tuple(row)
super(BufferedColumnRow, self).__init__(
parent, row, processors, keymap
)
class BufferedColumnResultProxy(ResultProxy):
"""A ResultProxy with column buffering behavior.
``ResultProxy`` that loads all columns into memory each time
fetchone() is called. If fetchmany() or fetchall() are called,
the full grid of results is fetched. This is to operate with
databases where result rows contain "live" results that fall out
of scope unless explicitly fetched.
.. versionchanged:: 1.2 This :class:`_engine.ResultProxy` is not used by
any SQLAlchemy-included dialects.
"""
_process_row = BufferedColumnRow
def _init_metadata(self):
super(BufferedColumnResultProxy, self)._init_metadata()
metadata = self._metadata
# don't double-replace the processors, in the case
# of a cached ResultMetaData
if metadata._orig_processors is None:
# orig_processors will be used to preprocess each row when
# they are constructed.
metadata._orig_processors = metadata._processors
            # replace all the type processors with None processors.
metadata._processors = [None for _ in range(len(metadata.keys))]
keymap = {}
for k, (func, obj, index) in metadata._keymap.items():
keymap[k] = (None, obj, index)
metadata._keymap = keymap
def fetchall(self):
# can't call cursor.fetchall(), since rows must be
# fully processed before requesting more from the DBAPI.
l = []
while True:
row = self.fetchone()
if row is None:
break
l.append(row)
return l
def fetchmany(self, size=None):
# can't call cursor.fetchmany(), since rows must be
# fully processed before requesting more from the DBAPI.
if size is None:
return self.fetchall()
l = []
for i in range(size):
row = self.fetchone()
if row is None:
break
l.append(row)
return l
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/engine/default.py
|
# engine/default.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Default implementations of per-dialect sqlalchemy.engine classes.
These are semi-private implementation classes which are only of importance
to database dialect authors; dialects will usually use the classes here
as the base class for their own corresponding classes.
"""
import codecs
import random
import re
import weakref
from . import interfaces
from . import reflection
from . import result
from .. import event
from .. import exc
from .. import pool
from .. import processors
from .. import types as sqltypes
from .. import util
from ..sql import compiler
from ..sql import expression
from ..sql import schema
from ..sql.elements import quoted_name
AUTOCOMMIT_REGEXP = re.compile(
r"\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER)", re.I | re.UNICODE
)
# When we're handed literal SQL, ensure it's a SELECT query
SERVER_SIDE_CURSOR_RE = re.compile(r"\s*SELECT", re.I | re.UNICODE)
class DefaultDialect(interfaces.Dialect):
"""Default implementation of Dialect"""
statement_compiler = compiler.SQLCompiler
ddl_compiler = compiler.DDLCompiler
type_compiler = compiler.GenericTypeCompiler
preparer = compiler.IdentifierPreparer
supports_alter = True
supports_comments = False
inline_comments = False
# the first value we'd get for an autoincrement
# column.
default_sequence_base = 1
# most DBAPIs happy with this for execute().
# not cx_oracle.
execute_sequence_format = tuple
supports_views = True
supports_sequences = False
sequences_optional = False
preexecute_autoincrement_sequences = False
postfetch_lastrowid = True
implicit_returning = False
supports_right_nested_joins = True
cte_follows_insert = False
supports_native_enum = False
supports_native_boolean = False
non_native_boolean_check_constraint = True
supports_simple_order_by_label = True
tuple_in_values = False
engine_config_types = util.immutabledict(
[
("convert_unicode", util.bool_or_str("force")),
("pool_timeout", util.asint),
("echo", util.bool_or_str("debug")),
("echo_pool", util.bool_or_str("debug")),
("pool_recycle", util.asint),
("pool_size", util.asint),
("max_overflow", util.asint),
("pool_threadlocal", util.asbool),
]
)
# if the NUMERIC type
# returns decimal.Decimal.
# *not* the FLOAT type however.
supports_native_decimal = False
if util.py3k:
supports_unicode_statements = True
supports_unicode_binds = True
returns_unicode_strings = True
description_encoding = None
else:
supports_unicode_statements = False
supports_unicode_binds = False
returns_unicode_strings = False
description_encoding = "use_encoding"
name = "default"
# length at which to truncate
# any identifier.
max_identifier_length = 9999
_user_defined_max_identifier_length = None
# length at which to truncate
# the name of an index.
# Usually None to indicate
# 'use max_identifier_length'.
# thanks to MySQL, sigh
max_index_name_length = None
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
colspecs = {}
default_paramstyle = "named"
supports_default_values = False
supports_empty_insert = True
supports_multivalues_insert = False
supports_is_distinct_from = True
supports_server_side_cursors = False
# extra record-level locking features (#4860)
supports_for_update_of = False
server_version_info = None
construct_arguments = None
"""Optional set of argument specifiers for various SQLAlchemy
constructs, typically schema items.
To implement, establish as a series of tuples, as in::
construct_arguments = [
(schema.Index, {
"using": False,
"where": None,
"ops": None
})
]
If the above construct is established on the PostgreSQL dialect,
the :class:`.Index` construct will now accept the keyword arguments
    ``postgresql_using``, ``postgresql_where``, and ``postgresql_ops``.
Any other argument specified to the constructor of :class:`.Index`
which is prefixed with ``postgresql_`` will raise :class:`.ArgumentError`.
A dialect which does not include a ``construct_arguments`` member will
not participate in the argument validation system. For such a dialect,
any argument name is accepted by all participating constructs, within
the namespace of arguments prefixed with that dialect name. The rationale
here is so that third-party dialects that haven't yet implemented this
feature continue to function in the old way.
.. versionadded:: 0.9.2
.. seealso::
:class:`.DialectKWArgs` - implementing base class which consumes
:attr:`.DefaultDialect.construct_arguments`
"""
# indicates symbol names are
# UPPERCASEd if they are case insensitive
# within the database.
# if this is True, the methods normalize_name()
# and denormalize_name() must be provided.
requires_name_normalize = False
reflection_options = ()
dbapi_exception_translation_map = util.immutabledict()
"""mapping used in the extremely unusual case that a DBAPI's
published exceptions don't actually have the __name__ that they
are linked towards.
.. versionadded:: 1.0.5
"""
@util.deprecated_params(
convert_unicode=(
"1.3",
"The :paramref:`_sa.create_engine.convert_unicode` parameter "
"and corresponding dialect-level parameters are deprecated, "
"and will be removed in a future release. Modern DBAPIs support "
"Python Unicode natively and this parameter is unnecessary.",
)
)
def __init__(
self,
convert_unicode=False,
encoding="utf-8",
paramstyle=None,
dbapi=None,
implicit_returning=None,
supports_right_nested_joins=None,
case_sensitive=True,
supports_native_boolean=None,
empty_in_strategy="static",
max_identifier_length=None,
label_length=None,
**kwargs
):
if not getattr(self, "ported_sqla_06", True):
util.warn(
"The %s dialect is not yet ported to the 0.6 format"
% self.name
)
self.convert_unicode = convert_unicode
self.encoding = encoding
self.positional = False
self._ischema = None
self.dbapi = dbapi
if paramstyle is not None:
self.paramstyle = paramstyle
elif self.dbapi is not None:
self.paramstyle = self.dbapi.paramstyle
else:
self.paramstyle = self.default_paramstyle
if implicit_returning is not None:
self.implicit_returning = implicit_returning
self.positional = self.paramstyle in ("qmark", "format", "numeric")
self.identifier_preparer = self.preparer(self)
self.type_compiler = self.type_compiler(self)
if supports_right_nested_joins is not None:
self.supports_right_nested_joins = supports_right_nested_joins
if supports_native_boolean is not None:
self.supports_native_boolean = supports_native_boolean
self.case_sensitive = case_sensitive
self.empty_in_strategy = empty_in_strategy
if empty_in_strategy == "static":
self._use_static_in = True
elif empty_in_strategy in ("dynamic", "dynamic_warn"):
self._use_static_in = False
self._warn_on_empty_in = empty_in_strategy == "dynamic_warn"
else:
raise exc.ArgumentError(
"empty_in_strategy may be 'static', "
"'dynamic', or 'dynamic_warn'"
)
self._user_defined_max_identifier_length = max_identifier_length
if self._user_defined_max_identifier_length:
self.max_identifier_length = (
self._user_defined_max_identifier_length
)
self.label_length = label_length
if self.description_encoding == "use_encoding":
self._description_decoder = (
processors.to_unicode_processor_factory
)(encoding)
elif self.description_encoding is not None:
self._description_decoder = (
processors.to_unicode_processor_factory
)(self.description_encoding)
self._encoder = codecs.getencoder(self.encoding)
self._decoder = processors.to_unicode_processor_factory(self.encoding)
@util.memoized_property
def _type_memos(self):
return weakref.WeakKeyDictionary()
@property
def dialect_description(self):
return self.name + "+" + self.driver
@property
def supports_sane_rowcount_returning(self):
"""True if this dialect supports sane rowcount even if RETURNING is
in use.
        For dialects that don't support RETURNING, this is synonymous
with supports_sane_rowcount.
"""
return self.supports_sane_rowcount
@classmethod
def get_pool_class(cls, url):
return getattr(cls, "poolclass", pool.QueuePool)
@classmethod
def load_provisioning(cls):
package = ".".join(cls.__module__.split(".")[0:-1])
try:
__import__(package + ".provision")
except ImportError:
pass
def initialize(self, connection):
try:
self.server_version_info = self._get_server_version_info(
connection
)
except NotImplementedError:
self.server_version_info = None
try:
self.default_schema_name = self._get_default_schema_name(
connection
)
except NotImplementedError:
self.default_schema_name = None
try:
self.default_isolation_level = self.get_isolation_level(
connection.connection
)
except NotImplementedError:
self.default_isolation_level = None
self.returns_unicode_strings = self._check_unicode_returns(connection)
if (
self.description_encoding is not None
and self._check_unicode_description(connection)
):
self._description_decoder = self.description_encoding = None
if not self._user_defined_max_identifier_length:
max_ident_length = self._check_max_identifier_length(connection)
if max_ident_length:
self.max_identifier_length = max_ident_length
if (
self.label_length
and self.label_length > self.max_identifier_length
):
raise exc.ArgumentError(
"Label length of %d is greater than this dialect's"
" maximum identifier length of %d"
% (self.label_length, self.max_identifier_length)
)
def on_connect(self):
# inherits the docstring from interfaces.Dialect.on_connect
return None
def _check_max_identifier_length(self, connection):
"""Perform a connection / server version specific check to determine
the max_identifier_length.
If the dialect's class level max_identifier_length should be used,
can return None.
.. versionadded:: 1.3.9
"""
return None
def _check_unicode_returns(self, connection, additional_tests=None):
if util.py2k and not self.supports_unicode_statements:
cast_to = util.binary_type
else:
cast_to = util.text_type
if self.positional:
parameters = self.execute_sequence_format()
else:
parameters = {}
def check_unicode(test):
statement = cast_to(
expression.select([test]).compile(dialect=self)
)
try:
cursor = connection.connection.cursor()
connection._cursor_execute(cursor, statement, parameters)
row = cursor.fetchone()
cursor.close()
except exc.DBAPIError as de:
# note that _cursor_execute() will have closed the cursor
# if an exception is thrown.
util.warn(
"Exception attempting to "
"detect unicode returns: %r" % de
)
return False
else:
return isinstance(row[0], util.text_type)
tests = [
# detect plain VARCHAR
expression.cast(
expression.literal_column("'test plain returns'"),
sqltypes.VARCHAR(60),
),
# detect if there's an NVARCHAR type with different behavior
# available
expression.cast(
expression.literal_column("'test unicode returns'"),
sqltypes.Unicode(60),
),
]
if additional_tests:
tests += additional_tests
results = {check_unicode(test) for test in tests}
if results.issuperset([True, False]):
return "conditional"
else:
return results == {True}
def _check_unicode_description(self, connection):
# all DBAPIs on Py2K return cursor.description as encoded,
# until pypy2.1beta2 with sqlite, so let's just check it -
# it's likely others will start doing this too in Py2k.
if util.py2k and not self.supports_unicode_statements:
cast_to = util.binary_type
else:
cast_to = util.text_type
cursor = connection.connection.cursor()
try:
cursor.execute(
cast_to(
expression.select(
[expression.literal_column("'x'").label("some_label")]
).compile(dialect=self)
)
)
return isinstance(cursor.description[0][0], util.text_type)
finally:
cursor.close()
def type_descriptor(self, typeobj):
"""Provide a database-specific :class:`.TypeEngine` object, given
the generic object which comes from the types module.
This method looks for a dictionary called
``colspecs`` as a class or instance-level variable,
and passes on to :func:`_types.adapt_type`.
"""
return sqltypes.adapt_type(typeobj, self.colspecs)
def reflecttable(
self,
connection,
table,
include_columns,
exclude_columns,
resolve_fks,
**opts
):
insp = reflection.Inspector.from_engine(connection)
return insp.reflecttable(
table, include_columns, exclude_columns, resolve_fks, **opts
)
def get_pk_constraint(self, conn, table_name, schema=None, **kw):
"""Compatibility method, adapts the result of get_primary_keys()
for those dialects which don't implement get_pk_constraint().
"""
return {
"constrained_columns": self.get_primary_keys(
conn, table_name, schema=schema, **kw
)
}
def validate_identifier(self, ident):
if len(ident) > self.max_identifier_length:
raise exc.IdentifierError(
"Identifier '%s' exceeds maximum length of %d characters"
% (ident, self.max_identifier_length)
)
def connect(self, *cargs, **cparams):
# inherits the docstring from interfaces.Dialect.connect
return self.dbapi.connect(*cargs, **cparams)
def create_connect_args(self, url):
# inherits the docstring from interfaces.Dialect.create_connect_args
opts = url.translate_connect_args()
opts.update(url.query)
return [[], opts]
def set_engine_execution_options(self, engine, opts):
if "isolation_level" in opts:
isolation_level = opts["isolation_level"]
@event.listens_for(engine, "engine_connect")
def set_isolation(connection, branch):
if not branch:
self._set_connection_isolation(connection, isolation_level)
if "schema_translate_map" in opts:
getter = schema._schema_getter(opts["schema_translate_map"])
engine.schema_for_object = getter
@event.listens_for(engine, "engine_connect")
def set_schema_translate_map(connection, branch):
connection.schema_for_object = getter
def set_connection_execution_options(self, connection, opts):
if "isolation_level" in opts:
self._set_connection_isolation(connection, opts["isolation_level"])
if "schema_translate_map" in opts:
getter = schema._schema_getter(opts["schema_translate_map"])
connection.schema_for_object = getter
def _set_connection_isolation(self, connection, level):
if connection.in_transaction():
util.warn(
"Connection is already established with a Transaction; "
"setting isolation_level may implicitly rollback or commit "
"the existing transaction, or have no effect until "
"next transaction"
)
self.set_isolation_level(connection.connection, level)
connection.connection._connection_record.finalize_callback.append(
self.reset_isolation_level
)
def do_begin(self, dbapi_connection):
pass
def do_rollback(self, dbapi_connection):
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
dbapi_connection.commit()
def do_close(self, dbapi_connection):
dbapi_connection.close()
@util.memoized_property
def _dialect_specific_select_one(self):
return str(expression.select([1]).compile(dialect=self))
def do_ping(self, dbapi_connection):
cursor = None
try:
cursor = dbapi_connection.cursor()
try:
cursor.execute(self._dialect_specific_select_one)
finally:
cursor.close()
except self.dbapi.Error as err:
if self.is_disconnect(err, dbapi_connection, cursor):
return False
else:
raise
else:
return True
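    # Sketch of the intended contract (illustrative): the pool's "pre_ping"
    # feature calls this before handing out a pooled connection, treating a
    # False return as "connection is gone, recycle it", while any error that
    # is not a disconnect propagates to the caller:
    #
    #     if not dialect.do_ping(dbapi_connection):
    #         recycle_connection()  # hypothetical pool-side helper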
def create_xid(self):
"""Create a random two-phase transaction ID.
This id will be passed to do_begin_twophase(), do_rollback_twophase(),
do_commit_twophase(). Its format is unspecified.
"""
return "_sa_%032x" % random.randint(0, 2 ** 128)
def do_savepoint(self, connection, name):
connection.execute(expression.SavepointClause(name))
def do_rollback_to_savepoint(self, connection, name):
connection.execute(expression.RollbackToSavepointClause(name))
def do_release_savepoint(self, connection, name):
connection.execute(expression.ReleaseSavepointClause(name))
def do_executemany(self, cursor, statement, parameters, context=None):
cursor.executemany(statement, parameters)
def do_execute(self, cursor, statement, parameters, context=None):
cursor.execute(statement, parameters)
def do_execute_no_params(self, cursor, statement, context=None):
cursor.execute(statement)
def is_disconnect(self, e, connection, cursor):
return False
def reset_isolation_level(self, dbapi_conn):
# default_isolation_level is read from the first connection
# after the initial set of 'isolation_level', if any, so is
# the configured default of this dialect.
self.set_isolation_level(dbapi_conn, self.default_isolation_level)
def normalize_name(self, name):
if name is None:
return None
if util.py2k:
if isinstance(name, str):
name = name.decode(self.encoding)
name_lower = name.lower()
name_upper = name.upper()
if name_upper == name_lower:
            # name has no upper/lower conversion, e.g. non-European
            # characters. return unchanged
# return unchanged
return name
elif name_upper == name and not (
self.identifier_preparer._requires_quotes
)(name_lower):
# name is all uppercase and doesn't require quoting; normalize
# to all lower case
return name_lower
elif name_lower == name:
# name is all lower case, which if denormalized means we need to
# force quoting on it
return quoted_name(name, quote=True)
else:
            # name is mixed case, meaning it will be quoted in SQL when used
            # later; no normalization is performed
return name
def denormalize_name(self, name):
if name is None:
return None
name_lower = name.lower()
name_upper = name.upper()
if name_upper == name_lower:
            # name has no upper/lower conversion, e.g. non-European
            # characters. return unchanged
# return unchanged
return name
elif name_lower == name and not (
self.identifier_preparer._requires_quotes
)(name_lower):
name = name_upper
if util.py2k:
if not self.supports_unicode_binds:
name = name.encode(self.encoding)
else:
name = unicode(name) # noqa
return name
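    # Worked example (assuming a backend that stores case-insensitive names
    # as uppercase, i.e. one where requires_name_normalize is True;
    # illustrative only):
    #
    #     normalize_name("MY_TABLE")    # -> "my_table"
    #     normalize_name("my_table")    # -> quoted_name("my_table", quote=True)
    #     normalize_name("MyTable")     # -> "MyTable" (mixed case, unchanged)
    #     denormalize_name("my_table")  # -> "MY_TABLE"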
class _RendersLiteral(object):
def literal_processor(self, dialect):
def process(value):
return "'%s'" % value
return process
class _StrDateTime(_RendersLiteral, sqltypes.DateTime):
pass
class _StrDate(_RendersLiteral, sqltypes.Date):
pass
class _StrTime(_RendersLiteral, sqltypes.Time):
pass
class StrCompileDialect(DefaultDialect):
statement_compiler = compiler.StrSQLCompiler
ddl_compiler = compiler.DDLCompiler
type_compiler = compiler.StrSQLTypeCompiler
preparer = compiler.IdentifierPreparer
supports_sequences = True
sequences_optional = True
preexecute_autoincrement_sequences = False
implicit_returning = False
supports_native_boolean = True
supports_simple_order_by_label = True
colspecs = {
sqltypes.DateTime: _StrDateTime,
sqltypes.Date: _StrDate,
sqltypes.Time: _StrTime,
}
class DefaultExecutionContext(interfaces.ExecutionContext):
isinsert = False
isupdate = False
isdelete = False
is_crud = False
is_text = False
isddl = False
executemany = False
compiled = None
statement = None
result_column_struct = None
returned_defaults = None
_is_implicit_returning = False
_is_explicit_returning = False
# a hook for SQLite's translation of
# result column names
_translate_colname = None
_expanded_parameters = util.immutabledict()
@classmethod
def _init_ddl(cls, dialect, connection, dbapi_connection, compiled_ddl):
"""Initialize execution context for a DDLElement construct."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.compiled = compiled = compiled_ddl
self.isddl = True
self.execution_options = compiled.execution_options
if connection._execution_options:
self.execution_options = dict(self.execution_options)
self.execution_options.update(connection._execution_options)
if not dialect.supports_unicode_statements:
self.unicode_statement = util.text_type(compiled)
self.statement = dialect._encoder(self.unicode_statement)[0]
else:
self.statement = self.unicode_statement = util.text_type(compiled)
self.cursor = self.create_cursor()
self.compiled_parameters = []
if dialect.positional:
self.parameters = [dialect.execute_sequence_format()]
else:
self.parameters = [{}]
return self
@classmethod
def _init_compiled(
cls, dialect, connection, dbapi_connection, compiled, parameters
):
"""Initialize execution context for a Compiled construct."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.compiled = compiled
# this should be caught in the engine before
# we get here
assert compiled.can_execute
self.execution_options = compiled.execution_options.union(
connection._execution_options
)
self.result_column_struct = (
compiled._result_columns,
compiled._ordered_columns,
compiled._textual_ordered_columns,
)
self.unicode_statement = util.text_type(compiled)
if not dialect.supports_unicode_statements:
self.statement = self.unicode_statement.encode(
self.dialect.encoding
)
else:
self.statement = self.unicode_statement
self.isinsert = compiled.isinsert
self.isupdate = compiled.isupdate
self.isdelete = compiled.isdelete
self.is_text = compiled.isplaintext
if not parameters:
self.compiled_parameters = [compiled.construct_params()]
else:
self.compiled_parameters = [
compiled.construct_params(m, _group_number=grp)
for grp, m in enumerate(parameters)
]
self.executemany = len(parameters) > 1
self.cursor = self.create_cursor()
if self.isinsert or self.isupdate or self.isdelete:
self.is_crud = True
self._is_explicit_returning = bool(compiled.statement._returning)
self._is_implicit_returning = bool(
compiled.returning and not compiled.statement._returning
)
if self.compiled.insert_prefetch or self.compiled.update_prefetch:
if self.executemany:
self._process_executemany_defaults()
else:
self._process_executesingle_defaults()
processors = compiled._bind_processors
if compiled.contains_expanding_parameters:
# copy processors for this case as they will be mutated
processors = dict(processors)
positiontup = self._expand_in_parameters(compiled, processors)
elif compiled.positional:
positiontup = self.compiled.positiontup
# Convert the dictionary of bind parameter values
# into a dict or list to be sent to the DBAPI's
# execute() or executemany() method.
parameters = []
if compiled.positional:
for compiled_params in self.compiled_parameters:
param = []
for key in positiontup:
if key in processors:
param.append(processors[key](compiled_params[key]))
else:
param.append(compiled_params[key])
parameters.append(dialect.execute_sequence_format(param))
else:
encode = not dialect.supports_unicode_statements
for compiled_params in self.compiled_parameters:
if encode:
param = dict(
(
dialect._encoder(key)[0],
processors[key](compiled_params[key])
if key in processors
else compiled_params[key],
)
for key in compiled_params
)
else:
param = dict(
(
key,
processors[key](compiled_params[key])
if key in processors
else compiled_params[key],
)
for key in compiled_params
)
parameters.append(param)
self.parameters = dialect.execute_sequence_format(parameters)
return self
def _expand_in_parameters(self, compiled, processors):
"""handle special 'expanding' parameters, IN tuples that are rendered
on a per-parameter basis for an otherwise fixed SQL statement string.
"""
if self.executemany:
raise exc.InvalidRequestError(
"'expanding' parameters can't be used with " "executemany()"
)
if self.compiled.positional and self.compiled._numeric_binds:
# I'm not familiar with any DBAPI that uses 'numeric'
raise NotImplementedError(
"'expanding' bind parameters not supported with "
"'numeric' paramstyle at this time."
)
self._expanded_parameters = {}
compiled_params = self.compiled_parameters[0]
if compiled.positional:
positiontup = []
else:
positiontup = None
replacement_expressions = {}
to_update_sets = {}
for name in (
self.compiled.positiontup
if compiled.positional
else self.compiled.binds
):
parameter = self.compiled.binds[name]
if parameter.expanding:
if name in replacement_expressions:
to_update = to_update_sets[name]
else:
# we are removing the parameter from compiled_params
# because it is a list value, which is not expected by
# TypeEngine objects that would otherwise be asked to
# process it. the single name is being replaced with
# individual numbered parameters for each value in the
# param.
values = compiled_params.pop(name)
if not values:
to_update = to_update_sets[name] = []
replacement_expressions[
name
] = self.compiled.visit_empty_set_expr(
parameter._expanding_in_types
if parameter._expanding_in_types
else [parameter.type]
)
elif isinstance(values[0], (tuple, list)):
to_update = to_update_sets[name] = [
("%s_%s_%s" % (name, i, j), value)
for i, tuple_element in enumerate(values, 1)
for j, value in enumerate(tuple_element, 1)
]
replacement_expressions[name] = (
"VALUES " if self.dialect.tuple_in_values else ""
) + ", ".join(
"(%s)"
% ", ".join(
self.compiled.bindtemplate
% {
"name": to_update[
i * len(tuple_element) + j
][0]
}
for j, value in enumerate(tuple_element)
)
for i, tuple_element in enumerate(values)
)
else:
to_update = to_update_sets[name] = [
("%s_%s" % (name, i), value)
for i, value in enumerate(values, 1)
]
replacement_expressions[name] = ", ".join(
self.compiled.bindtemplate % {"name": key}
for key, value in to_update
)
compiled_params.update(to_update)
processors.update(
(key, processors[name])
for key, value in to_update
if name in processors
)
if compiled.positional:
positiontup.extend(name for name, value in to_update)
self._expanded_parameters[name] = [
expand_key for expand_key, value in to_update
]
elif compiled.positional:
positiontup.append(name)
def process_expanding(m):
return replacement_expressions[m.group(1)]
self.statement = re.sub(
r"\[EXPANDING_(\S+)\]", process_expanding, self.statement
)
return positiontup
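    # Worked example (illustrative, "named" paramstyle): a compiled statement
    # containing the placeholder token
    #
    #     ... WHERE t.x IN ([EXPANDING_x_1])
    #
    # executed with the bound value [1, 2, 3] has the token rewritten to
    #
    #     ... WHERE t.x IN (:x_1_1, :x_1_2, :x_1_3)
    #
    # while compiled_params gains {"x_1_1": 1, "x_1_2": 2, "x_1_3": 3}.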
@classmethod
def _init_statement(
cls, dialect, connection, dbapi_connection, statement, parameters
):
"""Initialize execution context for a string SQL statement."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.is_text = True
# plain text statement
self.execution_options = connection._execution_options
if not parameters:
if self.dialect.positional:
self.parameters = [dialect.execute_sequence_format()]
else:
self.parameters = [{}]
elif isinstance(parameters[0], dialect.execute_sequence_format):
self.parameters = parameters
elif isinstance(parameters[0], dict):
if dialect.supports_unicode_statements:
self.parameters = parameters
else:
self.parameters = [
{dialect._encoder(k)[0]: d[k] for k in d}
for d in parameters
] or [{}]
else:
self.parameters = [
dialect.execute_sequence_format(p) for p in parameters
]
self.executemany = len(parameters) > 1
if not dialect.supports_unicode_statements and isinstance(
statement, util.text_type
):
self.unicode_statement = statement
self.statement = dialect._encoder(statement)[0]
else:
self.statement = self.unicode_statement = statement
self.cursor = self.create_cursor()
return self
@classmethod
def _init_default(cls, dialect, connection, dbapi_connection):
"""Initialize execution context for a ColumnDefault construct."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.execution_options = connection._execution_options
self.cursor = self.create_cursor()
return self
@util.memoized_property
def engine(self):
return self.root_connection.engine
@util.memoized_property
def postfetch_cols(self):
return self.compiled.postfetch
@util.memoized_property
def prefetch_cols(self):
if self.isinsert:
return self.compiled.insert_prefetch
elif self.isupdate:
return self.compiled.update_prefetch
else:
return ()
@util.memoized_property
def returning_cols(self):
        return self.compiled.returning
@util.memoized_property
def no_parameters(self):
return self.execution_options.get("no_parameters", False)
@util.memoized_property
def should_autocommit(self):
autocommit = self.execution_options.get(
"autocommit",
not self.compiled
and self.statement
and expression.PARSE_AUTOCOMMIT
or False,
)
if autocommit is expression.PARSE_AUTOCOMMIT:
return self.should_autocommit_text(self.unicode_statement)
else:
return autocommit
def _execute_scalar(self, stmt, type_):
"""Execute a string statement on the current cursor, returning a
scalar result.
Used to fire off sequences, default phrases, and "select lastrowid"
types of statements individually or in the context of a parent INSERT
or UPDATE statement.
"""
conn = self.root_connection
if (
isinstance(stmt, util.text_type)
and not self.dialect.supports_unicode_statements
):
stmt = self.dialect._encoder(stmt)[0]
if self.dialect.positional:
default_params = self.dialect.execute_sequence_format()
else:
default_params = {}
conn._cursor_execute(self.cursor, stmt, default_params, context=self)
r = self.cursor.fetchone()[0]
if type_ is not None:
# apply type post processors to the result
proc = type_._cached_result_processor(
self.dialect, self.cursor.description[0][1]
)
if proc:
return proc(r)
return r
@property
def connection(self):
return self.root_connection._branch()
def should_autocommit_text(self, statement):
return AUTOCOMMIT_REGEXP.match(statement)
def _use_server_side_cursor(self):
if not self.dialect.supports_server_side_cursors:
return False
if self.dialect.server_side_cursors:
use_server_side = self.execution_options.get(
"stream_results", True
) and (
(
self.compiled
and isinstance(
self.compiled.statement, expression.Selectable
)
or (
(
not self.compiled
or isinstance(
self.compiled.statement, expression.TextClause
)
)
and self.statement
and SERVER_SIDE_CURSOR_RE.match(self.statement)
)
)
)
else:
use_server_side = self.execution_options.get(
"stream_results", False
)
return use_server_side
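    # Usage sketch (illustrative, hypothetical engine/table names): with a
    # dialect that supports server-side cursors, streaming is requested per
    # execution, which routes create_cursor() below through
    # create_server_side_cursor():
    #
    #     with engine.connect() as conn:
    #         result = conn.execution_options(stream_results=True).execute(
    #             "SELECT * FROM big_table"
    #         )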
def create_cursor(self):
if self._use_server_side_cursor():
self._is_server_side = True
return self.create_server_side_cursor()
else:
self._is_server_side = False
return self._dbapi_connection.cursor()
def create_server_side_cursor(self):
raise NotImplementedError()
def pre_exec(self):
pass
def post_exec(self):
pass
def get_result_processor(self, type_, colname, coltype):
"""Return a 'result processor' for a given type as present in
cursor.description.
This has a default implementation that dialects can override
for context-sensitive result type handling.
"""
return type_._cached_result_processor(self.dialect, coltype)
def get_lastrowid(self):
"""return self.cursor.lastrowid, or equivalent, after an INSERT.
This may involve calling special cursor functions,
issuing a new SELECT on the cursor (or a new one),
or returning a stored value that was
calculated within post_exec().
This function will only be called for dialects
which support "implicit" primary key generation,
keep preexecute_autoincrement_sequences set to False,
and when no explicit id value was bound to the
statement.
The function is called once, directly after
post_exec() and before the transaction is committed
or ResultProxy is generated. If the post_exec()
method assigns a value to `self._lastrowid`, the
value is used in place of calling get_lastrowid().
Note that this method is *not* equivalent to the
``lastrowid`` method on ``ResultProxy``, which is a
direct proxy to the DBAPI ``lastrowid`` accessor
in all cases.
"""
return self.cursor.lastrowid
def handle_dbapi_exception(self, e):
pass
def get_result_proxy(self):
if self._is_server_side:
return result.BufferedRowResultProxy(self)
else:
return result.ResultProxy(self)
@property
def rowcount(self):
return self.cursor.rowcount
def supports_sane_rowcount(self):
return self.dialect.supports_sane_rowcount
def supports_sane_multi_rowcount(self):
return self.dialect.supports_sane_multi_rowcount
def _setup_crud_result_proxy(self):
if self.isinsert and not self.executemany:
if (
not self._is_implicit_returning
and not self.compiled.inline
and self.dialect.postfetch_lastrowid
):
self._setup_ins_pk_from_lastrowid()
elif not self._is_implicit_returning:
self._setup_ins_pk_from_empty()
result = self.get_result_proxy()
if self.isinsert:
if self._is_implicit_returning:
row = result.fetchone()
self.returned_defaults = row
self._setup_ins_pk_from_implicit_returning(row)
result._soft_close()
result._metadata = None
elif not self._is_explicit_returning:
result._soft_close()
result._metadata = None
elif self.isupdate and self._is_implicit_returning:
row = result.fetchone()
self.returned_defaults = row
result._soft_close()
result._metadata = None
elif result._metadata is None:
# no results, get rowcount
# (which requires open cursor on some drivers
# such as kintersbasdb, mxodbc)
result.rowcount
result._soft_close()
return result
def _setup_ins_pk_from_lastrowid(self):
key_getter = self.compiled._key_getters_for_crud_column[2]
table = self.compiled.statement.table
compiled_params = self.compiled_parameters[0]
lastrowid = self.get_lastrowid()
if lastrowid is not None:
autoinc_col = table._autoincrement_column
if autoinc_col is not None:
# apply type post processors to the lastrowid
proc = autoinc_col.type._cached_result_processor(
self.dialect, None
)
if proc is not None:
lastrowid = proc(lastrowid)
self.inserted_primary_key = [
lastrowid
if c is autoinc_col
else compiled_params.get(key_getter(c), None)
for c in table.primary_key
]
else:
# don't have a usable lastrowid, so
# do the same as _setup_ins_pk_from_empty
self.inserted_primary_key = [
compiled_params.get(key_getter(c), None)
for c in table.primary_key
]
def _setup_ins_pk_from_empty(self):
key_getter = self.compiled._key_getters_for_crud_column[2]
table = self.compiled.statement.table
compiled_params = self.compiled_parameters[0]
self.inserted_primary_key = [
compiled_params.get(key_getter(c), None) for c in table.primary_key
]
def _setup_ins_pk_from_implicit_returning(self, row):
if row is None:
self.inserted_primary_key = None
return
key_getter = self.compiled._key_getters_for_crud_column[2]
table = self.compiled.statement.table
compiled_params = self.compiled_parameters[0]
self.inserted_primary_key = [
row[col] if value is None else value
for col, value in [
(col, compiled_params.get(key_getter(col), None))
for col in table.primary_key
]
]
def lastrow_has_defaults(self):
return (self.isinsert or self.isupdate) and bool(
self.compiled.postfetch
)
def set_input_sizes(
self, translate=None, include_types=None, exclude_types=None
):
"""Given a cursor and ClauseParameters, call the appropriate
style of ``setinputsizes()`` on the cursor, using DB-API types
from the bind parameter's ``TypeEngine`` objects.
This method is only called by those dialects which require it,
currently cx_oracle.
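E.g., the sizes computed here may be inspected or modified with the
:meth:`.DialectEvents.do_setinputsizes` event; a sketch, assuming an
``engine`` is in scope::

    from sqlalchemy import event

    @event.listens_for(engine, "do_setinputsizes")
    def _inspect_inputsizes(
        inputsizes, cursor, statement, parameters, context
    ):
        for bindparam, dbapitype in inputsizes.items():
            print(bindparam.key, dbapitype)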
"""
if not hasattr(self.compiled, "bind_names"):
return
inputsizes = {}
for bindparam in self.compiled.bind_names:
dialect_impl = bindparam.type._unwrapped_dialect_impl(self.dialect)
dialect_impl_cls = type(dialect_impl)
dbtype = dialect_impl.get_dbapi_type(self.dialect.dbapi)
if (
dbtype is not None
and (
not exclude_types
or dbtype not in exclude_types
and dialect_impl_cls not in exclude_types
)
and (
not include_types
or dbtype in include_types
or dialect_impl_cls in include_types
)
):
inputsizes[bindparam] = dbtype
else:
inputsizes[bindparam] = None
if self.dialect._has_events:
self.dialect.dispatch.do_setinputsizes(
inputsizes, self.cursor, self.statement, self.parameters, self
)
if self.dialect.positional:
positional_inputsizes = []
for key in self.compiled.positiontup:
bindparam = self.compiled.binds[key]
dbtype = inputsizes.get(bindparam, None)
if dbtype is not None:
if key in self._expanded_parameters:
positional_inputsizes.extend(
[dbtype] * len(self._expanded_parameters[key])
)
else:
positional_inputsizes.append(dbtype)
try:
self.cursor.setinputsizes(*positional_inputsizes)
except BaseException as e:
self.root_connection._handle_dbapi_exception(
e, None, None, None, self
)
else:
keyword_inputsizes = {}
for bindparam, key in self.compiled.bind_names.items():
dbtype = inputsizes.get(bindparam, None)
if dbtype is not None:
if translate:
# TODO: this part won't work w/ the
# expanded_parameters feature, e.g. for cx_oracle
# quoted bound names
key = translate.get(key, key)
if not self.dialect.supports_unicode_binds:
key = self.dialect._encoder(key)[0]
if key in self._expanded_parameters:
keyword_inputsizes.update(
(expand_key, dbtype)
for expand_key in self._expanded_parameters[key]
)
else:
keyword_inputsizes[key] = dbtype
try:
self.cursor.setinputsizes(**keyword_inputsizes)
except BaseException as e:
self.root_connection._handle_dbapi_exception(
e, None, None, None, self
)
def _exec_default(self, column, default, type_):
if default.is_sequence:
return self.fire_sequence(default, type_)
elif default.is_callable:
self.current_column = column
return default.arg(self)
elif default.is_clause_element:
# TODO: expensive branching here should be
# pulled into _exec_scalar()
conn = self.connection
if not default._arg_is_typed:
default_arg = expression.type_coerce(default.arg, type_)
else:
default_arg = default.arg
c = expression.select([default_arg]).compile(bind=conn)
return conn._execute_compiled(c, (), {}).scalar()
else:
return default.arg
current_parameters = None
"""A dictionary of parameters applied to the current row.
This attribute is only available in the context of a user-defined default
generation function, e.g. as described at :ref:`context_default_functions`.
It consists of a dictionary which includes entries for each column/value
pair that is to be part of the INSERT or UPDATE statement. The keys of the
dictionary will be the key value of each :class:`_schema.Column`,
which is usually
synonymous with the name.
Note that the :attr:`.DefaultExecutionContext.current_parameters` attribute
does not accommodate for the "multi-values" feature of the
:meth:`_expression.Insert.values` method. The
:meth:`.DefaultExecutionContext.get_current_parameters` method should be
preferred.
.. seealso::
:meth:`.DefaultExecutionContext.get_current_parameters`
:ref:`context_default_functions`
"""
def get_current_parameters(self, isolate_multiinsert_groups=True):
"""Return a dictionary of parameters applied to the current row.
This method can only be used in the context of a user-defined default
generation function, e.g. as described at
:ref:`context_default_functions`. When invoked, a dictionary is
returned which includes entries for each column/value pair that is part
of the INSERT or UPDATE statement. The keys of the dictionary will be
the key value of each :class:`_schema.Column`,
which is usually synonymous
with the name.
:param isolate_multiinsert_groups=True: indicates that multi-valued
INSERT constructs created using :meth:`_expression.Insert.values`
should be
handled by returning only the subset of parameters that are local
to the current column default invocation. When ``False``, the
raw parameters of the statement are returned including the
naming convention used in the case of multi-valued INSERT.
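E.g., a minimal sketch of a Python-side default function; the column
names used here are hypothetical::

    def derive_fullname(context):
        params = context.get_current_parameters()
        return params["firstname"] + " " + params["lastname"]

    fullname = Column(String, default=derive_fullname)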
.. versionadded:: 1.2 added
:meth:`.DefaultExecutionContext.get_current_parameters`
which provides more functionality over the existing
:attr:`.DefaultExecutionContext.current_parameters`
attribute.
.. seealso::
:attr:`.DefaultExecutionContext.current_parameters`
:ref:`context_default_functions`
"""
try:
parameters = self.current_parameters
column = self.current_column
except AttributeError:
raise exc.InvalidRequestError(
"get_current_parameters() can only be invoked in the "
"context of a Python side column default function"
)
if (
isolate_multiinsert_groups
and self.isinsert
and self.compiled.statement._has_multi_parameters
):
if column._is_multiparam_column:
index = column.index + 1
d = {column.original.key: parameters[column.key]}
else:
d = {column.key: parameters[column.key]}
index = 0
keys = self.compiled.statement.parameters[0].keys()
d.update(
(key, parameters["%s_m%d" % (key, index)]) for key in keys
)
return d
else:
return parameters
def get_insert_default(self, column):
if column.default is None:
return None
else:
return self._exec_default(column, column.default, column.type)
def get_update_default(self, column):
if column.onupdate is None:
return None
else:
return self._exec_default(column, column.onupdate, column.type)
def _process_executemany_defaults(self):
key_getter = self.compiled._key_getters_for_crud_column[2]
scalar_defaults = {}
insert_prefetch = self.compiled.insert_prefetch
update_prefetch = self.compiled.update_prefetch
# pre-determine scalar Python-side defaults
# to avoid many calls of get_insert_default()/
# get_update_default()
for c in insert_prefetch:
if c.default and c.default.is_scalar:
scalar_defaults[c] = c.default.arg
for c in update_prefetch:
if c.onupdate and c.onupdate.is_scalar:
scalar_defaults[c] = c.onupdate.arg
for param in self.compiled_parameters:
self.current_parameters = param
for c in insert_prefetch:
if c in scalar_defaults:
val = scalar_defaults[c]
else:
val = self.get_insert_default(c)
if val is not None:
param[key_getter(c)] = val
for c in update_prefetch:
if c in scalar_defaults:
val = scalar_defaults[c]
else:
val = self.get_update_default(c)
if val is not None:
param[key_getter(c)] = val
del self.current_parameters
def _process_executesingle_defaults(self):
key_getter = self.compiled._key_getters_for_crud_column[2]
self.current_parameters = (
compiled_parameters
) = self.compiled_parameters[0]
for c in self.compiled.insert_prefetch:
if c.default and not c.default.is_sequence and c.default.is_scalar:
val = c.default.arg
else:
val = self.get_insert_default(c)
if val is not None:
compiled_parameters[key_getter(c)] = val
for c in self.compiled.update_prefetch:
val = self.get_update_default(c)
if val is not None:
compiled_parameters[key_getter(c)] = val
del self.current_parameters
DefaultDialect.execution_ctx_cls = DefaultExecutionContext
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/engine/base.py
|
# engine/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import with_statement
import contextlib
import sys
from .interfaces import Connectable
from .interfaces import ExceptionContext
from .util import _distill_params
from .. import exc
from .. import interfaces
from .. import log
from .. import util
from ..sql import schema
from ..sql import util as sql_util
"""Defines :class:`_engine.Connection` and :class:`_engine.Engine`.
"""
class Connection(Connectable):
"""Provides high-level functionality for a wrapped DB-API connection.
Provides execution support for string-based SQL statements as well as
:class:`_expression.ClauseElement`, :class:`.Compiled` and
:class:`.DefaultGenerator`
objects. Provides a :meth:`begin` method to return :class:`.Transaction`
objects.
The Connection object is **not** thread-safe. While a Connection can be
shared among threads using properly synchronized access, it is still
possible that the underlying DBAPI connection may not support shared
access between threads. Check the DBAPI documentation for details.
The Connection object represents a single dbapi connection checked out
from the connection pool. In this state, the connection pool has no effect
upon the connection, including its expiration or timeout state. For the
connection pool to properly manage connections, connections should be
returned to the connection pool (i.e. ``connection.close()``) whenever the
connection is not in use.
.. index::
single: thread safety; Connection
"""
schema_for_object = schema._schema_getter(None)
"""Return the ".schema" attribute for an object.
Used for :class:`_schema.Table`, :class:`.Sequence` and similar objects,
and takes into account
the :paramref:`.Connection.execution_options.schema_translate_map`
parameter.
.. versionadded:: 1.1
.. seealso::
:ref:`schema_translating`
"""
def __init__(
self,
engine,
connection=None,
close_with_result=False,
_branch_from=None,
_execution_options=None,
_dispatch=None,
_has_events=None,
):
"""Construct a new Connection.
The constructor here is not public and is only called by an
:class:`_engine.Engine`. See the :meth:`_engine.Engine.connect` and
:meth:`_engine.Engine.contextual_connect` methods.
"""
self.engine = engine
self.dialect = engine.dialect
self.__branch_from = _branch_from
self.__branch = _branch_from is not None
if _branch_from:
self.__connection = connection
self._execution_options = _execution_options
self._echo = _branch_from._echo
self.should_close_with_result = False
self.dispatch = _dispatch
self._has_events = _branch_from._has_events
self.schema_for_object = _branch_from.schema_for_object
else:
self.__connection = (
connection
if connection is not None
else engine.raw_connection()
)
self.__transaction = None
self.__savepoint_seq = 0
self.should_close_with_result = close_with_result
self.__invalid = False
self.__can_reconnect = True
self._echo = self.engine._should_log_info()
if _has_events is None:
# if _has_events is sent explicitly as False,
# then don't join the dispatch of the engine; we don't
# want to handle any of the engine's events in that case.
self.dispatch = self.dispatch._join(engine.dispatch)
self._has_events = _has_events or (
_has_events is None and engine._has_events
)
assert not _execution_options
self._execution_options = engine._execution_options
if self._has_events or self.engine._has_events:
self.dispatch.engine_connect(self, self.__branch)
def _branch(self):
"""Return a new Connection which references this Connection's
engine and connection; but does not have close_with_result enabled,
and also whose close() method does nothing.
The Core uses this very sparingly, only in the case of
custom SQL default functions that are to be INSERTed as the
primary key of a row where we need to get the value back, so we have
to invoke it distinctly - this is a very uncommon case.
Userland code accesses _branch() when the connect() or
contextual_connect() methods are called. The branched connection
acts as much as possible like the parent, except that it stays
connected when a close() event occurs.
"""
if self.__branch_from:
return self.__branch_from._branch()
else:
return self.engine._connection_cls(
self.engine,
self.__connection,
_branch_from=self,
_execution_options=self._execution_options,
_has_events=self._has_events,
_dispatch=self.dispatch,
)
@property
def _root(self):
"""return the 'root' connection.
Returns 'self' if this connection is not a branch, else
returns the root connection from which we ultimately branched.
"""
if self.__branch_from:
return self.__branch_from
else:
return self
def _clone(self):
"""Create a shallow copy of this Connection.
"""
c = self.__class__.__new__(self.__class__)
c.__dict__ = self.__dict__.copy()
return c
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.close()
def execution_options(self, **opt):
r""" Set non-SQL options for the connection which take effect
during execution.
The method returns a copy of this :class:`_engine.Connection`
which references
the same underlying DBAPI connection, but also defines the given
execution options which will take effect for a call to
:meth:`execute`. As the new :class:`_engine.Connection`
references the same
underlying resource, it's usually a good idea to ensure that the copies
will be discarded immediately, which is implicit if used as in::
result = connection.execution_options(stream_results=True).\
execute(stmt)
Note that any key/value can be passed to
:meth:`_engine.Connection.execution_options`,
and it will be stored in the
``_execution_options`` dictionary of the :class:`_engine.Connection`.
It
is suitable for usage by end-user schemes to communicate with
event listeners, for example.
The keywords that are currently recognized by SQLAlchemy itself
include all those listed under :meth:`.Executable.execution_options`,
as well as others that are specific to :class:`_engine.Connection`.
:param autocommit: Available on: Connection, statement.
When True, a COMMIT will be invoked after execution
when executed in 'autocommit' mode, i.e. when an explicit
transaction is not begun on the connection. Note that DBAPI
connections by default are always in a transaction - SQLAlchemy uses
rules applied to different kinds of statements to determine if
COMMIT will be invoked in order to provide its "autocommit" feature.
Typically, all INSERT/UPDATE/DELETE statements as well as
CREATE/DROP statements have autocommit behavior enabled; SELECT
constructs do not. Use this option when invoking a SELECT or other
specific SQL construct where COMMIT is desired (typically when
calling stored procedures and such), and an explicit
transaction is not in progress.
:param compiled_cache: Available on: Connection.
A dictionary where :class:`.Compiled` objects
will be cached when the :class:`_engine.Connection`
compiles a clause
expression into a :class:`.Compiled` object.
It is the user's responsibility to
manage the size of this dictionary, which will have keys
corresponding to the dialect, clause element, the column
names within the VALUES or SET clause of an INSERT or UPDATE,
as well as the "batch" mode for an INSERT or UPDATE statement.
The format of this dictionary is not guaranteed to stay the
same in future releases.
Note that the ORM makes use of its own "compiled" caches for
some operations, including flush operations. The caching
used by the ORM internally supersedes a cache dictionary
specified here.
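E.g., a plain dictionary suffices as a cache::

    cache = {}
    conn = connection.execution_options(compiled_cache=cache)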
:param isolation_level: Available on: :class:`_engine.Connection`.
Set the transaction isolation level for the lifespan of this
:class:`_engine.Connection` object.
Valid values include those string
values accepted by the :paramref:`_sa.create_engine.isolation_level`
parameter passed to :func:`_sa.create_engine`. These levels are
semi-database specific; see individual dialect documentation for
valid levels.
The isolation level option applies the isolation level by emitting
statements on the DBAPI connection, and **necessarily affects the
original Connection object overall**, not just the copy that is
returned by the call to :meth:`_engine.Connection.execution_options`
method. The isolation level will remain at the given setting until
the DBAPI connection itself is returned to the connection pool, i.e.
the :meth:`_engine.Connection.close` method on the original
:class:`_engine.Connection` is called,
where an event handler will emit
additional statements on the DBAPI connection in order to revert the
isolation level change.
.. warning:: The ``isolation_level`` execution option should
**not** be used when a transaction is already established, that
is, the :meth:`_engine.Connection.begin`
method or similar has been
called. A database cannot change the isolation level on a
transaction in progress, and different DBAPIs and/or
SQLAlchemy dialects may implicitly roll back or commit
the transaction, or not affect the connection at all.
.. note:: The ``isolation_level`` execution option is implicitly
reset if the :class:`_engine.Connection` is invalidated, e.g. via
the :meth:`_engine.Connection.invalidate` method, or if a
disconnection error occurs. The new connection produced after
the invalidation will not have the isolation level re-applied
to it automatically.
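E.g., selecting a level supported by the backend in use::

    conn = connection.execution_options(
        isolation_level="READ COMMITTED"
    )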
.. seealso::
:paramref:`_sa.create_engine.isolation_level`
- set per :class:`_engine.Engine` isolation level
:meth:`_engine.Connection.get_isolation_level`
- view current level
:ref:`SQLite Transaction Isolation <sqlite_isolation_level>`
:ref:`PostgreSQL Transaction Isolation <postgresql_isolation_level>`
:ref:`MySQL Transaction Isolation <mysql_isolation_level>`
:ref:`SQL Server Transaction Isolation <mssql_isolation_level>`
:ref:`session_transaction_isolation` - for the ORM
:param no_parameters: When ``True``, if the final parameter
list or dictionary is totally empty, will invoke the
statement on the cursor as ``cursor.execute(statement)``,
not passing the parameter collection at all.
Some DBAPIs such as psycopg2 and mysql-python consider
percent signs as significant only when parameters are
present; this option allows code to generate SQL
containing percent signs (and possibly other characters)
that is neutral regarding whether it's executed by the DBAPI
or piped into a script that's later invoked by
command line tools.
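E.g., a statement containing a literal percent sign may then be
passed through without parameter interpolation::

    conn.execution_options(no_parameters=True).execute(
        "SELECT 'a string with a % sign'"
    )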
:param stream_results: Available on: Connection, statement.
Indicate to the dialect that results should be
"streamed" and not pre-buffered, if possible. This is a limitation
of many DBAPIs. The flag is currently understood only by the
psycopg2, mysqldb and pymysql dialects.
:param schema_translate_map: Available on: Connection, Engine.
A dictionary mapping schema names to schema names, that will be
applied to the :paramref:`_schema.Table.schema` element of each
:class:`_schema.Table`
encountered when SQL or DDL expression elements
are compiled into strings; the resulting schema name will be
converted based on presence in the map of the original name.
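E.g., a map that renames one schema and assigns a schema to
:class:`_schema.Table` objects that have none; the names here are
illustrative::

    conn = connection.execution_options(
        schema_translate_map={
            None: "user_schema_one",
            "schema_a": "schema_b",
        }
    )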
.. versionadded:: 1.1
.. seealso::
:ref:`schema_translating`
.. seealso::
:meth:`_engine.Engine.execution_options`
:meth:`.Executable.execution_options`
:meth:`_engine.Connection.get_execution_options`
""" # noqa
c = self._clone()
c._execution_options = c._execution_options.union(opt)
if self._has_events or self.engine._has_events:
self.dispatch.set_connection_execution_options(c, opt)
self.dialect.set_connection_execution_options(c, opt)
return c
def get_execution_options(self):
""" Get the non-SQL options which will take effect during execution.
.. versionadded:: 1.3
.. seealso::
:meth:`_engine.Connection.execution_options`
"""
return self._execution_options
@property
def closed(self):
"""Return True if this connection is closed."""
return (
"_Connection__connection" not in self.__dict__
and not self.__can_reconnect
)
@property
def invalidated(self):
"""Return True if this connection was invalidated."""
return self._root.__invalid
@property
def connection(self):
"""The underlying DB-API connection managed by this Connection.
.. seealso::
:ref:`dbapi_connections`
"""
try:
return self.__connection
except AttributeError:
# escape "except AttributeError" before revalidating
# to prevent misleading stacktraces in Py3K
pass
try:
return self._revalidate_connection()
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
def get_isolation_level(self):
"""Return the current isolation level assigned to this
:class:`_engine.Connection`.
This will typically be the default isolation level as determined
by the dialect, unless the
:paramref:`.Connection.execution_options.isolation_level`
feature has been used to alter the isolation level on a
per-:class:`_engine.Connection` basis.
This attribute will typically perform a live SQL operation in order
to procure the current isolation level, so the value returned is the
actual level on the underlying DBAPI connection regardless of how
this state was set. Compare to the
:attr:`_engine.Connection.default_isolation_level` accessor
which returns the dialect-level setting without performing a SQL
query.
.. versionadded:: 0.9.9
.. seealso::
:attr:`_engine.Connection.default_isolation_level`
- view default level
:paramref:`_sa.create_engine.isolation_level`
- set per :class:`_engine.Engine` isolation level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`_engine.Connection` isolation level
"""
try:
return self.dialect.get_isolation_level(self.connection)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
@property
def default_isolation_level(self):
"""The default isolation level assigned to this
:class:`_engine.Connection`.
This is the isolation level setting that the
:class:`_engine.Connection`
has when first procured via the :meth:`_engine.Engine.connect` method.
This level stays in place until the
:paramref:`.Connection.execution_options.isolation_level` is used
to change the setting on a per-:class:`_engine.Connection` basis.
Unlike :meth:`_engine.Connection.get_isolation_level`,
this attribute is set
ahead of time from the first connection procured by the dialect,
so no SQL query is emitted when this accessor is called.
.. versionadded:: 0.9.9
.. seealso::
:meth:`_engine.Connection.get_isolation_level`
- view current level
:paramref:`_sa.create_engine.isolation_level`
- set per :class:`_engine.Engine` isolation level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`_engine.Connection` isolation level
"""
return self.dialect.default_isolation_level
def _revalidate_connection(self):
if self.__branch_from:
return self.__branch_from._revalidate_connection()
if self.__can_reconnect and self.__invalid:
if self.__transaction is not None:
raise exc.InvalidRequestError(
"Can't reconnect until invalid "
"transaction is rolled back"
)
self.__connection = self.engine.raw_connection(_connection=self)
self.__invalid = False
return self.__connection
raise exc.ResourceClosedError("This Connection is closed")
@property
def _connection_is_valid(self):
# use getattr() for is_valid to support exceptions raised in
# dialect initializer, where the connection is not wrapped in
# _ConnectionFairy
return getattr(self.__connection, "is_valid", False)
@property
def _still_open_and_connection_is_valid(self):
return (
not self.closed
and not self.invalidated
and getattr(self.__connection, "is_valid", False)
)
@property
def info(self):
"""Info dictionary associated with the underlying DBAPI connection
referred to by this :class:`_engine.Connection`, allowing user-defined
data to be associated with the connection.
The data here will follow along with the DBAPI connection including
after it is returned to the connection pool and used again
in subsequent instances of :class:`_engine.Connection`.
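E.g., storing an arbitrary key (the key name here is illustrative)::

    conn.info["request_id"] = "some-id"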
"""
return self.connection.info
def connect(self):
"""Returns a branched version of this :class:`_engine.Connection`.
The :meth:`_engine.Connection.close` method on the returned
:class:`_engine.Connection` can be called and this
:class:`_engine.Connection` will remain open.
This method provides usage symmetry with
:meth:`_engine.Engine.connect`, including for usage
with context managers.
"""
return self._branch()
def _contextual_connect(self, **kwargs):
return self._branch()
def invalidate(self, exception=None):
"""Invalidate the underlying DBAPI connection associated with
this :class:`_engine.Connection`.
The underlying DBAPI connection is literally closed (if
possible), and is discarded. Its source connection pool will
typically lazily create a new connection to replace it.
Upon the next use (where "use" typically means using the
:meth:`_engine.Connection.execute` method or similar),
this :class:`_engine.Connection` will attempt to
procure a new DBAPI connection using the services of the
:class:`_pool.Pool` as a source of connectivity (e.g.
a "reconnection").
If a transaction was in progress (e.g. the
:meth:`_engine.Connection.begin` method has been called) when
the :meth:`_engine.Connection.invalidate` method is called, at the DBAPI
level all state associated with this transaction is lost, as
the DBAPI connection is closed. The :class:`_engine.Connection`
will not allow a reconnection to proceed until the
:class:`.Transaction` object is ended, by calling the
:meth:`.Transaction.rollback` method; until that point, any attempt at
continuing to use the :class:`_engine.Connection` will raise an
:class:`~sqlalchemy.exc.InvalidRequestError`.
This is to prevent applications from accidentally
continuing ongoing transactional operations despite the
fact that the transaction has been lost due to an
invalidation.
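E.g., a sketch of explicit invalidation after a failure that the
dialect did not itself flag as a disconnect, assuming
``from sqlalchemy import exc``::

    try:
        conn.execute("SELECT 1")
    except exc.DBAPIError as err:
        if not err.connection_invalidated:
            conn.invalidate(err)
        raise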
The :meth:`_engine.Connection.invalidate` method,
just like auto-invalidation,
will at the connection pool level invoke the
:meth:`_events.PoolEvents.invalidate` event.
.. seealso::
:ref:`pool_connection_invalidation`
"""
if self.invalidated:
return
if self.closed:
raise exc.ResourceClosedError("This Connection is closed")
if self._root._connection_is_valid:
self._root.__connection.invalidate(exception)
del self._root.__connection
self._root.__invalid = True
def detach(self):
"""Detach the underlying DB-API connection from its connection pool.
E.g.::
with engine.connect() as conn:
conn.detach()
conn.execute("SET search_path TO schema1, schema2")
# work with connection
# connection is fully closed (since we used "with:", can
# also call .close())
This :class:`_engine.Connection` instance will remain usable.
When closed
(or exited from a context manager context as above),
the DB-API connection will be literally closed and not
returned to its originating pool.
This method can be used to insulate the rest of an application
from a modified state on a connection (such as a transaction
isolation level or similar).
"""
self.__connection.detach()
def begin(self):
"""Begin a transaction and return a transaction handle.
The returned object is an instance of :class:`.Transaction`.
This object represents the "scope" of the transaction,
which completes when either the :meth:`.Transaction.rollback`
or :meth:`.Transaction.commit` method is called.
Nested calls to :meth:`.begin` on the same :class:`_engine.Connection`
will return new :class:`.Transaction` objects that represent
an emulated transaction within the scope of the enclosing
transaction, that is::
trans = conn.begin() # outermost transaction
trans2 = conn.begin() # "nested"
trans2.commit() # does nothing
trans.commit() # actually commits
Calls to :meth:`.Transaction.commit` only have an effect
when invoked via the outermost :class:`.Transaction` object, though the
:meth:`.Transaction.rollback` method of any of the
:class:`.Transaction` objects will roll back the
transaction.
.. seealso::
:meth:`_engine.Connection.begin_nested` - use a SAVEPOINT
:meth:`_engine.Connection.begin_twophase` -
use a two phase /XID transaction
:meth:`_engine.Engine.begin` - context manager available from
:class:`_engine.Engine`
"""
if self.__branch_from:
return self.__branch_from.begin()
if self.__transaction is None:
self.__transaction = RootTransaction(self)
return self.__transaction
else:
return Transaction(self, self.__transaction)
def begin_nested(self):
"""Begin a nested transaction and return a transaction handle.
The returned object is an instance of :class:`.NestedTransaction`.
Nested transactions require SAVEPOINT support in the
underlying database. Any transaction in the hierarchy may
``commit`` and ``rollback``, however the outermost transaction
still controls the overall ``commit`` or ``rollback`` of the
transaction as a whole.
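E.g., a sketch using a hypothetical ``table``::

    with engine.connect() as conn:
        trans = conn.begin()
        conn.execute(table.insert(), {"x": 1})
        nested = conn.begin_nested()  # emits SAVEPOINT
        conn.execute(table.insert(), {"x": 2})
        nested.rollback()  # rolls back to the SAVEPOINT only
        trans.commit()  # the first INSERT remains and is committed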
.. seealso::
:meth:`_engine.Connection.begin`
:meth:`_engine.Connection.begin_twophase`
"""
if self.__branch_from:
return self.__branch_from.begin_nested()
if self.__transaction is None:
self.__transaction = RootTransaction(self)
else:
self.__transaction = NestedTransaction(self, self.__transaction)
return self.__transaction
def begin_twophase(self, xid=None):
"""Begin a two-phase or XA transaction and return a transaction
handle.
The returned object is an instance of :class:`.TwoPhaseTransaction`,
which in addition to the methods provided by
:class:`.Transaction`, also provides a
:meth:`~.TwoPhaseTransaction.prepare` method.
:param xid: the two phase transaction id. If not supplied, a
random id will be generated.
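E.g., a sketch of the full prepare/commit sequence, assuming a
backend and DBAPI with two-phase support and a hypothetical
``table``::

    trans = conn.begin_twophase()
    conn.execute(table.insert(), {"x": 1})
    trans.prepare()
    trans.commit()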
.. seealso::
:meth:`_engine.Connection.begin`
:meth:`_engine.Connection.begin_nested`
"""
if self.__branch_from:
return self.__branch_from.begin_twophase(xid=xid)
if self.__transaction is not None:
raise exc.InvalidRequestError(
"Cannot start a two phase transaction when a transaction "
"is already in progress."
)
if xid is None:
xid = self.engine.dialect.create_xid()
self.__transaction = TwoPhaseTransaction(self, xid)
return self.__transaction
def recover_twophase(self):
return self.engine.dialect.do_recover_twophase(self)
def rollback_prepared(self, xid, recover=False):
self.engine.dialect.do_rollback_twophase(self, xid, recover=recover)
def commit_prepared(self, xid, recover=False):
self.engine.dialect.do_commit_twophase(self, xid, recover=recover)
def in_transaction(self):
"""Return True if a transaction is in progress."""
return self._root.__transaction is not None
def _begin_impl(self, transaction):
assert not self.__branch_from
if self._echo:
self.engine.logger.info("BEGIN (implicit)")
if self._has_events or self.engine._has_events:
self.dispatch.begin(self)
try:
self.engine.dialect.do_begin(self.connection)
if self.connection._reset_agent is None:
self.connection._reset_agent = transaction
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
def _rollback_impl(self):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.rollback(self)
if self._still_open_and_connection_is_valid:
if self._echo:
self.engine.logger.info("ROLLBACK")
try:
self.engine.dialect.do_rollback(self.connection)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
finally:
if (
not self.__invalid
and self.connection._reset_agent is self.__transaction
):
self.connection._reset_agent = None
self.__transaction = None
else:
self.__transaction = None
def _commit_impl(self, autocommit=False):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.commit(self)
if self._echo:
self.engine.logger.info("COMMIT")
try:
self.engine.dialect.do_commit(self.connection)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
finally:
if (
not self.__invalid
and self.connection._reset_agent is self.__transaction
):
self.connection._reset_agent = None
self.__transaction = None
def _savepoint_impl(self, name=None):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.savepoint(self, name)
if name is None:
self.__savepoint_seq += 1
name = "sa_savepoint_%s" % self.__savepoint_seq
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_savepoint(self, name)
return name
def _discard_transaction(self, trans):
if trans is self.__transaction:
if trans._parent is trans:
self.__transaction = None
else:
self.__transaction = trans._parent
if self._still_open_and_connection_is_valid:
if self.__connection._reset_agent is trans:
self.__connection._reset_agent = None
def _rollback_to_savepoint_impl(self, name, context):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.rollback_savepoint(self, name, context)
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_rollback_to_savepoint(self, name)
self.__transaction = context
def _release_savepoint_impl(self, name, context):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.release_savepoint(self, name, context)
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_release_savepoint(self, name)
self.__transaction = context
def _begin_twophase_impl(self, transaction):
assert not self.__branch_from
if self._echo:
self.engine.logger.info("BEGIN TWOPHASE (implicit)")
if self._has_events or self.engine._has_events:
self.dispatch.begin_twophase(self, transaction.xid)
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_begin_twophase(self, transaction.xid)
if self.connection._reset_agent is None:
self.connection._reset_agent = transaction
def _prepare_twophase_impl(self, xid):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.prepare_twophase(self, xid)
if self._still_open_and_connection_is_valid:
assert isinstance(self.__transaction, TwoPhaseTransaction)
self.engine.dialect.do_prepare_twophase(self, xid)
def _rollback_twophase_impl(self, xid, is_prepared):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.rollback_twophase(self, xid, is_prepared)
if self._still_open_and_connection_is_valid:
assert isinstance(self.__transaction, TwoPhaseTransaction)
try:
self.engine.dialect.do_rollback_twophase(
self, xid, is_prepared
)
finally:
if self.connection._reset_agent is self.__transaction:
self.connection._reset_agent = None
self.__transaction = None
else:
self.__transaction = None
def _commit_twophase_impl(self, xid, is_prepared):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.commit_twophase(self, xid, is_prepared)
if self._still_open_and_connection_is_valid:
assert isinstance(self.__transaction, TwoPhaseTransaction)
try:
self.engine.dialect.do_commit_twophase(self, xid, is_prepared)
finally:
if self.connection._reset_agent is self.__transaction:
self.connection._reset_agent = None
self.__transaction = None
else:
self.__transaction = None
def _autorollback(self):
if not self._root.in_transaction():
self._root._rollback_impl()
def close(self):
"""Close this :class:`_engine.Connection`.
This results in a release of the underlying database
resources, that is, the DBAPI connection referenced
internally. The DBAPI connection is typically restored
back to the connection-holding :class:`_pool.Pool` referenced
by the :class:`_engine.Engine` that produced this
:class:`_engine.Connection`. Any transactional state present on
the DBAPI connection is also unconditionally released via
the DBAPI connection's ``rollback()`` method, regardless
of any :class:`.Transaction` object that may be
outstanding with regards to this :class:`_engine.Connection`.
After :meth:`_engine.Connection.close` is called, the
:class:`_engine.Connection` is permanently in a closed state,
and will allow no further operations.
"""
if self.__branch_from:
try:
del self.__connection
except AttributeError:
pass
finally:
self.__can_reconnect = False
return
try:
conn = self.__connection
except AttributeError:
pass
else:
conn.close()
if conn._reset_agent is self.__transaction:
conn._reset_agent = None
# the close() process can end up invalidating us,
# as the pool will call our transaction as the "reset_agent"
# for rollback(), which can then cause an invalidation
if not self.__invalid:
del self.__connection
self.__can_reconnect = False
self.__transaction = None
def scalar(self, object_, *multiparams, **params):
"""Executes and returns the first column of the first row.
The underlying result/cursor is closed after execution.
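E.g., with a hypothetical table name::

    user_count = conn.scalar("SELECT count(*) FROM user_account")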
"""
return self.execute(object_, *multiparams, **params).scalar()
def execute(self, object_, *multiparams, **params):
r"""Executes a SQL statement construct and returns a
:class:`_engine.ResultProxy`.
:param object: The statement to be executed. May be
one of:
* a plain string
* any :class:`_expression.ClauseElement` construct that is also
a subclass of :class:`.Executable`, such as a
:func:`_expression.select` construct
* a :class:`.FunctionElement`, such as that generated
by :data:`.func`, will be automatically wrapped in
a SELECT statement, which is then executed.
* a :class:`.DDLElement` object
* a :class:`.DefaultGenerator` object
* a :class:`.Compiled` object
:param \*multiparams/\**params: represent bound parameter
values to be used in the execution. Typically,
the format is either a collection of one or more
dictionaries passed to \*multiparams::
conn.execute(
table.insert(),
{"id":1, "value":"v1"},
{"id":2, "value":"v2"}
)
...or individual key/values interpreted by \**params::
conn.execute(
table.insert(), id=1, value="v1"
)
In the case that a plain SQL string is passed, and the underlying
DBAPI accepts positional bind parameters, a collection of tuples
or individual values in \*multiparams may be passed::
conn.execute(
"INSERT INTO table (id, value) VALUES (?, ?)",
(1, "v1"), (2, "v2")
)
conn.execute(
"INSERT INTO table (id, value) VALUES (?, ?)",
1, "v1"
)
Note above, the usage of a question mark "?" or other
symbol is contingent upon the "paramstyle" accepted by the DBAPI
in use, which may be any of "qmark", "named", "pyformat", "format",
"numeric". See `pep-249 <http://www.python.org/dev/peps/pep-0249/>`_
for details on paramstyle.
To execute a textual SQL statement which uses bound parameters in a
DBAPI-agnostic way, use the :func:`_expression.text` construct.
"""
if isinstance(object_, util.string_types[0]):
return self._execute_text(object_, multiparams, params)
try:
meth = object_._execute_on_connection
except AttributeError as err:
util.raise_(
exc.ObjectNotExecutableError(object_), replace_context=err
)
else:
return meth(self, multiparams, params)
def _execute_function(self, func, multiparams, params):
"""Execute a sql.FunctionElement object."""
return self._execute_clauseelement(func.select(), multiparams, params)
def _execute_default(self, default, multiparams, params):
"""Execute a schema.ColumnDefault object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
default, multiparams, params = fn(
self, default, multiparams, params
)
try:
try:
conn = self.__connection
except AttributeError:
# escape "except AttributeError" before revalidating
# to prevent misleading stacktraces in Py3K
conn = None
if conn is None:
conn = self._revalidate_connection()
dialect = self.dialect
ctx = dialect.execution_ctx_cls._init_default(dialect, self, conn)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
ret = ctx._exec_default(None, default, None)
if self.should_close_with_result:
self.close()
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(
self, default, multiparams, params, ret
)
return ret
def _execute_ddl(self, ddl, multiparams, params):
"""Execute a schema.DDL object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
ddl, multiparams, params = fn(self, ddl, multiparams, params)
dialect = self.dialect
compiled = ddl.compile(
dialect=dialect,
schema_translate_map=self.schema_for_object
if not self.schema_for_object.is_default
else None,
)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_ddl,
compiled,
None,
compiled,
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(self, ddl, multiparams, params, ret)
return ret
def _execute_clauseelement(self, elem, multiparams, params):
"""Execute a sql.ClauseElement object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
elem, multiparams, params = fn(self, elem, multiparams, params)
distilled_params = _distill_params(multiparams, params)
if distilled_params:
# ensure we don't retain a link to the view object for keys()
# which links to the values, which we don't want to cache
keys = list(distilled_params[0].keys())
else:
keys = []
dialect = self.dialect
if "compiled_cache" in self._execution_options:
key = (
dialect,
elem,
tuple(sorted(keys)),
self.schema_for_object.hash_key,
len(distilled_params) > 1,
)
compiled_sql = self._execution_options["compiled_cache"].get(key)
if compiled_sql is None:
compiled_sql = elem.compile(
dialect=dialect,
column_keys=keys,
inline=len(distilled_params) > 1,
schema_translate_map=self.schema_for_object
if not self.schema_for_object.is_default
else None,
)
self._execution_options["compiled_cache"][key] = compiled_sql
else:
compiled_sql = elem.compile(
dialect=dialect,
column_keys=keys,
inline=len(distilled_params) > 1,
schema_translate_map=self.schema_for_object
if not self.schema_for_object.is_default
else None,
)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_compiled,
compiled_sql,
distilled_params,
compiled_sql,
distilled_params,
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(self, elem, multiparams, params, ret)
return ret
def _execute_compiled(self, compiled, multiparams, params):
"""Execute a sql.Compiled object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
compiled, multiparams, params = fn(
self, compiled, multiparams, params
)
dialect = self.dialect
parameters = _distill_params(multiparams, params)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_compiled,
compiled,
parameters,
compiled,
parameters,
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(
self, compiled, multiparams, params, ret
)
return ret
def _execute_text(self, statement, multiparams, params):
"""Execute a string SQL statement."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
statement, multiparams, params = fn(
self, statement, multiparams, params
)
dialect = self.dialect
parameters = _distill_params(multiparams, params)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_statement,
statement,
parameters,
statement,
parameters,
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(
self, statement, multiparams, params, ret
)
return ret
def _execute_context(
self, dialect, constructor, statement, parameters, *args
):
"""Create an :class:`.ExecutionContext` and execute, returning
a :class:`_engine.ResultProxy`."""
try:
try:
conn = self.__connection
except AttributeError:
# escape "except AttributeError" before revalidating
# to prevent misleading stacktraces in Py3K
conn = None
if conn is None:
conn = self._revalidate_connection()
context = constructor(dialect, self, conn, *args)
except BaseException as e:
self._handle_dbapi_exception(
e, util.text_type(statement), parameters, None, None
)
if context.compiled:
context.pre_exec()
cursor, statement, parameters = (
context.cursor,
context.statement,
context.parameters,
)
if not context.executemany:
parameters = parameters[0]
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_cursor_execute:
statement, parameters = fn(
self,
cursor,
statement,
parameters,
context,
context.executemany,
)
if self._echo:
self.engine.logger.info(statement)
if not self.engine.hide_parameters:
self.engine.logger.info(
"%r",
sql_util._repr_params(
parameters, batches=10, ismulti=context.executemany
),
)
else:
self.engine.logger.info(
"[SQL parameters hidden due to hide_parameters=True]"
)
evt_handled = False
try:
if context.executemany:
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_executemany:
if fn(cursor, statement, parameters, context):
evt_handled = True
break
if not evt_handled:
self.dialect.do_executemany(
cursor, statement, parameters, context
)
elif not parameters and context.no_parameters:
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_execute_no_params:
if fn(cursor, statement, context):
evt_handled = True
break
if not evt_handled:
self.dialect.do_execute_no_params(
cursor, statement, context
)
else:
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_execute:
if fn(cursor, statement, parameters, context):
evt_handled = True
break
if not evt_handled:
self.dialect.do_execute(
cursor, statement, parameters, context
)
if self._has_events or self.engine._has_events:
self.dispatch.after_cursor_execute(
self,
cursor,
statement,
parameters,
context,
context.executemany,
)
if context.compiled:
context.post_exec()
if context.is_crud or context.is_text:
result = context._setup_crud_result_proxy()
else:
result = context.get_result_proxy()
if result._metadata is None:
result._soft_close()
if context.should_autocommit and self._root.__transaction is None:
self._root._commit_impl(autocommit=True)
# for "connectionless" execution, we have to close this
# Connection after the statement is complete.
if self.should_close_with_result:
# ResultProxy already exhausted rows / has no rows.
# close us now
if result._soft_closed:
self.close()
else:
# ResultProxy will close this Connection when no more
# rows to fetch.
result._autoclose_connection = True
except BaseException as e:
self._handle_dbapi_exception(
e, statement, parameters, cursor, context
)
return result
def _cursor_execute(self, cursor, statement, parameters, context=None):
"""Execute a statement + params on the given cursor.
Adds appropriate logging and exception handling.
This method is used by DefaultDialect for special-case
executions, such as for sequences and column defaults.
The path of statement execution in the majority of cases
terminates at _execute_context().
"""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_cursor_execute:
statement, parameters = fn(
self, cursor, statement, parameters, context, False
)
if self._echo:
self.engine.logger.info(statement)
self.engine.logger.info("%r", parameters)
try:
for fn in (
()
if not self.dialect._has_events
else self.dialect.dispatch.do_execute
):
if fn(cursor, statement, parameters, context):
break
else:
self.dialect.do_execute(cursor, statement, parameters, context)
except BaseException as e:
self._handle_dbapi_exception(
e, statement, parameters, cursor, context
)
if self._has_events or self.engine._has_events:
self.dispatch.after_cursor_execute(
self, cursor, statement, parameters, context, False
)
def _safe_close_cursor(self, cursor):
"""Close the given cursor, catching exceptions
and turning into log warnings.
"""
try:
cursor.close()
except Exception:
# log the error through the connection pool's logger.
self.engine.pool.logger.error(
"Error closing cursor", exc_info=True
)
_reentrant_error = False
_is_disconnect = False
def _handle_dbapi_exception(
self, e, statement, parameters, cursor, context
):
exc_info = sys.exc_info()
if context and context.exception is None:
context.exception = e
is_exit_exception = not isinstance(e, Exception)
if not self._is_disconnect:
self._is_disconnect = (
isinstance(e, self.dialect.dbapi.Error)
and not self.closed
and self.dialect.is_disconnect(
e,
self.__connection if not self.invalidated else None,
cursor,
)
) or (is_exit_exception and not self.closed)
if context:
context.is_disconnect = self._is_disconnect
invalidate_pool_on_disconnect = not is_exit_exception
if self._reentrant_error:
util.raise_(
exc.DBAPIError.instance(
statement,
parameters,
e,
self.dialect.dbapi.Error,
hide_parameters=self.engine.hide_parameters,
dialect=self.dialect,
ismulti=context.executemany
if context is not None
else None,
),
with_traceback=exc_info[2],
from_=e,
)
self._reentrant_error = True
try:
# non-DBAPI error - if we already got a context,
# or there's no string statement, don't wrap it
should_wrap = isinstance(e, self.dialect.dbapi.Error) or (
statement is not None
and context is None
and not is_exit_exception
)
if should_wrap:
sqlalchemy_exception = exc.DBAPIError.instance(
statement,
parameters,
e,
self.dialect.dbapi.Error,
hide_parameters=self.engine.hide_parameters,
connection_invalidated=self._is_disconnect,
dialect=self.dialect,
ismulti=context.executemany
if context is not None
else None,
)
else:
sqlalchemy_exception = None
newraise = None
if (
self._has_events or self.engine._has_events
) and not self._execution_options.get(
"skip_user_error_events", False
):
# legacy dbapi_error event
if should_wrap and context:
self.dispatch.dbapi_error(
self, cursor, statement, parameters, context, e
)
# new handle_error event
ctx = ExceptionContextImpl(
e,
sqlalchemy_exception,
self.engine,
self,
cursor,
statement,
parameters,
context,
self._is_disconnect,
invalidate_pool_on_disconnect,
)
for fn in self.dispatch.handle_error:
try:
# handler returns an exception;
# call next handler in a chain
per_fn = fn(ctx)
if per_fn is not None:
ctx.chained_exception = newraise = per_fn
except Exception as _raised:
# handler raises an exception - stop processing
newraise = _raised
break
if self._is_disconnect != ctx.is_disconnect:
self._is_disconnect = ctx.is_disconnect
if sqlalchemy_exception:
sqlalchemy_exception.connection_invalidated = (
ctx.is_disconnect
)
# set up potentially user-defined value for
# invalidate pool.
invalidate_pool_on_disconnect = (
ctx.invalidate_pool_on_disconnect
)
if should_wrap and context:
context.handle_dbapi_exception(e)
if not self._is_disconnect:
if cursor:
self._safe_close_cursor(cursor)
with util.safe_reraise(warn_only=True):
self._autorollback()
if newraise:
util.raise_(newraise, with_traceback=exc_info[2], from_=e)
elif should_wrap:
util.raise_(
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
)
else:
util.raise_(exc_info[1], with_traceback=exc_info[2])
finally:
del self._reentrant_error
if self._is_disconnect:
del self._is_disconnect
if not self.invalidated:
dbapi_conn_wrapper = self.__connection
if invalidate_pool_on_disconnect:
self.engine.pool._invalidate(dbapi_conn_wrapper, e)
self.invalidate(e)
if self.should_close_with_result:
self.close()
@classmethod
def _handle_dbapi_exception_noconnection(cls, e, dialect, engine):
exc_info = sys.exc_info()
is_disconnect = dialect.is_disconnect(e, None, None)
should_wrap = isinstance(e, dialect.dbapi.Error)
if should_wrap:
sqlalchemy_exception = exc.DBAPIError.instance(
None,
None,
e,
dialect.dbapi.Error,
hide_parameters=engine.hide_parameters,
connection_invalidated=is_disconnect,
)
else:
sqlalchemy_exception = None
newraise = None
if engine._has_events:
ctx = ExceptionContextImpl(
e,
sqlalchemy_exception,
engine,
None,
None,
None,
None,
None,
is_disconnect,
True,
)
for fn in engine.dispatch.handle_error:
try:
# handler returns an exception;
# call next handler in a chain
per_fn = fn(ctx)
if per_fn is not None:
ctx.chained_exception = newraise = per_fn
except Exception as _raised:
# handler raises an exception - stop processing
newraise = _raised
break
if sqlalchemy_exception and is_disconnect != ctx.is_disconnect:
sqlalchemy_exception.connection_invalidated = (
is_disconnect
) = ctx.is_disconnect
if newraise:
util.raise_(newraise, with_traceback=exc_info[2], from_=e)
elif should_wrap:
util.raise_(
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
)
else:
util.raise_(exc_info[1], with_traceback=exc_info[2])
def transaction(self, callable_, *args, **kwargs):
r"""Execute the given function within a transaction boundary.
The function is passed this :class:`_engine.Connection`
as the first argument, followed by the given \*args and \**kwargs,
e.g.::
def do_something(conn, x, y):
conn.execute("some statement", {'x':x, 'y':y})
conn.transaction(do_something, 5, 10)
The operations inside the function are all invoked within the
context of a single :class:`.Transaction`.
Upon success, the transaction is committed. If an
exception is raised, the transaction is rolled back
before propagating the exception.
.. note::
The :meth:`.transaction` method is superseded by
the usage of the Python ``with:`` statement, which can
be used with :meth:`_engine.Connection.begin`::
with conn.begin():
conn.execute("some statement", {'x':5, 'y':10})
As well as with :meth:`_engine.Engine.begin`::
with engine.begin() as conn:
conn.execute("some statement", {'x':5, 'y':10})
.. seealso::
:meth:`_engine.Engine.begin` - engine-level transactional
context
:meth:`_engine.Engine.transaction` - engine-level version of
:meth:`_engine.Connection.transaction`
"""
trans = self.begin()
try:
ret = self.run_callable(callable_, *args, **kwargs)
trans.commit()
return ret
except:
with util.safe_reraise():
trans.rollback()
def run_callable(self, callable_, *args, **kwargs):
r"""Given a callable object or function, execute it, passing
a :class:`_engine.Connection` as the first argument.
The given \*args and \**kwargs are passed subsequent
to the :class:`_engine.Connection` argument.
This function, along with :meth:`_engine.Engine.run_callable`,
allows a function to be run with a :class:`_engine.Connection`
or :class:`_engine.Engine` object without the need to know
which one is being dealt with.
"""
return callable_(self, *args, **kwargs)
def _run_visitor(self, visitorcallable, element, **kwargs):
visitorcallable(self.dialect, self, **kwargs).traverse_single(element)
class ExceptionContextImpl(ExceptionContext):
"""Implement the :class:`.ExceptionContext` interface."""
def __init__(
self,
exception,
sqlalchemy_exception,
engine,
connection,
cursor,
statement,
parameters,
context,
is_disconnect,
invalidate_pool_on_disconnect,
):
self.engine = engine
self.connection = connection
self.sqlalchemy_exception = sqlalchemy_exception
self.original_exception = exception
self.execution_context = context
self.statement = statement
self.parameters = parameters
self.is_disconnect = is_disconnect
self.invalidate_pool_on_disconnect = invalidate_pool_on_disconnect
class Transaction(object):
"""Represent a database transaction in progress.
The :class:`.Transaction` object is procured by
calling the :meth:`_engine.Connection.begin` method of
:class:`_engine.Connection`::
from sqlalchemy import create_engine
engine = create_engine("postgresql://scott:tiger@localhost/test")
connection = engine.connect()
trans = connection.begin()
connection.execute("insert into x (a, b) values (1, 2)")
trans.commit()
The object provides :meth:`.rollback` and :meth:`.commit`
methods in order to control transaction boundaries. It
also implements a context manager interface so that
the Python ``with`` statement can be used with the
:meth:`_engine.Connection.begin` method::
with connection.begin():
connection.execute("insert into x (a, b) values (1, 2)")
The Transaction object is **not** threadsafe.
.. seealso::
:meth:`_engine.Connection.begin`
:meth:`_engine.Connection.begin_twophase`
:meth:`_engine.Connection.begin_nested`
.. index::
single: thread safety; Transaction
"""
def __init__(self, connection, parent):
self.connection = connection
self._actual_parent = parent
self.is_active = True
@property
def _parent(self):
return self._actual_parent or self
def close(self):
"""Close this :class:`.Transaction`.
If this transaction is the base transaction in a begin/commit
nesting, the transaction will be rolled back. Otherwise, the
method does nothing.
This is used to cancel a Transaction without affecting the scope of
an enclosing transaction.
"""
if self._parent.is_active and self._parent is self:
self.rollback()
self.connection._discard_transaction(self)
def rollback(self):
"""Roll back this :class:`.Transaction`.
"""
if self._parent.is_active:
self._do_rollback()
self.is_active = False
def _do_rollback(self):
self._parent.rollback()
def commit(self):
"""Commit this :class:`.Transaction`."""
if not self._parent.is_active:
raise exc.InvalidRequestError("This transaction is inactive")
self._do_commit()
self.is_active = False
def _do_commit(self):
pass
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
if type_ is None and self.is_active:
try:
self.commit()
except:
with util.safe_reraise():
self.rollback()
else:
self.rollback()
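# Example: explicit Transaction control next to the equivalent
# context-manager form (a sketch using an in-memory SQLite database
# and a throwaway table).
def _example_transaction():
    from sqlalchemy import create_engine

    engine = create_engine("sqlite://")
    conn = engine.connect()
    conn.execute("create table x (a integer, b integer)")
    trans = conn.begin()
    try:
        conn.execute("insert into x (a, b) values (1, 2)")
        trans.commit()
    except Exception:
        trans.rollback()
        raise
    # equivalent, using Transaction's context-manager support:
    with conn.begin():
        conn.execute("insert into x (a, b) values (3, 4)")
    conn.close()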
class RootTransaction(Transaction):
def __init__(self, connection):
super(RootTransaction, self).__init__(connection, None)
self.connection._begin_impl(self)
def _do_rollback(self):
if self.is_active:
self.connection._rollback_impl()
def _do_commit(self):
if self.is_active:
self.connection._commit_impl()
class NestedTransaction(Transaction):
"""Represent a 'nested', or SAVEPOINT transaction.
A new :class:`.NestedTransaction` object may be procured
using the :meth:`_engine.Connection.begin_nested` method.
The interface is the same as that of :class:`.Transaction`.
"""
def __init__(self, connection, parent):
super(NestedTransaction, self).__init__(connection, parent)
self._savepoint = self.connection._savepoint_impl()
def _do_rollback(self):
if self.is_active:
self.connection._rollback_to_savepoint_impl(
self._savepoint, self._parent
)
def _do_commit(self):
if self.is_active:
self.connection._release_savepoint_impl(
self._savepoint, self._parent
)
class TwoPhaseTransaction(Transaction):
"""Represent a two-phase transaction.
A new :class:`.TwoPhaseTransaction` object may be procured
using the :meth:`_engine.Connection.begin_twophase` method.
The interface is the same as that of :class:`.Transaction`
with the addition of the :meth:`prepare` method.
"""
def __init__(self, connection, xid):
super(TwoPhaseTransaction, self).__init__(connection, None)
self._is_prepared = False
self.xid = xid
self.connection._begin_twophase_impl(self)
def prepare(self):
"""Prepare this :class:`.TwoPhaseTransaction`.
After a PREPARE, the transaction can be committed.
"""
if not self._parent.is_active:
raise exc.InvalidRequestError("This transaction is inactive")
self.connection._prepare_twophase_impl(self.xid)
self._is_prepared = True
def _do_rollback(self):
self.connection._rollback_twophase_impl(self.xid, self._is_prepared)
def _do_commit(self):
self.connection._commit_twophase_impl(self.xid, self._is_prepared)
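# Example: the two-phase flow -- begin_twophase(), prepare(), commit()
# (a sketch only; two-phase commit needs backend support such as
# PostgreSQL, and the URL and table below are placeholder assumptions).
def _example_twophase():
    from sqlalchemy import create_engine

    engine = create_engine("postgresql://scott:tiger@localhost/test")
    conn = engine.connect()
    xa = conn.begin_twophase()
    try:
        conn.execute("insert into x (a, b) values (1, 2)")
        xa.prepare()  # after PREPARE, commit() completes phase two
        xa.commit()
    except Exception:
        xa.rollback()
        raise
    finally:
        conn.close()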
class Engine(Connectable, log.Identified):
"""
Connects a :class:`~sqlalchemy.pool.Pool` and
:class:`~sqlalchemy.engine.interfaces.Dialect` together to provide a
source of database connectivity and behavior.
An :class:`_engine.Engine` object is instantiated publicly using the
:func:`~sqlalchemy.create_engine` function.
.. seealso::
:doc:`/core/engines`
:ref:`connections_toplevel`
"""
_execution_options = util.immutabledict()
_has_events = False
_connection_cls = Connection
schema_for_object = schema._schema_getter(None)
"""Return the ".schema" attribute for an object.
Used for :class:`_schema.Table`, :class:`.Sequence` and similar objects,
and takes into account
the :paramref:`.Connection.execution_options.schema_translate_map`
parameter.
.. versionadded:: 1.1
.. seealso::
:ref:`schema_translating`
"""
def __init__(
self,
pool,
dialect,
url,
logging_name=None,
echo=None,
proxy=None,
execution_options=None,
hide_parameters=False,
):
self.pool = pool
self.url = url
self.dialect = dialect
if logging_name:
self.logging_name = logging_name
self.echo = echo
self.hide_parameters = hide_parameters
log.instance_logger(self, echoflag=echo)
if proxy:
interfaces.ConnectionProxy._adapt_listener(self, proxy)
if execution_options:
self.update_execution_options(**execution_options)
@property
def engine(self):
return self
def update_execution_options(self, **opt):
r"""Update the default execution_options dictionary
of this :class:`_engine.Engine`.
The given keys/values in \**opt are added to the
default execution options that will be used for
all connections. The initial contents of this dictionary
can be sent via the ``execution_options`` parameter
to :func:`_sa.create_engine`.
.. seealso::
:meth:`_engine.Connection.execution_options`
:meth:`_engine.Engine.execution_options`
"""
self._execution_options = self._execution_options.union(opt)
self.dispatch.set_engine_execution_options(self, opt)
self.dialect.set_engine_execution_options(self, opt)
def execution_options(self, **opt):
"""Return a new :class:`_engine.Engine` that will provide
:class:`_engine.Connection` objects with the given execution options.
The returned :class:`_engine.Engine` remains related to the original
:class:`_engine.Engine` in that it shares the same connection pool and
other state:
* The :class:`_pool.Pool` used by the new :class:`_engine.Engine`
is the
same instance. The :meth:`_engine.Engine.dispose`
method will replace
the connection pool instance for the parent engine as well
as this one.
* Event listeners are "cascaded" - meaning, the new
:class:`_engine.Engine`
inherits the events of the parent, and new events can be associated
with the new :class:`_engine.Engine` individually.
* The logging configuration and logging_name is copied from the parent
:class:`_engine.Engine`.
The intent of the :meth:`_engine.Engine.execution_options` method is
to implement "sharding" schemes where multiple :class:`_engine.Engine`
objects refer to the same connection pool, but are differentiated
by options that would be consumed by a custom event::
primary_engine = create_engine("mysql://")
shard1 = primary_engine.execution_options(shard_id="shard1")
shard2 = primary_engine.execution_options(shard_id="shard2")
Above, the ``shard1`` engine serves as a factory for
:class:`_engine.Connection`
objects that will contain the execution option
``shard_id=shard1``, and ``shard2`` will produce
:class:`_engine.Connection`
objects that contain the execution option ``shard_id=shard2``.
An event handler can consume the above execution option to perform
a schema switch or other operation, given a connection. Below
we emit a MySQL ``use`` statement to switch databases, at the same
time keeping track of which database we've established using the
:attr:`_engine.Connection.info` dictionary,
which gives us a persistent
storage space that follows the DBAPI connection::
from sqlalchemy import event
from sqlalchemy.engine import Engine
shards = {"default": "base", "shard_1": "db1", "shard_2": "db2"}
@event.listens_for(Engine, "before_cursor_execute")
def _switch_shard(conn, cursor, stmt,
params, context, executemany):
shard_id = conn._execution_options.get('shard_id', "default")
current_shard = conn.info.get("current_shard", None)
if current_shard != shard_id:
cursor.execute("use %s" % shards[shard_id])
conn.info["current_shard"] = shard_id
.. seealso::
:meth:`_engine.Connection.execution_options`
- update execution options
on a :class:`_engine.Connection` object.
:meth:`_engine.Engine.update_execution_options`
- update the execution
options for a given :class:`_engine.Engine` in place.
:meth:`_engine.Engine.get_execution_options`
"""
return OptionEngine(self, opt)
def get_execution_options(self):
""" Get the non-SQL options which will take effect during execution.
.. versionadded: 1.3
.. seealso::
:meth:`_engine.Engine.execution_options`
"""
return self._execution_options
@property
def name(self):
"""String name of the :class:`~sqlalchemy.engine.interfaces.Dialect`
in use by this :class:`Engine`."""
return self.dialect.name
@property
def driver(self):
"""Driver name of the :class:`~sqlalchemy.engine.interfaces.Dialect`
in use by this :class:`Engine`."""
return self.dialect.driver
echo = log.echo_property()
def __repr__(self):
return "Engine(%r)" % self.url
def dispose(self):
"""Dispose of the connection pool used by this :class:`_engine.Engine`
.
This has the effect of fully closing all **currently checked in**
database connections. Connections that are still checked out
will **not** be closed, however they will no longer be associated
with this :class:`_engine.Engine`,
so when they are closed individually,
eventually the :class:`_pool.Pool` which they are associated with will
be garbage collected and they will be closed out fully, if
not already closed on checkin.
A new connection pool is created immediately after the old one has
been disposed. This new pool, like all SQLAlchemy connection pools,
does not make any actual connections to the database until one is
first requested, so as long as the :class:`_engine.Engine`
isn't used again,
no new connections will be made.
.. seealso::
:ref:`engine_disposal`
"""
self.pool.dispose()
self.pool = self.pool.recreate()
self.dispatch.engine_disposed(self)
def _execute_default(self, default):
with self._contextual_connect() as conn:
return conn._execute_default(default, (), {})
@contextlib.contextmanager
def _optional_conn_ctx_manager(self, connection=None):
if connection is None:
with self._contextual_connect() as conn:
yield conn
else:
yield connection
def _run_visitor(
self, visitorcallable, element, connection=None, **kwargs
):
with self._optional_conn_ctx_manager(connection) as conn:
conn._run_visitor(visitorcallable, element, **kwargs)
class _trans_ctx(object):
def __init__(self, conn, transaction, close_with_result):
self.conn = conn
self.transaction = transaction
self.close_with_result = close_with_result
def __enter__(self):
return self.conn
def __exit__(self, type_, value, traceback):
if type_ is not None:
self.transaction.rollback()
else:
self.transaction.commit()
if not self.close_with_result:
self.conn.close()
def begin(self, close_with_result=False):
"""Return a context manager delivering a :class:`_engine.Connection`
with a :class:`.Transaction` established.
E.g.::
with engine.begin() as conn:
conn.execute("insert into table (x, y, z) values (1, 2, 3)")
conn.execute("my_special_procedure(5)")
Upon successful operation, the :class:`.Transaction`
is committed. If an error is raised, the :class:`.Transaction`
is rolled back.
The ``close_with_result`` flag is normally ``False``, and indicates
that the :class:`_engine.Connection` will be closed when the operation
is complete. When set to ``True``, it indicates the
:class:`_engine.Connection` is in "single use" mode, where the
:class:`_engine.ResultProxy` returned by the first call to
:meth:`_engine.Connection.execute` will close the
:class:`_engine.Connection` when
that :class:`_engine.ResultProxy` has exhausted all result rows.
.. seealso::
:meth:`_engine.Engine.connect` - procure a
:class:`_engine.Connection` from
an :class:`_engine.Engine`.
:meth:`_engine.Connection.begin` - start a :class:`.Transaction`
for a particular :class:`_engine.Connection`.
"""
conn = self._contextual_connect(close_with_result=close_with_result)
try:
trans = conn.begin()
except:
with util.safe_reraise():
conn.close()
return Engine._trans_ctx(conn, trans, close_with_result)
def transaction(self, callable_, *args, **kwargs):
r"""Execute the given function within a transaction boundary.
The function is passed a :class:`_engine.Connection` newly procured
from :meth:`_engine.Engine.contextual_connect` as the first argument,
followed by the given \*args and \**kwargs.
e.g.::
def do_something(conn, x, y):
conn.execute("some statement", {'x':x, 'y':y})
engine.transaction(do_something, 5, 10)
The operations inside the function are all invoked within the
context of a single :class:`.Transaction`.
Upon success, the transaction is committed. If an
exception is raised, the transaction is rolled back
before propagating the exception.
.. note::
The :meth:`.transaction` method is superseded by
the usage of the Python ``with:`` statement, which can
be used with :meth:`_engine.Engine.begin`::
with engine.begin() as conn:
conn.execute("some statement", {'x':5, 'y':10})
.. seealso::
:meth:`_engine.Engine.begin` - engine-level transactional
context
:meth:`_engine.Connection.transaction`
- connection-level version of
:meth:`_engine.Engine.transaction`
"""
with self._contextual_connect() as conn:
return conn.transaction(callable_, *args, **kwargs)
def run_callable(self, callable_, *args, **kwargs):
r"""Given a callable object or function, execute it, passing
a :class:`_engine.Connection` as the first argument.
The given \*args and \**kwargs are passed subsequent
to the :class:`_engine.Connection` argument.
This function, along with :meth:`_engine.Connection.run_callable`,
allows a function to be run with a :class:`_engine.Connection`
or :class:`_engine.Engine` object without the need to know
which one is being dealt with.
"""
with self._contextual_connect() as conn:
return conn.run_callable(callable_, *args, **kwargs)
def execute(self, statement, *multiparams, **params):
"""Executes the given construct and returns a
:class:`_engine.ResultProxy`.
The arguments are the same as those used by
:meth:`_engine.Connection.execute`.
Here, a :class:`_engine.Connection` is acquired using the
:meth:`_engine.Engine.contextual_connect` method,
and the statement executed
with that connection. The returned :class:`_engine.ResultProxy`
is flagged
such that when the :class:`_engine.ResultProxy` is exhausted and its
underlying cursor is closed, the :class:`_engine.Connection`
created here
will also be closed, which allows its associated DBAPI connection
resource to be returned to the connection pool.
"""
connection = self._contextual_connect(close_with_result=True)
return connection.execute(statement, *multiparams, **params)
def scalar(self, statement, *multiparams, **params):
return self.execute(statement, *multiparams, **params).scalar()
def _execute_clauseelement(self, elem, multiparams=None, params=None):
connection = self._contextual_connect(close_with_result=True)
return connection._execute_clauseelement(elem, multiparams, params)
def _execute_compiled(self, compiled, multiparams, params):
connection = self._contextual_connect(close_with_result=True)
return connection._execute_compiled(compiled, multiparams, params)
def connect(self, **kwargs):
"""Return a new :class:`_engine.Connection` object.
The :class:`_engine.Connection` object is a facade that uses a DBAPI
connection internally in order to communicate with the database. This
connection is procured from the connection-holding :class:`_pool.Pool`
referenced by this :class:`_engine.Engine`. When the
:meth:`_engine.Connection.close` method of the
:class:`_engine.Connection` object
is called, the underlying DBAPI connection is then returned to the
connection pool, where it may be used again in a subsequent call to
:meth:`_engine.Engine.connect`.
"""
return self._connection_cls(self, **kwargs)
@util.deprecated(
"1.3",
"The :meth:`_engine.Engine.contextual_connect` method is deprecated. "
"This "
"method is an artifact of the threadlocal engine strategy which is "
"also to be deprecated. For explicit connections from an "
":class:`_engine.Engine`, use the :meth:`_engine.Engine.connect` "
"method.",
)
def contextual_connect(self, close_with_result=False, **kwargs):
"""Return a :class:`_engine.Connection`
object which may be part of some
ongoing context.
By default, this method does the same thing as
:meth:`_engine.Engine.connect`.
Subclasses of :class:`_engine.Engine` may override this method
to provide contextual behavior.
:param close_with_result: When True, the first
:class:`_engine.ResultProxy`
created by the :class:`_engine.Connection` will call the
:meth:`_engine.Connection.close`
method of that connection as soon as any
pending result rows are exhausted. This is used to supply the
"connectionless execution" behavior provided by the
:meth:`_engine.Engine.execute` method.
"""
return self._contextual_connect(
close_with_result=close_with_result, **kwargs
)
def _contextual_connect(self, close_with_result=False, **kwargs):
return self._connection_cls(
self,
self._wrap_pool_connect(self.pool.connect, None),
close_with_result=close_with_result,
**kwargs
)
def table_names(self, schema=None, connection=None):
"""Return a list of all table names available in the database.
:param schema: Optional, retrieve names from a non-default schema.
:param connection: Optional, use a specified connection. Default is
the ``contextual_connect`` for this ``Engine``.
"""
with self._optional_conn_ctx_manager(connection) as conn:
return self.dialect.get_table_names(conn, schema)
def has_table(self, table_name, schema=None):
"""Return True if the given backend has a table of the given name.
.. seealso::
:ref:`metadata_reflection_inspector` - detailed schema inspection
using the :class:`_reflection.Inspector` interface.
:class:`.quoted_name` - used to pass quoting information along
with a schema identifier.
"""
return self.run_callable(self.dialect.has_table, table_name, schema)
def _wrap_pool_connect(self, fn, connection):
dialect = self.dialect
try:
return fn()
except dialect.dbapi.Error as e:
if connection is None:
Connection._handle_dbapi_exception_noconnection(
e, dialect, self
)
else:
util.raise_(
sys.exc_info()[1], with_traceback=sys.exc_info()[2]
)
def raw_connection(self, _connection=None):
"""Return a "raw" DBAPI connection from the connection pool.
The returned object is a proxied version of the DBAPI
connection object used by the underlying driver in use.
The object will have all the same behavior as the real DBAPI
connection, except that its ``close()`` method will result in the
connection being returned to the pool, rather than being closed
for real.
This method provides direct DBAPI connection access for
special situations when the API provided by
:class:`_engine.Connection`
is not needed. When a :class:`_engine.Connection` object is already
present, the DBAPI connection is available using
the :attr:`_engine.Connection.connection` accessor.
.. seealso::
:ref:`dbapi_connections`
"""
return self._wrap_pool_connect(
self.pool.unique_connection, _connection
)
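# Example: borrowing a raw DBAPI connection from the Engine's pool; its
# close() performs a pool checkin rather than a real close (a sketch
# assuming an in-memory SQLite engine).
def _example_raw_connection():
    from sqlalchemy import create_engine

    engine = create_engine("sqlite://")
    dbapi_conn = engine.raw_connection()
    try:
        cursor = dbapi_conn.cursor()
        cursor.execute("select 1")
        row = cursor.fetchone()
        cursor.close()
    finally:
        dbapi_conn.close()  # checkin, not a hard close
    return row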
class OptionEngine(Engine):
_sa_propagate_class_events = False
def __init__(self, proxied, execution_options):
self._proxied = proxied
self.url = proxied.url
self.dialect = proxied.dialect
self.logging_name = proxied.logging_name
self.echo = proxied.echo
self.hide_parameters = proxied.hide_parameters
log.instance_logger(self, echoflag=self.echo)
# note: this will propagate events that are assigned to the parent
# engine after this OptionEngine is created. Since we share
# the events of the parent we also disallow class-level events
# to apply to the OptionEngine class directly.
#
# the other way this can work would be to transfer existing
# events only, using:
# self.dispatch._update(proxied.dispatch)
#
# that might be more appropriate however it would be a behavioral
# change for logic that assigns events to the parent engine and
# would like it to take effect for the already-created sub-engine.
self.dispatch = self.dispatch._join(proxied.dispatch)
self._execution_options = proxied._execution_options
self.update_execution_options(**execution_options)
def _get_pool(self):
return self._proxied.pool
def _set_pool(self, pool):
self._proxied.pool = pool
pool = property(_get_pool, _set_pool)
def _get_has_events(self):
return self._proxied._has_events or self.__dict__.get(
"_has_events", False
)
def _set_has_events(self, value):
self.__dict__["_has_events"] = value
_has_events = property(_get_has_events, _set_has_events)
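# Example: sub-engines produced by Engine.execution_options() are
# OptionEngine instances sharing the parent's pool (a sketch; the
# shard id is an illustrative value).
def _example_option_engine():
    from sqlalchemy import create_engine

    primary = create_engine("sqlite://")
    shard1 = primary.execution_options(shard_id="shard1")
    assert shard1.pool is primary.pool  # same pool instance
    assert shard1.get_execution_options()["shard_id"] == "shard1"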
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/pool/dbapi_proxy.py
# sqlalchemy/pool/dbapi_proxy.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""DBAPI proxy utility.
Provides transparent connection pooling on top of a Python DBAPI.
This is legacy SQLAlchemy functionality that is not typically used
today.
"""
from .impl import QueuePool
from .. import util
from ..util import threading
proxies = {}
@util.deprecated(
"1.3",
"The :func:`.pool.manage` function is deprecated, and will be "
"removed in a future release.",
)
def manage(module, **params):
r"""Return a proxy for a DB-API module that automatically
pools connections.
Given a DB-API 2.0 module and pool management parameters, returns
a proxy for the module that will automatically pool connections,
creating new connection pools for each distinct set of connection
arguments sent to the decorated module's connect() function.
:param module: a DB-API 2.0 database module
:param poolclass: the class used by the pool module to provide
pooling. Defaults to :class:`.QueuePool`.
:param \**params: will be passed through to *poolclass*
"""
try:
return proxies[module]
except KeyError:
return proxies.setdefault(module, _DBProxy(module, **params))
def clear_managers():
"""Remove all current DB-API 2.0 managers.
All pools and connections are disposed.
"""
for manager in proxies.values():
manager.close()
proxies.clear()
class _DBProxy(object):
"""Layers connection pooling behavior on top of a standard DB-API module.
Proxies a DB-API 2.0 connect() call to a connection pool keyed to the
specific connect parameters. Other functions and attributes are delegated
to the underlying DB-API module.
"""
def __init__(self, module, poolclass=QueuePool, **kw):
"""Initializes a new proxy.
module
a DB-API 2.0 module
poolclass
a Pool class, defaulting to QueuePool
Other parameters are sent to the Pool object's constructor.
"""
self.module = module
self.kw = kw
self.poolclass = poolclass
self.pools = {}
self._create_pool_mutex = threading.Lock()
def close(self):
for key in list(self.pools):
del self.pools[key]
def __del__(self):
self.close()
def __getattr__(self, key):
return getattr(self.module, key)
def get_pool(self, *args, **kw):
key = self._serialize(*args, **kw)
try:
return self.pools[key]
except KeyError:
self._create_pool_mutex.acquire()
try:
if key not in self.pools:
kw.pop("sa_pool_key", None)
pool = self.poolclass(
lambda: self.module.connect(*args, **kw), **self.kw
)
self.pools[key] = pool
return pool
else:
return self.pools[key]
finally:
self._create_pool_mutex.release()
def connect(self, *args, **kw):
"""Activate a connection to the database.
Connect to the database using this DBProxy's module and the given
connect arguments. If the arguments match an existing pool, the
connection will be returned from the pool's current thread-local
connection instance, or if there is no thread-local connection
instance it will be checked out from the set of pooled connections.
If the pool has no available connections and allows new connections
to be created, a new database connection will be made.
"""
return self.get_pool(*args, **kw).connect()
def dispose(self, *args, **kw):
"""Dispose the pool referenced by the given connect arguments."""
key = self._serialize(*args, **kw)
try:
del self.pools[key]
except KeyError:
pass
def _serialize(self, *args, **kw):
if "sa_pool_key" in kw:
return kw["sa_pool_key"]
return tuple(list(args) + [(k, kw[k]) for k in sorted(kw)])
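# Example: the (deprecated) manage() proxy pooling a DBAPI module
# directly; sqlite3 stands in for the DBAPI here (a minimal sketch).
def _example_manage():
    import sqlite3
    from sqlalchemy.pool import clear_managers, manage

    pooled = manage(sqlite3, pool_size=2)  # emits a deprecation warning
    conn = pooled.connect(":memory:")
    conn.close()      # returned to the pool, not closed for real
    clear_managers()  # dispose every proxy-managed pool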
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/pool/__init__.py
# sqlalchemy/pool/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Connection pooling for DB-API connections.
Provides a number of connection pool implementations for a variety of
usage scenarios and thread behavior requirements imposed by the
application, DB-API or database itself.
Also provides a DB-API 2.0 connection proxying mechanism allowing
regular DB-API connect() methods to be transparently managed by a
SQLAlchemy connection pool.
"""
from .base import _ConnectionFairy # noqa
from .base import _ConnectionRecord # noqa
from .base import _finalize_fairy # noqa
from .base import _refs # noqa
from .base import Pool
from .base import reset_commit
from .base import reset_none
from .base import reset_rollback
from .dbapi_proxy import clear_managers
from .dbapi_proxy import manage
from .impl import AssertionPool
from .impl import NullPool
from .impl import QueuePool
from .impl import SingletonThreadPool
from .impl import StaticPool
__all__ = [
"Pool",
"reset_commit",
"reset_none",
"reset_rollback",
"clear_managers",
"manage",
"AssertionPool",
"NullPool",
"QueuePool",
"SingletonThreadPool",
"StaticPool",
]
# as these are likely to be used in various test suites, debugging
# setups, keep them in the sqlalchemy.pool namespace
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/pool/impl.py
# sqlalchemy/pool/impl.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Pool implementation classes.
"""
import traceback
import weakref
from .base import _ConnectionFairy
from .base import _ConnectionRecord
from .base import Pool
from .. import exc
from .. import util
from ..util import chop_traceback
from ..util import queue as sqla_queue
from ..util import threading
class QueuePool(Pool):
"""A :class:`_pool.Pool`
that imposes a limit on the number of open connections.
:class:`.QueuePool` is the default pooling implementation used for
all :class:`_engine.Engine` objects, unless the SQLite dialect is in use.
"""
def __init__(
self,
creator,
pool_size=5,
max_overflow=10,
timeout=30,
use_lifo=False,
**kw
):
r"""
Construct a QueuePool.
:param creator: a callable function that returns a DB-API
connection object, same as that of :paramref:`_pool.Pool.creator`.
:param pool_size: The size of the pool to be maintained,
defaults to 5. This is the largest number of connections that
will be kept persistently in the pool. Note that the pool
begins with no connections; once this number of connections
is requested, that number of connections will remain.
``pool_size`` can be set to 0 to indicate no size limit; to
disable pooling, use a :class:`~sqlalchemy.pool.NullPool`
instead.
:param max_overflow: The maximum overflow size of the
pool. When the number of checked-out connections reaches the
size set in pool_size, additional connections will be
returned up to this limit. When those additional connections
are returned to the pool, they are disconnected and
discarded. It follows then that the total number of
simultaneous connections the pool will allow is ``pool_size +
max_overflow``, and the total number of "sleeping"
connections the pool will allow is ``pool_size``. ``max_overflow``
can be set to -1 to indicate no overflow limit; no limit
will be placed on the total number of concurrent
connections. Defaults to 10.
:param timeout: The number of seconds to wait before giving up
on returning a connection. Defaults to 30.
:param use_lifo: use LIFO (last-in-first-out) when retrieving
connections instead of FIFO (first-in-first-out). Using LIFO, a
server-side timeout scheme can reduce the number of connections used
during non-peak periods of use. When planning for server-side
timeouts, ensure that a recycle or pre-ping strategy is in use to
gracefully handle stale connections.
.. versionadded:: 1.3
.. seealso::
:ref:`pool_use_lifo`
:ref:`pool_disconnects`
:param \**kw: Other keyword arguments including
:paramref:`_pool.Pool.recycle`, :paramref:`_pool.Pool.echo`,
:paramref:`_pool.Pool.reset_on_return` and others are passed to the
:class:`_pool.Pool` constructor.
"""
Pool.__init__(self, creator, **kw)
self._pool = sqla_queue.Queue(pool_size, use_lifo=use_lifo)
self._overflow = 0 - pool_size
self._max_overflow = max_overflow
self._timeout = timeout
self._overflow_lock = threading.Lock()
def _do_return_conn(self, conn):
try:
self._pool.put(conn, False)
except sqla_queue.Full:
try:
conn.close()
finally:
self._dec_overflow()
def _do_get(self):
use_overflow = self._max_overflow > -1
try:
wait = use_overflow and self._overflow >= self._max_overflow
return self._pool.get(wait, self._timeout)
except sqla_queue.Empty:
# don't do things inside of "except Empty", because when we say
# we timed out or can't connect and raise, Python 3 tells
# people the real error is queue.Empty which it isn't.
pass
if use_overflow and self._overflow >= self._max_overflow:
if not wait:
return self._do_get()
else:
raise exc.TimeoutError(
"QueuePool limit of size %d overflow %d reached, "
"connection timed out, timeout %d"
% (self.size(), self.overflow(), self._timeout),
code="3o7r",
)
if self._inc_overflow():
try:
return self._create_connection()
except:
with util.safe_reraise():
self._dec_overflow()
else:
return self._do_get()
def _inc_overflow(self):
if self._max_overflow == -1:
self._overflow += 1
return True
with self._overflow_lock:
if self._overflow < self._max_overflow:
self._overflow += 1
return True
else:
return False
def _dec_overflow(self):
if self._max_overflow == -1:
self._overflow -= 1
return True
with self._overflow_lock:
self._overflow -= 1
return True
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(
self._creator,
pool_size=self._pool.maxsize,
max_overflow=self._max_overflow,
timeout=self._timeout,
recycle=self._recycle,
echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
_dispatch=self.dispatch,
dialect=self._dialect,
)
def dispose(self):
while True:
try:
conn = self._pool.get(False)
conn.close()
except sqla_queue.Empty:
break
self._overflow = 0 - self.size()
self.logger.info("Pool disposed. %s", self.status())
def status(self):
return (
"Pool size: %d Connections in pool: %d "
"Current Overflow: %d Current Checked out "
"connections: %d"
% (
self.size(),
self.checkedin(),
self.overflow(),
self.checkedout(),
)
)
def size(self):
return self._pool.maxsize
def timeout(self):
return self._timeout
def checkedin(self):
return self._pool.qsize()
def overflow(self):
return self._overflow
def checkedout(self):
return self._pool.maxsize - self._pool.qsize() + self._overflow
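# Example: constructing a QueuePool directly around a DBAPI creator
# (a sketch using sqlite3; create_engine() normally builds this for you).
def _example_queuepool():
    import sqlite3

    pool = QueuePool(
        lambda: sqlite3.connect(":memory:"),
        pool_size=5,
        max_overflow=10,
        timeout=30,
    )
    conn = pool.connect()  # proxied DBAPI connection (a _ConnectionFairy)
    conn.close()           # checkin
    return pool.status()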
class NullPool(Pool):
"""A Pool which does not pool connections.
Instead it literally opens and closes the underlying DB-API connection
per each connection open/close.
Reconnect-related functions such as ``recycle`` and connection
invalidation are not supported by this Pool implementation, since
no connections are held persistently.
"""
def status(self):
return "NullPool"
def _do_return_conn(self, conn):
conn.close()
def _do_get(self):
return self._create_connection()
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(
self._creator,
recycle=self._recycle,
echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
_dispatch=self.dispatch,
dialect=self._dialect,
)
def dispose(self):
pass
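# Example: selecting NullPool via create_engine(); each checkout opens a
# brand-new DBAPI connection and each checkin closes it (a sketch; the
# database path is a placeholder).
def _example_nullpool():
    from sqlalchemy import create_engine
    from sqlalchemy.pool import NullPool

    engine = create_engine("sqlite:///some.db", poolclass=NullPool)
    with engine.connect() as conn:
        conn.execute("select 1")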
class SingletonThreadPool(Pool):
"""A Pool that maintains one connection per thread.
Maintains one connection per thread, never moving a connection to a
thread other than the one in which it was created.
.. warning:: the :class:`.SingletonThreadPool` will call ``.close()``
on arbitrary connections that exist beyond the size setting of
``pool_size``, e.g. if more unique **thread identities**
than what ``pool_size`` states are used. This cleanup is
non-deterministic and not sensitive to whether or not the connections
linked to those thread identities are currently in use.
:class:`.SingletonThreadPool` may be improved in a future release,
however in its current status it is generally used only for test
scenarios using a SQLite ``:memory:`` database and is not recommended
for production use.
Options are the same as those of :class:`_pool.Pool`, as well as:
:param pool_size: The number of threads in which to maintain connections
at once. Defaults to five.
:class:`.SingletonThreadPool` is used by the SQLite dialect
automatically when a memory-based database is used.
See :ref:`sqlite_toplevel`.
"""
def __init__(self, creator, pool_size=5, **kw):
Pool.__init__(self, creator, **kw)
self._conn = threading.local()
self._fairy = threading.local()
self._all_conns = set()
self.size = pool_size
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(
self._creator,
pool_size=self.size,
recycle=self._recycle,
echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
_dispatch=self.dispatch,
dialect=self._dialect,
)
def dispose(self):
"""Dispose of this pool."""
for conn in self._all_conns:
try:
conn.close()
except Exception:
# pysqlite won't even let you close a conn from a thread
# that didn't create it
pass
self._all_conns.clear()
def _cleanup(self):
while len(self._all_conns) >= self.size:
c = self._all_conns.pop()
c.close()
def status(self):
return "SingletonThreadPool id:%d size: %d" % (
id(self),
len(self._all_conns),
)
def _do_return_conn(self, conn):
pass
def _do_get(self):
try:
c = self._conn.current()
if c:
return c
except AttributeError:
pass
c = self._create_connection()
self._conn.current = weakref.ref(c)
if len(self._all_conns) >= self.size:
self._cleanup()
self._all_conns.add(c)
return c
def connect(self):
# vendored from Pool to include use_threadlocal behavior
try:
rec = self._fairy.current()
except AttributeError:
pass
else:
if rec is not None:
return rec._checkout_existing()
return _ConnectionFairy._checkout(self, self._fairy)
def _return_conn(self, record):
try:
del self._fairy.current
except AttributeError:
pass
self._do_return_conn(record)
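# Example: within one thread, repeated connect() calls on a
# SingletonThreadPool hand back the same underlying DBAPI connection
# (a sketch using sqlite3, the pool's typical use case).
def _example_singleton_thread_pool():
    import sqlite3

    pool = SingletonThreadPool(lambda: sqlite3.connect(":memory:"))
    c1 = pool.connect()
    c2 = pool.connect()
    assert c1.connection is c2.connection  # same per-thread connection
    c2.close()
    c1.close()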
class StaticPool(Pool):
"""A Pool of exactly one connection, used for all requests.
Reconnect-related functions such as ``recycle`` and connection
invalidation (which is also used to support auto-reconnect) are not
currently supported by this Pool implementation but may be implemented
in a future release.
"""
@util.memoized_property
def _conn(self):
return self._creator()
@util.memoized_property
def connection(self):
return _ConnectionRecord(self)
def status(self):
return "StaticPool"
def dispose(self):
if "_conn" in self.__dict__:
self._conn.close()
self._conn = None
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(
creator=self._creator,
recycle=self._recycle,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
echo=self.echo,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch,
dialect=self._dialect,
)
def _create_connection(self):
return self._conn
def _do_return_conn(self, conn):
pass
def _do_get(self):
return self.connection
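# Example: StaticPool serves the identical DBAPI connection for every
# checkout, useful for sharing a single sqlite ":memory:" database
# (a minimal sketch).
def _example_staticpool():
    import sqlite3

    pool = StaticPool(lambda: sqlite3.connect(":memory:"))
    c1 = pool.connect()
    first = c1.connection
    c1.close()
    c2 = pool.connect()
    assert c2.connection is first  # always the same connection
    c2.close()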
class AssertionPool(Pool):
"""A :class:`_pool.Pool` that allows at most one checked out connection at
any given time.
This will raise an exception if more than one connection is checked out
at a time. Useful for debugging code that is using more connections
than desired.
"""
def __init__(self, *args, **kw):
self._conn = None
self._checked_out = False
self._store_traceback = kw.pop("store_traceback", True)
self._checkout_traceback = None
Pool.__init__(self, *args, **kw)
def status(self):
return "AssertionPool"
def _do_return_conn(self, conn):
if not self._checked_out:
raise AssertionError("connection is not checked out")
self._checked_out = False
assert conn is self._conn
def dispose(self):
self._checked_out = False
if self._conn:
self._conn.close()
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(
self._creator,
echo=self.echo,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch,
dialect=self._dialect,
)
def _do_get(self):
if self._checked_out:
if self._checkout_traceback:
suffix = " at:\n%s" % "".join(
chop_traceback(self._checkout_traceback)
)
else:
suffix = ""
raise AssertionError("connection is already checked out" + suffix)
if not self._conn:
self._conn = self._create_connection()
self._checked_out = True
if self._store_traceback:
self._checkout_traceback = traceback.format_stack()
return self._conn
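# Example: AssertionPool raising on a second concurrent checkout, which
# helps pinpoint connection leaks in tests (a minimal sketch).
def _example_assertionpool():
    import sqlite3

    pool = AssertionPool(lambda: sqlite3.connect(":memory:"))
    c1 = pool.connect()
    try:
        pool.connect()  # second checkout while c1 is still out
    except AssertionError:
        pass  # "connection is already checked out at: ..."
    finally:
        c1.close()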
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/pool/base.py
# sqlalchemy/pool/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base constructs for connection pools.
"""
from collections import deque
import time
import weakref
from .. import event
from .. import exc
from .. import interfaces
from .. import log
from .. import util
from ..util import threading
reset_rollback = util.symbol("reset_rollback")
reset_commit = util.symbol("reset_commit")
reset_none = util.symbol("reset_none")
class _ConnDialect(object):
"""partial implementation of :class:`.Dialect`
which provides DBAPI connection methods.
When a :class:`_pool.Pool` is combined with an :class:`_engine.Engine`,
the :class:`_engine.Engine` replaces this with its own
:class:`.Dialect`.
"""
def do_rollback(self, dbapi_connection):
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
dbapi_connection.commit()
def do_close(self, dbapi_connection):
dbapi_connection.close()
def do_ping(self, dbapi_connection):
raise NotImplementedError(
"The ping feature requires that a dialect is "
"passed to the connection pool."
)
class Pool(log.Identified):
"""Abstract base class for connection pools."""
_dialect = _ConnDialect()
@util.deprecated_params(
use_threadlocal=(
"1.3",
"The :paramref:`_pool.Pool.use_threadlocal` parameter is "
"deprecated and will be removed in a future release.",
),
listeners=(
"0.7",
":class:`.PoolListener` is deprecated in favor of the "
":class:`_events.PoolEvents` listener interface. The "
":paramref:`_pool.Pool.listeners` parameter will be removed in a "
"future release.",
),
)
def __init__(
self,
creator,
recycle=-1,
echo=None,
use_threadlocal=False,
logging_name=None,
reset_on_return=True,
listeners=None,
events=None,
dialect=None,
pre_ping=False,
_dispatch=None,
):
"""
Construct a Pool.
:param creator: a callable function that returns a DB-API
connection object. The function will be called either with no
arguments, or with a single positional argument referring to
the :class:`._ConnectionRecord`, depending on the signature
detected by the pool.
:param recycle: If set to a value other than -1, number of
seconds between connection recycling, which means upon
checkout, if this timeout is surpassed the connection will be
closed and replaced with a newly opened connection. Defaults to -1.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param echo: if True, the connection pool will log
informational output such as when connections are invalidated
as well as when connections are recycled to the default log handler,
which defaults to ``sys.stdout`` for output. If set to the string
``"debug"``, the logging will include pool checkouts and checkins.
The :paramref:`_pool.Pool.echo` parameter can also be set from the
:func:`_sa.create_engine` call by using the
:paramref:`_sa.create_engine.echo_pool` parameter.
.. seealso::
:ref:`dbengine_logging` - further detail on how to configure
logging.
:param use_threadlocal: If set to True, repeated calls to
:meth:`connect` within the same application thread will be
guaranteed to return the same connection object that is already
checked out. This is a legacy use case and the flag has no
effect when using the pool with a :class:`_engine.Engine` object.
:param reset_on_return: Determine steps to take on
connections as they are returned to the pool.
reset_on_return can have any of these values:
* ``"rollback"`` - call rollback() on the connection,
to release locks and transaction resources.
This is the default value. The vast majority
of use cases should leave this value set.
* ``True`` - same as 'rollback', this is here for
backwards compatibility.
* ``"commit"`` - call commit() on the connection,
to release locks and transaction resources.
A commit here may be desirable for databases that
cache query plans if a commit is emitted,
such as Microsoft SQL Server. However, this
value is more dangerous than 'rollback' because
any data changes present on the transaction
are committed unconditionally.
* ``None`` - don't do anything on the connection.
This setting should generally only be made on a database
that has no transaction support at all,
namely MySQL MyISAM; when used on this backend, performance
can be improved as the "rollback" call is still expensive on
MySQL. It is **strongly recommended** that this setting not be
used for transaction-supporting databases in conjunction with
a persistent pool such as :class:`.QueuePool`, as it opens
the possibility for connections still in a transaction to be
idle in the pool. The setting may be appropriate in the
case of :class:`.NullPool` or special circumstances where
the connection pool in use is not being used to maintain connection
lifecycle.
* ``False`` - same as None, this is here for
backwards compatibility.
:param events: a list of 2-tuples, each of the form
``(callable, target)`` which will be passed to :func:`.event.listen`
upon construction. Provided here so that event listeners
can be assigned via :func:`_sa.create_engine` before dialect-level
listeners are applied.
:param listeners: A list of :class:`.PoolListener`-like objects or
dictionaries of callables that receive events when DB-API
connections are created, checked out and checked in to the
pool.
:param dialect: a :class:`.Dialect` that will handle the job
of calling rollback(), close(), or commit() on DBAPI connections.
If omitted, a built-in "stub" dialect is used. Applications that
make use of :func:`_sa.create_engine` should not use this parameter
as it is handled by the engine creation strategy.
.. versionadded:: 1.1 - ``dialect`` is now a public parameter
to the :class:`_pool.Pool`.
:param pre_ping: if True, the pool will emit a "ping" (typically
"SELECT 1", but is dialect-specific) on the connection
upon checkout, to test if the connection is alive or not. If not,
the connection is transparently re-connected and upon success, all
other pooled connections established prior to that timestamp are
invalidated. Requires that a dialect is passed as well to
interpret the disconnection error.
.. versionadded:: 1.2
"""
if logging_name:
self.logging_name = self._orig_logging_name = logging_name
else:
self._orig_logging_name = None
log.instance_logger(self, echoflag=echo)
self._threadconns = threading.local()
self._creator = creator
self._recycle = recycle
self._invalidate_time = 0
self._use_threadlocal = use_threadlocal
self._pre_ping = pre_ping
self._reset_on_return = util.symbol.parse_user_argument(
reset_on_return,
{
reset_rollback: ["rollback", True],
reset_none: ["none", None, False],
reset_commit: ["commit"],
},
"reset_on_return",
resolve_symbol_names=False,
)
self.echo = echo
if _dispatch:
self.dispatch._update(_dispatch, only_propagate=False)
if dialect:
self._dialect = dialect
if events:
for fn, target in events:
event.listen(self, target, fn)
if listeners:
for l in listeners:
self.add_listener(l)
@property
def _creator(self):
return self.__dict__["_creator"]
@_creator.setter
def _creator(self, creator):
self.__dict__["_creator"] = creator
self._invoke_creator = self._should_wrap_creator(creator)
def _should_wrap_creator(self, creator):
"""Detect if creator accepts a single argument, or is sent
as a legacy style no-arg function.
"""
try:
argspec = util.get_callable_argspec(self._creator, no_self=True)
except TypeError:
return lambda crec: creator()
defaulted = argspec[3] is not None and len(argspec[3]) or 0
positionals = len(argspec[0]) - defaulted
# look for the exact arg signature that DefaultStrategy
# sends us
if (argspec[0], argspec[3]) == (["connection_record"], (None,)):
return creator
# or just a single positional
elif positionals == 1:
return creator
# all other cases, just wrap and assume legacy "creator" callable
# thing
else:
return lambda crec: creator()
def _close_connection(self, connection):
self.logger.debug("Closing connection %r", connection)
try:
self._dialect.do_close(connection)
except Exception:
self.logger.error(
"Exception closing connection %r", connection, exc_info=True
)
@util.deprecated(
"0.7",
"The :meth:`_pool.Pool.add_listener` method is deprecated and "
"will be removed in a future release. Please use the "
":class:`_events.PoolEvents` listener interface.",
)
def add_listener(self, listener):
"""Add a :class:`.PoolListener`-like object to this pool.
``listener`` may be an object that implements some or all of
PoolListener, or a dictionary of callables containing implementations
of some or all of the named methods in PoolListener.
"""
interfaces.PoolListener._adapt_listener(self, listener)
def unique_connection(self):
"""Produce a DBAPI connection that is not referenced by any
thread-local context.
This method is equivalent to :meth:`_pool.Pool.connect` when the
:paramref:`_pool.Pool.use_threadlocal` flag is not set to True.
When :paramref:`_pool.Pool.use_threadlocal` is True, the
:meth:`_pool.Pool.unique_connection`
method provides a means of bypassing
the threadlocal context.
"""
return _ConnectionFairy._checkout(self)
def _create_connection(self):
"""Called by subclasses to create a new ConnectionRecord."""
return _ConnectionRecord(self)
def _invalidate(self, connection, exception=None, _checkin=True):
"""Mark all connections established within the generation
of the given connection as invalidated.
If this pool's last invalidate time is before when the given
connection was created, update the timestamp to the current time.
Otherwise, no action is performed.
Connections with a start time prior to this pool's invalidation
time will be recycled upon next checkout.
"""
rec = getattr(connection, "_connection_record", None)
if not rec or self._invalidate_time < rec.starttime:
self._invalidate_time = time.time()
if _checkin and getattr(connection, "is_valid", False):
connection.invalidate(exception)
def recreate(self):
"""Return a new :class:`_pool.Pool`, of the same class as this one
and configured with identical creation arguments.
This method is used in conjunction with :meth:`dispose`
to close out an entire :class:`_pool.Pool` and create a new one in
its place.
"""
raise NotImplementedError()
def dispose(self):
"""Dispose of this pool.
This method leaves the possibility of checked-out connections
remaining open, as it only affects connections that are
idle in the pool.
.. seealso::
:meth:`Pool.recreate`
"""
raise NotImplementedError()
def connect(self):
"""Return a DBAPI connection from the pool.
The connection is instrumented such that when its
``close()`` method is called, the connection will be returned to
the pool.
"""
if not self._use_threadlocal:
return _ConnectionFairy._checkout(self)
try:
rec = self._threadconns.current()
except AttributeError:
pass
else:
if rec is not None:
return rec._checkout_existing()
return _ConnectionFairy._checkout(self, self._threadconns)
def _return_conn(self, record):
"""Given a _ConnectionRecord, return it to the :class:`_pool.Pool`.
This method is called when an instrumented DBAPI connection
has its ``close()`` method called.
"""
if self._use_threadlocal:
try:
del self._threadconns.current
except AttributeError:
pass
self._do_return_conn(record)
def _do_get(self):
"""Implementation for :meth:`get`, supplied by subclasses."""
raise NotImplementedError()
def _do_return_conn(self, conn):
"""Implementation for :meth:`return_conn`, supplied by subclasses."""
raise NotImplementedError()
def status(self):
raise NotImplementedError()
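# Example: both creator signatures detected by _should_wrap_creator() --
# a legacy no-argument callable and a single-positional form receiving
# the _ConnectionRecord (a sketch using QueuePool and sqlite3).
def _example_creator_signatures():
    import sqlite3
    from sqlalchemy.pool import QueuePool

    # legacy style: takes no arguments; the pool wraps it internally
    legacy_pool = QueuePool(lambda: sqlite3.connect(":memory:"))

    # single-positional style: invoked with the _ConnectionRecord
    def creator(connection_record):
        return sqlite3.connect(":memory:")

    record_pool = QueuePool(creator)
    return legacy_pool, record_pool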
class _ConnectionRecord(object):
"""Internal object which maintains an individual DBAPI connection
referenced by a :class:`_pool.Pool`.
The :class:`._ConnectionRecord` object always exists for any particular
DBAPI connection whether or not that DBAPI connection has been
"checked out". This is in contrast to the :class:`._ConnectionFairy`
which is only a public facade to the DBAPI connection while it is checked
out.
A :class:`._ConnectionRecord` may exist for a span longer than that
of a single DBAPI connection. For example, if the
:meth:`._ConnectionRecord.invalidate`
method is called, the DBAPI connection associated with this
:class:`._ConnectionRecord`
will be discarded, but the :class:`._ConnectionRecord` may be used again,
in which case a new DBAPI connection is produced when the
:class:`_pool.Pool`
next uses this record.
The :class:`._ConnectionRecord` is delivered along with connection
pool events, including :meth:`_events.PoolEvents.connect` and
:meth:`_events.PoolEvents.checkout`, however :class:`._ConnectionRecord`
still
remains an internal object whose API and internals may change.
.. seealso::
:class:`._ConnectionFairy`
"""
def __init__(self, pool, connect=True):
self.__pool = pool
if connect:
self.__connect(first_connect_check=True)
self.finalize_callback = deque()
fairy_ref = None
starttime = None
connection = None
"""A reference to the actual DBAPI connection being tracked.
May be ``None`` if this :class:`._ConnectionRecord` has been marked
as invalidated; a new DBAPI connection may replace it if the owning
pool calls upon this :class:`._ConnectionRecord` to reconnect.
"""
_soft_invalidate_time = 0
@util.memoized_property
def info(self):
"""The ``.info`` dictionary associated with the DBAPI connection.
This dictionary is shared among the :attr:`._ConnectionFairy.info`
and :attr:`_engine.Connection.info` accessors.
.. note::
The lifespan of this dictionary is linked to the
DBAPI connection itself, meaning that it is **discarded** each time
the DBAPI connection is closed and/or invalidated. The
:attr:`._ConnectionRecord.record_info` dictionary remains
persistent throughout the lifespan of the
:class:`._ConnectionRecord` container.
"""
return {}
@util.memoized_property
def record_info(self):
"""An "info' dictionary associated with the connection record
itself.
Unlike the :attr:`._ConnectionRecord.info` dictionary, which is linked
to the lifespan of the DBAPI connection, this dictionary is linked
to the lifespan of the :class:`._ConnectionRecord` container itself
and will remain persistent throughout the life of the
:class:`._ConnectionRecord`.
.. versionadded:: 1.1
"""
return {}
@classmethod
def checkout(cls, pool):
rec = pool._do_get()
try:
dbapi_connection = rec.get_connection()
except Exception as err:
with util.safe_reraise():
rec._checkin_failed(err)
echo = pool._should_log_debug()
fairy = _ConnectionFairy(dbapi_connection, rec, echo)
rec.fairy_ref = weakref.ref(
fairy,
lambda ref: _finalize_fairy
and _finalize_fairy(None, rec, pool, ref, echo),
)
_refs.add(rec)
if echo:
pool.logger.debug(
"Connection %r checked out from pool", dbapi_connection
)
return fairy
def _checkin_failed(self, err):
self.invalidate(e=err)
self.checkin(_no_fairy_ref=True)
def checkin(self, _no_fairy_ref=False):
if self.fairy_ref is None and not _no_fairy_ref:
util.warn("Double checkin attempted on %s" % self)
return
self.fairy_ref = None
connection = self.connection
pool = self.__pool
while self.finalize_callback:
finalizer = self.finalize_callback.pop()
finalizer(connection)
if pool.dispatch.checkin:
pool.dispatch.checkin(connection, self)
pool._return_conn(self)
@property
def in_use(self):
return self.fairy_ref is not None
@property
def last_connect_time(self):
return self.starttime
def close(self):
if self.connection is not None:
self.__close()
def invalidate(self, e=None, soft=False):
"""Invalidate the DBAPI connection held by this :class:`._ConnectionRecord`.
This method is called for all connection invalidations, including
when the :meth:`._ConnectionFairy.invalidate` or
:meth:`_engine.Connection.invalidate` methods are called,
as well as when any
so-called "automatic invalidation" condition occurs.
:param e: an exception object indicating a reason for the invalidation.
:param soft: if True, the connection isn't closed; instead, this
connection will be recycled on next checkout.
.. versionadded:: 1.0.3
.. seealso::
:ref:`pool_connection_invalidation`
"""
# already invalidated
if self.connection is None:
return
if soft:
self.__pool.dispatch.soft_invalidate(self.connection, self, e)
else:
self.__pool.dispatch.invalidate(self.connection, self, e)
if e is not None:
self.__pool.logger.info(
"%sInvalidate connection %r (reason: %s:%s)",
"Soft " if soft else "",
self.connection,
e.__class__.__name__,
e,
)
else:
self.__pool.logger.info(
"%sInvalidate connection %r",
"Soft " if soft else "",
self.connection,
)
if soft:
self._soft_invalidate_time = time.time()
else:
self.__close()
self.connection = None
def get_connection(self):
recycle = False
# NOTE: the various comparisons here are assuming that measurable time
# passes between these state changes. however, time.time() is not
# guaranteed to have sub-second precision. comparisons of
# "invalidation time" to "starttime" should perhaps use >= so that the
# state change can take place assuming no measurable time has passed,
# however this does not guarantee correct behavior here as if time
# continues to not pass, it will try to reconnect repeatedly until
# these timestamps diverge, so in that sense using > is safer. Per
# https://stackoverflow.com/a/1938096/34549, Windows time.time() may be
# within 16 milliseconds accuracy, so unit tests for connection
# invalidation need a sleep of at least this long between initial start
# time and invalidation for the logic below to work reliably.
if self.connection is None:
self.info.clear()
self.__connect()
elif (
self.__pool._recycle > -1
and time.time() - self.starttime > self.__pool._recycle
):
self.__pool.logger.info(
"Connection %r exceeded timeout; recycling", self.connection
)
recycle = True
elif self.__pool._invalidate_time > self.starttime:
self.__pool.logger.info(
"Connection %r invalidated due to pool invalidation; "
+ "recycling",
self.connection,
)
recycle = True
elif self._soft_invalidate_time > self.starttime:
self.__pool.logger.info(
"Connection %r invalidated due to local soft invalidation; "
+ "recycling",
self.connection,
)
recycle = True
if recycle:
self.__close()
self.info.clear()
self.__connect()
return self.connection
def __close(self):
self.finalize_callback.clear()
if self.__pool.dispatch.close:
self.__pool.dispatch.close(self.connection, self)
self.__pool._close_connection(self.connection)
self.connection = None
def __connect(self, first_connect_check=False):
pool = self.__pool
# ensure any existing connection is removed, so that if
# creator fails, this attribute stays None
self.connection = None
try:
self.starttime = time.time()
connection = pool._invoke_creator(self)
pool.logger.debug("Created new connection %r", connection)
self.connection = connection
except Exception as e:
with util.safe_reraise():
pool.logger.debug("Error on connect(): %s", e)
else:
if first_connect_check:
pool.dispatch.first_connect.for_modify(
pool.dispatch
).exec_once_unless_exception(self.connection, self)
if pool.dispatch.connect:
pool.dispatch.connect(self.connection, self)
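# Example: _ConnectionRecord instances arrive at pool events such as
# "connect"; their .info dict lives as long as the DBAPI connection
# (a sketch; the info key used is an arbitrary assumption).
def _example_pool_events():
    import sqlite3
    import time
    from sqlalchemy import event
    from sqlalchemy.pool import QueuePool

    pool = QueuePool(lambda: sqlite3.connect(":memory:"))

    @event.listens_for(pool, "connect")
    def on_connect(dbapi_conn, connection_record):
        connection_record.info["connected_at"] = time.time()

    conn = pool.connect()
    assert "connected_at" in conn.info  # fairy shares the record's .info
    conn.close()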
def _finalize_fairy(
connection, connection_record, pool, ref, echo, fairy=None
):
"""Cleanup for a :class:`._ConnectionFairy` whether or not it's already
been garbage collected.
"""
_refs.discard(connection_record)
if ref is not None:
if connection_record.fairy_ref is not ref:
return
assert connection is None
connection = connection_record.connection
if connection is not None:
if connection_record and echo:
pool.logger.debug(
"Connection %r being returned to pool", connection
)
try:
fairy = fairy or _ConnectionFairy(
connection, connection_record, echo
)
assert fairy.connection is connection
fairy._reset(pool)
# Immediately close detached instances
if not connection_record:
if pool.dispatch.close_detached:
pool.dispatch.close_detached(connection)
pool._close_connection(connection)
except BaseException as e:
pool.logger.error(
"Exception during reset or similar", exc_info=True
)
if connection_record:
connection_record.invalidate(e=e)
if not isinstance(e, Exception):
raise
if connection_record and connection_record.fairy_ref is not None:
connection_record.checkin()
_refs = set()
class _ConnectionFairy(object):
"""Proxies a DBAPI connection and provides return-on-dereference
support.
This is an internal object used by the :class:`_pool.Pool` implementation
to provide context management to a DBAPI connection delivered by
that :class:`_pool.Pool`.
The name "fairy" is inspired by the fact that the
:class:`._ConnectionFairy` object's lifespan is transitory, as it lasts
only for the length of a specific DBAPI connection being checked out from
the pool, and additionally that as a transparent proxy, it is mostly
invisible.
.. seealso::
:class:`._ConnectionRecord`
"""
def __init__(self, dbapi_connection, connection_record, echo):
self.connection = dbapi_connection
self._connection_record = connection_record
self._echo = echo
connection = None
"""A reference to the actual DBAPI connection being tracked."""
_connection_record = None
"""A reference to the :class:`._ConnectionRecord` object associated
with the DBAPI connection.
This is currently an internal accessor which is subject to change.
"""
_reset_agent = None
"""Refer to an object with a ``.commit()`` and ``.rollback()`` method;
if non-None, the "reset-on-return" feature will call upon this object
rather than directly against the dialect-level do_rollback() and
do_commit() methods.
In practice, a :class:`_engine.Connection` assigns a :class:`.Transaction`
object
to this variable when one is in scope so that the :class:`.Transaction`
takes the job of committing or rolling back on return if
:meth:`_engine.Connection.close` is called while the :class:`.Transaction`
still exists.
This is essentially an "event handler" of sorts but is simplified as an
instance variable both for performance/simplicity as well as that there
can only be one "reset agent" at a time.
"""
@classmethod
def _checkout(cls, pool, threadconns=None, fairy=None):
if not fairy:
fairy = _ConnectionRecord.checkout(pool)
fairy._pool = pool
fairy._counter = 0
if threadconns is not None:
threadconns.current = weakref.ref(fairy)
if fairy.connection is None:
raise exc.InvalidRequestError("This connection is closed")
fairy._counter += 1
if (
not pool.dispatch.checkout and not pool._pre_ping
) or fairy._counter != 1:
return fairy
# Pool listeners can trigger a reconnection on checkout, as well
# as the pre-pinger.
# up to two attempts are made here, but note that if the database
# is not accessible from a connection standpoint, those won't proceed
# here.
attempts = 2
while attempts > 0:
try:
if pool._pre_ping:
if fairy._echo:
pool.logger.debug(
"Pool pre-ping on connection %s", fairy.connection
)
result = pool._dialect.do_ping(fairy.connection)
if not result:
if fairy._echo:
pool.logger.debug(
"Pool pre-ping on connection %s failed, "
"will invalidate pool",
fairy.connection,
)
raise exc.InvalidatePoolError()
pool.dispatch.checkout(
fairy.connection, fairy._connection_record, fairy
)
return fairy
except exc.DisconnectionError as e:
if e.invalidate_pool:
pool.logger.info(
"Disconnection detected on checkout, "
"invalidating all pooled connections prior to "
"current timestamp (reason: %r)",
e,
)
fairy._connection_record.invalidate(e)
pool._invalidate(fairy, e, _checkin=False)
else:
pool.logger.info(
"Disconnection detected on checkout, "
"invalidating individual connection %s (reason: %r)",
fairy.connection,
e,
)
fairy._connection_record.invalidate(e)
try:
fairy.connection = (
fairy._connection_record.get_connection()
)
except Exception as err:
with util.safe_reraise():
fairy._connection_record._checkin_failed(err)
attempts -= 1
pool.logger.info("Reconnection attempts exhausted on checkout")
fairy.invalidate()
raise exc.InvalidRequestError("This connection is closed")
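# A brief usage sketch for the pre-ping path above: it corresponds to the
# public ``pool_pre_ping`` flag (the URL below is a placeholder)::
#
#     engine = create_engine(
#         "postgresql://scott:tiger@localhost/test", pool_pre_ping=True
#     )
#
# each checkout then calls the dialect's ``do_ping()``; a stale connection
# is invalidated and re-established once before being handed to the caller.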
def _checkout_existing(self):
return _ConnectionFairy._checkout(self._pool, fairy=self)
def _checkin(self):
_finalize_fairy(
self.connection,
self._connection_record,
self._pool,
None,
self._echo,
fairy=self,
)
self.connection = None
self._connection_record = None
_close = _checkin
def _reset(self, pool):
if pool.dispatch.reset:
pool.dispatch.reset(self, self._connection_record)
if pool._reset_on_return is reset_rollback:
if self._echo:
pool.logger.debug(
"Connection %s rollback-on-return%s",
self.connection,
", via agent" if self._reset_agent else "",
)
if self._reset_agent:
if not self._reset_agent.is_active:
util.warn(
"Reset agent is not active. "
"This should not occur unless there was already "
"a connectivity error in progress."
)
pool._dialect.do_rollback(self)
else:
self._reset_agent.rollback()
else:
pool._dialect.do_rollback(self)
elif pool._reset_on_return is reset_commit:
if self._echo:
pool.logger.debug(
"Connection %s commit-on-return%s",
self.connection,
", via agent" if self._reset_agent else "",
)
if self._reset_agent:
if not self._reset_agent.is_active:
util.warn(
"Reset agent is not active. "
"This should not occur unless there was already "
"a connectivity error in progress."
)
pool._dialect.do_commit(self)
else:
self._reset_agent.commit()
else:
pool._dialect.do_commit(self)
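# A brief sketch of how the reset-on-return branches above are selected
# from the public API; ``creator`` stands in for any zero-argument callable
# returning a DBAPI connection::
#
#     from sqlalchemy.pool import QueuePool
#
#     pool = QueuePool(creator, reset_on_return="rollback")  # the default
#     pool = QueuePool(creator, reset_on_return="commit")
#     pool = QueuePool(creator, reset_on_return=None)        # skip the reset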
@property
def _logger(self):
return self._pool.logger
@property
def is_valid(self):
"""Return True if this :class:`._ConnectionFairy` still refers
to an active DBAPI connection."""
return self.connection is not None
@util.memoized_property
def info(self):
"""Info dictionary associated with the underlying DBAPI connection
referred to by this :class:`.ConnectionFairy`, allowing user-defined
data to be associated with the connection.
The data here will follow along with the DBAPI connection including
after it is returned to the connection pool and used again
in subsequent instances of :class:`._ConnectionFairy`. It is shared
with the :attr:`._ConnectionRecord.info` and
:attr:`_engine.Connection.info`
accessors.
The dictionary associated with a particular DBAPI connection is
discarded when the connection itself is discarded.
"""
return self._connection_record.info
@property
def record_info(self):
"""Info dictionary associated with the :class:`._ConnectionRecord
container referred to by this :class:`.ConnectionFairy`.
Unlike the :attr:`._ConnectionFairy.info` dictionary, the lifespan
of this dictionary is persistent across connections that are
disconnected and/or invalidated within the lifespan of a
:class:`._ConnectionRecord`.
.. versionadded:: 1.1
"""
if self._connection_record:
return self._connection_record.record_info
else:
return None
def invalidate(self, e=None, soft=False):
"""Mark this connection as invalidated.
This method can be called directly, and is also called as a result
of the :meth:`_engine.Connection.invalidate` method. When invoked,
the DBAPI connection is immediately closed and discarded from
further use by the pool. The invalidation mechanism proceeds
via the :meth:`._ConnectionRecord.invalidate` internal method.
:param e: an exception object indicating a reason for the invalidation.
:param soft: if True, the connection isn't closed; instead, this
connection will be recycled on next checkout.
.. versionadded:: 1.0.3
.. seealso::
:ref:`pool_connection_invalidation`
"""
if self.connection is None:
util.warn("Can't invalidate an already-closed connection.")
return
if self._connection_record:
self._connection_record.invalidate(e=e, soft=soft)
if not soft:
self.connection = None
self._checkin()
def cursor(self, *args, **kwargs):
"""Return a new DBAPI cursor for the underlying connection.
This method is a proxy for the ``connection.cursor()`` DBAPI
method.
"""
return self.connection.cursor(*args, **kwargs)
def __getattr__(self, key):
return getattr(self.connection, key)
def detach(self):
"""Separate this connection from its Pool.
This means that the connection will no longer be returned to the
pool when closed, and will instead be literally closed. The
containing ConnectionRecord is separated from the DB-API connection,
and will create a new connection when next used.
Note that any overall connection limiting constraints imposed by a
Pool implementation may be violated after a detach, as the detached
connection is removed from the pool's knowledge and control.
"""
if self._connection_record is not None:
rec = self._connection_record
_refs.remove(rec)
rec.fairy_ref = None
rec.connection = None
# TODO: should this be _return_conn?
self._pool._do_return_conn(self._connection_record)
self.info = self.info.copy()
self._connection_record = None
if self._pool.dispatch.detach:
self._pool.dispatch.detach(self.connection, rec)
def close(self):
self._counter -= 1
if self._counter == 0:
self._checkin()
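if __name__ == "__main__":
    # A minimal, self-contained sketch of the checkout / invalidate / detach
    # lifecycle implemented above, using sqlite3 as the DBAPI; illustrative
    # only, not part of the library.
    import sqlite3

    from sqlalchemy.pool import QueuePool

    pool = QueuePool(lambda: sqlite3.connect(":memory:"), pool_size=2)

    fairy = pool.connect()  # a _ConnectionFairy proxy
    assert fairy.is_valid
    cursor = fairy.cursor()  # proxied DBAPI cursor
    cursor.execute("select 1")
    fairy.invalidate()  # hard invalidation; the DBAPI connection is closed

    fairy = pool.connect()  # the record reconnects transparently
    fairy.detach()  # the connection no longer returns to the pool
    fairy.close()  # and is now literally closed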
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/event/legacy.py
|
# event/legacy.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to handle adaption of legacy call signatures,
generation of deprecation notes and docstrings.
"""
from .. import util
def _legacy_signature(since, argnames, converter=None):
def leg(fn):
if not hasattr(fn, "_legacy_signatures"):
fn._legacy_signatures = []
fn._legacy_signatures.append((since, argnames, converter))
return fn
return leg
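# A hedged usage sketch: an Events method declares its previous argument
# lists via the decorator so that listeners written against an old signature
# keep working (the event and arguments below are hypothetical)::
#
#     class SomeEvents(event.Events):
#
#         @_legacy_signature("0.9", ["conn", "cursor"])
#         def before_thing(self, conn, cursor, context):
#             "..."
#
# an optional ``converter`` callable may translate the new-style arguments
# back into the legacy positional form for old listeners.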
def _wrap_fn_for_legacy(dispatch_collection, fn, argspec):
for since, argnames, conv in dispatch_collection.legacy_signatures:
if argnames[-1] == "**kw":
has_kw = True
argnames = argnames[0:-1]
else:
has_kw = False
if len(argnames) == len(argspec.args) and has_kw is bool(
argspec.varkw
):
if conv:
assert not has_kw
def wrap_leg(*args):
return fn(*conv(*args))
else:
def wrap_leg(*args, **kw):
argdict = dict(zip(dispatch_collection.arg_names, args))
args = [argdict[name] for name in argnames]
if has_kw:
return fn(*args, **kw)
else:
return fn(*args)
return wrap_leg
else:
return fn
def _indent(text, indent):
return "\n".join(indent + line for line in text.split("\n"))
def _standard_listen_example(dispatch_collection, sample_target, fn):
example_kw_arg = _indent(
"\n".join(
"%(arg)s = kw['%(arg)s']" % {"arg": arg}
for arg in dispatch_collection.arg_names[0:2]
),
" ",
)
if dispatch_collection.legacy_signatures:
current_since = max(
since
for since, args, conv in dispatch_collection.legacy_signatures
)
else:
current_since = None
text = (
"from sqlalchemy import event\n\n"
"# standard decorator style%(current_since)s\n"
"@event.listens_for(%(sample_target)s, '%(event_name)s')\n"
"def receive_%(event_name)s("
"%(named_event_arguments)s%(has_kw_arguments)s):\n"
" \"listen for the '%(event_name)s' event\"\n"
"\n # ... (event handling logic) ...\n"
)
if len(dispatch_collection.arg_names) > 3:
text += (
"\n# named argument style (new in 0.9)\n"
"@event.listens_for("
"%(sample_target)s, '%(event_name)s', named=True)\n"
"def receive_%(event_name)s(**kw):\n"
" \"listen for the '%(event_name)s' event\"\n"
"%(example_kw_arg)s\n"
"\n # ... (event handling logic) ...\n"
)
text %= {
"current_since": " (arguments as of %s)" % current_since
if current_since
else "",
"event_name": fn.__name__,
"has_kw_arguments": ", **kw" if dispatch_collection.has_kw else "",
"named_event_arguments": ", ".join(dispatch_collection.arg_names),
"example_kw_arg": example_kw_arg,
"sample_target": sample_target,
}
return text
def _legacy_listen_examples(dispatch_collection, sample_target, fn):
text = ""
for since, args, conv in dispatch_collection.legacy_signatures:
text += (
"\n# DEPRECATED calling style (pre-%(since)s, "
"will be removed in a future release)\n"
"@event.listens_for(%(sample_target)s, '%(event_name)s')\n"
"def receive_%(event_name)s("
"%(named_event_arguments)s%(has_kw_arguments)s):\n"
" \"listen for the '%(event_name)s' event\"\n"
"\n # ... (event handling logic) ...\n"
% {
"since": since,
"event_name": fn.__name__,
"has_kw_arguments": " **kw"
if dispatch_collection.has_kw
else "",
"named_event_arguments": ", ".join(args),
"sample_target": sample_target,
}
)
return text
def _version_signature_changes(parent_dispatch_cls, dispatch_collection):
since, args, conv = dispatch_collection.legacy_signatures[0]
return (
"\n.. deprecated:: %(since)s\n"
" The :class:`.%(clsname)s.%(event_name)s` event now accepts the \n"
" arguments ``%(named_event_arguments)s%(has_kw_arguments)s``.\n"
" Support for listener functions which accept the previous \n"
' argument signature(s) listed above as "deprecated" will be \n'
" removed in a future release."
% {
"since": since,
"clsname": parent_dispatch_cls.__name__,
"event_name": dispatch_collection.name,
"named_event_arguments": ", ".join(dispatch_collection.arg_names),
"has_kw_arguments": ", **kw" if dispatch_collection.has_kw else "",
}
)
def _augment_fn_docs(dispatch_collection, parent_dispatch_cls, fn):
header = (
".. container:: event_signatures\n\n"
" Example argument forms::\n"
"\n"
)
sample_target = getattr(parent_dispatch_cls, "_target_class_doc", "obj")
text = header + _indent(
_standard_listen_example(dispatch_collection, sample_target, fn),
" " * 8,
)
if dispatch_collection.legacy_signatures:
text += _indent(
_legacy_listen_examples(dispatch_collection, sample_target, fn),
" " * 8,
)
text += _version_signature_changes(
parent_dispatch_cls, dispatch_collection
)
return util.inject_docstring_text(fn.__doc__, text, 1)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/event/registry.py
|
# event/registry.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides managed registration services on behalf of :func:`.listen`
arguments.
By "managed registration", we mean that event listening functions and
other objects can be added to various collections in such a way that their
membership in all those collections can be revoked at once, based on
an equivalent :class:`._EventKey`.
"""
from __future__ import absolute_import
import collections
import types
import weakref
from .. import exc
from .. import util
_key_to_collection = collections.defaultdict(dict)
"""
Given an original listen() argument, can locate all
listener collections and the listener fn contained
(target, identifier, fn) -> {
ref(listenercollection) -> ref(listener_fn)
ref(listenercollection) -> ref(listener_fn)
ref(listenercollection) -> ref(listener_fn)
}
"""
_collection_to_key = collections.defaultdict(dict)
"""
Given a _ListenerCollection or _ClsLevelListener, can locate
all the original listen() arguments and the listener fn contained
ref(listenercollection) -> {
ref(listener_fn) -> (target, identifier, fn),
ref(listener_fn) -> (target, identifier, fn),
ref(listener_fn) -> (target, identifier, fn),
}
"""
def _collection_gced(ref):
# defaultdict, so can't get a KeyError
if not _collection_to_key or ref not in _collection_to_key:
return
listener_to_key = _collection_to_key.pop(ref)
for key in listener_to_key.values():
if key in _key_to_collection:
# defaultdict, so can't get a KeyError
dispatch_reg = _key_to_collection[key]
dispatch_reg.pop(ref)
if not dispatch_reg:
_key_to_collection.pop(key)
def _stored_in_collection(event_key, owner):
key = event_key._key
dispatch_reg = _key_to_collection[key]
owner_ref = owner.ref
listen_ref = weakref.ref(event_key._listen_fn)
if owner_ref in dispatch_reg:
return False
dispatch_reg[owner_ref] = listen_ref
listener_to_key = _collection_to_key[owner_ref]
listener_to_key[listen_ref] = key
return True
def _removed_from_collection(event_key, owner):
key = event_key._key
dispatch_reg = _key_to_collection[key]
listen_ref = weakref.ref(event_key._listen_fn)
owner_ref = owner.ref
dispatch_reg.pop(owner_ref, None)
if not dispatch_reg:
del _key_to_collection[key]
if owner_ref in _collection_to_key:
listener_to_key = _collection_to_key[owner_ref]
listener_to_key.pop(listen_ref)
def _stored_in_collection_multi(newowner, oldowner, elements):
if not elements:
return
oldowner = oldowner.ref
newowner = newowner.ref
old_listener_to_key = _collection_to_key[oldowner]
new_listener_to_key = _collection_to_key[newowner]
for listen_fn in elements:
listen_ref = weakref.ref(listen_fn)
key = old_listener_to_key[listen_ref]
dispatch_reg = _key_to_collection[key]
if newowner in dispatch_reg:
assert dispatch_reg[newowner] == listen_ref
else:
dispatch_reg[newowner] = listen_ref
new_listener_to_key[listen_ref] = key
def _clear(owner, elements):
if not elements:
return
owner = owner.ref
listener_to_key = _collection_to_key[owner]
for listen_fn in elements:
listen_ref = weakref.ref(listen_fn)
key = listener_to_key[listen_ref]
dispatch_reg = _key_to_collection[key]
dispatch_reg.pop(owner, None)
if not dispatch_reg:
del _key_to_collection[key]
class _EventKey(object):
"""Represent :func:`.listen` arguments.
"""
__slots__ = (
"target",
"identifier",
"fn",
"fn_key",
"fn_wrap",
"dispatch_target",
)
def __init__(self, target, identifier, fn, dispatch_target, _fn_wrap=None):
self.target = target
self.identifier = identifier
self.fn = fn
if isinstance(fn, types.MethodType):
self.fn_key = id(fn.__func__), id(fn.__self__)
else:
self.fn_key = id(fn)
self.fn_wrap = _fn_wrap
self.dispatch_target = dispatch_target
@property
def _key(self):
return (id(self.target), self.identifier, self.fn_key)
def with_wrapper(self, fn_wrap):
if fn_wrap is self._listen_fn:
return self
else:
return _EventKey(
self.target,
self.identifier,
self.fn,
self.dispatch_target,
_fn_wrap=fn_wrap,
)
def with_dispatch_target(self, dispatch_target):
if dispatch_target is self.dispatch_target:
return self
else:
return _EventKey(
self.target,
self.identifier,
self.fn,
dispatch_target,
_fn_wrap=self.fn_wrap,
)
def listen(self, *args, **kw):
once = kw.pop("once", False)
once_unless_exception = kw.pop("_once_unless_exception", False)
named = kw.pop("named", False)
target, identifier, fn = (
self.dispatch_target,
self.identifier,
self._listen_fn,
)
dispatch_collection = getattr(target.dispatch, identifier)
adjusted_fn = dispatch_collection._adjust_fn_spec(fn, named)
self = self.with_wrapper(adjusted_fn)
stub_function = getattr(
self.dispatch_target.dispatch._events, self.identifier
)
if hasattr(stub_function, "_sa_warn"):
stub_function._sa_warn()
if once or once_unless_exception:
self.with_wrapper(
util.only_once(
self._listen_fn, retry_on_exception=once_unless_exception
)
).listen(*args, **kw)
else:
self.dispatch_target.dispatch._listen(self, *args, **kw)
def remove(self):
key = self._key
if key not in _key_to_collection:
raise exc.InvalidRequestError(
"No listeners found for event %s / %r / %s "
% (self.target, self.identifier, self.fn)
)
dispatch_reg = _key_to_collection.pop(key)
for collection_ref, listener_ref in dispatch_reg.items():
collection = collection_ref()
listener_fn = listener_ref()
if collection is not None and listener_fn is not None:
collection.remove(self.with_wrapper(listener_fn))
def contains(self):
"""Return True if this event key is registered to listen.
"""
return self._key in _key_to_collection
def base_listen(
self, propagate=False, insert=False, named=False, retval=None
):
target, identifier = self.dispatch_target, self.identifier
dispatch_collection = getattr(target.dispatch, identifier)
if insert:
dispatch_collection.for_modify(target.dispatch).insert(
self, propagate
)
else:
dispatch_collection.for_modify(target.dispatch).append(
self, propagate
)
@property
def _listen_fn(self):
return self.fn_wrap or self.fn
def append_to_list(self, owner, list_):
if _stored_in_collection(self, owner):
list_.append(self._listen_fn)
return True
else:
return False
def remove_from_list(self, owner, list_):
_removed_from_collection(self, owner)
list_.remove(self._listen_fn)
def prepend_to_list(self, owner, list_):
if _stored_in_collection(self, owner):
list_.appendleft(self._listen_fn)
return True
else:
return False
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/event/__init__.py
|
# event/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .api import CANCEL # noqa
from .api import contains # noqa
from .api import listen # noqa
from .api import listens_for # noqa
from .api import NO_RETVAL # noqa
from .api import remove # noqa
from .attr import RefCollection # noqa
from .base import dispatcher # noqa
from .base import Events # noqa
from .legacy import _legacy_signature # noqa
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/event/api.py
|
# event/api.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Public API functions for the event system.
"""
from __future__ import absolute_import
from .base import _registrars
from .registry import _EventKey
from .. import exc
from .. import util
CANCEL = util.symbol("CANCEL")
NO_RETVAL = util.symbol("NO_RETVAL")
def _event_key(target, identifier, fn):
for evt_cls in _registrars[identifier]:
tgt = evt_cls._accept_with(target)
if tgt is not None:
return _EventKey(target, identifier, fn, tgt)
else:
raise exc.InvalidRequestError(
"No such event '%s' for target '%s'" % (identifier, target)
)
def listen(target, identifier, fn, *args, **kw):
"""Register a listener function for the given target.
The :func:`.listen` function is part of the primary interface for the
SQLAlchemy event system, documented at :ref:`event_toplevel`.
e.g.::
from sqlalchemy import event
from sqlalchemy.schema import UniqueConstraint
def unique_constraint_name(const, table):
const.name = "uq_%s_%s" % (
table.name,
list(const.columns)[0].name
)
event.listen(
UniqueConstraint,
"after_parent_attach",
unique_constraint_name)
A given function can also be invoked for only the first invocation
of the event using the ``once`` argument::
def on_config():
do_config()
event.listen(Mapper, "before_configure", on_config, once=True)
.. versionadded:: 0.9.4 Added ``once=True`` to :func:`.event.listen`
and :func:`.event.listens_for`.
.. warning:: The ``once`` argument does not imply automatic de-registration
of the listener function after it has been invoked a first time; a
listener entry will remain associated with the target object.
Associating an arbitrarily high number of listeners without explicitly
removing them will cause memory to grow unbounded even if ``once=True``
is specified.
.. note::
The :func:`.listen` function cannot be called at the same time
that the target event is being run. This has implications
for thread safety, and also means an event cannot be added
from inside the listener function for itself. The list of
events to be run is present inside of a mutable collection
that can't be changed during iteration.
Event registration and removal is not intended to be a "high
velocity" operation; it is a configurational operation. For
systems that need to quickly associate and deassociate with
events at high scale, use a mutable structure that is handled
from inside of a single listener.
.. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now
used as the container for the list of events, which explicitly
disallows collection mutation while the collection is being
iterated.
.. seealso::
:func:`.listens_for`
:func:`.remove`
"""
_event_key(target, identifier, fn).listen(*args, **kw)
def listens_for(target, identifier, *args, **kw):
"""Decorate a function as a listener for the given target + identifier.
The :func:`.listens_for` decorator is part of the primary interface for the
SQLAlchemy event system, documented at :ref:`event_toplevel`.
e.g.::
from sqlalchemy import event
from sqlalchemy.schema import UniqueConstraint
@event.listens_for(UniqueConstraint, "after_parent_attach")
def unique_constraint_name(const, table):
const.name = "uq_%s_%s" % (
table.name,
list(const.columns)[0].name
)
A given function can also be invoked for only the first invocation
of the event using the ``once`` argument::
@event.listens_for(Mapper, "before_configure", once=True)
def on_config():
do_config()
.. versionadded:: 0.9.4 Added ``once=True`` to :func:`.event.listen`
and :func:`.event.listens_for`.
.. warning:: The ``once`` argument does not imply automatic de-registration
of the listener function after it has been invoked a first time; a
listener entry will remain associated with the target object.
Associating an arbitrarily high number of listeners without explicitly
removing them will cause memory to grow unbounded even if ``once=True``
is specified.
.. seealso::
:func:`.listen` - general description of event listening
"""
def decorate(fn):
listen(target, identifier, fn, *args, **kw)
return fn
return decorate
def remove(target, identifier, fn):
"""Remove an event listener.
The arguments here should match exactly those which were sent to
:func:`.listen`; all the event registration which proceeded as a result
of this call will be reverted by calling :func:`.remove` with the same
arguments.
e.g.::
# if a function was registered like this...
@event.listens_for(SomeMappedClass, "before_insert", propagate=True)
def my_listener_function(*arg):
pass
# ... it's removed like this
event.remove(SomeMappedClass, "before_insert", my_listener_function)
Above, the listener function associated with ``SomeMappedClass`` was also
propagated to subclasses of ``SomeMappedClass``; the :func:`.remove`
function will revert all of these operations.
.. versionadded:: 0.9.0
.. note::
The :func:`.remove` function cannot be called at the same time
that the target event is being run. This has implications
for thread safety, and also means an event cannot be removed
from inside the listener function for itself. The list of
events to be run is present inside of a mutable collection
that can't be changed during iteration.
Event registration and removal is not intended to be a "high
velocity" operation; it is a configurational operation. For
systems that need to quickly associate and deassociate with
events at high scale, use a mutable structure that is handled
from inside of a single listener.
.. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now
used as the container for the list of events, which explicitly
disallows collection mutation while the collection is being
iterated.
.. seealso::
:func:`.listen`
"""
_event_key(target, identifier, fn).remove()
def contains(target, identifier, fn):
"""Return True if the given target/ident/fn is set up to listen.
.. versionadded:: 0.9.0
"""
return _event_key(target, identifier, fn).contains()
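if __name__ == "__main__":
    # A self-contained sketch of the listen / contains / remove round trip
    # defined above, using pool events and sqlite3; illustrative only.
    import sqlite3

    from sqlalchemy.pool import NullPool

    pool = NullPool(lambda: sqlite3.connect(":memory:"))

    def on_connect(dbapi_conn, connection_record):
        print("new DBAPI connection:", dbapi_conn)

    listen(pool, "connect", on_connect)
    assert contains(pool, "connect", on_connect)

    conn = pool.connect()  # fires on_connect
    conn.close()

    remove(pool, "connect", on_connect)
    assert not contains(pool, "connect", on_connect)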
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/event/attr.py
|
# event/attr.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Attribute implementation for _Dispatch classes.
The various listener targets for a particular event class are represented
as attributes, which refer to collections of listeners to be fired off.
These collections can exist at the class level as well as at the instance
level. An event is fired off using code like this::
some_object.dispatch.first_connect(arg1, arg2)
Above, ``some_object.dispatch`` would be an instance of ``_Dispatch`` and
``first_connect`` is typically an instance of ``_ListenerCollection``
if event listeners are present, or ``_EmptyListener`` if none are present.
The attribute mechanics here spend effort trying to ensure listener functions
are available with a minimum of function call overhead, that unnecessary
objects aren't created (i.e. many empty per-instance listener collections),
as well as that everything is garbage collectable when owning references are
lost. Other features such as "propagation" of listener functions across
many ``_Dispatch`` instances, "joining" of multiple ``_Dispatch`` instances,
as well as support for subclass propagation (e.g. events assigned to
``Pool`` vs. ``QueuePool``) are all implemented here.
"""
from __future__ import absolute_import
from __future__ import with_statement
import collections
from itertools import chain
import weakref
from . import legacy
from . import registry
from .. import exc
from .. import util
from ..util import threading
class RefCollection(util.MemoizedSlots):
__slots__ = ("ref",)
def _memoized_attr_ref(self):
return weakref.ref(self, registry._collection_gced)
class _empty_collection(object):
def append(self, element):
pass
def extend(self, other):
pass
def remove(self, element):
pass
def __iter__(self):
return iter([])
def clear(self):
pass
class _ClsLevelDispatch(RefCollection):
"""Class-level events on :class:`._Dispatch` classes."""
__slots__ = (
"name",
"arg_names",
"has_kw",
"legacy_signatures",
"_clslevel",
"__weakref__",
)
def __init__(self, parent_dispatch_cls, fn):
self.name = fn.__name__
argspec = util.inspect_getfullargspec(fn)
self.arg_names = argspec.args[1:]
self.has_kw = bool(argspec.varkw)
self.legacy_signatures = list(
reversed(
sorted(
getattr(fn, "_legacy_signatures", []), key=lambda s: s[0]
)
)
)
fn.__doc__ = legacy._augment_fn_docs(self, parent_dispatch_cls, fn)
self._clslevel = weakref.WeakKeyDictionary()
def _adjust_fn_spec(self, fn, named):
if named:
fn = self._wrap_fn_for_kw(fn)
if self.legacy_signatures:
try:
argspec = util.get_callable_argspec(fn, no_self=True)
except TypeError:
pass
else:
fn = legacy._wrap_fn_for_legacy(self, fn, argspec)
return fn
def _wrap_fn_for_kw(self, fn):
def wrap_kw(*args, **kw):
argdict = dict(zip(self.arg_names, args))
argdict.update(kw)
return fn(**argdict)
return wrap_kw
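# A short sketch of the ``named=True`` adaptation performed above (target,
# event name and argument names are hypothetical)::
#
#     @event.listens_for(SomeTarget, "some_event", named=True)
#     def handler(**kw):
#         conn = kw["conn"]
#
# positional dispatch arguments are zipped into ``kw`` by ``wrap_kw()``.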
def insert(self, event_key, propagate):
target = event_key.dispatch_target
assert isinstance(
target, type
), "Class-level Event targets must be classes."
if not getattr(target, "_sa_propagate_class_events", True):
raise exc.InvalidRequestError(
"Can't assign an event directly to the %s class" % target
)
stack = [target]
while stack:
cls = stack.pop(0)
stack.extend(cls.__subclasses__())
if cls is not target and cls not in self._clslevel:
self.update_subclass(cls)
else:
if cls not in self._clslevel:
self._assign_cls_collection(cls)
self._clslevel[cls].appendleft(event_key._listen_fn)
registry._stored_in_collection(event_key, self)
def append(self, event_key, propagate):
target = event_key.dispatch_target
assert isinstance(
target, type
), "Class-level Event targets must be classes."
if not getattr(target, "_sa_propagate_class_events", True):
raise exc.InvalidRequestError(
"Can't assign an event directly to the %s class" % target
)
stack = [target]
while stack:
cls = stack.pop(0)
stack.extend(cls.__subclasses__())
if cls is not target and cls not in self._clslevel:
self.update_subclass(cls)
else:
if cls not in self._clslevel:
self._assign_cls_collection(cls)
self._clslevel[cls].append(event_key._listen_fn)
registry._stored_in_collection(event_key, self)
def _assign_cls_collection(self, target):
if getattr(target, "_sa_propagate_class_events", True):
self._clslevel[target] = collections.deque()
else:
self._clslevel[target] = _empty_collection()
def update_subclass(self, target):
if target not in self._clslevel:
self._assign_cls_collection(target)
clslevel = self._clslevel[target]
for cls in target.__mro__[1:]:
if cls in self._clslevel:
clslevel.extend(
[fn for fn in self._clslevel[cls] if fn not in clslevel]
)
def remove(self, event_key):
target = event_key.dispatch_target
stack = [target]
while stack:
cls = stack.pop(0)
stack.extend(cls.__subclasses__())
if cls in self._clslevel:
self._clslevel[cls].remove(event_key._listen_fn)
registry._removed_from_collection(event_key, self)
def clear(self):
"""Clear all class level listeners"""
to_clear = set()
for dispatcher in self._clslevel.values():
to_clear.update(dispatcher)
dispatcher.clear()
registry._clear(self, to_clear)
def for_modify(self, obj):
"""Return an event collection which can be modified.
For _ClsLevelDispatch at the class level of
a dispatcher, this returns self.
"""
return self
class _InstanceLevelDispatch(RefCollection):
__slots__ = ()
def _adjust_fn_spec(self, fn, named):
return self.parent._adjust_fn_spec(fn, named)
class _EmptyListener(_InstanceLevelDispatch):
"""Serves as a proxy interface to the events
served by a _ClsLevelDispatch, when there are no
instance-level events present.
Is replaced by _ListenerCollection when instance-level
events are added.
"""
propagate = frozenset()
listeners = ()
__slots__ = "parent", "parent_listeners", "name"
def __init__(self, parent, target_cls):
if target_cls not in parent._clslevel:
parent.update_subclass(target_cls)
self.parent = parent # _ClsLevelDispatch
self.parent_listeners = parent._clslevel[target_cls]
self.name = parent.name
def for_modify(self, obj):
"""Return an event collection which can be modified.
For _EmptyListener at the instance level of
a dispatcher, this generates a new
_ListenerCollection, applies it to the instance,
and returns it.
"""
result = _ListenerCollection(self.parent, obj._instance_cls)
if getattr(obj, self.name) is self:
setattr(obj, self.name, result)
else:
assert isinstance(getattr(obj, self.name), _JoinedListener)
return result
def _needs_modify(self, *args, **kw):
raise NotImplementedError("need to call for_modify()")
exec_once = (
exec_once_unless_exception
) = insert = append = remove = clear = _needs_modify
def __call__(self, *args, **kw):
"""Execute this event."""
for fn in self.parent_listeners:
fn(*args, **kw)
def __len__(self):
return len(self.parent_listeners)
def __iter__(self):
return iter(self.parent_listeners)
def __bool__(self):
return bool(self.parent_listeners)
__nonzero__ = __bool__
class _CompoundListener(_InstanceLevelDispatch):
__slots__ = "_exec_once_mutex", "_exec_once"
def _memoized_attr__exec_once_mutex(self):
return threading.Lock()
def _exec_once_impl(self, retry_on_exception, *args, **kw):
with self._exec_once_mutex:
if not self._exec_once:
try:
self(*args, **kw)
exception = False
except:
exception = True
raise
finally:
if not exception or not retry_on_exception:
self._exec_once = True
def exec_once(self, *args, **kw):
"""Execute this event, but only if it has not been
executed already for this collection."""
if not self._exec_once:
self._exec_once_impl(False, *args, **kw)
def exec_once_unless_exception(self, *args, **kw):
"""Execute this event, but only if it has not been
executed already for this collection, or was called
by a previous exec_once_unless_exception call and
raised an exception.
If exec_once was already called, then this method will never run
the callable regardless of whether it raised or not.
.. versionadded:: 1.3.8
"""
if not self._exec_once:
self._exec_once_impl(True, *args, **kw)
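# A short behavioral sketch; ``disp`` stands for any listener collection,
# e.g. ``pool.dispatch.first_connect``::
#
#     disp.exec_once(conn, rec)  # listeners run
#     disp.exec_once(conn, rec)  # no-op from here on
#
# with exec_once_unless_exception(), a raise inside the listeners leaves
# the collection eligible to run again on the next call.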
def __call__(self, *args, **kw):
"""Execute this event."""
for fn in self.parent_listeners:
fn(*args, **kw)
for fn in self.listeners:
fn(*args, **kw)
def __len__(self):
return len(self.parent_listeners) + len(self.listeners)
def __iter__(self):
return chain(self.parent_listeners, self.listeners)
def __bool__(self):
return bool(self.listeners or self.parent_listeners)
__nonzero__ = __bool__
class _ListenerCollection(_CompoundListener):
"""Instance-level attributes on instances of :class:`._Dispatch`.
Represents a collection of listeners.
As of 0.7.9, _ListenerCollection is only first
created via the _EmptyListener.for_modify() method.
"""
__slots__ = (
"parent_listeners",
"parent",
"name",
"listeners",
"propagate",
"__weakref__",
)
def __init__(self, parent, target_cls):
if target_cls not in parent._clslevel:
parent.update_subclass(target_cls)
self._exec_once = False
self.parent_listeners = parent._clslevel[target_cls]
self.parent = parent
self.name = parent.name
self.listeners = collections.deque()
self.propagate = set()
def for_modify(self, obj):
"""Return an event collection which can be modified.
For _ListenerCollection at the instance level of
a dispatcher, this returns self.
"""
return self
def _update(self, other, only_propagate=True):
"""Populate from the listeners in another :class:`_Dispatch`
object."""
existing_listeners = self.listeners
existing_listener_set = set(existing_listeners)
self.propagate.update(other.propagate)
other_listeners = [
l
for l in other.listeners
if l not in existing_listener_set
and not only_propagate
or l in self.propagate
]
existing_listeners.extend(other_listeners)
to_associate = other.propagate.union(other_listeners)
registry._stored_in_collection_multi(self, other, to_associate)
def insert(self, event_key, propagate):
if event_key.prepend_to_list(self, self.listeners):
if propagate:
self.propagate.add(event_key._listen_fn)
def append(self, event_key, propagate):
if event_key.append_to_list(self, self.listeners):
if propagate:
self.propagate.add(event_key._listen_fn)
def remove(self, event_key):
self.listeners.remove(event_key._listen_fn)
self.propagate.discard(event_key._listen_fn)
registry._removed_from_collection(event_key, self)
def clear(self):
registry._clear(self, self.listeners)
self.propagate.clear()
self.listeners.clear()
class _JoinedListener(_CompoundListener):
__slots__ = "parent", "name", "local", "parent_listeners"
def __init__(self, parent, name, local):
self._exec_once = False
self.parent = parent
self.name = name
self.local = local
self.parent_listeners = self.local
@property
def listeners(self):
return getattr(self.parent, self.name)
def _adjust_fn_spec(self, fn, named):
return self.local._adjust_fn_spec(fn, named)
def for_modify(self, obj):
self.local = self.parent_listeners = self.local.for_modify(obj)
return self
def insert(self, event_key, propagate):
self.local.insert(event_key, propagate)
def append(self, event_key, propagate):
self.local.append(event_key, propagate)
def remove(self, event_key):
self.local.remove(event_key)
def clear(self):
raise NotImplementedError()
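if __name__ == "__main__":
    # A self-contained sketch of class-level listener propagation as
    # implemented by _ClsLevelDispatch above: a listener attached to the
    # Pool base class also fires for QueuePool instances.  Illustrative only.
    import sqlite3

    from sqlalchemy import event
    from sqlalchemy.pool import Pool, QueuePool

    @event.listens_for(Pool, "checkout")
    def on_checkout(dbapi_conn, connection_record, connection_proxy):
        print("checkout:", dbapi_conn)

    pool = QueuePool(lambda: sqlite3.connect(":memory:"), pool_size=1)
    conn = pool.connect()  # the Pool-level listener fires here
    conn.close()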
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/event/base.py
|
# event/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base implementation classes.
The public-facing ``Events`` serves as the base class for an event interface;
its public attributes represent different kinds of events. These attributes
are mirrored onto a ``_Dispatch`` class, which serves as a container for
collections of listener functions. These collections are represented both
at the class level of a particular ``_Dispatch`` class as well as within
instances of ``_Dispatch``.
"""
from __future__ import absolute_import
import weakref
from .attr import _ClsLevelDispatch
from .attr import _EmptyListener
from .attr import _JoinedListener
from .. import util
_registrars = util.defaultdict(list)
def _is_event_name(name):
return not name.startswith("_") and name != "dispatch"
class _UnpickleDispatch(object):
"""Serializable callable that re-generates an instance of
:class:`_Dispatch` given a particular :class:`.Events` subclass.
"""
def __call__(self, _instance_cls):
for cls in _instance_cls.__mro__:
if "dispatch" in cls.__dict__:
return cls.__dict__["dispatch"].dispatch._for_class(
_instance_cls
)
else:
raise AttributeError("No class with a 'dispatch' member present.")
class _Dispatch(object):
"""Mirror the event listening definitions of an Events class with
listener collections.
Classes which define a "dispatch" member will return a
non-instantiated :class:`._Dispatch` subclass when the member
is accessed at the class level. When the "dispatch" member is
accessed at the instance level of its owner, an instance
of the :class:`._Dispatch` class is returned.
A :class:`._Dispatch` class is generated for each :class:`.Events`
class defined, by the :func:`._create_dispatcher_class` function.
The original :class:`.Events` classes remain untouched.
This decouples the construction of :class:`.Events` subclasses from
the implementation used by the event internals, and allows
inspecting tools like Sphinx to work in an unsurprising
way against the public API.
"""
# In one ORM edge case, an attribute is added to _Dispatch,
# so __dict__ is used in just that case and potentially others.
__slots__ = "_parent", "_instance_cls", "__dict__", "_empty_listeners"
_empty_listener_reg = weakref.WeakKeyDictionary()
def __init__(self, parent, instance_cls=None):
self._parent = parent
self._instance_cls = instance_cls
if instance_cls:
try:
self._empty_listeners = self._empty_listener_reg[instance_cls]
except KeyError:
self._empty_listeners = self._empty_listener_reg[
instance_cls
] = {
ls.name: _EmptyListener(ls, instance_cls)
for ls in parent._event_descriptors
}
else:
self._empty_listeners = {}
def __getattr__(self, name):
# Assign EmptyListeners as attributes on demand
# to reduce startup time for new dispatch objects.
try:
ls = self._empty_listeners[name]
except KeyError:
raise AttributeError(name)
else:
setattr(self, ls.name, ls)
return ls
@property
def _event_descriptors(self):
for k in self._event_names:
# Yield _ClsLevelDispatch related
# to relevant event name.
yield getattr(self, k)
@property
def _listen(self):
return self._events._listen
def _for_class(self, instance_cls):
return self.__class__(self, instance_cls)
def _for_instance(self, instance):
instance_cls = instance.__class__
return self._for_class(instance_cls)
def _join(self, other):
"""Create a 'join' of this :class:`._Dispatch` and another.
This new dispatcher will dispatch events to both
:class:`._Dispatch` objects.
"""
if "_joined_dispatch_cls" not in self.__class__.__dict__:
cls = type(
"Joined%s" % self.__class__.__name__,
(_JoinedDispatcher,),
{"__slots__": self._event_names},
)
self.__class__._joined_dispatch_cls = cls
return self._joined_dispatch_cls(self, other)
def __reduce__(self):
return _UnpickleDispatch(), (self._instance_cls,)
def _update(self, other, only_propagate=True):
"""Populate from the listeners in another :class:`_Dispatch`
object."""
for ls in other._event_descriptors:
if isinstance(ls, _EmptyListener):
continue
getattr(self, ls.name).for_modify(self)._update(
ls, only_propagate=only_propagate
)
def _clear(self):
for ls in self._event_descriptors:
ls.for_modify(self).clear()
class _EventMeta(type):
"""Intercept new Event subclasses and create
associated _Dispatch classes."""
def __init__(cls, classname, bases, dict_):
_create_dispatcher_class(cls, classname, bases, dict_)
type.__init__(cls, classname, bases, dict_)
def _create_dispatcher_class(cls, classname, bases, dict_):
"""Create a :class:`._Dispatch` class corresponding to an
:class:`.Events` class."""
# there's all kinds of ways to do this,
# i.e. make a Dispatch class that shares the '_listen' method
# of the Event class; this is the straight monkeypatch.
if hasattr(cls, "dispatch"):
dispatch_base = cls.dispatch.__class__
else:
dispatch_base = _Dispatch
event_names = [k for k in dict_ if _is_event_name(k)]
dispatch_cls = type(
"%sDispatch" % classname, (dispatch_base,), {"__slots__": event_names}
)
dispatch_cls._event_names = event_names
dispatch_inst = cls._set_dispatch(cls, dispatch_cls)
for k in dispatch_cls._event_names:
setattr(dispatch_inst, k, _ClsLevelDispatch(cls, dict_[k]))
_registrars[k].append(cls)
for super_ in dispatch_cls.__bases__:
if issubclass(super_, _Dispatch) and super_ is not _Dispatch:
for ls in super_._events.dispatch._event_descriptors:
setattr(dispatch_inst, ls.name, ls)
dispatch_cls._event_names.append(ls.name)
if getattr(cls, "_dispatch_target", None):
cls._dispatch_target.dispatch = dispatcher(cls)
def _remove_dispatcher(cls):
for k in cls.dispatch._event_names:
_registrars[k].remove(cls)
if not _registrars[k]:
del _registrars[k]
class Events(util.with_metaclass(_EventMeta, object)):
"""Define event listening functions for a particular target type."""
@staticmethod
def _set_dispatch(cls, dispatch_cls):
# This allows an Events subclass to define additional utility
# methods made available to the target via
# "self.dispatch._events.<utilitymethod>"
# @staticmethod to allow easy "super" calls while in a metaclass
# constructor.
cls.dispatch = dispatch_cls(None)
dispatch_cls._events = cls
return cls.dispatch
@classmethod
def _accept_with(cls, target):
def dispatch_is(*types):
return all(isinstance(target.dispatch, t) for t in types)
def dispatch_parent_is(t):
return isinstance(target.dispatch.parent, t)
# Mapper, ClassManager, Session override this to
# also accept classes, scoped_sessions, sessionmakers, etc.
if hasattr(target, "dispatch"):
if (
dispatch_is(cls.dispatch.__class__)
or dispatch_is(type, cls.dispatch.__class__)
or (
dispatch_is(_JoinedDispatcher)
and dispatch_parent_is(cls.dispatch.__class__)
)
):
return target
@classmethod
def _listen(cls, event_key, propagate=False, insert=False, named=False):
event_key.base_listen(propagate=propagate, insert=insert, named=named)
@classmethod
def _remove(cls, event_key):
event_key.remove()
@classmethod
def _clear(cls):
cls.dispatch._clear()
class _JoinedDispatcher(object):
"""Represent a connection between two _Dispatch objects."""
__slots__ = "local", "parent", "_instance_cls"
def __init__(self, local, parent):
self.local = local
self.parent = parent
self._instance_cls = self.local._instance_cls
def __getattr__(self, name):
# Assign _JoinedListeners as attributes on demand
# to reduce startup time for new dispatch objects.
ls = getattr(self.local, name)
jl = _JoinedListener(self.parent, ls.name, ls)
setattr(self, ls.name, jl)
return jl
@property
def _listen(self):
return self.parent._listen
@property
def _events(self):
return self.parent._events
class dispatcher(object):
"""Descriptor used by target classes to
deliver the _Dispatch class at the class level
and produce new _Dispatch instances for target
instances.
"""
def __init__(self, events):
self.dispatch = events.dispatch
self.events = events
def __get__(self, obj, cls):
if obj is None:
return self.dispatch
obj.__dict__["dispatch"] = disp = self.dispatch._for_instance(obj)
return disp
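if __name__ == "__main__":
    # A short sketch of the descriptor above: class-level access yields the
    # shared class-level _Dispatch, while instance access creates and
    # memoizes a per-instance _Dispatch.  Illustrative only.
    import sqlite3

    from sqlalchemy.pool import NullPool, Pool

    print(Pool.dispatch)  # class-level dispatch container

    pool = NullPool(lambda: sqlite3.connect(":memory:"))
    print(pool.dispatch)  # per-instance _Dispatch
    assert pool.dispatch is pool.dispatch  # memoized in pool.__dict__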
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/sql/functions.py
|
# sql/functions.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQL function API, factories, and built-in functions.
"""
from . import annotation
from . import operators
from . import schema
from . import sqltypes
from . import util as sqlutil
from .base import ColumnCollection
from .base import Executable
from .elements import _clone
from .elements import _literal_as_binds
from .elements import _type_from_args
from .elements import BinaryExpression
from .elements import BindParameter
from .elements import Cast
from .elements import ClauseList
from .elements import ColumnElement
from .elements import Extract
from .elements import FunctionFilter
from .elements import Grouping
from .elements import literal_column
from .elements import Over
from .elements import WithinGroup
from .selectable import Alias
from .selectable import FromClause
from .selectable import Select
from .visitors import VisitableType
from .. import util
_registry = util.defaultdict(dict)
_case_sensitive_registry = util.defaultdict(lambda: util.defaultdict(dict))
_CASE_SENSITIVE = util.symbol(
name="case_sensitive_function",
doc="Symbol to mark the functions that are switched into case-sensitive "
"mode.",
)
def register_function(identifier, fn, package="_default"):
"""Associate a callable with a particular func. name.
This is normally called by _GenericMeta, but is also
available by itself so that a non-Function construct
can be associated with the :data:`.func` accessor (i.e.
CAST, EXTRACT).
"""
reg = _registry[package]
case_sensitive_reg = _case_sensitive_registry[package]
raw_identifier = identifier
identifier = util.text_type(identifier).lower()
# Check if a function with the same lowercase identifier is registered.
if identifier in reg and reg[identifier] is not _CASE_SENSITIVE:
if raw_identifier in case_sensitive_reg[identifier]:
util.warn(
"The GenericFunction '{}' is already registered and "
"is going to be overriden.".format(identifier)
)
reg[identifier] = fn
else:
# If a function with the same lowercase identifier is registered,
# then these 2 functions are considered as case-sensitive.
# Note: This case should raise an error in a later release.
util.warn_deprecated(
"GenericFunction '{}' is already registered with "
"different letter case, so the previously registered function "
"'{}' is switched into case-sensitive mode. "
"GenericFunction objects will be fully case-insensitive in a "
"future release.".format(
raw_identifier,
list(case_sensitive_reg[identifier].keys())[0],
)
)
reg[identifier] = _CASE_SENSITIVE
# Check if a function with a different letter-case identifier is registered.
elif identifier in case_sensitive_reg:
# Note: This case will be removed in a later release.
if raw_identifier not in case_sensitive_reg[identifier]:
util.warn_deprecated(
"GenericFunction(s) '{}' are already registered with "
"different letter cases and might interact with '{}'. "
"GenericFunction objects will be fully case-insensitive in a "
"future release.".format(
sorted(case_sensitive_reg[identifier].keys()),
raw_identifier,
)
)
else:
util.warn(
"The GenericFunction '{}' is already registered and "
"is going to be overriden.".format(raw_identifier)
)
# Register by default
else:
reg[identifier] = fn
# Always register in case-sensitive registry
case_sensitive_reg[identifier][raw_identifier] = fn
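# A hedged usage sketch: registration normally happens implicitly when
# subclassing GenericFunction (defined later in this module); ``as_utc``
# below is a hypothetical function name::
#
#     from sqlalchemy import func
#     from sqlalchemy.sql.functions import GenericFunction
#     from sqlalchemy.sql import sqltypes
#
#     class as_utc(GenericFunction):
#         type = sqltypes.DateTime
#         package = "time"
#
#     print(func.time.as_utc())  # renders "as_utc()"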
class FunctionElement(Executable, ColumnElement, FromClause):
"""Base for SQL function-oriented constructs.
.. seealso::
:ref:`coretutorial_functions` - in the Core tutorial
:class:`.Function` - named SQL function.
:data:`.func` - namespace which produces registered or ad-hoc
:class:`.Function` instances.
:class:`.GenericFunction` - allows creation of registered function
types.
"""
packagenames = ()
_has_args = False
def __init__(self, *clauses, **kwargs):
r"""Construct a :class:`.FunctionElement`.
:param \*clauses: list of column expressions that form the arguments
of the SQL function call.
:param \**kwargs: additional kwargs are typically consumed by
subclasses.
.. seealso::
:data:`.func`
:class:`.Function`
"""
args = [_literal_as_binds(c, self.name) for c in clauses]
self._has_args = self._has_args or bool(args)
self.clause_expr = ClauseList(
operator=operators.comma_op, group_contents=True, *args
).self_group()
def _execute_on_connection(self, connection, multiparams, params):
return connection._execute_function(self, multiparams, params)
@property
def columns(self):
"""The set of columns exported by this :class:`.FunctionElement`.
Function objects currently have no result column names built in;
this method returns a single-element column collection with
an anonymously named column.
An interim approach to providing named columns for a function
as a FROM clause is to build a :func:`_expression.select` with the
desired columns::
from sqlalchemy.sql import column
stmt = select([column('x'), column('y')]).\
select_from(func.myfunction())
"""
return ColumnCollection(self.label(None))
@util.memoized_property
def clauses(self):
"""Return the underlying :class:`.ClauseList` which contains
the arguments for this :class:`.FunctionElement`.
"""
return self.clause_expr.element
def over(self, partition_by=None, order_by=None, rows=None, range_=None):
"""Produce an OVER clause against this function.
Used against aggregate or so-called "window" functions,
for database backends that support window functions.
The expression::
func.row_number().over(order_by='x')
is shorthand for::
from sqlalchemy import over
over(func.row_number(), order_by='x')
See :func:`_expression.over` for a full description.
"""
return Over(
self,
partition_by=partition_by,
order_by=order_by,
rows=rows,
range_=range_,
)
def within_group(self, *order_by):
"""Produce a WITHIN GROUP (ORDER BY expr) clause against this function.
Used against so-called "ordered set aggregate" and "hypothetical
set aggregate" functions, including :class:`.percentile_cont`,
:class:`.rank`, :class:`.dense_rank`, etc.
See :func:`_expression.within_group` for a full description.
.. versionadded:: 1.1
"""
return WithinGroup(self, *order_by)
def filter(self, *criterion):
"""Produce a FILTER clause against this function.
Used against aggregate and window functions,
for database backends that support the "FILTER" clause.
The expression::
func.count(1).filter(True)
is shorthand for::
from sqlalchemy import funcfilter
funcfilter(func.count(1), True)
.. versionadded:: 1.0.0
.. seealso::
:class:`.FunctionFilter`
:func:`.funcfilter`
"""
if not criterion:
return self
return FunctionFilter(self, *criterion)
def as_comparison(self, left_index, right_index):
"""Interpret this expression as a boolean comparison between two values.
A hypothetical SQL function "is_equal()" which compares to values
for equality would be written in the Core expression language as::
expr = func.is_equal("a", "b")
If "is_equal()" above is comparing "a" and "b" for equality, the
:meth:`.FunctionElement.as_comparison` method would be invoked as::
expr = func.is_equal("a", "b").as_comparison(1, 2)
Where above, the integer value "1" refers to the first argument of the
"is_equal()" function and the integer value "2" refers to the second.
This would create a :class:`.BinaryExpression` that is equivalent to::
BinaryExpression("a", "b", operator=op.eq)
However, at the SQL level it would still render as
"is_equal('a', 'b')".
The ORM, when it loads a related object or collection, needs to be able
to manipulate the "left" and "right" sides of the ON clause of a JOIN
expression. The purpose of this method is to provide a SQL function
construct that can also supply this information to the ORM, when used
with the :paramref:`_orm.relationship.primaryjoin` parameter.
The return
value is a containment object called :class:`.FunctionAsBinary`.
An ORM example is as follows::
class Venue(Base):
__tablename__ = 'venue'
id = Column(Integer, primary_key=True)
name = Column(String)
descendants = relationship(
"Venue",
primaryjoin=func.instr(
remote(foreign(name)), name + "/"
).as_comparison(1, 2) == 1,
viewonly=True,
order_by=name
)
Above, the "Venue" class can load descendant "Venue" objects by
determining if the name of the parent Venue is contained within the
start of the hypothetical descendant value's name, e.g. "parent1" would
match up to "parent1/child1", but not to "parent2/child1".
Possible use cases include the "materialized path" example given above,
as well as making use of special SQL functions such as geometric
functions to create join conditions.
:param left_index: the integer 1-based index of the function argument
that serves as the "left" side of the expression.
:param right_index: the integer 1-based index of the function argument
that serves as the "right" side of the expression.
.. versionadded:: 1.3
"""
return FunctionAsBinary(self, left_index, right_index)
@property
def _from_objects(self):
return self.clauses._from_objects
def get_children(self, **kwargs):
return (self.clause_expr,)
def _copy_internals(self, clone=_clone, **kw):
self.clause_expr = clone(self.clause_expr, **kw)
self._reset_exported()
FunctionElement.clauses._reset(self)
def within_group_type(self, within_group):
"""For types that define their return type as based on the criteria
within a WITHIN GROUP (ORDER BY) expression, called by the
:class:`.WithinGroup` construct.
Returns None by default, in which case the function's normal ``.type``
is used.
"""
return None
def alias(self, name=None, flat=False):
r"""Produce a :class:`_expression.Alias` construct against this
:class:`.FunctionElement`.
This construct wraps the function in a named alias which
is suitable for the FROM clause, in the style accepted for example
by PostgreSQL.
e.g.::
from sqlalchemy.sql import column
stmt = select([column('data_view')]).\
select_from(SomeTable).\
select_from(func.unnest(SomeTable.data).alias('data_view')
)
Would produce:
.. sourcecode:: sql
SELECT data_view
FROM sometable, unnest(sometable.data) AS data_view
.. versionadded:: 0.9.8 The :meth:`.FunctionElement.alias` method
is now supported. Previously, this method's behavior was
undefined and did not behave consistently across versions.
"""
return Alias._construct(self, name)
def select(self):
"""Produce a :func:`_expression.select` construct
against this :class:`.FunctionElement`.
This is shorthand for::
s = select([function_element])
"""
s = Select([self])
if self._execution_options:
s = s.execution_options(**self._execution_options)
return s
def scalar(self):
"""Execute this :class:`.FunctionElement` against an embedded
'bind' and return a scalar value.
This first calls :meth:`~.FunctionElement.select` to
produce a SELECT construct.
Note that :class:`.FunctionElement` can be passed to
the :meth:`.Connectable.scalar` method of :class:`_engine.Connection`
or :class:`_engine.Engine`.
"""
return self.select().execute().scalar()
def execute(self):
"""Execute this :class:`.FunctionElement` against an embedded
'bind'.
This first calls :meth:`~.FunctionElement.select` to
produce a SELECT construct.
Note that :class:`.FunctionElement` can be passed to
the :meth:`.Connectable.execute` method of :class:`_engine.Connection`
or :class:`_engine.Engine`.
"""
return self.select().execute()
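# [Illustrative sketch] ``scalar()`` and ``execute()`` build the SELECT
# around the function and run it against the embedded bind; this assumes
# the function was constructed with ``bind=``:
#
#     from sqlalchemy import create_engine, func
#     engine = create_engine("sqlite://")
#     value = func.abs(-4, bind=engine).scalar()  # emits SELECT abs(?); -> 4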
def _bind_param(self, operator, obj, type_=None):
return BindParameter(
None,
obj,
_compared_to_operator=operator,
_compared_to_type=self.type,
unique=True,
type_=type_,
)
def self_group(self, against=None):
# for the moment, we are parenthesizing all array-returning
# expressions against getitem. This may need to be made
# more portable if in the future we support other DBs
# besides postgresql.
if against is operators.getitem and isinstance(
self.type, sqltypes.ARRAY
):
return Grouping(self)
else:
return super(FunctionElement, self).self_group(against=against)
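# [Illustrative sketch] An array-returning function is parenthesized when
# indexed, so the rendered SQL parses unambiguously (PostgreSQL-style
# syntax; the function name is hypothetical):
#
#     from sqlalchemy import func
#     from sqlalchemy.types import ARRAY, Integer
#     expr = func.gen_numbers(type_=ARRAY(Integer))[3]
#     # roughly renders: (gen_numbers())[3]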
class FunctionAsBinary(BinaryExpression):
def __init__(self, fn, left_index, right_index):
left = fn.clauses.clauses[left_index - 1]
right = fn.clauses.clauses[right_index - 1]
self.sql_function = fn
self.left_index = left_index
self.right_index = right_index
super(FunctionAsBinary, self).__init__(
left,
right,
operators.function_as_comparison_op,
type_=sqltypes.BOOLEANTYPE,
)
@property
def left(self):
return self.sql_function.clauses.clauses[self.left_index - 1]
@left.setter
def left(self, value):
self.sql_function.clauses.clauses[self.left_index - 1] = value
@property
def right(self):
return self.sql_function.clauses.clauses[self.right_index - 1]
@right.setter
def right(self, value):
self.sql_function.clauses.clauses[self.right_index - 1] = value
def _copy_internals(self, **kw):
clone = kw.pop("clone")
self.sql_function = clone(self.sql_function, **kw)
super(FunctionAsBinary, self)._copy_internals(**kw)
class _FunctionGenerator(object):
"""Generate SQL function expressions.
:data:`.func` is a special object instance which generates SQL
functions based on name-based attributes, e.g.::
>>> print(func.count(1))
count(:param_1)
The returned object is an instance of :class:`.Function`, and is a
column-oriented SQL element like any other, and is used in that way::
>>> print(select([func.count(table.c.id)]))
SELECT count(sometable.id) FROM sometable
Any name can be given to :data:`.func`. If the function name is unknown to
SQLAlchemy, it will be rendered exactly as is. For common SQL functions
which SQLAlchemy is aware of, the name may be interpreted as a *generic
function* which will be compiled appropriately to the target database::
>>> print(func.current_timestamp())
CURRENT_TIMESTAMP
To call functions which are present in dot-separated packages,
specify them in the same manner::
>>> print(func.stats.yield_curve(5, 10))
stats.yield_curve(:yield_curve_1, :yield_curve_2)
SQLAlchemy can be made aware of the return type of functions to enable
type-specific lexical and result-based behavior. For example, to ensure
that a string-based function returns a Unicode value and is similarly
treated as a string in expressions, specify
:class:`~sqlalchemy.types.Unicode` as the type:
>>> print(func.my_string(u'hi', type_=Unicode) + ' ' +
... func.my_string(u'there', type_=Unicode))
my_string(:my_string_1) || :my_string_2 || my_string(:my_string_3)
The object returned by a :data:`.func` call is usually an instance of
:class:`.Function`.
This object meets the "column" interface, including comparison and labeling
functions. The object can also be passed to the :meth:`~.Connectable.execute`
method of a :class:`_engine.Connection` or :class:`_engine.Engine`,
where it will be
wrapped inside of a SELECT statement first::
print(connection.execute(func.current_timestamp()).scalar())
In a few exception cases, the :data:`.func` accessor
will redirect a name to a built-in expression such as :func:`.cast`
or :func:`.extract`, as these names have well-known meaning
but are not exactly the same as "functions" from a SQLAlchemy
perspective.
Functions which are interpreted as "generic" functions know how to
calculate their return type automatically. For a listing of known generic
functions, see :ref:`generic_functions`.
.. note::
The :data:`.func` construct has only limited support for calling
standalone "stored procedures", especially those with special
parameterization concerns.
See the section :ref:`stored_procedures` for details on how to use
the DBAPI-level ``callproc()`` method for fully traditional stored
procedures.
.. seealso::
:ref:`coretutorial_functions` - in the Core Tutorial
:class:`.Function`
"""
def __init__(self, **opts):
self.__names = []
self.opts = opts
def __getattr__(self, name):
# passthru __ attributes; fixes pydoc
if name.startswith("__"):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
elif name.endswith("_"):
name = name[0:-1]
f = _FunctionGenerator(**self.opts)
f.__names = list(self.__names) + [name]
return f
def __call__(self, *c, **kwargs):
o = self.opts.copy()
o.update(kwargs)
tokens = len(self.__names)
if tokens == 2:
package, fname = self.__names
elif tokens == 1:
package, fname = "_default", self.__names[0]
else:
package = None
if package is not None:
func = _registry[package].get(fname.lower())
if func is _CASE_SENSITIVE:
case_sensitive_reg = _case_sensitive_registry[package]
func = case_sensitive_reg.get(fname.lower()).get(fname)
if func is not None:
return func(*c, **o)
return Function(
self.__names[-1], packagenames=self.__names[0:-1], *c, **o
)
func = _FunctionGenerator()
func.__doc__ = _FunctionGenerator.__doc__
modifier = _FunctionGenerator(group=False)
class Function(FunctionElement):
r"""Describe a named SQL function.
The :class:`.Function` object is typically generated from the
:data:`.func` generation object.
:param \*clauses: list of column expressions that form the arguments
of the SQL function call.
:param type\_: optional :class:`.TypeEngine` datatype object that will be
used as the return value of the column expression generated by this
function call.
:param packagenames: a string which indicates package prefix names
to be prepended to the function name when the SQL is generated.
The :data:`.func` generator creates these when it is called using
dotted format, e.g.::
func.mypackage.some_function(col1, col2)
.. seealso::
:ref:`coretutorial_functions`
:data:`.func` - namespace which produces registered or ad-hoc
:class:`.Function` instances.
:class:`.GenericFunction` - allows creation of registered function
types.
"""
__visit_name__ = "function"
def __init__(self, name, *clauses, **kw):
"""Construct a :class:`.Function`.
The :data:`.func` construct is normally used to construct
new :class:`.Function` instances.
"""
self.packagenames = kw.pop("packagenames", None) or []
self.name = name
self._bind = kw.get("bind", None)
self.type = sqltypes.to_instance(kw.get("type_", None))
FunctionElement.__init__(self, *clauses, **kw)
def _bind_param(self, operator, obj, type_=None):
return BindParameter(
self.name,
obj,
_compared_to_operator=operator,
_compared_to_type=self.type,
type_=type_,
unique=True,
)
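# [Illustrative sketch] ``Function`` may also be constructed directly when
# the ``func`` namespace is inconvenient; ``packagenames`` prefixes the
# rendered name (names are hypothetical):
#
#     from sqlalchemy import Integer
#     from sqlalchemy.sql.functions import Function
#     fn = Function("yield_curve", 5, 10, packagenames=["stats"], type_=Integer)
#     # renders as: stats.yield_curve(:yield_curve_1, :yield_curve_2)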
class _GenericMeta(VisitableType):
def __init__(cls, clsname, bases, clsdict):
if annotation.Annotated not in cls.__mro__:
cls.name = name = clsdict.get("name", clsname)
cls.identifier = identifier = clsdict.get("identifier", name)
package = clsdict.pop("package", "_default")
# legacy
if "__return_type__" in clsdict:
cls.type = clsdict["__return_type__"]
# Check _register attribute status
cls._register = getattr(cls, "_register", True)
# Register the function if required
if cls._register:
register_function(identifier, cls, package)
else:
# Set _register to True to register child classes by default
cls._register = True
super(_GenericMeta, cls).__init__(clsname, bases, clsdict)
class GenericFunction(util.with_metaclass(_GenericMeta, Function)):
"""Define a 'generic' function.
A generic function is a pre-established :class:`.Function`
class that is instantiated automatically when called
by name from the :data:`.func` attribute. Note that
calling any name from :data:`.func` has the effect that
a new :class:`.Function` instance is created automatically,
given that name. The primary use case for defining
a :class:`.GenericFunction` class is so that a function
of a particular name may be given a fixed return type.
It can also include custom argument parsing schemes as well
as additional methods.
Subclasses of :class:`.GenericFunction` are automatically
registered under the name of the class. For
example, a user-defined function ``as_utc()`` would
be available immediately::
from sqlalchemy.sql.functions import GenericFunction
from sqlalchemy.types import DateTime
class as_utc(GenericFunction):
type = DateTime
print(select([func.as_utc()]))
User-defined generic functions can be organized into
packages by specifying the "package" attribute when defining
:class:`.GenericFunction`. Third party libraries
containing many functions may want to use this in order
to avoid name conflicts with other systems. For example,
if our ``as_utc()`` function were part of a package
"time"::
class as_utc(GenericFunction):
type = DateTime
package = "time"
The above function would be available from :data:`.func`
using the package name ``time``::
print(select([func.time.as_utc()]))
A final option is to allow the function to be accessed
from one name in :data:`.func` but to render as a different name.
The ``identifier`` attribute will override the name used to
access the function as loaded from :data:`.func`, but will retain
the usage of ``name`` as the rendered name::
class GeoBuffer(GenericFunction):
type = Geometry
package = "geo"
name = "ST_Buffer"
identifier = "buffer"
The above function will render as follows::
>>> print(func.geo.buffer())
ST_Buffer()
The name is rendered as is, and is not quoted unless it contains
special characters that require quoting. To force quoting
on or off for the name, use the :class:`.sqlalchemy.sql.quoted_name`
construct::
from sqlalchemy.sql import quoted_name
class GeoBuffer(GenericFunction):
type = Geometry
package = "geo"
name = quoted_name("ST_Buffer", True)
identifier = "buffer"
The above function will render as::
>>> print(func.geo.buffer())
"ST_Buffer"()
.. versionadded:: 1.3.13 The :class:`.quoted_name` construct is now
recognized for quoting when used with the "name" attribute of the
object, so that quoting can be forced on or off for the function
name.
"""
coerce_arguments = True
_register = False
def __init__(self, *args, **kwargs):
parsed_args = kwargs.pop("_parsed_args", None)
if parsed_args is None:
parsed_args = [_literal_as_binds(c, self.name) for c in args]
self._has_args = self._has_args or bool(parsed_args)
self.packagenames = []
self._bind = kwargs.get("bind", None)
self.clause_expr = ClauseList(
operator=operators.comma_op, group_contents=True, *parsed_args
).self_group()
self.type = sqltypes.to_instance(
kwargs.pop("type_", None) or getattr(self, "type", None)
)
register_function("cast", Cast)
register_function("extract", Extract)
class next_value(GenericFunction):
"""Represent the 'next value', given a :class:`.Sequence`
as its single argument.
Compiles into the appropriate function on each backend,
or will raise NotImplementedError if used on a backend
that does not provide support for sequences.
"""
type = sqltypes.Integer()
name = "next_value"
def __init__(self, seq, **kw):
assert isinstance(
seq, schema.Sequence
), "next_value() accepts a Sequence object as input."
self._bind = kw.get("bind", None)
self.sequence = seq
@property
def _from_objects(self):
return []
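# [Illustrative sketch] ``next_value`` pairs with a ``Sequence`` object; on
# PostgreSQL, for example, it compiles to nextval('order_id_seq'):
#
#     from sqlalchemy import Sequence, func
#     expr = func.next_value(Sequence("order_id_seq"))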
class AnsiFunction(GenericFunction):
def __init__(self, *args, **kwargs):
GenericFunction.__init__(self, *args, **kwargs)
class ReturnTypeFromArgs(GenericFunction):
"""Define a function whose return type is the same as its arguments."""
def __init__(self, *args, **kwargs):
args = [_literal_as_binds(c, self.name) for c in args]
kwargs.setdefault("type_", _type_from_args(args))
kwargs["_parsed_args"] = args
super(ReturnTypeFromArgs, self).__init__(*args, **kwargs)
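# [Illustrative sketch] The subclasses below (coalesce, max, min, sum) take
# their return type from the first typed argument:
#
#     from sqlalchemy import Column, Integer, func
#     x = Column("x", Integer)
#     func.coalesce(x, 0).type  # -> Integer()
#     func.max(x).type          # -> Integer()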
class coalesce(ReturnTypeFromArgs):
_has_args = True
class max(ReturnTypeFromArgs): # noqa
pass
class min(ReturnTypeFromArgs): # noqa
pass
class sum(ReturnTypeFromArgs): # noqa
pass
class now(GenericFunction): # noqa
type = sqltypes.DateTime
class concat(GenericFunction):
type = sqltypes.String
class char_length(GenericFunction):
type = sqltypes.Integer
def __init__(self, arg, **kwargs):
GenericFunction.__init__(self, arg, **kwargs)
class random(GenericFunction):
_has_args = True
class count(GenericFunction):
r"""The ANSI COUNT aggregate function. With no arguments,
emits COUNT \*.
E.g.::
from sqlalchemy import func
from sqlalchemy import select
from sqlalchemy import table, column
my_table = table('some_table', column('id'))
stmt = select([func.count()]).select_from(my_table)
Executing ``stmt`` would emit::
SELECT count(*) AS count_1
FROM some_table
"""
type = sqltypes.Integer
def __init__(self, expression=None, **kwargs):
if expression is None:
expression = literal_column("*")
super(count, self).__init__(expression, **kwargs)
class current_date(AnsiFunction):
type = sqltypes.Date
class current_time(AnsiFunction):
type = sqltypes.Time
class current_timestamp(AnsiFunction):
type = sqltypes.DateTime
class current_user(AnsiFunction):
type = sqltypes.String
class localtime(AnsiFunction):
type = sqltypes.DateTime
class localtimestamp(AnsiFunction):
type = sqltypes.DateTime
class session_user(AnsiFunction):
type = sqltypes.String
class sysdate(AnsiFunction):
type = sqltypes.DateTime
class user(AnsiFunction):
type = sqltypes.String
class array_agg(GenericFunction):
"""support for the ARRAY_AGG function.
The ``func.array_agg(expr)`` construct returns an expression of
type :class:`_types.ARRAY`.
e.g.::
stmt = select([func.array_agg(table.c.values)[2:5]])
.. versionadded:: 1.1
.. seealso::
:func:`_postgresql.array_agg` - PostgreSQL-specific version that
returns :class:`_postgresql.ARRAY`, which has PG-specific operators
added.
"""
type = sqltypes.ARRAY
def __init__(self, *args, **kwargs):
args = [_literal_as_binds(c) for c in args]
default_array_type = kwargs.pop("_default_array_type", sqltypes.ARRAY)
if "type_" not in kwargs:
type_from_args = _type_from_args(args)
if isinstance(type_from_args, sqltypes.ARRAY):
kwargs["type_"] = type_from_args
else:
kwargs["type_"] = default_array_type(type_from_args)
kwargs["_parsed_args"] = args
super(array_agg, self).__init__(*args, **kwargs)
class OrderedSetAgg(GenericFunction):
"""Define a function where the return type is based on the sort
expression type as defined by the expression passed to the
:meth:`.FunctionElement.within_group` method."""
array_for_multi_clause = False
def within_group_type(self, within_group):
func_clauses = self.clause_expr.element
order_by = sqlutil.unwrap_order_by(within_group.order_by)
if self.array_for_multi_clause and len(func_clauses.clauses) > 1:
return sqltypes.ARRAY(order_by[0].type)
else:
return order_by[0].type
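# [Illustrative sketch] Ordered-set aggregates are combined with
# ``FunctionElement.within_group``; the return type is derived from the
# ORDER BY expression (the column is hypothetical):
#
#     from sqlalchemy import Column, Numeric, func
#     price = Column("price", Numeric)
#     expr = func.percentile_cont(0.25).within_group(price)
#     # renders: percentile_cont(:percentile_cont_1) WITHIN GROUP (ORDER BY price)
#     # expr.type -> Numeric()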
class mode(OrderedSetAgg):
"""implement the ``mode`` ordered-set aggregate function.
This function must be used with the :meth:`.FunctionElement.within_group`
modifier to supply a sort expression to operate upon.
The return type of this function is the same as the sort expression.
.. versionadded:: 1.1
"""
class percentile_cont(OrderedSetAgg):
"""implement the ``percentile_cont`` ordered-set aggregate function.
This function must be used with the :meth:`.FunctionElement.within_group`
modifier to supply a sort expression to operate upon.
The return type of this function is the same as the sort expression,
or if the arguments are an array, an :class:`_types.ARRAY` of the sort
expression's type.
.. versionadded:: 1.1
"""
array_for_multi_clause = True
class percentile_disc(OrderedSetAgg):
"""implement the ``percentile_disc`` ordered-set aggregate function.
This function must be used with the :meth:`.FunctionElement.within_group`
modifier to supply a sort expression to operate upon.
The return type of this function is the same as the sort expression,
or if the arguments are an array, an :class:`_types.ARRAY` of the sort
expression's type.
.. versionadded:: 1.1
"""
array_for_multi_clause = True
class rank(GenericFunction):
"""Implement the ``rank`` hypothetical-set aggregate function.
This function must be used with the :meth:`.FunctionElement.within_group`
modifier to supply a sort expression to operate upon.
The return type of this function is :class:`.Integer`.
.. versionadded:: 1.1
"""
type = sqltypes.Integer()
class dense_rank(GenericFunction):
"""Implement the ``dense_rank`` hypothetical-set aggregate function.
This function must be used with the :meth:`.FunctionElement.within_group`
modifier to supply a sort expression to operate upon.
The return type of this function is :class:`.Integer`.
.. versionadded:: 1.1
"""
type = sqltypes.Integer()
class percent_rank(GenericFunction):
"""Implement the ``percent_rank`` hypothetical-set aggregate function.
This function must be used with the :meth:`.FunctionElement.within_group`
modifier to supply a sort expression to operate upon.
The return type of this function is :class:`.Numeric`.
.. versionadded:: 1.1
"""
type = sqltypes.Numeric()
class cume_dist(GenericFunction):
"""Implement the ``cume_dist`` hypothetical-set aggregate function.
This function must be used with the :meth:`.FunctionElement.within_group`
modifier to supply a sort expression to operate upon.
The return type of this function is :class:`.Numeric`.
.. versionadded:: 1.1
"""
type = sqltypes.Numeric()
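# [Illustrative sketch] The hypothetical-set aggregates above follow the
# same WITHIN GROUP pattern, e.g. the rank a value of 500 would occupy
# within ordered salaries (``salary_column`` is a hypothetical Column
# named "salary"):
#
#     expr = func.rank(500).within_group(salary_column.desc())
#     # roughly renders: rank(:rank_1) WITHIN GROUP (ORDER BY salary DESC)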
class cube(GenericFunction):
r"""Implement the ``CUBE`` grouping operation.
This function is used as part of the GROUP BY of a statement,
e.g. :meth:`_expression.Select.group_by`::
stmt = select(
[func.sum(table.c.value), table.c.col_1, table.c.col_2]
).group_by(func.cube(table.c.col_1, table.c.col_2))
.. versionadded:: 1.2
"""
_has_args = True
class rollup(GenericFunction):
r"""Implement the ``ROLLUP`` grouping operation.
This function is used as part of the GROUP BY of a statement,
e.g. :meth:`_expression.Select.group_by`::
stmt = select(
[func.sum(table.c.value), table.c.col_1, table.c.col_2]
).group_by(func.rollup(table.c.col_1, table.c.col_2))
.. versionadded:: 1.2
"""
_has_args = True
class grouping_sets(GenericFunction):
r"""Implement the ``GROUPING SETS`` grouping operation.
This function is used as part of the GROUP BY of a statement,
e.g. :meth:`_expression.Select.group_by`::
stmt = select(
[func.sum(table.c.value), table.c.col_1, table.c.col_2]
).group_by(func.grouping_sets(table.c.col_1, table.c.col_2))
In order to group by multiple sets, use the :func:`.tuple_` construct::
from sqlalchemy import tuple_
stmt = select(
[
func.sum(table.c.value),
table.c.col_1, table.c.col_2,
table.c.col_3]
).group_by(
func.grouping_sets(
tuple_(table.c.col_1, table.c.col_2),
tuple_(table.c.value, table.c.col_3),
)
)
.. versionadded:: 1.2
"""
_has_args = True
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/sql/annotation.py
# sql/annotation.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The :class:`.Annotated` class and related routines; creates hash-equivalent
copies of SQL constructs which contain context-specific markers and
associations.
"""
from . import operators
from .. import util
class Annotated(object):
"""clones a ClauseElement and applies an 'annotations' dictionary.
Unlike regular clones, this clone also mimics __hash__() and
__cmp__() of the original element so that it takes its place
in hashed collections.
A reference to the original element is maintained, for the important
reason of keeping its hash value current. When GC'ed, the
hash value may be reused, causing conflicts.
.. note:: The rationale for Annotated producing a brand new class,
rather than placing the functionality directly within ClauseElement,
is **performance**. The __hash__() method is absent on plain
ClauseElement which leads to significantly reduced function call
overhead, as the use of sets and dictionaries against ClauseElement
objects is prevalent, but most are not "annotated".
"""
def __new__(cls, *args):
if not args:
# clone constructor
return object.__new__(cls)
else:
element, values = args
# pull appropriate subclass from registry of annotated
# classes
try:
cls = annotated_classes[element.__class__]
except KeyError:
cls = _new_annotation_type(element.__class__, cls)
return object.__new__(cls)
def __init__(self, element, values):
self.__dict__ = element.__dict__.copy()
self.__element = element
self._annotations = values
self._hash = hash(element)
def _annotate(self, values):
_values = self._annotations.copy()
_values.update(values)
return self._with_annotations(_values)
def _with_annotations(self, values):
clone = self.__class__.__new__(self.__class__)
clone.__dict__ = self.__dict__.copy()
clone._annotations = values
return clone
def _deannotate(self, values=None, clone=True):
if values is None:
return self.__element
else:
_values = self._annotations.copy()
for v in values:
_values.pop(v, None)
return self._with_annotations(_values)
def _compiler_dispatch(self, visitor, **kw):
return self.__element.__class__._compiler_dispatch(self, visitor, **kw)
@property
def _constructor(self):
return self.__element._constructor
def _clone(self):
clone = self.__element._clone()
if clone is self.__element:
# detect immutable, don't change anything
return self
else:
# update the clone with any changes that have occurred
# to this object's __dict__.
clone.__dict__.update(self.__dict__)
return self.__class__(clone, self._annotations)
def __reduce__(self):
return self.__class__, (self.__element, self._annotations)
def __hash__(self):
return self._hash
def __eq__(self, other):
if isinstance(self.__element, operators.ColumnOperators):
return self.__element.__class__.__eq__(self, other)
else:
return hash(other) == hash(self)
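# [Illustrative sketch] An ``Annotated`` clone hashes and compares like the
# element it wraps, so it can stand in for it inside sets and dictionaries
# (``some_column`` is hypothetical):
#
#     annotated = some_column._annotate({"role": "pk"})
#     assert hash(annotated) == hash(some_column)
#     assert annotated._deannotate() is some_column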
# hard-generate Annotated subclasses. this technique
# is used instead of on-the-fly types (i.e. type.__new__())
# so that the resulting objects are pickleable.
annotated_classes = {}
def _deep_annotate(element, annotations, exclude=None):
"""Deep copy the given ClauseElement, annotating each element
with the given annotations dictionary.
Elements within the exclude collection will be cloned but not annotated.
"""
def clone(elem):
if (
exclude
and hasattr(elem, "proxy_set")
and elem.proxy_set.intersection(exclude)
):
newelem = elem._clone()
elif annotations != elem._annotations:
newelem = elem._annotate(annotations)
else:
newelem = elem
newelem._copy_internals(clone=clone)
return newelem
if element is not None:
element = clone(element)
clone = None # remove gc cycles
return element
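# [Illustrative sketch] ``_deep_annotate`` clones an entire expression
# tree, tagging each element (``expr`` is built from hypothetical columns):
#
#     expr = table.c.a == table.c.b
#     tagged = _deep_annotate(expr, {"origin": "join"})
#     # tagged.left._annotations -> {"origin": "join"}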
def _deep_deannotate(element, values=None):
"""Deep copy the given element, removing annotations."""
cloned = util.column_dict()
def clone(elem):
# if a values dict is given,
# the elem must be cloned each time it appears,
# as there may be different annotations in source
# elements that are remaining. if totally
# removing all annotations, can assume the same
# slate...
if values or elem not in cloned:
newelem = elem._deannotate(values=values, clone=True)
newelem._copy_internals(clone=clone)
if not values:
cloned[elem] = newelem
return newelem
else:
return cloned[elem]
if element is not None:
element = clone(element)
clone = None # remove gc cycles
return element
def _shallow_annotate(element, annotations):
"""Annotate the given ClauseElement and copy its internals so that
internal objects refer to the new annotated object.
Basically used to apply a "dont traverse" annotation to a
selectable, without digging throughout the whole
structure wasting time.
"""
element = element._annotate(annotations)
element._copy_internals()
return element
def _new_annotation_type(cls, base_cls):
if issubclass(cls, Annotated):
return cls
elif cls in annotated_classes:
return annotated_classes[cls]
for super_ in cls.__mro__:
# check if an Annotated subclass more specific than
# the given base_cls is already registered, such
# as AnnotatedColumnElement.
if super_ in annotated_classes:
base_cls = annotated_classes[super_]
break
annotated_classes[cls] = anno_cls = type(
"Annotated%s" % cls.__name__, (base_cls, cls), {}
)
globals()["Annotated%s" % cls.__name__] = anno_cls
return anno_cls
def _prepare_annotations(target_hierarchy, base_cls):
stack = [target_hierarchy]
while stack:
cls = stack.pop()
stack.extend(cls.__subclasses__())
_new_annotation_type(cls, base_cls)
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/sql/ddl.py
# sql/ddl.py
# Copyright (C) 2009-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Provides the hierarchy of DDL-defining schema items as well as routines
to invoke them for a create/drop call.
"""
from .base import _bind_or_error
from .base import _generative
from .base import Executable
from .base import SchemaVisitor
from .elements import ClauseElement
from .. import event
from .. import exc
from .. import util
from ..util import topological
class _DDLCompiles(ClauseElement):
def _compiler(self, dialect, **kw):
"""Return a compiler appropriate for this ClauseElement, given a
Dialect."""
return dialect.ddl_compiler(dialect, self, **kw)
class DDLElement(Executable, _DDLCompiles):
"""Base class for DDL expression constructs.
This class is the base for the general purpose :class:`.DDL` class,
as well as the various create/drop clause constructs such as
:class:`.CreateTable`, :class:`.DropTable`, :class:`.AddConstraint`,
etc.
:class:`.DDLElement` integrates closely with SQLAlchemy events,
introduced in :ref:`event_toplevel`. An instance of one is
itself an event receiving callable::
event.listen(
users,
'after_create',
AddConstraint(constraint).execute_if(dialect='postgresql')
)
.. seealso::
:class:`.DDL`
:class:`.DDLEvents`
:ref:`event_toplevel`
:ref:`schema_ddl_sequences`
"""
_execution_options = Executable._execution_options.union(
{"autocommit": True}
)
target = None
on = None
dialect = None
callable_ = None
def _execute_on_connection(self, connection, multiparams, params):
return connection._execute_ddl(self, multiparams, params)
def execute(self, bind=None, target=None):
"""Execute this DDL immediately.
Executes the DDL statement in isolation using the supplied
:class:`.Connectable`, or the :class:`.Connectable` assigned to the
``.bind`` property if none is supplied. If the DDL has conditional
``on`` criteria, it will be invoked with None as the event.
:param bind:
Optional, an ``Engine`` or ``Connection``. If not supplied, a valid
:class:`.Connectable` must be present in the
``.bind`` property.
:param target:
Optional, defaults to None. The target SchemaItem for the
execute call. Will be passed to the ``on`` callable if any,
and may also provide string expansion data for the
statement. See ``execute_at`` for more information.
"""
if bind is None:
bind = _bind_or_error(self)
if self._should_execute(target, bind):
return bind.execute(self.against(target))
else:
bind.engine.logger.info("DDL execution skipped, criteria not met.")
@util.deprecated(
"0.7",
"The :meth:`.DDLElement.execute_at` method is deprecated and will "
"be removed in a future release. Please use the :class:`.DDLEvents` "
"listener interface in conjunction with the "
":meth:`.DDLElement.execute_if` method.",
)
def execute_at(self, event_name, target):
"""Link execution of this DDL to the DDL lifecycle of a SchemaItem.
Links this ``DDLElement`` to a ``Table`` or ``MetaData`` instance,
executing it when that schema item is created or dropped. The DDL
statement will be executed using the same Connection and transactional
context as the Table create/drop itself. The ``.bind`` property of
this statement is ignored.
:param event:
One of the events defined in the schema item's ``.ddl_events``;
e.g. 'before-create', 'after-create', 'before-drop' or 'after-drop'
:param target:
The Table or MetaData instance with which this DDLElement will
be associated.
A DDLElement instance can be linked to any number of schema items.
``execute_at`` builds on the ``append_ddl_listener`` interface of
:class:`_schema.MetaData` and :class:`_schema.Table` objects.
Caveat: Creating or dropping a Table in isolation will also trigger
any DDL set to ``execute_at`` that Table's MetaData. This may change
in a future release.
"""
def call_event(target, connection, **kw):
if self._should_execute_deprecated(
event_name, target, connection, **kw
):
return connection.execute(self.against(target))
event.listen(target, "" + event_name.replace("-", "_"), call_event)
@_generative
def against(self, target):
"""Return a copy of this DDL against a specific schema item."""
self.target = target
@_generative
def execute_if(self, dialect=None, callable_=None, state=None):
r"""Return a callable that will execute this
DDLElement conditionally.
Used to provide a wrapper for event listening::
event.listen(
metadata,
'before_create',
DDL("my_ddl").execute_if(dialect='postgresql')
)
:param dialect: May be a string, tuple or a callable
predicate. If a string, it will be compared to the name of the
executing database dialect::
DDL('something').execute_if(dialect='postgresql')
If a tuple, specifies multiple dialect names::
DDL('something').execute_if(dialect=('postgresql', 'mysql'))
:param callable\_: A callable, which will be invoked with
four positional arguments as well as optional keyword
arguments:
:ddl:
This DDL element.
:target:
The :class:`_schema.Table` or :class:`_schema.MetaData`
object which is the
target of this event. May be None if the DDL is executed
explicitly.
:bind:
The :class:`_engine.Connection` being used for DDL execution
:tables:
Optional keyword argument - a list of Table objects which are to
be created/dropped within a MetaData.create_all() or drop_all()
method call.
:state:
Optional keyword argument - will be the ``state`` argument
passed to this function.
:checkfirst:
Keyword argument, will be True if the 'checkfirst' flag was
set during the call to ``create()``, ``create_all()``,
``drop()``, ``drop_all()``.
If the callable returns a true value, the DDL statement will be
executed.
:param state: any value which will be passed to the callable\_
as the ``state`` keyword argument.
.. seealso::
:class:`.DDLEvents`
:ref:`event_toplevel`
"""
self.dialect = dialect
self.callable_ = callable_
self.state = state
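# [Illustrative sketch] ``callable_`` permits arbitrary predicates; here
# the DDL runs only when a (hypothetical) flag is passed through ``state``
# (``metadata`` is also hypothetical):
#
#     def should_run(ddl, target, bind, state=None, **kw):
#         return bool(state and state.get("install_triggers"))
#
#     event.listen(
#         metadata,
#         "after_create",
#         DDL("CREATE TRIGGER ...").execute_if(
#             callable_=should_run, state={"install_triggers": True}
#         ),
#     )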
def _should_execute(self, target, bind, **kw):
if self.on is not None and not self._should_execute_deprecated(
None, target, bind, **kw
):
return False
if isinstance(self.dialect, util.string_types):
if self.dialect != bind.engine.name:
return False
elif isinstance(self.dialect, (tuple, list, set)):
if bind.engine.name not in self.dialect:
return False
if self.callable_ is not None and not self.callable_(
self, target, bind, state=self.state, **kw
):
return False
return True
def _should_execute_deprecated(self, event, target, bind, **kw):
if self.on is None:
return True
elif isinstance(self.on, util.string_types):
return self.on == bind.engine.name
elif isinstance(self.on, (tuple, list, set)):
return bind.engine.name in self.on
else:
return self.on(self, event, target, bind, **kw)
def __call__(self, target, bind, **kw):
"""Execute the DDL as a ddl_listener."""
if self._should_execute(target, bind, **kw):
return bind.execute(self.against(target))
def _check_ddl_on(self, on):
if on is not None and (
not isinstance(on, util.string_types + (tuple, list, set))
and not util.callable(on)
):
raise exc.ArgumentError(
"Expected the name of a database dialect, a tuple "
"of names, or a callable for "
"'on' criteria, got type '%s'." % type(on).__name__
)
def bind(self):
if self._bind:
return self._bind
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
def _generate(self):
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
return s
class DDL(DDLElement):
"""A literal DDL statement.
Specifies literal SQL DDL to be executed by the database. DDL objects
function as DDL event listeners, and can be subscribed to those events
listed in :class:`.DDLEvents`, using either :class:`_schema.Table` or
:class:`_schema.MetaData` objects as targets.
Basic templating support allows
a single DDL instance to handle repetitive tasks for multiple tables.
Examples::
from sqlalchemy import event, DDL
tbl = Table('users', metadata, Column('uid', Integer))
event.listen(tbl, 'before_create', DDL('DROP TRIGGER users_trigger'))
spow = DDL('ALTER TABLE %(table)s SET secretpowers TRUE')
event.listen(tbl, 'after_create', spow.execute_if(dialect='somedb'))
drop_spow = DDL('ALTER TABLE users SET secretpowers FALSE')
connection.execute(drop_spow)
When operating on Table events, the following ``statement``
string substitutions are available::
%(table)s - the Table name, with any required quoting applied
%(schema)s - the schema name, with any required quoting applied
%(fullname)s - the Table name including schema, quoted if needed
The DDL's "context", if any, will be combined with the standard
substitutions noted above. Keys present in the context will override
the standard substitutions.
"""
__visit_name__ = "ddl"
@util.deprecated_params(
on=(
"0.7",
"The :paramref:`.DDL.on` parameter is deprecated and will be "
"removed in a future release. Please refer to "
":meth:`.DDLElement.execute_if`.",
)
)
def __init__(self, statement, on=None, context=None, bind=None):
"""Create a DDL statement.
:param statement:
A string or unicode string to be executed. Statements will be
processed with Python's string formatting operator. See the
``context`` argument and the ``execute_at`` method.
A literal '%' in a statement must be escaped as '%%'.
SQL bind parameters are not available in DDL statements.
:param on:
Optional filtering criteria. May be a string, tuple or a callable
predicate. If a string, it will be compared to the name of the
executing database dialect::
DDL('something', on='postgresql')
If a tuple, specifies multiple dialect names::
DDL('something', on=('postgresql', 'mysql'))
If a callable, it will be invoked with four positional arguments
as well as optional keyword arguments:
:ddl:
This DDL element.
:event:
The name of the event that has triggered this DDL, such as
'after-create' Will be None if the DDL is executed explicitly.
:target:
The ``Table`` or ``MetaData`` object which is the target of
this event. May be None if the DDL is executed explicitly.
:connection:
The ``Connection`` being used for DDL execution
:tables:
Optional keyword argument - a list of Table objects which are to
be created/dropped within a MetaData.create_all() or drop_all()
method call.
If the callable returns a true value, the DDL statement will be
executed.
:param context:
Optional dictionary, defaults to None. These values will be
available for use in string substitutions on the DDL statement.
:param bind:
Optional. A :class:`.Connectable`, used by
default when ``execute()`` is invoked without a bind argument.
.. seealso::
:class:`.DDLEvents`
:ref:`event_toplevel`
"""
if not isinstance(statement, util.string_types):
raise exc.ArgumentError(
"Expected a string or unicode SQL statement, got '%r'"
% statement
)
self.statement = statement
self.context = context or {}
self._check_ddl_on(on)
self.on = on
self._bind = bind
def __repr__(self):
return "<%s@%s; %s>" % (
type(self).__name__,
id(self),
", ".join(
[repr(self.statement)]
+ [
"%s=%r" % (key, getattr(self, key))
for key in ("on", "context")
if getattr(self, key)
]
),
)
class _CreateDropBase(DDLElement):
"""Base class for DDL constructs that represent CREATE and DROP or
equivalents.
The common theme of _CreateDropBase is a single
``element`` attribute which refers to the element
to be created or dropped.
"""
def __init__(self, element, on=None, bind=None):
self.element = element
self._check_ddl_on(on)
self.on = on
self.bind = bind
def _create_rule_disable(self, compiler):
"""Allow disable of _create_rule using a callable.
Pass to _create_rule using
util.portable_instancemethod(self._create_rule_disable)
to retain serializability.
"""
return False
class CreateSchema(_CreateDropBase):
"""Represent a CREATE SCHEMA statement.
The argument here is the string name of the schema.
"""
__visit_name__ = "create_schema"
def __init__(self, name, quote=None, **kw):
"""Create a new :class:`.CreateSchema` construct."""
self.quote = quote
super(CreateSchema, self).__init__(name, **kw)
class DropSchema(_CreateDropBase):
"""Represent a DROP SCHEMA statement.
The argument here is the string name of the schema.
"""
__visit_name__ = "drop_schema"
def __init__(self, name, quote=None, cascade=False, **kw):
"""Create a new :class:`.DropSchema` construct."""
self.quote = quote
self.cascade = cascade
super(DropSchema, self).__init__(name, **kw)
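# [Illustrative sketch] Both constructs take the string schema name and can
# be executed directly against an Engine or Connection:
#
#     engine.execute(CreateSchema("analytics"))
#     engine.execute(DropSchema("analytics", cascade=True))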
class CreateTable(_CreateDropBase):
"""Represent a CREATE TABLE statement."""
__visit_name__ = "create_table"
def __init__(
self, element, on=None, bind=None, include_foreign_key_constraints=None
):
"""Create a :class:`.CreateTable` construct.
:param element: a :class:`_schema.Table` that's the subject
of the CREATE
:param on: See the description for 'on' in :class:`.DDL`.
:param bind: See the description for 'bind' in :class:`.DDL`.
:param include_foreign_key_constraints: optional sequence of
:class:`_schema.ForeignKeyConstraint` objects that will be included
inline within the CREATE construct; if omitted, all foreign key
constraints that do not specify use_alter=True are included.
.. versionadded:: 1.0.0
"""
super(CreateTable, self).__init__(element, on=on, bind=bind)
self.columns = [CreateColumn(column) for column in element.columns]
self.include_foreign_key_constraints = include_foreign_key_constraints
class _DropView(_CreateDropBase):
"""Semi-public 'DROP VIEW' construct.
Used by the test suite for dialect-agnostic drops of views.
This object will eventually be part of a public "view" API.
"""
__visit_name__ = "drop_view"
class CreateColumn(_DDLCompiles):
"""Represent a :class:`_schema.Column`
as rendered in a CREATE TABLE statement,
via the :class:`.CreateTable` construct.
This is provided to support custom column DDL within the generation
of CREATE TABLE statements, by using the
compiler extension documented in :ref:`sqlalchemy.ext.compiler_toplevel`
to extend :class:`.CreateColumn`.
Typical integration is to examine the incoming :class:`_schema.Column`
object, and to redirect compilation if a particular flag or condition
is found::
from sqlalchemy import schema
from sqlalchemy.ext.compiler import compiles
@compiles(schema.CreateColumn)
def compile(element, compiler, **kw):
column = element.element
if "special" not in column.info:
return compiler.visit_create_column(element, **kw)
text = "%s SPECIAL DIRECTIVE %s" % (
column.name,
compiler.type_compiler.process(column.type)
)
default = compiler.get_column_default_string(column)
if default is not None:
text += " DEFAULT " + default
if not column.nullable:
text += " NOT NULL"
if column.constraints:
text += " ".join(
compiler.process(const)
for const in column.constraints)
return text
The above construct can be applied to a :class:`_schema.Table`
as follows::
from sqlalchemy import Table, MetaData, Column, Integer, String
from sqlalchemy import schema
metadata = MetaData()
table = Table('mytable', metadata,
Column('x', Integer, info={"special":True}, primary_key=True),
Column('y', String(50)),
Column('z', String(20), info={"special":True})
)
metadata.create_all(conn)
Above, the directives we've added to the :attr:`_schema.Column.info`
collection
will be detected by our custom compilation scheme::
CREATE TABLE mytable (
x SPECIAL DIRECTIVE INTEGER NOT NULL,
y VARCHAR(50),
z SPECIAL DIRECTIVE VARCHAR(20),
PRIMARY KEY (x)
)
The :class:`.CreateColumn` construct can also be used to skip certain
columns when producing a ``CREATE TABLE``. This is accomplished by
creating a compilation rule that conditionally returns ``None``.
This is essentially how to produce the same effect as using the
``system=True`` argument on :class:`_schema.Column`, which marks a column
as an implicitly-present "system" column.
For example, suppose we wish to produce a :class:`_schema.Table`
which skips
rendering of the PostgreSQL ``xmin`` column against the PostgreSQL
backend, but on other backends does render it, in anticipation of a
triggered rule. A conditional compilation rule could skip this name only
on PostgreSQL::
from sqlalchemy.schema import CreateColumn
@compiles(CreateColumn, "postgresql")
def skip_xmin(element, compiler, **kw):
if element.element.name == 'xmin':
return None
else:
return compiler.visit_create_column(element, **kw)
my_table = Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Column('xmin', Integer)
)
Above, a :class:`.CreateTable` construct will generate a ``CREATE TABLE``
which only includes the ``id`` column in the string; the ``xmin`` column
will be omitted, but only against the PostgreSQL backend.
"""
__visit_name__ = "create_column"
def __init__(self, element):
self.element = element
class DropTable(_CreateDropBase):
"""Represent a DROP TABLE statement."""
__visit_name__ = "drop_table"
class CreateSequence(_CreateDropBase):
"""Represent a CREATE SEQUENCE statement."""
__visit_name__ = "create_sequence"
class DropSequence(_CreateDropBase):
"""Represent a DROP SEQUENCE statement."""
__visit_name__ = "drop_sequence"
class CreateIndex(_CreateDropBase):
"""Represent a CREATE INDEX statement."""
__visit_name__ = "create_index"
class DropIndex(_CreateDropBase):
"""Represent a DROP INDEX statement."""
__visit_name__ = "drop_index"
class AddConstraint(_CreateDropBase):
"""Represent an ALTER TABLE ADD CONSTRAINT statement."""
__visit_name__ = "add_constraint"
def __init__(self, element, *args, **kw):
super(AddConstraint, self).__init__(element, *args, **kw)
element._create_rule = util.portable_instancemethod(
self._create_rule_disable
)
class DropConstraint(_CreateDropBase):
"""Represent an ALTER TABLE DROP CONSTRAINT statement."""
__visit_name__ = "drop_constraint"
def __init__(self, element, cascade=False, **kw):
self.cascade = cascade
super(DropConstraint, self).__init__(element, **kw)
element._create_rule = util.portable_instancemethod(
self._create_rule_disable
)
class SetTableComment(_CreateDropBase):
"""Represent a COMMENT ON TABLE IS statement."""
__visit_name__ = "set_table_comment"
class DropTableComment(_CreateDropBase):
"""Represent a COMMENT ON TABLE '' statement.
Note this varies a lot across database backends.
"""
__visit_name__ = "drop_table_comment"
class SetColumnComment(_CreateDropBase):
"""Represent a COMMENT ON COLUMN IS statement."""
__visit_name__ = "set_column_comment"
class DropColumnComment(_CreateDropBase):
"""Represent a COMMENT ON COLUMN IS NULL statement."""
__visit_name__ = "drop_column_comment"
class DDLBase(SchemaVisitor):
def __init__(self, connection):
self.connection = connection
class SchemaGenerator(DDLBase):
def __init__(
self, dialect, connection, checkfirst=False, tables=None, **kwargs
):
super(SchemaGenerator, self).__init__(connection, **kwargs)
self.checkfirst = checkfirst
self.tables = tables
self.preparer = dialect.identifier_preparer
self.dialect = dialect
self.memo = {}
def _can_create_table(self, table):
self.dialect.validate_identifier(table.name)
effective_schema = self.connection.schema_for_object(table)
if effective_schema:
self.dialect.validate_identifier(effective_schema)
return not self.checkfirst or not self.dialect.has_table(
self.connection, table.name, schema=effective_schema
)
def _can_create_sequence(self, sequence):
effective_schema = self.connection.schema_for_object(sequence)
return self.dialect.supports_sequences and (
(not self.dialect.sequences_optional or not sequence.optional)
and (
not self.checkfirst
or not self.dialect.has_sequence(
self.connection, sequence.name, schema=effective_schema
)
)
)
def visit_metadata(self, metadata):
if self.tables is not None:
tables = self.tables
else:
tables = list(metadata.tables.values())
collection = sort_tables_and_constraints(
[t for t in tables if self._can_create_table(t)]
)
seq_coll = [
s
for s in metadata._sequences.values()
if s.column is None and self._can_create_sequence(s)
]
event_collection = [t for (t, fks) in collection if t is not None]
metadata.dispatch.before_create(
metadata,
self.connection,
tables=event_collection,
checkfirst=self.checkfirst,
_ddl_runner=self,
)
for seq in seq_coll:
self.traverse_single(seq, create_ok=True)
for table, fkcs in collection:
if table is not None:
self.traverse_single(
table,
create_ok=True,
include_foreign_key_constraints=fkcs,
_is_metadata_operation=True,
)
else:
for fkc in fkcs:
self.traverse_single(fkc)
metadata.dispatch.after_create(
metadata,
self.connection,
tables=event_collection,
checkfirst=self.checkfirst,
_ddl_runner=self,
)
def visit_table(
self,
table,
create_ok=False,
include_foreign_key_constraints=None,
_is_metadata_operation=False,
):
if not create_ok and not self._can_create_table(table):
return
table.dispatch.before_create(
table,
self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation,
)
for column in table.columns:
if column.default is not None:
self.traverse_single(column.default)
if not self.dialect.supports_alter:
# e.g., don't omit any foreign key constraints
include_foreign_key_constraints = None
self.connection.execute(
# fmt: off
CreateTable(
table,
include_foreign_key_constraints= # noqa
include_foreign_key_constraints, # noqa
)
# fmt: on
)
if hasattr(table, "indexes"):
for index in table.indexes:
self.traverse_single(index)
if self.dialect.supports_comments and not self.dialect.inline_comments:
if table.comment is not None:
self.connection.execute(SetTableComment(table))
for column in table.columns:
if column.comment is not None:
self.connection.execute(SetColumnComment(column))
table.dispatch.after_create(
table,
self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation,
)
def visit_foreign_key_constraint(self, constraint):
if not self.dialect.supports_alter:
return
self.connection.execute(AddConstraint(constraint))
def visit_sequence(self, sequence, create_ok=False):
if not create_ok and not self._can_create_sequence(sequence):
return
self.connection.execute(CreateSequence(sequence))
def visit_index(self, index):
self.connection.execute(CreateIndex(index))
class SchemaDropper(DDLBase):
def __init__(
self, dialect, connection, checkfirst=False, tables=None, **kwargs
):
super(SchemaDropper, self).__init__(connection, **kwargs)
self.checkfirst = checkfirst
self.tables = tables
self.preparer = dialect.identifier_preparer
self.dialect = dialect
self.memo = {}
def visit_metadata(self, metadata):
if self.tables is not None:
tables = self.tables
else:
tables = list(metadata.tables.values())
try:
unsorted_tables = [t for t in tables if self._can_drop_table(t)]
collection = list(
reversed(
sort_tables_and_constraints(
unsorted_tables,
filter_fn=lambda constraint: False
if not self.dialect.supports_alter
or constraint.name is None
else None,
)
)
)
except exc.CircularDependencyError as err2:
if not self.dialect.supports_alter:
util.warn(
"Can't sort tables for DROP; an "
"unresolvable foreign key "
"dependency exists between tables: %s, and backend does "
"not support ALTER. To restore at least a partial sort, "
"apply use_alter=True to ForeignKey and "
"ForeignKeyConstraint "
"objects involved in the cycle to mark these as known "
"cycles that will be ignored."
% (", ".join(sorted([t.fullname for t in err2.cycles])))
)
collection = [(t, ()) for t in unsorted_tables]
else:
util.raise_(
exc.CircularDependencyError(
err2.args[0],
err2.cycles,
err2.edges,
msg="Can't sort tables for DROP; an "
"unresolvable foreign key "
"dependency exists between tables: %s. Please ensure "
"that the ForeignKey and ForeignKeyConstraint objects "
"involved in the cycle have "
"names so that they can be dropped using "
"DROP CONSTRAINT."
% (
", ".join(
sorted([t.fullname for t in err2.cycles])
)
),
),
from_=err2,
)
seq_coll = [
s
for s in metadata._sequences.values()
if s.column is None and self._can_drop_sequence(s)
]
event_collection = [t for (t, fks) in collection if t is not None]
metadata.dispatch.before_drop(
metadata,
self.connection,
tables=event_collection,
checkfirst=self.checkfirst,
_ddl_runner=self,
)
for table, fkcs in collection:
if table is not None:
self.traverse_single(
table, drop_ok=True, _is_metadata_operation=True
)
else:
for fkc in fkcs:
self.traverse_single(fkc)
for seq in seq_coll:
self.traverse_single(seq, drop_ok=True)
metadata.dispatch.after_drop(
metadata,
self.connection,
tables=event_collection,
checkfirst=self.checkfirst,
_ddl_runner=self,
)
def _can_drop_table(self, table):
self.dialect.validate_identifier(table.name)
effective_schema = self.connection.schema_for_object(table)
if effective_schema:
self.dialect.validate_identifier(effective_schema)
return not self.checkfirst or self.dialect.has_table(
self.connection, table.name, schema=effective_schema
)
def _can_drop_sequence(self, sequence):
effective_schema = self.connection.schema_for_object(sequence)
return self.dialect.supports_sequences and (
(not self.dialect.sequences_optional or not sequence.optional)
and (
not self.checkfirst
or self.dialect.has_sequence(
self.connection, sequence.name, schema=effective_schema
)
)
)
def visit_index(self, index):
self.connection.execute(DropIndex(index))
def visit_table(self, table, drop_ok=False, _is_metadata_operation=False):
if not drop_ok and not self._can_drop_table(table):
return
table.dispatch.before_drop(
table,
self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation,
)
self.connection.execute(DropTable(table))
# traverse client side defaults which may refer to server-side
# sequences. noting that some of these client side defaults may also be
# set up as server side defaults (see http://docs.sqlalchemy.org/en/
# latest/core/defaults.html#associating-a-sequence-as-the-server-side-
# default), so have to be dropped after the table is dropped.
for column in table.columns:
if column.default is not None:
self.traverse_single(column.default)
table.dispatch.after_drop(
table,
self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation,
)
def visit_foreign_key_constraint(self, constraint):
if not self.dialect.supports_alter:
return
self.connection.execute(DropConstraint(constraint))
def visit_sequence(self, sequence, drop_ok=False):
if not drop_ok and not self._can_drop_sequence(sequence):
return
self.connection.execute(DropSequence(sequence))
def sort_tables(
tables, skip_fn=None, extra_dependencies=None,
):
"""sort a collection of :class:`_schema.Table` objects based on dependency.
This is a dependency-ordered sort which will emit :class:`_schema.Table`
objects such that they will follow their dependent :class:`_schema.Table`
objects.
Tables are dependent on another based on the presence of
:class:`_schema.ForeignKeyConstraint`
objects as well as explicit dependencies
added by :meth:`_schema.Table.add_is_dependent_on`.
.. warning::
The :func:`._schema.sort_tables` function cannot by itself
accommodate automatic resolution of dependency cycles between
tables, which are usually caused by mutually dependent foreign key
constraints. When these cycles are detected, the foreign keys
of these tables are omitted from consideration in the sort.
A warning is emitted when this condition occurs, which will be an
exception raise in a future release. Tables which are not part
of the cycle will still be returned in dependency order.
To resolve these cycles, the
:paramref:`_schema.ForeignKeyConstraint.use_alter` parameter may be
applied to those constraints which create a cycle. Alternatively,
the :func:`_schema.sort_tables_and_constraints` function will
automatically return foreign key constraints in a separate
collection when cycles are detected so that they may be applied
to a schema separately.
.. versionchanged:: 1.3.17 - a warning is emitted when
:func:`_schema.sort_tables` cannot perform a proper sort due to
cyclical dependencies. This will be an exception in a future
release. Additionally, the sort will continue to return
other tables not involved in the cycle in dependency order
which was not the case previously.
:param tables: a sequence of :class:`_schema.Table` objects.
:param skip_fn: optional callable which will be passed a
:class:`_schema.ForeignKey` object; if it returns True, this
constraint will not be considered as a dependency. Note this is
**different** from the same parameter in
:func:`.sort_tables_and_constraints`, which is
instead passed the owning :class:`_schema.ForeignKeyConstraint` object.
:param extra_dependencies: a sequence of 2-tuples of tables which will
also be considered as dependent on each other.
.. seealso::
:func:`.sort_tables_and_constraints`
:attr:`_schema.MetaData.sorted_tables` - uses this function to sort
"""
if skip_fn is not None:
def _skip_fn(fkc):
for fk in fkc.elements:
if skip_fn(fk):
return True
else:
return None
else:
_skip_fn = None
return [
t
for (t, fkcs) in sort_tables_and_constraints(
tables,
filter_fn=_skip_fn,
extra_dependencies=extra_dependencies,
_warn_for_cycles=True,
)
if t is not None
]
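# [Illustrative sketch] Typical use emits tables parent-first; ``skip_fn``
# can ignore, e.g., self-referential foreign keys (``metadata`` is
# hypothetical):
#
#     ordered = sort_tables(
#         metadata.tables.values(),
#         skip_fn=lambda fk: fk.column.table is fk.parent.table,
#     )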
def sort_tables_and_constraints(
tables, filter_fn=None, extra_dependencies=None, _warn_for_cycles=False
):
"""sort a collection of :class:`_schema.Table` /
:class:`_schema.ForeignKeyConstraint`
objects.
This is a dependency-ordered sort which will emit tuples of
``(Table, [ForeignKeyConstraint, ...])`` such that each
:class:`_schema.Table` follows its dependent :class:`_schema.Table`
objects.
Remaining :class:`_schema.ForeignKeyConstraint`
objects that are separate due to
dependency rules not satisfied by the sort are emitted afterwards
as ``(None, [ForeignKeyConstraint ...])``.
Tables are dependent on another based on the presence of
:class:`_schema.ForeignKeyConstraint` objects, explicit dependencies
added by :meth:`_schema.Table.add_is_dependent_on`,
as well as dependencies
stated here using the :paramref:`~.sort_tables_and_constraints.filter_fn`
and/or :paramref:`~.sort_tables_and_constraints.extra_dependencies`
parameters.
:param tables: a sequence of :class:`_schema.Table` objects.
:param filter_fn: optional callable which will be passed a
:class:`_schema.ForeignKeyConstraint` object,
and returns a value based on
whether this constraint should definitely be included or excluded as
an inline constraint, or neither. If it returns False, the constraint
will definitely be included as a dependency that cannot be subject
to ALTER; if True, it will **only** be included as an ALTER result at
the end. Returning None means the constraint is included in the
table-based result unless it is detected as part of a dependency cycle.
:param extra_dependencies: a sequence of 2-tuples of tables which will
also be considered as dependent on each other.
.. versionadded:: 1.0.0
.. seealso::
:func:`.sort_tables`
"""
fixed_dependencies = set()
mutable_dependencies = set()
if extra_dependencies is not None:
fixed_dependencies.update(extra_dependencies)
remaining_fkcs = set()
for table in tables:
for fkc in table.foreign_key_constraints:
if fkc.use_alter is True:
remaining_fkcs.add(fkc)
continue
if filter_fn:
filtered = filter_fn(fkc)
if filtered is True:
remaining_fkcs.add(fkc)
continue
dependent_on = fkc.referred_table
if dependent_on is not table:
mutable_dependencies.add((dependent_on, table))
fixed_dependencies.update(
(parent, table) for parent in table._extra_dependencies
)
try:
candidate_sort = list(
topological.sort(
fixed_dependencies.union(mutable_dependencies),
tables,
deterministic_order=True,
)
)
except exc.CircularDependencyError as err:
if _warn_for_cycles:
util.warn(
"Cannot correctly sort tables; there are unresolvable cycles "
'between tables "%s", which is usually caused by mutually '
"dependent foreign key constraints. Foreign key constraints "
"involving these tables will not be considered; this warning "
"may raise an error in a future release."
% (", ".join(sorted(t.fullname for t in err.cycles)),)
)
for edge in err.edges:
if edge in mutable_dependencies:
table = edge[1]
if table not in err.cycles:
continue
can_remove = [
fkc
for fkc in table.foreign_key_constraints
if filter_fn is None or filter_fn(fkc) is not False
]
remaining_fkcs.update(can_remove)
for fkc in can_remove:
dependent_on = fkc.referred_table
if dependent_on is not table:
mutable_dependencies.discard((dependent_on, table))
candidate_sort = list(
topological.sort(
fixed_dependencies.union(mutable_dependencies),
tables,
deterministic_order=True,
)
)
return [
(table, table.foreign_key_constraints.difference(remaining_fkcs))
for table in candidate_sort
] + [(None, list(remaining_fkcs))]
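
def _demo_sort_tables_and_constraints():
    # Editor's sketch, not part of the original module: illustrates the
    # (Table, [ForeignKeyConstraint, ...]) tuples this function emits;
    # constraints marked use_alter (or filtered out) would instead land in
    # the trailing (None, [...]) entry.  Names are hypothetical.
    from sqlalchemy import Column, ForeignKey, Integer, MetaData, Table

    m = MetaData()
    a = Table("a", m, Column("id", Integer, primary_key=True))
    b = Table(
        "b",
        m,
        Column("id", Integer, primary_key=True),
        Column("a_id", Integer, ForeignKey("a.id")),
    )
    for table, fkcs in sort_tables_and_constraints([b, a]):
        print(table, list(fkcs))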
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/sql/compiler.py
|
# sql/compiler.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base SQL and DDL compiler implementations.
Classes provided include:
:class:`.compiler.SQLCompiler` - renders SQL
strings
:class:`.compiler.DDLCompiler` - renders DDL
(data definition language) strings
:class:`.compiler.GenericTypeCompiler` - renders
type specification strings.
To generate user-defined SQL strings, see
:doc:`/ext/compiler`.
"""
import contextlib
import itertools
import re
from . import crud
from . import elements
from . import functions
from . import operators
from . import schema
from . import selectable
from . import sqltypes
from . import visitors
from .. import exc
from .. import util
RESERVED_WORDS = set(
[
"all",
"analyse",
"analyze",
"and",
"any",
"array",
"as",
"asc",
"asymmetric",
"authorization",
"between",
"binary",
"both",
"case",
"cast",
"check",
"collate",
"column",
"constraint",
"create",
"cross",
"current_date",
"current_role",
"current_time",
"current_timestamp",
"current_user",
"default",
"deferrable",
"desc",
"distinct",
"do",
"else",
"end",
"except",
"false",
"for",
"foreign",
"freeze",
"from",
"full",
"grant",
"group",
"having",
"ilike",
"in",
"initially",
"inner",
"intersect",
"into",
"is",
"isnull",
"join",
"leading",
"left",
"like",
"limit",
"localtime",
"localtimestamp",
"natural",
"new",
"not",
"notnull",
"null",
"off",
"offset",
"old",
"on",
"only",
"or",
"order",
"outer",
"overlaps",
"placing",
"primary",
"references",
"right",
"select",
"session_user",
"set",
"similar",
"some",
"symmetric",
"table",
"then",
"to",
"trailing",
"true",
"union",
"unique",
"user",
"using",
"verbose",
"when",
"where",
]
)
LEGAL_CHARACTERS = re.compile(r"^[A-Z0-9_$]+$", re.I)
LEGAL_CHARACTERS_PLUS_SPACE = re.compile(r"^[A-Z0-9_ $]+$", re.I)
ILLEGAL_INITIAL_CHARACTERS = {str(x) for x in range(0, 10)}.union(["$"])
FK_ON_DELETE = re.compile(
r"^(?:RESTRICT|CASCADE|SET NULL|NO ACTION|SET DEFAULT)$", re.I
)
FK_ON_UPDATE = re.compile(
r"^(?:RESTRICT|CASCADE|SET NULL|NO ACTION|SET DEFAULT)$", re.I
)
FK_INITIALLY = re.compile(r"^(?:DEFERRED|IMMEDIATE)$", re.I)
BIND_PARAMS = re.compile(r"(?<![:\w\$\x5c]):([\w\$]+)(?![:\w\$])", re.UNICODE)
BIND_PARAMS_ESC = re.compile(r"\x5c(:[\w\$]*)(?![:\w\$])", re.UNICODE)
BIND_TEMPLATES = {
"pyformat": "%%(%(name)s)s",
"qmark": "?",
"format": "%%s",
"numeric": ":[_POSITION]",
"named": ":%(name)s",
}
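
def _demo_bind_templates():
    # Editor's sketch, not part of the original module: shows how each
    # paramstyle template in BIND_TEMPLATES renders a bind named "x".
    # ("qmark", "format" and "numeric" ignore the name entirely.)
    for style in sorted(BIND_TEMPLATES):
        print(style, "->", BIND_TEMPLATES[style] % {"name": "x"})
    # format   -> %s
    # named    -> :x
    # numeric  -> :[_POSITION]  (renumbered after compilation)
    # pyformat -> %(x)s
    # qmark    -> ?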
OPERATORS = {
# binary
operators.and_: " AND ",
operators.or_: " OR ",
operators.add: " + ",
operators.mul: " * ",
operators.sub: " - ",
operators.div: " / ",
operators.mod: " % ",
operators.truediv: " / ",
operators.neg: "-",
operators.lt: " < ",
operators.le: " <= ",
operators.ne: " != ",
operators.gt: " > ",
operators.ge: " >= ",
operators.eq: " = ",
operators.is_distinct_from: " IS DISTINCT FROM ",
operators.isnot_distinct_from: " IS NOT DISTINCT FROM ",
operators.concat_op: " || ",
operators.match_op: " MATCH ",
operators.notmatch_op: " NOT MATCH ",
operators.in_op: " IN ",
operators.notin_op: " NOT IN ",
operators.comma_op: ", ",
operators.from_: " FROM ",
operators.as_: " AS ",
operators.is_: " IS ",
operators.isnot: " IS NOT ",
operators.collate: " COLLATE ",
# unary
operators.exists: "EXISTS ",
operators.distinct_op: "DISTINCT ",
operators.inv: "NOT ",
operators.any_op: "ANY ",
operators.all_op: "ALL ",
# modifiers
operators.desc_op: " DESC",
operators.asc_op: " ASC",
operators.nullsfirst_op: " NULLS FIRST",
operators.nullslast_op: " NULLS LAST",
}
FUNCTIONS = {
functions.coalesce: "coalesce",
functions.current_date: "CURRENT_DATE",
functions.current_time: "CURRENT_TIME",
functions.current_timestamp: "CURRENT_TIMESTAMP",
functions.current_user: "CURRENT_USER",
functions.localtime: "LOCALTIME",
functions.localtimestamp: "LOCALTIMESTAMP",
functions.random: "random",
functions.sysdate: "sysdate",
functions.session_user: "SESSION_USER",
functions.user: "USER",
functions.cube: "CUBE",
functions.rollup: "ROLLUP",
functions.grouping_sets: "GROUPING SETS",
}
EXTRACT_MAP = {
"month": "month",
"day": "day",
"year": "year",
"second": "second",
"hour": "hour",
"doy": "doy",
"minute": "minute",
"quarter": "quarter",
"dow": "dow",
"week": "week",
"epoch": "epoch",
"milliseconds": "milliseconds",
"microseconds": "microseconds",
"timezone_hour": "timezone_hour",
"timezone_minute": "timezone_minute",
}
COMPOUND_KEYWORDS = {
selectable.CompoundSelect.UNION: "UNION",
selectable.CompoundSelect.UNION_ALL: "UNION ALL",
selectable.CompoundSelect.EXCEPT: "EXCEPT",
selectable.CompoundSelect.EXCEPT_ALL: "EXCEPT ALL",
selectable.CompoundSelect.INTERSECT: "INTERSECT",
selectable.CompoundSelect.INTERSECT_ALL: "INTERSECT ALL",
}
class Compiled(object):
"""Represent a compiled SQL or DDL expression.
The ``__str__`` method of the ``Compiled`` object should produce
the actual text of the statement. ``Compiled`` objects are
specific to their underlying database dialect, and also may
or may not be specific to the columns referenced within a
particular set of bind parameters. In no case should the
``Compiled`` object be dependent on the actual values of those
bind parameters, even though it may reference those values as
defaults.
"""
_cached_metadata = None
execution_options = util.immutabledict()
"""
Execution options propagated from the statement. In some cases,
sub-elements of the statement can modify these.
"""
def __init__(
self,
dialect,
statement,
bind=None,
schema_translate_map=None,
compile_kwargs=util.immutabledict(),
):
"""Construct a new :class:`.Compiled` object.
:param dialect: :class:`.Dialect` to compile against.
:param statement: :class:`_expression.ClauseElement` to be compiled.
:param bind: Optional Engine or Connection to compile this
statement against.
:param schema_translate_map: dictionary of schema names to be
translated when forming the resultant SQL
.. versionadded:: 1.1
.. seealso::
:ref:`schema_translating`
:param compile_kwargs: additional kwargs that will be
passed to the initial call to :meth:`.Compiled.process`.
"""
self.dialect = dialect
self.bind = bind
self.preparer = self.dialect.identifier_preparer
if schema_translate_map:
self.preparer = self.preparer._with_schema_translate(
schema_translate_map
)
if statement is not None:
self.statement = statement
self.can_execute = statement.supports_execution
if self.can_execute:
self.execution_options = statement._execution_options
self.string = self.process(self.statement, **compile_kwargs)
@util.deprecated(
"0.7",
"The :meth:`.Compiled.compile` method is deprecated and will be "
"removed in a future release. The :class:`.Compiled` object "
"now runs its compilation within the constructor, and this method "
"does nothing.",
)
def compile(self):
"""Produce the internal string representation of this element.
"""
pass
def _execute_on_connection(self, connection, multiparams, params):
if self.can_execute:
return connection._execute_compiled(self, multiparams, params)
else:
raise exc.ObjectNotExecutableError(self.statement)
@property
def sql_compiler(self):
"""Return a Compiled that is capable of processing SQL expressions.
If this compiler is one, it would likely just return 'self'.
"""
raise NotImplementedError()
def process(self, obj, **kwargs):
return obj._compiler_dispatch(self, **kwargs)
def __str__(self):
"""Return the string text of the generated SQL or DDL."""
return self.string or ""
def construct_params(self, params=None):
"""Return the bind params for this compiled object.
:param params: a dict of string/object pairs whose values will
override bind values compiled in to the
statement.
"""
raise NotImplementedError()
@property
def params(self):
"""Return the bind params for this compiled object."""
return self.construct_params()
def execute(self, *multiparams, **params):
"""Execute this compiled object."""
e = self.bind
if e is None:
raise exc.UnboundExecutionError(
"This Compiled object is not bound to any Engine "
"or Connection.",
code="2afi",
)
return e._execute_compiled(self, multiparams, params)
def scalar(self, *multiparams, **params):
"""Execute this compiled object and return the result's
scalar value."""
return self.execute(*multiparams, **params).scalar()
class TypeCompiler(util.with_metaclass(util.EnsureKWArgType, object)):
"""Produces DDL specification for TypeEngine objects."""
ensure_kwarg = r"visit_\w+"
def __init__(self, dialect):
self.dialect = dialect
def process(self, type_, **kw):
return type_._compiler_dispatch(self, **kw)
class _CompileLabel(visitors.Visitable):
"""lightweight label object which acts as an expression.Label."""
__visit_name__ = "label"
__slots__ = "element", "name"
def __init__(self, col, name, alt_names=()):
self.element = col
self.name = name
self._alt_names = (col,) + alt_names
@property
def proxy_set(self):
return self.element.proxy_set
@property
def type(self):
return self.element.type
def self_group(self, **kw):
return self
class prefix_anon_map(dict):
"""A map that creates new keys for missing key access.
Considers keys of the form "<ident> <name>" to produce
new symbols "<name>_<index>", where "index" is an incrementing integer
corresponding to <name>.
Inlines the approach taken by :class:`sqlalchemy.util.PopulateDict` which
is otherwise usually used for this type of operation.
"""
def __missing__(self, key):
(ident, derived) = key.split(" ", 1)
anonymous_counter = self.get(derived, 1)
self[derived] = anonymous_counter + 1
value = derived + "_" + str(anonymous_counter)
self[key] = value
return value
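
def _demo_prefix_anon_map():
    # Editor's sketch, not part of the original module: missing keys of
    # the form "<ident> <name>" yield "<name>_<counter>", with the counter
    # tracked per derived <name> and results cached on first access.
    m = prefix_anon_map()
    print(m["1 foo"])  # foo_1
    print(m["2 foo"])  # foo_2   (counter advances per derived name)
    print(m["1 foo"])  # foo_1   (already cached, no new symbol)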
class SQLCompiler(Compiled):
"""Default implementation of :class:`.Compiled`.
Compiles :class:`_expression.ClauseElement` objects into SQL strings.
"""
extract_map = EXTRACT_MAP
compound_keywords = COMPOUND_KEYWORDS
isdelete = isinsert = isupdate = False
"""class-level defaults which can be set at the instance
level to define if this Compiled instance represents
INSERT/UPDATE/DELETE
"""
isplaintext = False
returning = None
"""holds the "returning" collection of columns if
the statement is CRUD and defines returning columns
either implicitly or explicitly
"""
returning_precedes_values = False
"""set to True classwide to generate RETURNING
    clauses before the VALUES or WHERE clause (e.g. MSSQL)
"""
render_table_with_column_in_update_from = False
"""set to True classwide to indicate the SET clause
in a multi-table UPDATE statement should qualify
    columns with the table name (e.g. MySQL)
"""
contains_expanding_parameters = False
"""True if we've encountered bindparam(..., expanding=True).
These need to be converted before execution time against the
string statement.
"""
ansi_bind_rules = False
"""SQL 92 doesn't allow bind parameters to be used
in the columns clause of a SELECT, nor does it allow
ambiguous expressions like "? = ?". A compiler
    subclass can set this flag to True if the target
    driver/DB enforces this.
"""
_textual_ordered_columns = False
"""tell the result object that the column names as rendered are important,
but they are also "ordered" vs. what is in the compiled object here.
"""
_ordered_columns = True
"""
if False, means we can't be sure the list of entries
in _result_columns is actually the rendered order. Usually
True unless using an unordered TextAsFrom.
"""
_numeric_binds = False
"""
True if paramstyle is "numeric". This paramstyle is trickier than
all the others.
"""
insert_single_values_expr = None
"""When an INSERT is compiled with a single set of parameters inside
a VALUES expression, the string is assigned here, where it can be
used for insert batching schemes to rewrite the VALUES expression.
.. versionadded:: 1.3.8
"""
insert_prefetch = update_prefetch = ()
def __init__(
self, dialect, statement, column_keys=None, inline=False, **kwargs
):
"""Construct a new :class:`.SQLCompiler` object.
:param dialect: :class:`.Dialect` to be used
:param statement: :class:`_expression.ClauseElement` to be compiled
:param column_keys: a list of column names to be compiled into an
INSERT or UPDATE statement.
:param inline: whether to generate INSERT statements as "inline", e.g.
not formatted to return any generated defaults
:param kwargs: additional keyword arguments to be consumed by the
superclass.
"""
self.column_keys = column_keys
# compile INSERT/UPDATE defaults/sequences inlined (no pre-
# execute)
self.inline = inline or getattr(statement, "inline", False)
# a dictionary of bind parameter keys to BindParameter
# instances.
self.binds = {}
# a dictionary of BindParameter instances to "compiled" names
# that are actually present in the generated SQL
self.bind_names = util.column_dict()
# stack which keeps track of nested SELECT statements
self.stack = []
# relates label names in the final SQL to a tuple of local
# column/label name, ColumnElement object (if any) and
# TypeEngine. ResultProxy uses this for type processing and
# column targeting
self._result_columns = []
# true if the paramstyle is positional
self.positional = dialect.positional
if self.positional:
self.positiontup = []
self._numeric_binds = dialect.paramstyle == "numeric"
self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle]
self.ctes = None
self.label_length = (
dialect.label_length or dialect.max_identifier_length
)
# a map which tracks "anonymous" identifiers that are created on
# the fly here
self.anon_map = prefix_anon_map()
# a map which tracks "truncated" names based on
# dialect.label_length or dialect.max_identifier_length
self.truncated_names = {}
Compiled.__init__(self, dialect, statement, **kwargs)
if (
self.isinsert or self.isupdate or self.isdelete
) and statement._returning:
self.returning = statement._returning
if self.positional and self._numeric_binds:
self._apply_numbered_params()
@property
def prefetch(self):
return list(self.insert_prefetch + self.update_prefetch)
@util.memoized_instancemethod
def _init_cte_state(self):
"""Initialize collections related to CTEs only if
a CTE is located, to save on the overhead of
these collections otherwise.
"""
# collect CTEs to tack on top of a SELECT
self.ctes = util.OrderedDict()
self.ctes_by_name = {}
self.ctes_recursive = False
if self.positional:
self.cte_positional = {}
@contextlib.contextmanager
def _nested_result(self):
"""special API to support the use case of 'nested result sets'"""
result_columns, ordered_columns = (
self._result_columns,
self._ordered_columns,
)
self._result_columns, self._ordered_columns = [], False
try:
if self.stack:
entry = self.stack[-1]
entry["need_result_map_for_nested"] = True
else:
entry = None
yield self._result_columns, self._ordered_columns
finally:
if entry:
entry.pop("need_result_map_for_nested")
self._result_columns, self._ordered_columns = (
result_columns,
ordered_columns,
)
def _apply_numbered_params(self):
poscount = itertools.count(1)
self.string = re.sub(
r"\[_POSITION\]", lambda m: str(util.next(poscount)), self.string
)
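
    @staticmethod
    def _demo_apply_numbered_params():
        # Editor's sketch, not part of the original class: a standalone
        # illustration of the substitution above, which rewrites the
        # ":[_POSITION]" placeholders of the "numeric" paramstyle into
        # ":1", ":2", ... in rendering order.
        import itertools
        import re

        poscount = itertools.count(1)
        sql = "SELECT * FROM t WHERE x = :[_POSITION] AND y = :[_POSITION]"
        print(re.sub(r"\[_POSITION\]", lambda m: str(next(poscount)), sql))
        # SELECT * FROM t WHERE x = :1 AND y = :2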
@util.memoized_property
def _bind_processors(self):
return dict(
(key, value)
for key, value in (
(
self.bind_names[bindparam],
bindparam.type._cached_bind_processor(self.dialect),
)
for bindparam in self.bind_names
)
if value is not None
)
def is_subquery(self):
return len(self.stack) > 1
@property
def sql_compiler(self):
return self
def construct_params(self, params=None, _group_number=None, _check=True):
"""return a dictionary of bind parameter keys and values"""
if params:
pd = {}
for bindparam in self.bind_names:
name = self.bind_names[bindparam]
if bindparam.key in params:
pd[name] = params[bindparam.key]
elif name in params:
pd[name] = params[name]
elif _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d"
% (bindparam.key, _group_number),
code="cd3x",
)
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key,
code="cd3x",
)
elif bindparam.callable:
pd[name] = bindparam.effective_value
else:
pd[name] = bindparam.value
return pd
else:
pd = {}
for bindparam in self.bind_names:
if _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d"
% (bindparam.key, _group_number),
code="cd3x",
)
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key,
code="cd3x",
)
if bindparam.callable:
pd[self.bind_names[bindparam]] = bindparam.effective_value
else:
pd[self.bind_names[bindparam]] = bindparam.value
return pd
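
    @staticmethod
    def _demo_construct_params():
        # Editor's sketch, not part of the original class: a hypothetical
        # round trip showing how construct_params() merges compiled-in
        # bind values with runtime overrides.
        from sqlalchemy import bindparam, column, select, table

        t = table("t", column("x"))
        stmt = select([t]).where(t.c.x == bindparam("x", value=5))
        compiled = stmt.compile()
        print(compiled.params)                      # {'x': 5}
        print(compiled.construct_params({"x": 9}))  # {'x': 9}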
@property
def params(self):
"""Return the bind param dictionary embedded into this
compiled object, for those values that are present."""
return self.construct_params(_check=False)
@util.dependencies("sqlalchemy.engine.result")
def _create_result_map(self, result):
"""utility method used for unit tests only."""
return result.ResultMetaData._create_result_map(self._result_columns)
def default_from(self):
"""Called when a SELECT statement has no froms, and no FROM clause is
to be appended.
Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output.
"""
return ""
def visit_grouping(self, grouping, asfrom=False, **kwargs):
return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")"
def visit_label_reference(
self, element, within_columns_clause=False, **kwargs
):
if self.stack and self.dialect.supports_simple_order_by_label:
selectable = self.stack[-1]["selectable"]
with_cols, only_froms, only_cols = selectable._label_resolve_dict
if within_columns_clause:
resolve_dict = only_froms
else:
resolve_dict = only_cols
# this can be None in the case that a _label_reference()
# were subject to a replacement operation, in which case
# the replacement of the Label element may have changed
# to something else like a ColumnClause expression.
order_by_elem = element.element._order_by_label_element
if (
order_by_elem is not None
and order_by_elem.name in resolve_dict
and order_by_elem.shares_lineage(
resolve_dict[order_by_elem.name]
)
):
kwargs[
"render_label_as_label"
] = element.element._order_by_label_element
return self.process(
element.element,
within_columns_clause=within_columns_clause,
**kwargs
)
def visit_textual_label_reference(
self, element, within_columns_clause=False, **kwargs
):
if not self.stack:
# compiling the element outside of the context of a SELECT
return self.process(element._text_clause)
selectable = self.stack[-1]["selectable"]
with_cols, only_froms, only_cols = selectable._label_resolve_dict
try:
if within_columns_clause:
col = only_froms[element.element]
else:
col = with_cols[element.element]
except KeyError as ke:
elements._no_text_coercion(
element.element,
exc.CompileError,
"Can't resolve label reference for ORDER BY / "
"GROUP BY / DISTINCT etc.",
err=ke,
)
else:
kwargs["render_label_as_label"] = col
return self.process(
col, within_columns_clause=within_columns_clause, **kwargs
)
def visit_label(
self,
label,
add_to_result_map=None,
within_label_clause=False,
within_columns_clause=False,
render_label_as_label=None,
**kw
):
# only render labels within the columns clause
# or ORDER BY clause of a select. dialect-specific compilers
# can modify this behavior.
render_label_with_as = (
within_columns_clause and not within_label_clause
)
render_label_only = render_label_as_label is label
if render_label_only or render_label_with_as:
if isinstance(label.name, elements._truncated_label):
labelname = self._truncated_identifier("colident", label.name)
else:
labelname = label.name
if render_label_with_as:
if add_to_result_map is not None:
add_to_result_map(
labelname,
label.name,
(label, labelname) + label._alt_names,
label.type,
)
return (
label.element._compiler_dispatch(
self,
within_columns_clause=True,
within_label_clause=True,
**kw
)
+ OPERATORS[operators.as_]
+ self.preparer.format_label(label, labelname)
)
elif render_label_only:
return self.preparer.format_label(label, labelname)
else:
return label.element._compiler_dispatch(
self, within_columns_clause=False, **kw
)
def _fallback_column_name(self, column):
raise exc.CompileError(
"Cannot compile Column object until " "its 'name' is assigned."
)
def visit_column(
self, column, add_to_result_map=None, include_table=True, **kwargs
):
name = orig_name = column.name
if name is None:
name = self._fallback_column_name(column)
is_literal = column.is_literal
if not is_literal and isinstance(name, elements._truncated_label):
name = self._truncated_identifier("colident", name)
if add_to_result_map is not None:
add_to_result_map(
name, orig_name, (column, name, column.key), column.type
)
if is_literal:
            # note we are not currently accommodating
# literal_column(quoted_name('ident', True)) here
name = self.escape_literal_column(name)
else:
name = self.preparer.quote(name)
table = column.table
if table is None or not include_table or not table.named_with_column:
return name
else:
effective_schema = self.preparer.schema_for_object(table)
if effective_schema:
schema_prefix = (
self.preparer.quote_schema(effective_schema) + "."
)
else:
schema_prefix = ""
tablename = table.name
if isinstance(tablename, elements._truncated_label):
tablename = self._truncated_identifier("alias", tablename)
return schema_prefix + self.preparer.quote(tablename) + "." + name
def visit_collation(self, element, **kw):
return self.preparer.format_collation(element.collation)
def visit_fromclause(self, fromclause, **kwargs):
return fromclause.name
def visit_index(self, index, **kwargs):
return index.name
def visit_typeclause(self, typeclause, **kw):
kw["type_expression"] = typeclause
return self.dialect.type_compiler.process(typeclause.type, **kw)
def post_process_text(self, text):
if self.preparer._double_percents:
text = text.replace("%", "%%")
return text
def escape_literal_column(self, text):
if self.preparer._double_percents:
text = text.replace("%", "%%")
return text
def visit_textclause(self, textclause, **kw):
def do_bindparam(m):
name = m.group(1)
if name in textclause._bindparams:
return self.process(textclause._bindparams[name], **kw)
else:
return self.bindparam_string(name, **kw)
if not self.stack:
self.isplaintext = True
# un-escape any \:params
return BIND_PARAMS_ESC.sub(
lambda m: m.group(1),
BIND_PARAMS.sub(
do_bindparam, self.post_process_text(textclause.text)
),
)
def visit_text_as_from(
self, taf, compound_index=None, asfrom=False, parens=True, **kw
):
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = (
toplevel
or (
compound_index == 0
and entry.get("need_result_map_for_compound", False)
)
or entry.get("need_result_map_for_nested", False)
)
if populate_result_map:
self._ordered_columns = (
self._textual_ordered_columns
) = taf.positional
for c in taf.column_args:
self.process(
c,
within_columns_clause=True,
add_to_result_map=self._add_to_result_map,
)
text = self.process(taf.element, **kw)
if asfrom and parens:
text = "(%s)" % text
return text
def visit_null(self, expr, **kw):
return "NULL"
def visit_true(self, expr, **kw):
if self.dialect.supports_native_boolean:
return "true"
else:
return "1"
def visit_false(self, expr, **kw):
if self.dialect.supports_native_boolean:
return "false"
else:
return "0"
def visit_clauselist(self, clauselist, **kw):
sep = clauselist.operator
if sep is None:
sep = " "
else:
sep = OPERATORS[clauselist.operator]
text = sep.join(
s
for s in (
c._compiler_dispatch(self, **kw) for c in clauselist.clauses
)
if s
)
if clauselist._tuple_values and self.dialect.tuple_in_values:
text = "VALUES " + text
return text
def visit_case(self, clause, **kwargs):
x = "CASE "
if clause.value is not None:
x += clause.value._compiler_dispatch(self, **kwargs) + " "
for cond, result in clause.whens:
x += (
"WHEN "
+ cond._compiler_dispatch(self, **kwargs)
+ " THEN "
+ result._compiler_dispatch(self, **kwargs)
+ " "
)
if clause.else_ is not None:
x += (
"ELSE " + clause.else_._compiler_dispatch(self, **kwargs) + " "
)
x += "END"
return x
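
    @staticmethod
    def _demo_visit_case():
        # Editor's sketch, not part of the original class: the CASE
        # rendering above produces roughly
        # "CASE WHEN score > 90 THEN 'A' ELSE 'B' END"
        # (parenthesization may vary by dialect).  Names are hypothetical.
        from sqlalchemy import case, column, literal_column

        expr = case(
            [(column("score") > literal_column("90"), literal_column("'A'"))],
            else_=literal_column("'B'"),
        )
        print(expr)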
def visit_type_coerce(self, type_coerce, **kw):
return type_coerce.typed_expression._compiler_dispatch(self, **kw)
def visit_cast(self, cast, **kwargs):
return "CAST(%s AS %s)" % (
cast.clause._compiler_dispatch(self, **kwargs),
cast.typeclause._compiler_dispatch(self, **kwargs),
)
def _format_frame_clause(self, range_, **kw):
return "%s AND %s" % (
"UNBOUNDED PRECEDING"
if range_[0] is elements.RANGE_UNBOUNDED
else "CURRENT ROW"
if range_[0] is elements.RANGE_CURRENT
else "%s PRECEDING"
% (self.process(elements.literal(abs(range_[0])), **kw),)
if range_[0] < 0
else "%s FOLLOWING"
% (self.process(elements.literal(range_[0]), **kw),),
"UNBOUNDED FOLLOWING"
if range_[1] is elements.RANGE_UNBOUNDED
else "CURRENT ROW"
if range_[1] is elements.RANGE_CURRENT
else "%s PRECEDING"
% (self.process(elements.literal(abs(range_[1])), **kw),)
if range_[1] < 0
else "%s FOLLOWING"
% (self.process(elements.literal(range_[1]), **kw),),
)
def visit_over(self, over, **kwargs):
if over.range_:
range_ = "RANGE BETWEEN %s" % self._format_frame_clause(
over.range_, **kwargs
)
elif over.rows:
range_ = "ROWS BETWEEN %s" % self._format_frame_clause(
over.rows, **kwargs
)
else:
range_ = None
return "%s OVER (%s)" % (
over.element._compiler_dispatch(self, **kwargs),
" ".join(
[
"%s BY %s"
% (word, clause._compiler_dispatch(self, **kwargs))
for word, clause in (
("PARTITION", over.partition_by),
("ORDER", over.order_by),
)
if clause is not None and len(clause)
]
+ ([range_] if range_ else [])
),
)
def visit_withingroup(self, withingroup, **kwargs):
return "%s WITHIN GROUP (ORDER BY %s)" % (
withingroup.element._compiler_dispatch(self, **kwargs),
withingroup.order_by._compiler_dispatch(self, **kwargs),
)
def visit_funcfilter(self, funcfilter, **kwargs):
return "%s FILTER (WHERE %s)" % (
funcfilter.func._compiler_dispatch(self, **kwargs),
funcfilter.criterion._compiler_dispatch(self, **kwargs),
)
def visit_extract(self, extract, **kwargs):
field = self.extract_map.get(extract.field, extract.field)
return "EXTRACT(%s FROM %s)" % (
field,
extract.expr._compiler_dispatch(self, **kwargs),
)
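
    @staticmethod
    def _demo_visit_extract():
        # Editor's sketch, not part of the original class: extract()
        # renders through extract_map as "EXTRACT(<field> FROM <expr>)".
        from sqlalchemy import column, extract

        print(extract("year", column("created_at")))
        # EXTRACT(year FROM created_at)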
def visit_function(self, func, add_to_result_map=None, **kwargs):
if add_to_result_map is not None:
add_to_result_map(func.name, func.name, (), func.type)
disp = getattr(self, "visit_%s_func" % func.name.lower(), None)
if disp:
return disp(func, **kwargs)
else:
name = FUNCTIONS.get(func.__class__, None)
if name:
if func._has_args:
name += "%(expr)s"
else:
name = func.name
name = (
self.preparer.quote(name)
if self.preparer._requires_quotes_illegal_chars(name)
or isinstance(name, elements.quoted_name)
else name
)
name = name + "%(expr)s"
return ".".join(
[
(
self.preparer.quote(tok)
if self.preparer._requires_quotes_illegal_chars(tok)
                        or isinstance(tok, elements.quoted_name)
else tok
)
for tok in func.packagenames
]
+ [name]
) % {"expr": self.function_argspec(func, **kwargs)}
def visit_next_value_func(self, next_value, **kw):
return self.visit_sequence(next_value.sequence)
def visit_sequence(self, sequence, **kw):
raise NotImplementedError(
"Dialect '%s' does not support sequence increments."
% self.dialect.name
)
def function_argspec(self, func, **kwargs):
return func.clause_expr._compiler_dispatch(self, **kwargs)
def visit_compound_select(
self, cs, asfrom=False, parens=True, compound_index=0, **kwargs
):
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
need_result_map = toplevel or (
compound_index == 0
and entry.get("need_result_map_for_compound", False)
)
self.stack.append(
{
"correlate_froms": entry["correlate_froms"],
"asfrom_froms": entry["asfrom_froms"],
"selectable": cs,
"need_result_map_for_compound": need_result_map,
}
)
keyword = self.compound_keywords.get(cs.keyword)
text = (" " + keyword + " ").join(
(
c._compiler_dispatch(
self,
asfrom=asfrom,
parens=False,
compound_index=i,
**kwargs
)
for i, c in enumerate(cs.selects)
)
)
text += self.group_by_clause(cs, **dict(asfrom=asfrom, **kwargs))
text += self.order_by_clause(cs, **kwargs)
text += (
(cs._limit_clause is not None or cs._offset_clause is not None)
and self.limit_clause(cs, **kwargs)
or ""
)
if self.ctes and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom and parens:
return "(" + text + ")"
else:
return text
def _get_operator_dispatch(self, operator_, qualifier1, qualifier2):
attrname = "visit_%s_%s%s" % (
operator_.__name__,
qualifier1,
"_" + qualifier2 if qualifier2 else "",
)
return getattr(self, attrname, None)
def visit_unary(self, unary, **kw):
if unary.operator:
if unary.modifier:
raise exc.CompileError(
"Unary expression does not support operator "
"and modifier simultaneously"
)
disp = self._get_operator_dispatch(
unary.operator, "unary", "operator"
)
if disp:
return disp(unary, unary.operator, **kw)
else:
return self._generate_generic_unary_operator(
unary, OPERATORS[unary.operator], **kw
)
elif unary.modifier:
disp = self._get_operator_dispatch(
unary.modifier, "unary", "modifier"
)
if disp:
return disp(unary, unary.modifier, **kw)
else:
return self._generate_generic_unary_modifier(
unary, OPERATORS[unary.modifier], **kw
)
else:
raise exc.CompileError(
"Unary expression has no operator or modifier"
)
def visit_istrue_unary_operator(self, element, operator, **kw):
if (
element._is_implicitly_boolean
or self.dialect.supports_native_boolean
):
return self.process(element.element, **kw)
else:
return "%s = 1" % self.process(element.element, **kw)
def visit_isfalse_unary_operator(self, element, operator, **kw):
if (
element._is_implicitly_boolean
or self.dialect.supports_native_boolean
):
return "NOT %s" % self.process(element.element, **kw)
else:
return "%s = 0" % self.process(element.element, **kw)
def visit_notmatch_op_binary(self, binary, operator, **kw):
return "NOT %s" % self.visit_binary(
binary, override_operator=operators.match_op
)
def _emit_empty_in_warning(self):
util.warn(
"The IN-predicate was invoked with an "
"empty sequence. This results in a "
"contradiction, which nonetheless can be "
"expensive to evaluate. Consider alternative "
"strategies for improved performance."
)
def visit_empty_in_op_binary(self, binary, operator, **kw):
if self.dialect._use_static_in:
return "1 != 1"
else:
if self.dialect._warn_on_empty_in:
self._emit_empty_in_warning()
return self.process(binary.left != binary.left)
def visit_empty_notin_op_binary(self, binary, operator, **kw):
if self.dialect._use_static_in:
return "1 = 1"
else:
if self.dialect._warn_on_empty_in:
self._emit_empty_in_warning()
return self.process(binary.left == binary.left)
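
    @staticmethod
    def _demo_empty_in():
        # Editor's sketch, not part of the original class: an in_() against
        # an empty sequence compiles to a static contradiction ("1 != 1")
        # on dialects with _use_static_in, and otherwise to a
        # self-inequality such as "x != x" (plus the warning above).
        from sqlalchemy import column

        print(column("x").in_([]))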
def visit_empty_set_expr(self, element_types):
raise NotImplementedError(
"Dialect '%s' does not support empty set expression."
% self.dialect.name
)
def visit_binary(
self, binary, override_operator=None, eager_grouping=False, **kw
):
# don't allow "? = ?" to render
if (
self.ansi_bind_rules
and isinstance(binary.left, elements.BindParameter)
and isinstance(binary.right, elements.BindParameter)
):
kw["literal_binds"] = True
operator_ = override_operator or binary.operator
disp = self._get_operator_dispatch(operator_, "binary", None)
if disp:
return disp(binary, operator_, **kw)
else:
try:
opstring = OPERATORS[operator_]
except KeyError as err:
util.raise_(
exc.UnsupportedCompilationError(self, operator_),
replace_context=err,
)
else:
return self._generate_generic_binary(binary, opstring, **kw)
def visit_function_as_comparison_op_binary(self, element, operator, **kw):
return self.process(element.sql_function, **kw)
def visit_mod_binary(self, binary, operator, **kw):
if self.preparer._double_percents:
return (
self.process(binary.left, **kw)
+ " %% "
+ self.process(binary.right, **kw)
)
else:
return (
self.process(binary.left, **kw)
+ " % "
+ self.process(binary.right, **kw)
)
def visit_custom_op_binary(self, element, operator, **kw):
kw["eager_grouping"] = operator.eager_grouping
return self._generate_generic_binary(
element, " " + operator.opstring + " ", **kw
)
def visit_custom_op_unary_operator(self, element, operator, **kw):
return self._generate_generic_unary_operator(
element, operator.opstring + " ", **kw
)
def visit_custom_op_unary_modifier(self, element, operator, **kw):
return self._generate_generic_unary_modifier(
element, " " + operator.opstring, **kw
)
def _generate_generic_binary(
self, binary, opstring, eager_grouping=False, **kw
):
_in_binary = kw.get("_in_binary", False)
kw["_in_binary"] = True
text = (
binary.left._compiler_dispatch(
self, eager_grouping=eager_grouping, **kw
)
+ opstring
+ binary.right._compiler_dispatch(
self, eager_grouping=eager_grouping, **kw
)
)
if _in_binary and eager_grouping:
text = "(%s)" % text
return text
def _generate_generic_unary_operator(self, unary, opstring, **kw):
return opstring + unary.element._compiler_dispatch(self, **kw)
def _generate_generic_unary_modifier(self, unary, opstring, **kw):
return unary.element._compiler_dispatch(self, **kw) + opstring
@util.memoized_property
def _like_percent_literal(self):
return elements.literal_column("'%'", type_=sqltypes.STRINGTYPE)
def visit_contains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notcontains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_startswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(binary.right)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notstartswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(binary.right)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_endswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notendswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_notlike_op_binary(binary, operator, **kw)
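
    @staticmethod
    def _demo_like_rewrites():
        # Editor's sketch, not part of the original class: the operators
        # above rewrite themselves into LIKE expressions by concatenating
        # '%' literals around the bound value.
        from sqlalchemy import column

        print(column("name").contains("al"))    # name LIKE '%' || :name_1 || '%'
        print(column("name").startswith("al"))  # name LIKE :name_1 || '%'
        print(column("name").endswith("al"))    # name LIKE '%' || :name_1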
def visit_like_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
# TODO: use ternary here, not "and"/ "or"
return "%s LIKE %s" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape
else ""
)
def visit_notlike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return "%s NOT LIKE %s" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape
else ""
)
def visit_ilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return "lower(%s) LIKE lower(%s)" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape
else ""
)
def visit_notilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return "lower(%s) NOT LIKE lower(%s)" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape
else ""
)
def visit_between_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary, " BETWEEN SYMMETRIC " if symmetric else " BETWEEN ", **kw
)
def visit_notbetween_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary,
" NOT BETWEEN SYMMETRIC " if symmetric else " NOT BETWEEN ",
**kw
)
def visit_bindparam(
self,
bindparam,
within_columns_clause=False,
literal_binds=False,
skip_bind_expression=False,
**kwargs
):
if not skip_bind_expression:
impl = bindparam.type.dialect_impl(self.dialect)
if impl._has_bind_expression:
bind_expression = impl.bind_expression(bindparam)
return self.process(
bind_expression,
skip_bind_expression=True,
within_columns_clause=within_columns_clause,
literal_binds=literal_binds,
**kwargs
)
if literal_binds or (within_columns_clause and self.ansi_bind_rules):
if bindparam.value is None and bindparam.callable is None:
raise exc.CompileError(
"Bind parameter '%s' without a "
"renderable value not allowed here." % bindparam.key
)
return self.render_literal_bindparam(
bindparam, within_columns_clause=True, **kwargs
)
name = self._truncate_bindparam(bindparam)
if name in self.binds:
existing = self.binds[name]
if existing is not bindparam:
if (
existing.unique or bindparam.unique
) and not existing.proxy_set.intersection(bindparam.proxy_set):
raise exc.CompileError(
"Bind parameter '%s' conflicts with "
"unique bind parameter of the same name"
% bindparam.key
)
elif existing._is_crud or bindparam._is_crud:
raise exc.CompileError(
"bindparam() name '%s' is reserved "
"for automatic usage in the VALUES or SET "
"clause of this "
"insert/update statement. Please use a "
"name other than column name when using bindparam() "
"with insert() or update() (for example, 'b_%s')."
% (bindparam.key, bindparam.key)
)
self.binds[bindparam.key] = self.binds[name] = bindparam
return self.bindparam_string(
name, expanding=bindparam.expanding, **kwargs
)
def render_literal_bindparam(self, bindparam, **kw):
value = bindparam.effective_value
return self.render_literal_value(value, bindparam.type)
def render_literal_value(self, value, type_):
"""Render the value of a bind parameter as a quoted literal.
This is used for statement sections that do not accept bind parameters
on the target driver/database.
This should be implemented by subclasses using the quoting services
of the DBAPI.
"""
processor = type_._cached_literal_processor(self.dialect)
if processor:
return processor(value)
else:
raise NotImplementedError(
"Don't know how to literal-quote value %r" % value
)
def _truncate_bindparam(self, bindparam):
if bindparam in self.bind_names:
return self.bind_names[bindparam]
bind_name = bindparam.key
if isinstance(bind_name, elements._truncated_label):
bind_name = self._truncated_identifier("bindparam", bind_name)
# add to bind_names for translation
self.bind_names[bindparam] = bind_name
return bind_name
def _truncated_identifier(self, ident_class, name):
if (ident_class, name) in self.truncated_names:
return self.truncated_names[(ident_class, name)]
anonname = name.apply_map(self.anon_map)
if len(anonname) > self.label_length - 6:
counter = self.truncated_names.get(ident_class, 1)
truncname = (
anonname[0 : max(self.label_length - 6, 0)]
+ "_"
+ hex(counter)[2:]
)
self.truncated_names[ident_class] = counter + 1
else:
truncname = anonname
self.truncated_names[(ident_class, name)] = truncname
return truncname
def _anonymize(self, name):
return name % self.anon_map
def bindparam_string(
self, name, positional_names=None, expanding=False, **kw
):
if self.positional:
if positional_names is not None:
positional_names.append(name)
else:
self.positiontup.append(name)
if expanding:
self.contains_expanding_parameters = True
return "([EXPANDING_%s])" % name
else:
return self.bindtemplate % {"name": name}
def visit_cte(
self,
cte,
asfrom=False,
ashint=False,
fromhints=None,
visiting_cte=None,
**kwargs
):
self._init_cte_state()
kwargs["visiting_cte"] = cte
if isinstance(cte.name, elements._truncated_label):
cte_name = self._truncated_identifier("alias", cte.name)
else:
cte_name = cte.name
is_new_cte = True
embedded_in_current_named_cte = False
if cte_name in self.ctes_by_name:
existing_cte = self.ctes_by_name[cte_name]
embedded_in_current_named_cte = visiting_cte is existing_cte
# we've generated a same-named CTE that we are enclosed in,
# or this is the same CTE. just return the name.
if cte in existing_cte._restates or cte is existing_cte:
is_new_cte = False
elif existing_cte in cte._restates:
# we've generated a same-named CTE that is
# enclosed in us - we take precedence, so
# discard the text for the "inner".
del self.ctes[existing_cte]
else:
raise exc.CompileError(
"Multiple, unrelated CTEs found with "
"the same name: %r" % cte_name
)
if asfrom or is_new_cte:
if cte._cte_alias is not None:
pre_alias_cte = cte._cte_alias
cte_pre_alias_name = cte._cte_alias.name
if isinstance(cte_pre_alias_name, elements._truncated_label):
cte_pre_alias_name = self._truncated_identifier(
"alias", cte_pre_alias_name
)
else:
pre_alias_cte = cte
cte_pre_alias_name = None
if is_new_cte:
self.ctes_by_name[cte_name] = cte
# look for embedded DML ctes and propagate autocommit
if (
"autocommit" in cte.element._execution_options
and "autocommit" not in self.execution_options
):
self.execution_options = self.execution_options.union(
{
"autocommit": cte.element._execution_options[
"autocommit"
]
}
)
if pre_alias_cte not in self.ctes:
self.visit_cte(pre_alias_cte, **kwargs)
if not cte_pre_alias_name and cte not in self.ctes:
if cte.recursive:
self.ctes_recursive = True
text = self.preparer.format_alias(cte, cte_name)
if cte.recursive:
if isinstance(cte.original, selectable.Select):
col_source = cte.original
elif isinstance(cte.original, selectable.CompoundSelect):
col_source = cte.original.selects[0]
else:
assert False
recur_cols = [
c
for c in util.unique_list(col_source.inner_columns)
if c is not None
]
text += "(%s)" % (
", ".join(
self.preparer.format_column(ident)
for ident in recur_cols
)
)
if self.positional:
kwargs["positional_names"] = self.cte_positional[cte] = []
text += " AS %s\n%s" % (
self._generate_prefixes(cte, cte._prefixes, **kwargs),
cte.original._compiler_dispatch(
self, asfrom=True, **kwargs
),
)
if cte._suffixes:
text += " " + self._generate_prefixes(
cte, cte._suffixes, **kwargs
)
self.ctes[cte] = text
if asfrom:
if not is_new_cte and embedded_in_current_named_cte:
return self.preparer.format_alias(cte, cte_name)
if cte_pre_alias_name:
text = self.preparer.format_alias(cte, cte_pre_alias_name)
if self.preparer._requires_quotes(cte_name):
cte_name = self.preparer.quote(cte_name)
text += self.get_render_as_alias_suffix(cte_name)
return text
else:
return self.preparer.format_alias(cte, cte_name)
def visit_alias(
self,
alias,
asfrom=False,
ashint=False,
iscrud=False,
fromhints=None,
**kwargs
):
if asfrom or ashint:
if isinstance(alias.name, elements._truncated_label):
alias_name = self._truncated_identifier("alias", alias.name)
else:
alias_name = alias.name
if ashint:
return self.preparer.format_alias(alias, alias_name)
elif asfrom:
ret = alias.original._compiler_dispatch(
self, asfrom=True, **kwargs
) + self.get_render_as_alias_suffix(
self.preparer.format_alias(alias, alias_name)
)
if fromhints and alias in fromhints:
ret = self.format_from_hint_text(
ret, alias, fromhints[alias], iscrud
)
return ret
else:
return alias.original._compiler_dispatch(self, **kwargs)
def visit_lateral(self, lateral, **kw):
kw["lateral"] = True
return "LATERAL %s" % self.visit_alias(lateral, **kw)
def visit_tablesample(self, tablesample, asfrom=False, **kw):
text = "%s TABLESAMPLE %s" % (
self.visit_alias(tablesample, asfrom=True, **kw),
tablesample._get_method()._compiler_dispatch(self, **kw),
)
if tablesample.seed is not None:
text += " REPEATABLE (%s)" % (
tablesample.seed._compiler_dispatch(self, **kw)
)
return text
def get_render_as_alias_suffix(self, alias_name_text):
return " AS " + alias_name_text
def _add_to_result_map(self, keyname, name, objects, type_):
self._result_columns.append((keyname, name, objects, type_))
def _label_select_column(
self,
select,
column,
populate_result_map,
asfrom,
column_clause_args,
name=None,
within_columns_clause=True,
need_column_expressions=False,
):
"""produce labeled columns present in a select()."""
impl = column.type.dialect_impl(self.dialect)
if impl._has_column_expression and (
need_column_expressions or populate_result_map
):
col_expr = impl.column_expression(column)
if populate_result_map:
def add_to_result_map(keyname, name, objects, type_):
self._add_to_result_map(
keyname, name, (column,) + objects, type_
)
else:
add_to_result_map = None
else:
col_expr = column
if populate_result_map:
add_to_result_map = self._add_to_result_map
else:
add_to_result_map = None
if not within_columns_clause:
result_expr = col_expr
elif isinstance(column, elements.Label):
if col_expr is not column:
result_expr = _CompileLabel(
col_expr, column.name, alt_names=(column.element,)
)
else:
result_expr = col_expr
elif select is not None and name:
result_expr = _CompileLabel(
col_expr, name, alt_names=(column._key_label,)
)
elif (
asfrom
and isinstance(column, elements.ColumnClause)
and not column.is_literal
and column.table is not None
and not isinstance(column.table, selectable.Select)
):
result_expr = _CompileLabel(
col_expr,
elements._as_truncated(column.name),
alt_names=(column.key,),
)
elif (
not isinstance(column, elements.TextClause)
and (
not isinstance(column, elements.UnaryExpression)
or column.wraps_column_expression
)
and (
not hasattr(column, "name")
or isinstance(column, functions.Function)
)
):
result_expr = _CompileLabel(col_expr, column.anon_label)
elif col_expr is not column:
# TODO: are we sure "column" has a .name and .key here ?
# assert isinstance(column, elements.ColumnClause)
result_expr = _CompileLabel(
col_expr,
elements._as_truncated(column.name),
alt_names=(column.key,),
)
else:
result_expr = col_expr
column_clause_args.update(
within_columns_clause=within_columns_clause,
add_to_result_map=add_to_result_map,
)
return result_expr._compiler_dispatch(self, **column_clause_args)
def format_from_hint_text(self, sqltext, table, hint, iscrud):
hinttext = self.get_from_hint_text(table, hint)
if hinttext:
sqltext += " " + hinttext
return sqltext
def get_select_hint_text(self, byfroms):
return None
def get_from_hint_text(self, table, text):
return None
def get_crud_hint_text(self, table, text):
return None
def get_statement_hint_text(self, hint_texts):
return " ".join(hint_texts)
def _transform_select_for_nested_joins(self, select):
"""Rewrite any "a JOIN (b JOIN c)" expression as
"a JOIN (select * from b JOIN c) AS anon", to support
databases that can't parse a parenthesized join correctly
        (e.g. SQLite < 3.7.16).
"""
cloned = {}
column_translate = [{}]
def visit(element, **kw):
if element in column_translate[-1]:
return column_translate[-1][element]
elif element in cloned:
return cloned[element]
newelem = cloned[element] = element._clone()
if (
newelem.is_selectable
and newelem._is_join
and isinstance(newelem.right, selectable.FromGrouping)
):
newelem._reset_exported()
newelem.left = visit(newelem.left, **kw)
right = visit(newelem.right, **kw)
selectable_ = selectable.Select(
[right.element], use_labels=True
).alias()
for c in selectable_.c:
c._key_label = c.key
c._label = c.name
translate_dict = dict(
zip(newelem.right.element.c, selectable_.c)
)
# translating from both the old and the new
# because different select() structures will lead us
# to traverse differently
translate_dict[right.element.left] = selectable_
translate_dict[right.element.right] = selectable_
translate_dict[newelem.right.element.left] = selectable_
translate_dict[newelem.right.element.right] = selectable_
# propagate translations that we've gained
# from nested visit(newelem.right) outwards
# to the enclosing select here. this happens
# only when we have more than one level of right
# join nesting, i.e. "a JOIN (b JOIN (c JOIN d))"
for k, v in list(column_translate[-1].items()):
if v in translate_dict:
# remarkably, no current ORM tests (May 2013)
# hit this condition, only test_join_rewriting
# does.
column_translate[-1][k] = translate_dict[v]
column_translate[-1].update(translate_dict)
newelem.right = selectable_
newelem.onclause = visit(newelem.onclause, **kw)
elif newelem._is_from_container:
# if we hit an Alias, CompoundSelect or ScalarSelect, put a
# marker in the stack.
kw["transform_clue"] = "select_container"
newelem._copy_internals(clone=visit, **kw)
elif newelem.is_selectable and newelem._is_select:
barrier_select = (
kw.get("transform_clue", None) == "select_container"
)
# if we're still descended from an
# Alias/CompoundSelect/ScalarSelect, we're
# in a FROM clause, so start with a new translate collection
if barrier_select:
column_translate.append({})
kw["transform_clue"] = "inside_select"
newelem._copy_internals(clone=visit, **kw)
if barrier_select:
del column_translate[-1]
else:
newelem._copy_internals(clone=visit, **kw)
return newelem
return visit(select)
def _transform_result_map_for_nested_joins(
self, select, transformed_select
):
inner_col = dict(
(c._key_label, c) for c in transformed_select.inner_columns
)
d = dict((inner_col[c._key_label], c) for c in select.inner_columns)
self._result_columns = [
(key, name, tuple([d.get(col, col) for col in objs]), typ)
for key, name, objs, typ in self._result_columns
]
_default_stack_entry = util.immutabledict(
[("correlate_froms", frozenset()), ("asfrom_froms", frozenset())]
)
def _display_froms_for_select(self, select, asfrom, lateral=False):
# utility method to help external dialects
# get the correct from list for a select.
# specifically the oracle dialect needs this feature
# right now.
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
correlate_froms = entry["correlate_froms"]
asfrom_froms = entry["asfrom_froms"]
if asfrom and not lateral:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms.difference(
asfrom_froms
),
implicit_correlate_froms=(),
)
else:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms,
implicit_correlate_froms=asfrom_froms,
)
return froms
def visit_select(
self,
select,
asfrom=False,
parens=True,
fromhints=None,
compound_index=0,
nested_join_translation=False,
select_wraps_for=None,
lateral=False,
**kwargs
):
needs_nested_translation = (
select.use_labels
and not nested_join_translation
and not self.stack
and not self.dialect.supports_right_nested_joins
)
if needs_nested_translation:
transformed_select = self._transform_select_for_nested_joins(
select
)
text = self.visit_select(
transformed_select,
asfrom=asfrom,
parens=parens,
fromhints=fromhints,
compound_index=compound_index,
nested_join_translation=True,
**kwargs
)
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = need_column_expressions = (
toplevel
or entry.get("need_result_map_for_compound", False)
or entry.get("need_result_map_for_nested", False)
)
if compound_index > 0:
populate_result_map = False
# this was first proposed as part of #3372; however, it is not
# reached in current tests and could possibly be an assertion
# instead.
if not populate_result_map and "add_to_result_map" in kwargs:
del kwargs["add_to_result_map"]
if needs_nested_translation:
if populate_result_map:
self._transform_result_map_for_nested_joins(
select, transformed_select
)
return text
froms = self._setup_select_stack(select, entry, asfrom, lateral)
column_clause_args = kwargs.copy()
column_clause_args.update(
{"within_label_clause": False, "within_columns_clause": False}
)
text = "SELECT " # we're off to a good start !
if select._hints:
hint_text, byfrom = self._setup_select_hints(select)
if hint_text:
text += hint_text + " "
else:
byfrom = None
if select._prefixes:
text += self._generate_prefixes(select, select._prefixes, **kwargs)
text += self.get_select_precolumns(select, **kwargs)
# the actual list of columns to print in the SELECT column list.
inner_columns = [
c
for c in [
self._label_select_column(
select,
column,
populate_result_map,
asfrom,
column_clause_args,
name=name,
need_column_expressions=need_column_expressions,
)
for name, column in select._columns_plus_names
]
if c is not None
]
if populate_result_map and select_wraps_for is not None:
# if this select is a compiler-generated wrapper,
# rewrite the targeted columns in the result map
translate = dict(
zip(
[name for (key, name) in select._columns_plus_names],
[
name
for (key, name) in select_wraps_for._columns_plus_names
],
)
)
self._result_columns = [
(key, name, tuple(translate.get(o, o) for o in obj), type_)
for key, name, obj, type_ in self._result_columns
]
text = self._compose_select_body(
text, select, inner_columns, froms, byfrom, kwargs
)
if select._statement_hints:
per_dialect = [
ht
for (dialect_name, ht) in select._statement_hints
if dialect_name in ("*", self.dialect.name)
]
if per_dialect:
text += " " + self.get_statement_hint_text(per_dialect)
if self.ctes and toplevel:
text = self._render_cte_clause() + text
if select._suffixes:
text += " " + self._generate_prefixes(
select, select._suffixes, **kwargs
)
self.stack.pop(-1)
if (asfrom or lateral) and parens:
return "(" + text + ")"
else:
return text
def _setup_select_hints(self, select):
byfrom = dict(
[
(
from_,
hinttext
% {"name": from_._compiler_dispatch(self, ashint=True)},
)
for (from_, dialect), hinttext in select._hints.items()
if dialect in ("*", self.dialect.name)
]
)
hint_text = self.get_select_hint_text(byfrom)
return hint_text, byfrom
def _setup_select_stack(self, select, entry, asfrom, lateral):
correlate_froms = entry["correlate_froms"]
asfrom_froms = entry["asfrom_froms"]
if asfrom and not lateral:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms.difference(
asfrom_froms
),
implicit_correlate_froms=(),
)
else:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms,
implicit_correlate_froms=asfrom_froms,
)
new_correlate_froms = set(selectable._from_objects(*froms))
all_correlate_froms = new_correlate_froms.union(correlate_froms)
new_entry = {
"asfrom_froms": new_correlate_froms,
"correlate_froms": all_correlate_froms,
"selectable": select,
}
self.stack.append(new_entry)
return froms
def _compose_select_body(
self, text, select, inner_columns, froms, byfrom, kwargs
):
text += ", ".join(inner_columns)
if froms:
text += " \nFROM "
if select._hints:
text += ", ".join(
[
f._compiler_dispatch(
self, asfrom=True, fromhints=byfrom, **kwargs
)
for f in froms
]
)
else:
text += ", ".join(
[
f._compiler_dispatch(self, asfrom=True, **kwargs)
for f in froms
]
)
else:
text += self.default_from()
if select._whereclause is not None:
t = select._whereclause._compiler_dispatch(self, **kwargs)
if t:
text += " \nWHERE " + t
if select._group_by_clause.clauses:
text += self.group_by_clause(select, **kwargs)
if select._having is not None:
t = select._having._compiler_dispatch(self, **kwargs)
if t:
text += " \nHAVING " + t
if select._order_by_clause.clauses:
text += self.order_by_clause(select, **kwargs)
if (
select._limit_clause is not None
or select._offset_clause is not None
):
text += self.limit_clause(select, **kwargs)
if select._for_update_arg is not None:
text += self.for_update_clause(select, **kwargs)
return text
def _generate_prefixes(self, stmt, prefixes, **kw):
clause = " ".join(
prefix._compiler_dispatch(self, **kw)
for prefix, dialect_name in prefixes
if dialect_name is None or dialect_name == self.dialect.name
)
if clause:
clause += " "
return clause
def _render_cte_clause(self):
if self.positional:
self.positiontup = (
sum([self.cte_positional[cte] for cte in self.ctes], [])
+ self.positiontup
)
cte_text = self.get_cte_preamble(self.ctes_recursive) + " "
cte_text += ", \n".join([txt for txt in self.ctes.values()])
cte_text += "\n "
return cte_text
def get_cte_preamble(self, recursive):
if recursive:
return "WITH RECURSIVE"
else:
return "WITH"
def get_select_precolumns(self, select, **kw):
"""Called when building a ``SELECT`` statement, position is just
before column list.
"""
return select._distinct and "DISTINCT " or ""
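    # Hedged illustration (whitespace condensed)::
    #
    #     from sqlalchemy import column, select, table
    #     t = table("t", column("a"))
    #     print(select([t.c.a]).distinct())
    #     # SELECT DISTINCT t.a FROM t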
def group_by_clause(self, select, **kw):
"""allow dialects to customize how GROUP BY is rendered."""
group_by = select._group_by_clause._compiler_dispatch(self, **kw)
if group_by:
return " GROUP BY " + group_by
else:
return ""
def order_by_clause(self, select, **kw):
"""allow dialects to customize how ORDER BY is rendered."""
order_by = select._order_by_clause._compiler_dispatch(self, **kw)
if order_by:
return " ORDER BY " + order_by
else:
return ""
def for_update_clause(self, select, **kw):
return " FOR UPDATE"
def returning_clause(self, stmt, returning_cols):
raise exc.CompileError(
"RETURNING is not supported by this "
"dialect's statement compiler."
)
def limit_clause(self, select, **kw):
text = ""
if select._limit_clause is not None:
text += "\n LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
text += "\n LIMIT -1"
text += " OFFSET " + self.process(select._offset_clause, **kw)
return text
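    # Hedged illustration of the OFFSET-without-LIMIT quirk handled above
    # (whitespace condensed)::
    #
    #     print(select([t.c.a]).offset(10))
    #     # SELECT t.a FROM t LIMIT -1 OFFSET :param_1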
def visit_table(
self,
table,
asfrom=False,
iscrud=False,
ashint=False,
fromhints=None,
use_schema=True,
**kwargs
):
if asfrom or ashint:
effective_schema = self.preparer.schema_for_object(table)
if use_schema and effective_schema:
ret = (
self.preparer.quote_schema(effective_schema)
+ "."
+ self.preparer.quote(table.name)
)
else:
ret = self.preparer.quote(table.name)
if fromhints and table in fromhints:
ret = self.format_from_hint_text(
ret, table, fromhints[table], iscrud
)
return ret
else:
return ""
def visit_join(self, join, asfrom=False, **kwargs):
if join.full:
join_type = " FULL OUTER JOIN "
elif join.isouter:
join_type = " LEFT OUTER JOIN "
else:
join_type = " JOIN "
return (
join.left._compiler_dispatch(self, asfrom=True, **kwargs)
+ join_type
+ join.right._compiler_dispatch(self, asfrom=True, **kwargs)
+ " ON "
+ join.onclause._compiler_dispatch(self, **kwargs)
)
def _setup_crud_hints(self, stmt, table_text):
dialect_hints = dict(
[
(table, hint_text)
for (table, dialect), hint_text in stmt._hints.items()
if dialect in ("*", self.dialect.name)
]
)
if stmt.table in dialect_hints:
table_text = self.format_from_hint_text(
table_text, stmt.table, dialect_hints[stmt.table], True
)
return dialect_hints, table_text
def visit_insert(self, insert_stmt, asfrom=False, **kw):
toplevel = not self.stack
self.stack.append(
{
"correlate_froms": set(),
"asfrom_froms": set(),
"selectable": insert_stmt,
}
)
crud_params = crud._setup_crud_params(
self, insert_stmt, crud.ISINSERT, **kw
)
if (
not crud_params
and not self.dialect.supports_default_values
and not self.dialect.supports_empty_insert
):
raise exc.CompileError(
"The '%s' dialect with current database "
"version settings does not support empty "
"inserts." % self.dialect.name
)
if insert_stmt._has_multi_parameters:
if not self.dialect.supports_multivalues_insert:
raise exc.CompileError(
"The '%s' dialect with current database "
"version settings does not support "
"in-place multirow inserts." % self.dialect.name
)
crud_params_single = crud_params[0]
else:
crud_params_single = crud_params
preparer = self.preparer
supports_default_values = self.dialect.supports_default_values
text = "INSERT "
if insert_stmt._prefixes:
text += self._generate_prefixes(
insert_stmt, insert_stmt._prefixes, **kw
)
text += "INTO "
table_text = preparer.format_table(insert_stmt.table)
if insert_stmt._hints:
_, table_text = self._setup_crud_hints(insert_stmt, table_text)
text += table_text
if crud_params_single or not supports_default_values:
text += " (%s)" % ", ".join(
[preparer.format_column(c[0]) for c in crud_params_single]
)
if self.returning or insert_stmt._returning:
returning_clause = self.returning_clause(
insert_stmt, self.returning or insert_stmt._returning
)
if self.returning_precedes_values:
text += " " + returning_clause
else:
returning_clause = None
if insert_stmt.select is not None:
select_text = self.process(self._insert_from_select, **kw)
if self.ctes and toplevel and self.dialect.cte_follows_insert:
text += " %s%s" % (self._render_cte_clause(), select_text)
else:
text += " %s" % select_text
elif not crud_params and supports_default_values:
text += " DEFAULT VALUES"
elif insert_stmt._has_multi_parameters:
text += " VALUES %s" % (
", ".join(
"(%s)" % (", ".join(c[1] for c in crud_param_set))
for crud_param_set in crud_params
)
)
else:
insert_single_values_expr = ", ".join([c[1] for c in crud_params])
text += " VALUES (%s)" % insert_single_values_expr
if toplevel:
self.insert_single_values_expr = insert_single_values_expr
if insert_stmt._post_values_clause is not None:
post_values_clause = self.process(
insert_stmt._post_values_clause, **kw
)
if post_values_clause:
text += " " + post_values_clause
if returning_clause and not self.returning_precedes_values:
text += " " + returning_clause
if self.ctes and toplevel and not self.dialect.cte_follows_insert:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom:
return "(" + text + ")"
else:
return text
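    # Hedged illustration (assumes a dialect where
    # ``supports_multivalues_insert`` is True, such as mysql): a statement
    # like ``t.insert().values([{"a": 1}, {"a": 2}])`` renders roughly as
    # "INSERT INTO t (a) VALUES (%s), (%s)"; the parameter names for the
    # trailing value sets are generated by the crud module.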
def update_limit_clause(self, update_stmt):
"""Provide a hook for MySQL to add LIMIT to the UPDATE"""
return None
def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw):
"""Provide a hook to override the initial table clause
in an UPDATE statement.
MySQL overrides this.
"""
kw["asfrom"] = True
return from_table._compiler_dispatch(self, iscrud=True, **kw)
def update_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
"""Provide a hook to override the generation of an
UPDATE..FROM clause.
MySQL and MSSQL override this.
"""
raise NotImplementedError(
"This backend does not support multiple-table "
"criteria within UPDATE"
)
def visit_update(self, update_stmt, asfrom=False, **kw):
toplevel = not self.stack
extra_froms = update_stmt._extra_froms
is_multitable = bool(extra_froms)
if is_multitable:
# main table might be a JOIN
main_froms = set(selectable._from_objects(update_stmt.table))
render_extra_froms = [
f for f in extra_froms if f not in main_froms
]
correlate_froms = main_froms.union(extra_froms)
else:
render_extra_froms = []
correlate_froms = {update_stmt.table}
self.stack.append(
{
"correlate_froms": correlate_froms,
"asfrom_froms": correlate_froms,
"selectable": update_stmt,
}
)
text = "UPDATE "
if update_stmt._prefixes:
text += self._generate_prefixes(
update_stmt, update_stmt._prefixes, **kw
)
table_text = self.update_tables_clause(
update_stmt, update_stmt.table, render_extra_froms, **kw
)
crud_params = crud._setup_crud_params(
self, update_stmt, crud.ISUPDATE, **kw
)
if update_stmt._hints:
dialect_hints, table_text = self._setup_crud_hints(
update_stmt, table_text
)
else:
dialect_hints = None
text += table_text
text += " SET "
include_table = (
is_multitable and self.render_table_with_column_in_update_from
)
text += ", ".join(
c[0]._compiler_dispatch(self, include_table=include_table)
+ "="
+ c[1]
for c in crud_params
)
if self.returning or update_stmt._returning:
if self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning or update_stmt._returning
)
if extra_froms:
extra_from_text = self.update_from_clause(
update_stmt,
update_stmt.table,
render_extra_froms,
dialect_hints,
**kw
)
if extra_from_text:
text += " " + extra_from_text
if update_stmt._whereclause is not None:
t = self.process(update_stmt._whereclause, **kw)
if t:
text += " WHERE " + t
limit_clause = self.update_limit_clause(update_stmt)
if limit_clause:
text += " " + limit_clause
if (
self.returning or update_stmt._returning
) and not self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning or update_stmt._returning
)
if self.ctes and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom:
return "(" + text + ")"
else:
return text
@util.memoized_property
def _key_getters_for_crud_column(self):
return crud._key_getters_for_crud_column(self, self.statement)
def delete_extra_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
"""Provide a hook to override the generation of an
DELETE..FROM clause.
This can be used to implement DELETE..USING for example.
MySQL and MSSQL override this.
"""
raise NotImplementedError(
"This backend does not support multiple-table "
"criteria within DELETE"
)
def delete_table_clause(self, delete_stmt, from_table, extra_froms):
return from_table._compiler_dispatch(self, asfrom=True, iscrud=True)
def visit_delete(self, delete_stmt, asfrom=False, **kw):
toplevel = not self.stack
crud._setup_crud_params(self, delete_stmt, crud.ISDELETE, **kw)
extra_froms = delete_stmt._extra_froms
correlate_froms = {delete_stmt.table}.union(extra_froms)
self.stack.append(
{
"correlate_froms": correlate_froms,
"asfrom_froms": correlate_froms,
"selectable": delete_stmt,
}
)
text = "DELETE "
if delete_stmt._prefixes:
text += self._generate_prefixes(
delete_stmt, delete_stmt._prefixes, **kw
)
text += "FROM "
table_text = self.delete_table_clause(
delete_stmt, delete_stmt.table, extra_froms
)
if delete_stmt._hints:
dialect_hints, table_text = self._setup_crud_hints(
delete_stmt, table_text
)
else:
dialect_hints = None
text += table_text
if delete_stmt._returning:
if self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt, delete_stmt._returning
)
if extra_froms:
extra_from_text = self.delete_extra_from_clause(
delete_stmt,
delete_stmt.table,
extra_froms,
dialect_hints,
**kw
)
if extra_from_text:
text += " " + extra_from_text
if delete_stmt._whereclause is not None:
t = delete_stmt._whereclause._compiler_dispatch(self, **kw)
if t:
text += " WHERE " + t
if delete_stmt._returning and not self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt, delete_stmt._returning
)
if self.ctes and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom:
return "(" + text + ")"
else:
return text
def visit_savepoint(self, savepoint_stmt):
return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt)
def visit_rollback_to_savepoint(self, savepoint_stmt):
return "ROLLBACK TO SAVEPOINT %s" % self.preparer.format_savepoint(
savepoint_stmt
)
def visit_release_savepoint(self, savepoint_stmt):
return "RELEASE SAVEPOINT %s" % self.preparer.format_savepoint(
savepoint_stmt
)
class StrSQLCompiler(SQLCompiler):
"""A :class:`.SQLCompiler` subclass which allows a small selection
of non-standard SQL features to render into a string value.
The :class:`.StrSQLCompiler` is invoked whenever a Core expression
element is directly stringified without calling upon the
:meth:`_expression.ClauseElement.compile` method.
It can render a limited set
of non-standard SQL constructs to assist in basic stringification,
however for more substantial custom or dialect-specific SQL constructs,
it will be necessary to make use of
:meth:`_expression.ClauseElement.compile`
directly.
.. seealso::
:ref:`faq_sql_expression_string`
"""
def _fallback_column_name(self, column):
return "<name unknown>"
def visit_getitem_binary(self, binary, operator, **kw):
return "%s[%s]" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_json_getitem_op_binary(self, binary, operator, **kw):
return self.visit_getitem_binary(binary, operator, **kw)
def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
return self.visit_getitem_binary(binary, operator, **kw)
def visit_sequence(self, seq, **kw):
return "<next sequence value: %s>" % self.preparer.format_sequence(seq)
def returning_clause(self, stmt, returning_cols):
columns = [
self._label_select_column(None, c, True, False, {})
for c in elements._select_iterables(returning_cols)
]
return "RETURNING " + ", ".join(columns)
def update_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
return "FROM " + ", ".join(
t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
for t in extra_froms
)
def delete_extra_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
return ", " + ", ".join(
t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
for t in extra_froms
)
def get_from_hint_text(self, table, text):
return "[%s]" % text
class DDLCompiler(Compiled):
@util.memoized_property
def sql_compiler(self):
return self.dialect.statement_compiler(self.dialect, None)
@util.memoized_property
def type_compiler(self):
return self.dialect.type_compiler
def construct_params(self, params=None):
return None
def visit_ddl(self, ddl, **kwargs):
# table events can substitute table and schema name
context = ddl.context
if isinstance(ddl.target, schema.Table):
context = context.copy()
preparer = self.preparer
path = preparer.format_table_seq(ddl.target)
if len(path) == 1:
table, sch = path[0], ""
else:
table, sch = path[-1], path[0]
context.setdefault("table", table)
context.setdefault("schema", sch)
context.setdefault("fullname", preparer.format_table(ddl.target))
return self.sql_compiler.post_process_text(ddl.statement % context)
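    # Hedged usage sketch: the ``%(table)s`` / ``%(schema)s`` /
    # ``%(fullname)s`` tokens substituted above support constructs such as::
    #
    #     from sqlalchemy import DDL, event
    #     event.listen(
    #         my_table,  # an assumed Table object
    #         "after_create",
    #         DDL("ALTER TABLE %(fullname)s ENGINE=InnoDB"),
    #     )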
def visit_create_schema(self, create):
schema = self.preparer.format_schema(create.element)
return "CREATE SCHEMA " + schema
def visit_drop_schema(self, drop):
schema = self.preparer.format_schema(drop.element)
text = "DROP SCHEMA " + schema
if drop.cascade:
text += " CASCADE"
return text
def visit_create_table(self, create):
table = create.element
preparer = self.preparer
text = "\nCREATE "
if table._prefixes:
text += " ".join(table._prefixes) + " "
text += "TABLE " + preparer.format_table(table) + " "
create_table_suffix = self.create_table_suffix(table)
if create_table_suffix:
text += create_table_suffix + " "
text += "("
separator = "\n"
# if only one primary key, specify it along with the column
first_pk = False
for create_column in create.columns:
column = create_column.element
try:
processed = self.process(
create_column, first_pk=column.primary_key and not first_pk
)
if processed is not None:
text += separator
separator = ", \n"
text += "\t" + processed
if column.primary_key:
first_pk = True
except exc.CompileError as ce:
util.raise_(
exc.CompileError(
util.u("(in table '%s', column '%s'): %s")
% (table.description, column.name, ce.args[0])
),
from_=ce,
)
const = self.create_table_constraints(
table,
_include_foreign_key_constraints=create.include_foreign_key_constraints, # noqa
)
if const:
text += separator + "\t" + const
text += "\n)%s\n\n" % self.post_create_table(table)
return text
def visit_create_column(self, create, first_pk=False):
column = create.element
if column.system:
return None
text = self.get_column_specification(column, first_pk=first_pk)
const = " ".join(
self.process(constraint) for constraint in column.constraints
)
if const:
text += " " + const
return text
def create_table_constraints(
self, table, _include_foreign_key_constraints=None
):
# On some DB order is significant: visit PK first, then the
# other constraints (engine.ReflectionTest.testbasic failed on FB2)
constraints = []
if table.primary_key:
constraints.append(table.primary_key)
all_fkcs = table.foreign_key_constraints
if _include_foreign_key_constraints is not None:
omit_fkcs = all_fkcs.difference(_include_foreign_key_constraints)
else:
omit_fkcs = set()
constraints.extend(
[
c
for c in table._sorted_constraints
if c is not table.primary_key and c not in omit_fkcs
]
)
return ", \n\t".join(
p
for p in (
self.process(constraint)
for constraint in constraints
if (
constraint._create_rule is None
or constraint._create_rule(self)
)
and (
not self.dialect.supports_alter
or not getattr(constraint, "use_alter", False)
)
)
if p is not None
)
def visit_drop_table(self, drop):
return "\nDROP TABLE " + self.preparer.format_table(drop.element)
def visit_drop_view(self, drop):
return "\nDROP VIEW " + self.preparer.format_table(drop.element)
def _verify_index_table(self, index):
if index.table is None:
raise exc.CompileError(
"Index '%s' is not associated " "with any table." % index.name
)
def visit_create_index(
self, create, include_schema=False, include_table_schema=True
):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
if index.name is None:
raise exc.CompileError(
"CREATE INDEX requires that the index have a name"
)
text += "INDEX %s ON %s (%s)" % (
self._prepared_index_name(index, include_schema=include_schema),
preparer.format_table(
index.table, use_schema=include_table_schema
),
", ".join(
self.sql_compiler.process(
expr, include_table=False, literal_binds=True
)
for expr in index.expressions
),
)
return text
def visit_drop_index(self, drop):
index = drop.element
if index.name is None:
raise exc.CompileError(
"DROP INDEX requires that the index have a name"
)
return "\nDROP INDEX " + self._prepared_index_name(
index, include_schema=True
)
def _prepared_index_name(self, index, include_schema=False):
if index.table is not None:
effective_schema = self.preparer.schema_for_object(index.table)
else:
effective_schema = None
if include_schema and effective_schema:
schema_name = self.preparer.quote_schema(effective_schema)
else:
schema_name = None
index_name = self.preparer.format_index(index)
if schema_name:
index_name = schema_name + "." + index_name
return index_name
def visit_add_constraint(self, create):
return "ALTER TABLE %s ADD %s" % (
self.preparer.format_table(create.element.table),
self.process(create.element),
)
def visit_set_table_comment(self, create):
return "COMMENT ON TABLE %s IS %s" % (
self.preparer.format_table(create.element),
self.sql_compiler.render_literal_value(
create.element.comment, sqltypes.String()
),
)
def visit_drop_table_comment(self, drop):
return "COMMENT ON TABLE %s IS NULL" % self.preparer.format_table(
drop.element
)
def visit_set_column_comment(self, create):
return "COMMENT ON COLUMN %s IS %s" % (
self.preparer.format_column(
create.element, use_table=True, use_schema=True
),
self.sql_compiler.render_literal_value(
create.element.comment, sqltypes.String()
),
)
def visit_drop_column_comment(self, drop):
return "COMMENT ON COLUMN %s IS NULL" % self.preparer.format_column(
drop.element, use_table=True
)
def visit_create_sequence(self, create):
text = "CREATE SEQUENCE %s" % self.preparer.format_sequence(
create.element
)
if create.element.increment is not None:
text += " INCREMENT BY %d" % create.element.increment
if create.element.start is not None:
text += " START WITH %d" % create.element.start
if create.element.minvalue is not None:
text += " MINVALUE %d" % create.element.minvalue
if create.element.maxvalue is not None:
text += " MAXVALUE %d" % create.element.maxvalue
if create.element.nominvalue is not None:
text += " NO MINVALUE"
if create.element.nomaxvalue is not None:
text += " NO MAXVALUE"
if create.element.cache is not None:
text += " CACHE %d" % create.element.cache
if create.element.order is True:
text += " ORDER"
if create.element.cycle is not None:
text += " CYCLE"
return text
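    # Hedged illustration::
    #
    #     from sqlalchemy import Sequence
    #     from sqlalchemy.schema import CreateSequence
    #     print(CreateSequence(Sequence("entry_seq", start=1, increment=1)))
    #     # CREATE SEQUENCE entry_seq INCREMENT BY 1 START WITH 1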
def visit_drop_sequence(self, drop):
return "DROP SEQUENCE %s" % self.preparer.format_sequence(drop.element)
def visit_drop_constraint(self, drop):
constraint = drop.element
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
else:
formatted_name = None
if formatted_name is None:
raise exc.CompileError(
"Can't emit DROP CONSTRAINT for constraint %r; "
"it has no name" % drop.element
)
return "ALTER TABLE %s DROP CONSTRAINT %s%s" % (
self.preparer.format_table(drop.element.table),
formatted_name,
drop.cascade and " CASCADE" or "",
)
def get_column_specification(self, column, **kwargs):
colspec = (
self.preparer.format_column(column)
+ " "
+ self.dialect.type_compiler.process(
column.type, type_expression=column
)
)
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if column.computed is not None:
colspec += " " + self.process(column.computed)
if not column.nullable:
colspec += " NOT NULL"
return colspec
def create_table_suffix(self, table):
return ""
def post_create_table(self, table):
return ""
def get_column_default_string(self, column):
if isinstance(column.server_default, schema.DefaultClause):
if isinstance(column.server_default.arg, util.string_types):
return self.sql_compiler.render_literal_value(
column.server_default.arg, sqltypes.STRINGTYPE
)
else:
return self.sql_compiler.process(
column.server_default.arg, literal_binds=True
)
else:
return None
def visit_check_constraint(self, constraint):
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "CHECK (%s)" % self.sql_compiler.process(
constraint.sqltext, include_table=False, literal_binds=True
)
text += self.define_constraint_deferrability(constraint)
return text
def visit_column_check_constraint(self, constraint):
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "CHECK (%s)" % self.sql_compiler.process(
constraint.sqltext, include_table=False, literal_binds=True
)
text += self.define_constraint_deferrability(constraint)
return text
def visit_primary_key_constraint(self, constraint):
if len(constraint) == 0:
return ""
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "PRIMARY KEY "
text += "(%s)" % ", ".join(
self.preparer.quote(c.name)
for c in (
constraint.columns_autoinc_first
if constraint._implicit_generated
else constraint.columns
)
)
text += self.define_constraint_deferrability(constraint)
return text
def visit_foreign_key_constraint(self, constraint):
preparer = self.preparer
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
remote_table = list(constraint.elements)[0].column.table
text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % (
", ".join(
preparer.quote(f.parent.name) for f in constraint.elements
),
self.define_constraint_remote_table(
constraint, remote_table, preparer
),
", ".join(
preparer.quote(f.column.name) for f in constraint.elements
),
)
text += self.define_constraint_match(constraint)
text += self.define_constraint_cascades(constraint)
text += self.define_constraint_deferrability(constraint)
return text
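    # Hedged illustration: a constraint such as
    # ``ForeignKeyConstraint(["parent_id"], ["parent.id"], ondelete="CASCADE")``
    # renders within CREATE TABLE as
    # "FOREIGN KEY(parent_id) REFERENCES parent (id) ON DELETE CASCADE".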
def define_constraint_remote_table(self, constraint, table, preparer):
"""Format the remote table clause of a CREATE CONSTRAINT clause."""
return preparer.format_table(table)
def visit_unique_constraint(self, constraint):
if len(constraint) == 0:
return ""
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "UNIQUE (%s)" % (
", ".join(self.preparer.quote(c.name) for c in constraint)
)
text += self.define_constraint_deferrability(constraint)
return text
def define_constraint_cascades(self, constraint):
text = ""
if constraint.ondelete is not None:
text += " ON DELETE %s" % self.preparer.validate_sql_phrase(
constraint.ondelete, FK_ON_DELETE
)
if constraint.onupdate is not None:
text += " ON UPDATE %s" % self.preparer.validate_sql_phrase(
constraint.onupdate, FK_ON_UPDATE
)
return text
def define_constraint_deferrability(self, constraint):
text = ""
if constraint.deferrable is not None:
if constraint.deferrable:
text += " DEFERRABLE"
else:
text += " NOT DEFERRABLE"
if constraint.initially is not None:
text += " INITIALLY %s" % self.preparer.validate_sql_phrase(
constraint.initially, FK_INITIALLY
)
return text
def define_constraint_match(self, constraint):
text = ""
if constraint.match is not None:
text += " MATCH %s" % constraint.match
return text
def visit_computed_column(self, generated):
text = "GENERATED ALWAYS AS (%s)" % self.sql_compiler.process(
generated.sqltext, include_table=False, literal_binds=True
)
if generated.persisted is True:
text += " STORED"
elif generated.persisted is False:
text += " VIRTUAL"
return text
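    # Hedged illustration::
    #
    #     from sqlalchemy import Column, Computed, Integer
    #     Column("area", Integer, Computed("width * height", persisted=True))
    #     # column DDL: area INTEGER GENERATED ALWAYS AS (width * height) STORED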
class GenericTypeCompiler(TypeCompiler):
def visit_FLOAT(self, type_, **kw):
return "FLOAT"
def visit_REAL(self, type_, **kw):
return "REAL"
def visit_NUMERIC(self, type_, **kw):
if type_.precision is None:
return "NUMERIC"
elif type_.scale is None:
return "NUMERIC(%(precision)s)" % {"precision": type_.precision}
else:
return "NUMERIC(%(precision)s, %(scale)s)" % {
"precision": type_.precision,
"scale": type_.scale,
}
def visit_DECIMAL(self, type_, **kw):
if type_.precision is None:
return "DECIMAL"
elif type_.scale is None:
return "DECIMAL(%(precision)s)" % {"precision": type_.precision}
else:
return "DECIMAL(%(precision)s, %(scale)s)" % {
"precision": type_.precision,
"scale": type_.scale,
}
def visit_INTEGER(self, type_, **kw):
return "INTEGER"
def visit_SMALLINT(self, type_, **kw):
return "SMALLINT"
def visit_BIGINT(self, type_, **kw):
return "BIGINT"
def visit_TIMESTAMP(self, type_, **kw):
return "TIMESTAMP"
def visit_DATETIME(self, type_, **kw):
return "DATETIME"
def visit_DATE(self, type_, **kw):
return "DATE"
def visit_TIME(self, type_, **kw):
return "TIME"
def visit_CLOB(self, type_, **kw):
return "CLOB"
def visit_NCLOB(self, type_, **kw):
return "NCLOB"
def _render_string_type(self, type_, name):
text = name
if type_.length:
text += "(%d)" % type_.length
if type_.collation:
text += ' COLLATE "%s"' % type_.collation
return text
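    # Hedged illustration: ``String(30, collation="utf8")`` compiles through
    # this helper to ``VARCHAR(30) COLLATE "utf8"``.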
def visit_CHAR(self, type_, **kw):
return self._render_string_type(type_, "CHAR")
def visit_NCHAR(self, type_, **kw):
return self._render_string_type(type_, "NCHAR")
def visit_VARCHAR(self, type_, **kw):
return self._render_string_type(type_, "VARCHAR")
def visit_NVARCHAR(self, type_, **kw):
return self._render_string_type(type_, "NVARCHAR")
def visit_TEXT(self, type_, **kw):
return self._render_string_type(type_, "TEXT")
def visit_BLOB(self, type_, **kw):
return "BLOB"
def visit_BINARY(self, type_, **kw):
return "BINARY" + (type_.length and "(%d)" % type_.length or "")
def visit_VARBINARY(self, type_, **kw):
return "VARBINARY" + (type_.length and "(%d)" % type_.length or "")
def visit_BOOLEAN(self, type_, **kw):
return "BOOLEAN"
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_, **kw)
def visit_boolean(self, type_, **kw):
return self.visit_BOOLEAN(type_, **kw)
def visit_time(self, type_, **kw):
return self.visit_TIME(type_, **kw)
def visit_datetime(self, type_, **kw):
return self.visit_DATETIME(type_, **kw)
def visit_date(self, type_, **kw):
return self.visit_DATE(type_, **kw)
def visit_big_integer(self, type_, **kw):
return self.visit_BIGINT(type_, **kw)
def visit_small_integer(self, type_, **kw):
return self.visit_SMALLINT(type_, **kw)
def visit_integer(self, type_, **kw):
return self.visit_INTEGER(type_, **kw)
def visit_real(self, type_, **kw):
return self.visit_REAL(type_, **kw)
def visit_float(self, type_, **kw):
return self.visit_FLOAT(type_, **kw)
def visit_numeric(self, type_, **kw):
return self.visit_NUMERIC(type_, **kw)
def visit_string(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_unicode(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_text(self, type_, **kw):
return self.visit_TEXT(type_, **kw)
def visit_unicode_text(self, type_, **kw):
return self.visit_TEXT(type_, **kw)
def visit_enum(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_null(self, type_, **kw):
raise exc.CompileError(
"Can't generate DDL for %r; "
"did you forget to specify a "
"type on this Column?" % type_
)
def visit_type_decorator(self, type_, **kw):
return self.process(type_.type_engine(self.dialect), **kw)
def visit_user_defined(self, type_, **kw):
return type_.get_col_spec(**kw)
class StrSQLTypeCompiler(GenericTypeCompiler):
def __getattr__(self, key):
if key.startswith("visit_"):
return self._visit_unknown
else:
raise AttributeError(key)
def _visit_unknown(self, type_, **kw):
return "%s" % type_.__class__.__name__
class IdentifierPreparer(object):
"""Handle quoting and case-folding of identifiers based on options."""
reserved_words = RESERVED_WORDS
legal_characters = LEGAL_CHARACTERS
illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS
schema_for_object = schema._schema_getter(None)
def __init__(
self,
dialect,
initial_quote='"',
final_quote=None,
escape_quote='"',
quote_case_sensitive_collations=True,
omit_schema=False,
):
"""Construct a new ``IdentifierPreparer`` object.
initial_quote
Character that begins a delimited identifier.
final_quote
Character that ends a delimited identifier. Defaults to
`initial_quote`.
omit_schema
Prevent prepending schema name. Useful for databases that do
          not support schemas.
"""
self.dialect = dialect
self.initial_quote = initial_quote
self.final_quote = final_quote or self.initial_quote
self.escape_quote = escape_quote
self.escape_to_quote = self.escape_quote * 2
self.omit_schema = omit_schema
self.quote_case_sensitive_collations = quote_case_sensitive_collations
self._strings = {}
self._double_percents = self.dialect.paramstyle in (
"format",
"pyformat",
)
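    # Hedged usage sketch (using only public attributes of this class)::
    #
    #     from sqlalchemy.engine import default
    #     prep = IdentifierPreparer(default.DefaultDialect())
    #     prep.quote("order")      # '"order"'     (reserved word)
    #     prep.quote("MixedCase")  # '"MixedCase"' (not case-folded)
    #     prep.quote("plain")      # 'plain'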
def _with_schema_translate(self, schema_translate_map):
prep = self.__class__.__new__(self.__class__)
prep.__dict__.update(self.__dict__)
prep.schema_for_object = schema._schema_getter(schema_translate_map)
return prep
def _escape_identifier(self, value):
"""Escape an identifier.
Subclasses should override this to provide database-dependent
escaping behavior.
"""
value = value.replace(self.escape_quote, self.escape_to_quote)
if self._double_percents:
value = value.replace("%", "%%")
return value
def _unescape_identifier(self, value):
"""Canonicalize an escaped identifier.
Subclasses should override this to provide database-dependent
unescaping behavior that reverses _escape_identifier.
"""
return value.replace(self.escape_to_quote, self.escape_quote)
def validate_sql_phrase(self, element, reg):
"""keyword sequence filter.
a filter for elements that are intended to represent keyword sequences,
such as "INITIALLY", "INITIALLY DEFERRED", etc. no special characters
should be present.
.. versionadded:: 1.3
"""
if element is not None and not reg.match(element):
raise exc.CompileError(
"Unexpected SQL phrase: %r (matching against %r)"
% (element, reg.pattern)
)
return element
def quote_identifier(self, value):
"""Quote an identifier.
Subclasses should override this to provide database-dependent
quoting behavior.
"""
return (
self.initial_quote
+ self._escape_identifier(value)
+ self.final_quote
)
def _requires_quotes(self, value):
"""Return True if the given identifier requires quoting."""
lc_value = value.lower()
return (
lc_value in self.reserved_words
or value[0] in self.illegal_initial_characters
or not self.legal_characters.match(util.text_type(value))
or (lc_value != value)
)
def _requires_quotes_illegal_chars(self, value):
"""Return True if the given identifier requires quoting, but
not taking case convention into account."""
return not self.legal_characters.match(util.text_type(value))
def quote_schema(self, schema, force=None):
"""Conditionally quote a schema name.
The name is quoted if it is a reserved word, contains quote-necessary
characters, or is an instance of :class:`.quoted_name` which includes
``quote`` set to ``True``.
Subclasses can override this to provide database-dependent
quoting behavior for schema names.
:param schema: string schema name
:param force: unused
.. deprecated:: 0.9
The :paramref:`.IdentifierPreparer.quote_schema.force`
parameter is deprecated and will be removed in a future
release. This flag has no effect on the behavior of the
:meth:`.IdentifierPreparer.quote` method; please refer to
:class:`.quoted_name`.
"""
if force is not None:
# not using the util.deprecated_params() decorator in this
# case because of the additional function call overhead on this
# very performance-critical spot.
util.warn_deprecated(
"The IdentifierPreparer.quote_schema.force parameter is "
"deprecated and will be removed in a future release. This "
"flag has no effect on the behavior of the "
"IdentifierPreparer.quote method; please refer to "
"quoted_name()."
)
return self.quote(schema)
def quote(self, ident, force=None):
"""Conditionally quote an identfier.
The identifier is quoted if it is a reserved word, contains
quote-necessary characters, or is an instance of
:class:`.quoted_name` which includes ``quote`` set to ``True``.
Subclasses can override this to provide database-dependent
quoting behavior for identifier names.
:param ident: string identifier
:param force: unused
.. deprecated:: 0.9
The :paramref:`.IdentifierPreparer.quote.force`
parameter is deprecated and will be removed in a future
release. This flag has no effect on the behavior of the
:meth:`.IdentifierPreparer.quote` method; please refer to
:class:`.quoted_name`.
"""
if force is not None:
# not using the util.deprecated_params() decorator in this
# case because of the additional function call overhead on this
# very performance-critical spot.
util.warn_deprecated(
"The IdentifierPreparer.quote.force parameter is "
"deprecated and will be removed in a future release. This "
"flag has no effect on the behavior of the "
"IdentifierPreparer.quote method; please refer to "
"quoted_name()."
)
force = getattr(ident, "quote", None)
if force is None:
if ident in self._strings:
return self._strings[ident]
else:
if self._requires_quotes(ident):
self._strings[ident] = self.quote_identifier(ident)
else:
self._strings[ident] = ident
return self._strings[ident]
elif force:
return self.quote_identifier(ident)
else:
return ident
def format_collation(self, collation_name):
if self.quote_case_sensitive_collations:
return self.quote(collation_name)
else:
return collation_name
def format_sequence(self, sequence, use_schema=True):
name = self.quote(sequence.name)
effective_schema = self.schema_for_object(sequence)
if (
not self.omit_schema
and use_schema
and effective_schema is not None
):
name = self.quote_schema(effective_schema) + "." + name
return name
def format_label(self, label, name=None):
return self.quote(name or label.name)
def format_alias(self, alias, name=None):
return self.quote(name or alias.name)
def format_savepoint(self, savepoint, name=None):
# Running the savepoint name through quoting is unnecessary
# for all known dialects. This is here to support potential
# third party use cases
ident = name or savepoint.ident
if self._requires_quotes(ident):
ident = self.quote_identifier(ident)
return ident
@util.dependencies("sqlalchemy.sql.naming")
def format_constraint(self, naming, constraint):
if isinstance(constraint.name, elements._defer_name):
name = naming._constraint_name_for_table(
constraint, constraint.table
)
if name is None:
if isinstance(constraint.name, elements._defer_none_name):
return None
else:
name = constraint.name
else:
name = constraint.name
if isinstance(name, elements._truncated_label):
if constraint.__visit_name__ == "index":
max_ = (
self.dialect.max_index_name_length
or self.dialect.max_identifier_length
)
else:
max_ = self.dialect.max_identifier_length
if len(name) > max_:
name = name[0 : max_ - 8] + "_" + util.md5_hex(name)[-4:]
else:
self.dialect.validate_identifier(name)
return self.quote(name)
def format_index(self, index):
return self.format_constraint(index)
def format_table(self, table, use_schema=True, name=None):
"""Prepare a quoted table and schema name."""
if name is None:
name = table.name
result = self.quote(name)
effective_schema = self.schema_for_object(table)
if not self.omit_schema and use_schema and effective_schema:
result = self.quote_schema(effective_schema) + "." + result
return result
def format_schema(self, name):
"""Prepare a quoted schema name."""
return self.quote(name)
def format_column(
self,
column,
use_table=False,
name=None,
table_name=None,
use_schema=False,
):
"""Prepare a quoted column name."""
if name is None:
name = column.name
if not getattr(column, "is_literal", False):
if use_table:
return (
self.format_table(
column.table, use_schema=use_schema, name=table_name
)
+ "."
+ self.quote(name)
)
else:
return self.quote(name)
else:
# literal textual elements get stuck into ColumnClause a lot,
# which shouldn't get quoted
if use_table:
return (
self.format_table(
column.table, use_schema=use_schema, name=table_name
)
+ "."
+ name
)
else:
return name
def format_table_seq(self, table, use_schema=True):
"""Format table name and schema as a tuple."""
# Dialects with more levels in their fully qualified references
# ('database', 'owner', etc.) could override this and return
# a longer sequence.
effective_schema = self.schema_for_object(table)
if not self.omit_schema and use_schema and effective_schema:
return (
self.quote_schema(effective_schema),
self.format_table(table, use_schema=False),
)
else:
return (self.format_table(table, use_schema=False),)
@util.memoized_property
def _r_identifiers(self):
initial, final, escaped_final = [
re.escape(s)
for s in (
self.initial_quote,
self.final_quote,
self._escape_identifier(self.final_quote),
)
]
r = re.compile(
r"(?:"
r"(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s"
r"|([^\.]+))(?=\.|$))+"
% {"initial": initial, "final": final, "escaped": escaped_final}
)
return r
def unformat_identifiers(self, identifiers):
"""Unpack 'schema.table.column'-like strings into components."""
r = self._r_identifiers
return [
self._unescape_identifier(i)
for i in [a or b for a, b in r.findall(identifiers)]
]
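    # Hedged illustration, continuing the ``prep`` sketch above::
    #
    #     prep.unformat_identifiers('"my schema".mytable')
    #     # ['my schema', 'mytable']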
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/sql/naming.py
|
# sqlalchemy/naming.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Establish constraint and index naming conventions.
"""
import re
from .elements import _defer_name
from .elements import _defer_none_name
from .elements import conv
from .schema import CheckConstraint
from .schema import Column
from .schema import Constraint
from .schema import ForeignKeyConstraint
from .schema import Index
from .schema import PrimaryKeyConstraint
from .schema import Table
from .schema import UniqueConstraint
from .. import event
from .. import events # noqa
from .. import exc
class ConventionDict(object):
def __init__(self, const, table, convention):
self.const = const
self._is_fk = isinstance(const, ForeignKeyConstraint)
self.table = table
self.convention = convention
self._const_name = const.name
def _key_table_name(self):
return self.table.name
def _column_X(self, idx):
if self._is_fk:
fk = self.const.elements[idx]
return fk.parent
else:
return list(self.const.columns)[idx]
def _key_constraint_name(self):
if isinstance(self._const_name, (type(None), _defer_none_name)):
raise exc.InvalidRequestError(
"Naming convention including "
"%(constraint_name)s token requires that "
"constraint is explicitly named."
)
if not isinstance(self._const_name, conv):
self.const.name = None
return self._const_name
def _key_column_X_key(self, idx):
# note this method was missing before
# [ticket:3989], meaning tokens like ``%(column_0_key)s`` weren't
# working even though documented.
return self._column_X(idx).key
def _key_column_X_name(self, idx):
return self._column_X(idx).name
def _key_column_X_label(self, idx):
return self._column_X(idx)._ddl_label
def _key_referred_table_name(self):
fk = self.const.elements[0]
refs = fk.target_fullname.split(".")
if len(refs) == 3:
refschema, reftable, refcol = refs
else:
reftable, refcol = refs
return reftable
def _key_referred_column_X_name(self, idx):
fk = self.const.elements[idx]
# note that before [ticket:3989], this method was returning
# the specification for the :class:`.ForeignKey` itself, which normally
# would be using the ``.key`` of the column, not the name.
return fk.column.name
def __getitem__(self, key):
if key in self.convention:
return self.convention[key](self.const, self.table)
elif hasattr(self, "_key_%s" % key):
return getattr(self, "_key_%s" % key)()
else:
col_template = re.match(r".*_?column_(\d+)(_?N)?_.+", key)
if col_template:
idx = col_template.group(1)
multiples = col_template.group(2)
if multiples:
if self._is_fk:
elems = self.const.elements
else:
elems = list(self.const.columns)
tokens = []
for idx, elem in enumerate(elems):
attr = "_key_" + key.replace("0" + multiples, "X")
try:
tokens.append(getattr(self, attr)(idx))
except AttributeError:
raise KeyError(key)
sep = "_" if multiples.startswith("_") else ""
return sep.join(tokens)
else:
attr = "_key_" + key.replace(idx, "X")
idx = int(idx)
if hasattr(self, attr):
return getattr(self, attr)(idx)
raise KeyError(key)
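    # Hedged illustration: ``%(column_0_name)s`` resolves through
    # ``_key_column_X_name`` above, while the multi-column form
    # ``%(column_0_N_name)s`` joins every column; e.g. a UniqueConstraint
    # on columns (a, b) of table "t" with convention
    # "uq_%(table_name)s_%(column_0_N_name)s" yields "uq_t_a_b".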
_prefix_dict = {
Index: "ix",
PrimaryKeyConstraint: "pk",
CheckConstraint: "ck",
UniqueConstraint: "uq",
ForeignKeyConstraint: "fk",
}
def _get_convention(dict_, key):
for super_ in key.__mro__:
if super_ in _prefix_dict and _prefix_dict[super_] in dict_:
return dict_[_prefix_dict[super_]]
elif super_ in dict_:
return dict_[super_]
else:
return None
def _constraint_name_for_table(const, table):
metadata = table.metadata
convention = _get_convention(metadata.naming_convention, type(const))
if isinstance(const.name, conv):
return const.name
elif (
convention is not None
and not isinstance(const.name, conv)
and (
const.name is None
or "constraint_name" in convention
or isinstance(const.name, _defer_name)
)
):
return conv(
convention
% ConventionDict(const, table, metadata.naming_convention)
)
elif isinstance(convention, _defer_none_name):
return None
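# Hedged usage sketch (documented public API)::
#
#     from sqlalchemy import Column, Integer, MetaData, Table, UniqueConstraint
#     metadata = MetaData(naming_convention={
#         "uq": "uq_%(table_name)s_%(column_0_name)s",
#         "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
#     })
#     t = Table("t", metadata, Column("x", Integer), UniqueConstraint("x"))
#     # the UniqueConstraint is named "uq_t_x" when attached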
@event.listens_for(Constraint, "after_parent_attach")
@event.listens_for(Index, "after_parent_attach")
def _constraint_name(const, table):
if isinstance(table, Column):
# for column-attached constraint, set another event
# to link the column attached to the table as this constraint
# associated with the table.
event.listen(
table,
"after_parent_attach",
lambda col, table: _constraint_name(const, table),
)
elif isinstance(table, Table):
if isinstance(const.name, (conv, _defer_name)):
return
newname = _constraint_name_for_table(const, table)
if newname is not None:
const.name = newname
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/sql/util.py
|
# sql/util.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""High level utilities which build upon other modules here.
"""
from collections import deque
from itertools import chain
from . import operators
from . import visitors
from .annotation import _deep_annotate # noqa
from .annotation import _deep_deannotate # noqa
from .annotation import _shallow_annotate # noqa
from .base import _from_objects
from .base import ColumnSet
from .ddl import sort_tables # noqa
from .elements import _expand_cloned
from .elements import _find_columns # noqa
from .elements import _label_reference
from .elements import _textual_label_reference
from .elements import BindParameter
from .elements import ColumnClause
from .elements import ColumnElement
from .elements import Null
from .elements import UnaryExpression
from .schema import Column
from .selectable import Alias
from .selectable import FromClause
from .selectable import FromGrouping
from .selectable import Join
from .selectable import ScalarSelect
from .selectable import SelectBase
from .selectable import TableClause
from .. import exc
from .. import util
join_condition = util.langhelpers.public_factory(
Join._join_condition, ".sql.util.join_condition"
)
def find_join_source(clauses, join_to):
"""Given a list of FROM clauses and a selectable,
return the first index and element from the list of
clauses which can be joined against the selectable. returns
None, None if no match is found.
e.g.::
clause1 = table1.join(table2)
clause2 = table4.join(table5)
join_to = table2.join(table3)
find_join_source([clause1, clause2], join_to) == clause1
"""
selectables = list(_from_objects(join_to))
idx = []
for i, f in enumerate(clauses):
for s in selectables:
if f.is_derived_from(s):
idx.append(i)
return idx
def find_left_clause_that_matches_given(clauses, join_from):
"""Given a list of FROM clauses and a selectable,
return the indexes from the list of
clauses which is derived from the selectable.
"""
selectables = list(_from_objects(join_from))
liberal_idx = []
for i, f in enumerate(clauses):
for s in selectables:
# basic check, if f is derived from s.
# this can be joins containing a table, or an aliased table
# or select statement matching to a table. This check
# will match a table to a selectable that is adapted from
# that table. With Query, this suits the case where a join
# is being made to an adapted entity
if f.is_derived_from(s):
liberal_idx.append(i)
break
# in an extremely small set of use cases, a join is being made where
# there are multiple FROM clauses where our target table is represented
# in more than one, such as embedded or similar. in this case, do
# another pass where we try to get a more exact match where we aren't
# looking at adaption relationships.
if len(liberal_idx) > 1:
conservative_idx = []
for idx in liberal_idx:
f = clauses[idx]
for s in selectables:
if set(surface_selectables(f)).intersection(
surface_selectables(s)
):
conservative_idx.append(idx)
break
if conservative_idx:
return conservative_idx
return liberal_idx
def find_left_clause_to_join_from(clauses, join_to, onclause):
"""Given a list of FROM clauses, a selectable,
and optional ON clause, return a list of integer indexes from the
clauses list indicating the clauses that can be joined from.
The presence of an "onclause" indicates that at least one clause can
definitely be joined from; if the list of clauses is of length one
and the onclause is given, returns that index. If the list of clauses
is more than length one, and the onclause is given, attempts to locate
which clauses contain the same columns.
"""
idx = []
selectables = set(_from_objects(join_to))
# if we are given more than one target clause to join
# from, use the onclause to provide a more specific answer.
# otherwise, don't try to limit, after all, "ON TRUE" is a valid
# on clause
if len(clauses) > 1 and onclause is not None:
resolve_ambiguity = True
cols_in_onclause = _find_columns(onclause)
else:
resolve_ambiguity = False
cols_in_onclause = None
for i, f in enumerate(clauses):
for s in selectables.difference([f]):
if resolve_ambiguity:
if set(f.c).union(s.c).issuperset(cols_in_onclause):
idx.append(i)
break
elif Join._can_join(f, s) or onclause is not None:
idx.append(i)
break
if len(idx) > 1:
# this is the same "hide froms" logic from
# Selectable._get_display_froms
toremove = set(
chain(*[_expand_cloned(f._hide_froms) for f in clauses])
)
idx = [i for i in idx if clauses[i] not in toremove]
# onclause was given and none of them resolved, so assume
# all indexes can match
if not idx and onclause is not None:
return range(len(clauses))
else:
return idx
def visit_binary_product(fn, expr):
"""Produce a traversal of the given expression, delivering
column comparisons to the given function.
The function is of the form::
def my_fn(binary, left, right)
For each binary expression located which has a
comparison operator, the product of "left" and
"right" will be delivered to that function,
in terms of that binary.
Hence an expression like::
and_(
(a + b) == q + func.sum(e + f),
j == r
)
would have the traversal::
a <eq> q
a <eq> e
a <eq> f
b <eq> q
b <eq> e
b <eq> f
j <eq> r
That is, every combination of "left" and
"right" that doesn't further contain
a binary comparison is passed as pairs.
"""
stack = []
def visit(element):
if isinstance(element, ScalarSelect):
# we don't want to dig into correlated subqueries,
# those are just column elements by themselves
yield element
elif element.__visit_name__ == "binary" and operators.is_comparison(
element.operator
):
stack.insert(0, element)
for l in visit(element.left):
for r in visit(element.right):
fn(stack[0], l, r)
stack.pop(0)
for elem in element.get_children():
visit(elem)
else:
if isinstance(element, ColumnClause):
yield element
for elem in element.get_children():
for e in visit(elem):
yield e
list(visit(expr))
visit = None # remove gc cycles
def find_tables(
clause,
check_columns=False,
include_aliases=False,
include_joins=False,
include_selects=False,
include_crud=False,
):
"""locate Table objects within the given expression."""
tables = []
_visitors = {}
if include_selects:
_visitors["select"] = _visitors["compound_select"] = tables.append
if include_joins:
_visitors["join"] = tables.append
if include_aliases:
_visitors["alias"] = tables.append
if include_crud:
_visitors["insert"] = _visitors["update"] = _visitors[
"delete"
] = lambda ent: tables.append(ent.table)
if check_columns:
def visit_column(column):
tables.append(column.table)
_visitors["column"] = visit_column
_visitors["table"] = tables.append
visitors.traverse(clause, {"column_collections": False}, _visitors)
return tables
def unwrap_order_by(clause):
"""Break up an 'order by' expression into individual column-expressions,
without DESC/ASC/NULLS FIRST/NULLS LAST"""
cols = util.column_set()
result = []
stack = deque([clause])
while stack:
t = stack.popleft()
if isinstance(t, ColumnElement) and (
not isinstance(t, UnaryExpression)
or not operators.is_ordering_modifier(t.modifier)
):
if isinstance(t, _label_reference):
t = t.element
if isinstance(t, (_textual_label_reference)):
continue
if t not in cols:
cols.add(t)
result.append(t)
else:
for c in t.get_children():
stack.append(c)
return result
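# Hedged illustration::
#
#     from sqlalchemy import column, table
#     t = table("t", column("a"))
#     unwrap_order_by(t.c.a.desc())  # [t.c.a]; the DESC modifier is stripped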
def unwrap_label_reference(element):
def replace(elem):
if isinstance(elem, (_label_reference, _textual_label_reference)):
return elem.element
return visitors.replacement_traverse(element, {}, replace)
def expand_column_list_from_order_by(collist, order_by):
"""Given the columns clause and ORDER BY of a selectable,
return a list of column expressions that can be added to the collist
corresponding to the ORDER BY, without repeating those already
in the collist.
"""
cols_already_present = set(
[
col.element if col._order_by_label_element is not None else col
for col in collist
]
)
return [
col
for col in chain(*[unwrap_order_by(o) for o in order_by])
if col not in cols_already_present
]
def clause_is_present(clause, search):
"""Given a target clause and a second to search within, return True
if the target is plainly present in the search without any
subqueries or aliases involved.
Basically descends through Joins.
"""
for elem in surface_selectables(search):
if clause == elem: # use == here so that Annotated's compare
return True
else:
return False
def surface_selectables(clause):
stack = [clause]
while stack:
elem = stack.pop()
yield elem
if isinstance(elem, Join):
stack.extend((elem.left, elem.right))
elif isinstance(elem, FromGrouping):
stack.append(elem.element)
def surface_selectables_only(clause):
stack = [clause]
while stack:
elem = stack.pop()
if isinstance(elem, (TableClause, Alias)):
yield elem
if isinstance(elem, Join):
stack.extend((elem.left, elem.right))
elif isinstance(elem, FromGrouping):
stack.append(elem.element)
elif isinstance(elem, ColumnClause):
stack.append(elem.table)
def surface_column_elements(clause, include_scalar_selects=True):
"""traverse and yield only outer-exposed column elements, such as would
be addressable in the WHERE clause of a SELECT if this element were
in the columns clause."""
filter_ = (FromGrouping,)
if not include_scalar_selects:
filter_ += (SelectBase,)
stack = deque([clause])
while stack:
elem = stack.popleft()
yield elem
for sub in elem.get_children():
if isinstance(sub, filter_):
continue
stack.append(sub)
def selectables_overlap(left, right):
"""Return True if left/right have some overlapping selectable"""
return bool(
set(surface_selectables(left)).intersection(surface_selectables(right))
)
def bind_values(clause):
"""Return an ordered list of "bound" values in the given clause.
E.g.::
>>> expr = and_(
... table.c.foo==5, table.c.foo==7
... )
>>> bind_values(expr)
[5, 7]
"""
v = []
def visit_bindparam(bind):
v.append(bind.effective_value)
visitors.traverse(clause, {}, {"bindparam": visit_bindparam})
return v
def _quote_ddl_expr(element):
if isinstance(element, util.string_types):
element = element.replace("'", "''")
return "'%s'" % element
else:
return repr(element)
class _repr_base(object):
_LIST = 0
_TUPLE = 1
_DICT = 2
__slots__ = ("max_chars",)
def trunc(self, value):
rep = repr(value)
lenrep = len(rep)
if lenrep > self.max_chars:
segment_length = self.max_chars // 2
rep = (
rep[0:segment_length]
+ (
" ... (%d characters truncated) ... "
% (lenrep - self.max_chars)
)
+ rep[-segment_length:]
)
return rep
class _repr_row(_repr_base):
"""Provide a string view of a row."""
__slots__ = ("row",)
def __init__(self, row, max_chars=300):
self.row = row
self.max_chars = max_chars
def __repr__(self):
trunc = self.trunc
return "(%s%s)" % (
", ".join(trunc(value) for value in self.row),
"," if len(self.row) == 1 else "",
)
class _repr_params(_repr_base):
"""Provide a string view of bound parameters.
    Truncates display to a given number of 'multi' parameter sets,
as well as long values to a given number of characters.
"""
__slots__ = "params", "batches", "ismulti"
def __init__(self, params, batches, max_chars=300, ismulti=None):
self.params = params
self.ismulti = ismulti
self.batches = batches
self.max_chars = max_chars
def __repr__(self):
if self.ismulti is None:
return self.trunc(self.params)
if isinstance(self.params, list):
typ = self._LIST
elif isinstance(self.params, tuple):
typ = self._TUPLE
elif isinstance(self.params, dict):
typ = self._DICT
else:
return self.trunc(self.params)
if self.ismulti and len(self.params) > self.batches:
msg = " ... displaying %i of %i total bound parameter sets ... "
return " ".join(
(
self._repr_multi(self.params[: self.batches - 2], typ)[
0:-1
],
msg % (self.batches, len(self.params)),
self._repr_multi(self.params[-2:], typ)[1:],
)
)
elif self.ismulti:
return self._repr_multi(self.params, typ)
else:
return self._repr_params(self.params, typ)
def _repr_multi(self, multi_params, typ):
if multi_params:
if isinstance(multi_params[0], list):
elem_type = self._LIST
elif isinstance(multi_params[0], tuple):
elem_type = self._TUPLE
elif isinstance(multi_params[0], dict):
elem_type = self._DICT
else:
assert False, "Unknown parameter type %s" % (
type(multi_params[0])
)
elements = ", ".join(
self._repr_params(params, elem_type) for params in multi_params
)
else:
elements = ""
if typ == self._LIST:
return "[%s]" % elements
else:
return "(%s)" % elements
def _repr_params(self, params, typ):
trunc = self.trunc
if typ is self._DICT:
return "{%s}" % (
", ".join(
"%r: %s" % (key, trunc(value))
for key, value in params.items()
)
)
elif typ is self._TUPLE:
return "(%s%s)" % (
", ".join(trunc(value) for value in params),
"," if len(params) == 1 else "",
)
else:
return "[%s]" % (", ".join(trunc(value) for value in params))
def adapt_criterion_to_null(crit, nulls):
"""given criterion containing bind params, convert selected elements
to IS NULL.
"""
def visit_binary(binary):
if (
isinstance(binary.left, BindParameter)
and binary.left._identifying_key in nulls
):
# reverse order if the NULL is on the left side
binary.left = binary.right
binary.right = Null()
binary.operator = operators.is_
binary.negate = operators.isnot
elif (
isinstance(binary.right, BindParameter)
and binary.right._identifying_key in nulls
):
binary.right = Null()
binary.operator = operators.is_
binary.negate = operators.isnot
return visitors.cloned_traverse(crit, {}, {"binary": visit_binary})
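# Hedged illustration::
#
#     from sqlalchemy import and_, bindparam, column, table
#     t = table("t", column("a"), column("b"))
#     crit = and_(t.c.a == bindparam("a"), t.c.b == bindparam("b"))
#     print(adapt_criterion_to_null(crit, {"a"}))
#     # t.a IS NULL AND t.b = :b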
def splice_joins(left, right, stop_on=None):
if left is None:
return right
stack = [(right, None)]
adapter = ClauseAdapter(left)
ret = None
while stack:
(right, prevright) = stack.pop()
if isinstance(right, Join) and right is not stop_on:
right = right._clone()
right._reset_exported()
right.onclause = adapter.traverse(right.onclause)
stack.append((right.left, right))
else:
right = adapter.traverse(right)
if prevright is not None:
prevright.left = right
if ret is None:
ret = right
return ret
def reduce_columns(columns, *clauses, **kw):
r"""given a list of columns, return a 'reduced' set based on natural
equivalents.
the set is reduced to the smallest list of columns which have no natural
equivalent present in the list. A "natural equivalent" means that two
columns will ultimately represent the same value because they are related
by a foreign key.
\*clauses is an optional list of join clauses which will be traversed
to further identify columns that are "equivalent".
\**kw may specify 'ignore_nonexistent_tables' to ignore foreign keys
whose tables are not yet configured, or columns that aren't yet present.
This function is primarily used to determine the most minimal "primary
key" from a selectable, by reducing the set of primary key columns present
in the selectable to just those that are not repeated.
"""
ignore_nonexistent_tables = kw.pop("ignore_nonexistent_tables", False)
only_synonyms = kw.pop("only_synonyms", False)
columns = util.ordered_column_set(columns)
omit = util.column_set()
for col in columns:
for fk in chain(*[c.foreign_keys for c in col.proxy_set]):
for c in columns:
if c is col:
continue
try:
fk_col = fk.column
except exc.NoReferencedColumnError:
# TODO: add specific coverage here
# to test/sql/test_selectable ReduceTest
if ignore_nonexistent_tables:
continue
else:
raise
except exc.NoReferencedTableError:
# TODO: add specific coverage here
# to test/sql/test_selectable ReduceTest
if ignore_nonexistent_tables:
continue
else:
raise
if fk_col.shares_lineage(c) and (
not only_synonyms or c.name == col.name
):
omit.add(col)
break
if clauses:
def visit_binary(binary):
if binary.operator == operators.eq:
cols = util.column_set(
chain(*[c.proxy_set for c in columns.difference(omit)])
)
if binary.left in cols and binary.right in cols:
for c in reversed(columns):
if c.shares_lineage(binary.right) and (
not only_synonyms or c.name == binary.left.name
):
omit.add(c)
break
for clause in clauses:
if clause is not None:
visitors.traverse(clause, {}, {"binary": visit_binary})
return ColumnSet(columns.difference(omit))
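# Illustrative sketch (not part of the original module): reducing a composite
# primary key list across a foreign key. The table and column names are
# hypothetical.
def _example_reduce_columns():
    from sqlalchemy import Column, ForeignKey, Integer, MetaData, Table

    m = MetaData()
    parent = Table("parent", m, Column("id", Integer, primary_key=True))
    child = Table(
        "child",
        m,
        Column("id", Integer, ForeignKey("parent.id"), primary_key=True),
    )
    # child.c.id is a natural equivalent of parent.c.id via the foreign key,
    # so it is omitted and only parent.c.id remains in the reduced set.
    print(reduce_columns([parent.c.id, child.c.id]))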
def criterion_as_pairs(
expression,
consider_as_foreign_keys=None,
consider_as_referenced_keys=None,
any_operator=False,
):
"""traverse an expression and locate binary criterion pairs."""
if consider_as_foreign_keys and consider_as_referenced_keys:
raise exc.ArgumentError(
"Can only specify one of "
"'consider_as_foreign_keys' or "
"'consider_as_referenced_keys'"
)
def col_is(a, b):
# return a is b
return a.compare(b)
def visit_binary(binary):
if not any_operator and binary.operator is not operators.eq:
return
if not isinstance(binary.left, ColumnElement) or not isinstance(
binary.right, ColumnElement
):
return
if consider_as_foreign_keys:
if binary.left in consider_as_foreign_keys and (
col_is(binary.right, binary.left)
or binary.right not in consider_as_foreign_keys
):
pairs.append((binary.right, binary.left))
elif binary.right in consider_as_foreign_keys and (
col_is(binary.left, binary.right)
or binary.left not in consider_as_foreign_keys
):
pairs.append((binary.left, binary.right))
elif consider_as_referenced_keys:
if binary.left in consider_as_referenced_keys and (
col_is(binary.right, binary.left)
or binary.right not in consider_as_referenced_keys
):
pairs.append((binary.left, binary.right))
elif binary.right in consider_as_referenced_keys and (
col_is(binary.left, binary.right)
or binary.left not in consider_as_referenced_keys
):
pairs.append((binary.right, binary.left))
else:
if isinstance(binary.left, Column) and isinstance(
binary.right, Column
):
if binary.left.references(binary.right):
pairs.append((binary.right, binary.left))
elif binary.right.references(binary.left):
pairs.append((binary.left, binary.right))
pairs = []
visitors.traverse(expression, {}, {"binary": visit_binary})
return pairs
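# Illustrative sketch (not part of the original module): extracting
# (referenced, foreign) column pairs from a join criterion, letting
# ``Column.references()`` decide the direction. Names are hypothetical.
def _example_criterion_as_pairs():
    from sqlalchemy import Column, ForeignKey, Integer, MetaData, Table

    m = MetaData()
    parent = Table("parent", m, Column("id", Integer, primary_key=True))
    child = Table(
        "child", m, Column("parent_id", Integer, ForeignKey("parent.id"))
    )
    onclause = parent.c.id == child.c.parent_id
    # -> [(parent.c.id, child.c.parent_id)]
    print(criterion_as_pairs(onclause))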
class ClauseAdapter(visitors.ReplacingCloningVisitor):
"""Clones and modifies clauses based on column correspondence.
E.g.::
table1 = Table('sometable', metadata,
Column('col1', Integer),
Column('col2', Integer)
)
table2 = Table('someothertable', metadata,
Column('col1', Integer),
Column('col2', Integer)
)
condition = table1.c.col1 == table2.c.col1
make an alias of table1::
s = table1.alias('foo')
calling ``ClauseAdapter(s).traverse(condition)`` converts
condition to read::
s.c.col1 == table2.c.col1
"""
def __init__(
self,
selectable,
equivalents=None,
include_fn=None,
exclude_fn=None,
adapt_on_names=False,
anonymize_labels=False,
):
self.__traverse_options__ = {
"stop_on": [selectable],
"anonymize_labels": anonymize_labels,
}
self.selectable = selectable
self.include_fn = include_fn
self.exclude_fn = exclude_fn
self.equivalents = util.column_dict(equivalents or {})
self.adapt_on_names = adapt_on_names
def _corresponding_column(
self, col, require_embedded, _seen=util.EMPTY_SET
):
newcol = self.selectable.corresponding_column(
col, require_embedded=require_embedded
)
if newcol is None and col in self.equivalents and col not in _seen:
for equiv in self.equivalents[col]:
newcol = self._corresponding_column(
equiv,
require_embedded=require_embedded,
_seen=_seen.union([col]),
)
if newcol is not None:
return newcol
if self.adapt_on_names and newcol is None:
newcol = self.selectable.c.get(col.name)
return newcol
def replace(self, col):
if isinstance(col, FromClause) and self.selectable.is_derived_from(
col
):
return self.selectable
elif not isinstance(col, ColumnElement):
return None
elif self.include_fn and not self.include_fn(col):
return None
elif self.exclude_fn and self.exclude_fn(col):
return None
else:
return self._corresponding_column(col, True)
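# Illustrative sketch (not part of the original module): a runnable version of
# the docstring example above, using the lightweight table()/column()
# constructs rather than full Table objects.
def _example_clause_adapter():
    from sqlalchemy.sql import column, table

    table1 = table("sometable", column("col1"), column("col2"))
    table2 = table("someothertable", column("col1"), column("col2"))
    condition = table1.c.col1 == table2.c.col1
    s = table1.alias("foo")
    # renders approximately: foo.col1 = someothertable.col1
    print(ClauseAdapter(s).traverse(condition))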
class ColumnAdapter(ClauseAdapter):
"""Extends ClauseAdapter with extra utility functions.
Key aspects of ColumnAdapter include:
* Expressions that are adapted are stored in a persistent
.columns collection; so that an expression E adapted into
an expression E1, will return the same object E1 when adapted
a second time. This is important in particular for things like
Label objects that are anonymized, so that the ColumnAdapter can
be used to present a consistent "adapted" view of things.
* Exclusion of items from the persistent collection based on
include/exclude rules, but also independent of hash identity.
This is because "annotated" items all have the same hash identity as their
parent.
* "wrapping" capability is added, so that the replacement of an expression
E can proceed through a series of adapters. This differs from the
visitor's "chaining" feature in that the resulting object is passed
through all replacing functions unconditionally, rather than stopping
at the first one that returns non-None.
* An adapt_required option, used by eager loading to indicate that
we don't trust a result row column that is not translated.
This is to prevent a column from being interpreted as that
of the child row in a self-referential scenario, see
inheritance/test_basic.py->EagerTargetingTest.test_adapt_stringency
"""
def __init__(
self,
selectable,
equivalents=None,
adapt_required=False,
include_fn=None,
exclude_fn=None,
adapt_on_names=False,
allow_label_resolve=True,
anonymize_labels=False,
):
ClauseAdapter.__init__(
self,
selectable,
equivalents,
include_fn=include_fn,
exclude_fn=exclude_fn,
adapt_on_names=adapt_on_names,
anonymize_labels=anonymize_labels,
)
self.columns = util.WeakPopulateDict(self._locate_col)
if self.include_fn or self.exclude_fn:
self.columns = self._IncludeExcludeMapping(self, self.columns)
self.adapt_required = adapt_required
self.allow_label_resolve = allow_label_resolve
self._wrap = None
class _IncludeExcludeMapping(object):
def __init__(self, parent, columns):
self.parent = parent
self.columns = columns
def __getitem__(self, key):
if (
self.parent.include_fn and not self.parent.include_fn(key)
) or (self.parent.exclude_fn and self.parent.exclude_fn(key)):
if self.parent._wrap:
return self.parent._wrap.columns[key]
else:
return key
return self.columns[key]
def wrap(self, adapter):
ac = self.__class__.__new__(self.__class__)
ac.__dict__.update(self.__dict__)
ac._wrap = adapter
ac.columns = util.WeakPopulateDict(ac._locate_col)
if ac.include_fn or ac.exclude_fn:
ac.columns = self._IncludeExcludeMapping(ac, ac.columns)
return ac
def traverse(self, obj):
return self.columns[obj]
adapt_clause = traverse
adapt_list = ClauseAdapter.copy_and_process
def _locate_col(self, col):
c = ClauseAdapter.traverse(self, col)
if self._wrap:
c2 = self._wrap._locate_col(c)
if c2 is not None:
c = c2
if self.adapt_required and c is col:
return None
c._allow_label_resolve = self.allow_label_resolve
return c
def __getstate__(self):
d = self.__dict__.copy()
del d["columns"]
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.columns = util.WeakPopulateDict(self._locate_col)
# sql/visitors.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Visitor/traversal interface and library functions.
SQLAlchemy schema and expression constructs rely on a Python-centric
version of the classic "visitor" pattern as the primary way in which
they apply functionality. The most common use of this pattern
is statement compilation, where individual expression classes match
up to rendering methods that produce a string result. Beyond this,
the visitor system is also used to inspect expressions for various
information and patterns, as well as for the purposes of applying
transformations to expressions.
Examples of how the visit system is used can be seen in the source code
of for example the ``sqlalchemy.sql.util`` and the ``sqlalchemy.sql.compiler``
modules. Some background on clause adaption is also at
http://techspot.zzzeek.org/2008/01/23/expression-transformations/ .
"""
from collections import deque
import operator
from .. import exc
from .. import util
__all__ = [
"VisitableType",
"Visitable",
"ClauseVisitor",
"CloningVisitor",
"ReplacingCloningVisitor",
"iterate",
"iterate_depthfirst",
"traverse_using",
"traverse",
"traverse_depthfirst",
"cloned_traverse",
"replacement_traverse",
]
class VisitableType(type):
"""Metaclass which assigns a ``_compiler_dispatch`` method to classes
having a ``__visit_name__`` attribute.
The ``_compiler_dispatch`` attribute becomes an instance method which
looks approximately like the following::
def _compiler_dispatch(self, visitor, **kw):
'''Look for an attribute named "visit_" + self.__visit_name__
on the visitor, and call it with the same kw params.'''
visit_attr = 'visit_%s' % self.__visit_name__
return getattr(visitor, visit_attr)(self, **kw)
Classes having no ``__visit_name__`` attribute will remain unaffected.
"""
def __init__(cls, clsname, bases, clsdict):
if clsname != "Visitable" and hasattr(cls, "__visit_name__"):
_generate_dispatch(cls)
super(VisitableType, cls).__init__(clsname, bases, clsdict)
def _generate_dispatch(cls):
"""Return an optimized visit dispatch function for the cls
for use by the compiler.
"""
if "__visit_name__" in cls.__dict__:
visit_name = cls.__visit_name__
if isinstance(visit_name, util.compat.string_types):
# There is an optimization opportunity here because the
# string name of the class's __visit_name__ is known at
# this early stage (import time) so it can be pre-constructed.
getter = operator.attrgetter("visit_%s" % visit_name)
def _compiler_dispatch(self, visitor, **kw):
try:
meth = getter(visitor)
except AttributeError as err:
util.raise_(
exc.UnsupportedCompilationError(visitor, cls),
replace_context=err,
)
else:
return meth(self, **kw)
else:
# The optimization opportunity is lost for this case because the
# __visit_name__ is not yet a string. As a result, the visit
# string has to be recalculated with each compilation.
def _compiler_dispatch(self, visitor, **kw):
visit_attr = "visit_%s" % self.__visit_name__
try:
meth = getattr(visitor, visit_attr)
except AttributeError as err:
util.raise_(
exc.UnsupportedCompilationError(visitor, cls),
replace_context=err,
)
else:
return meth(self, **kw)
_compiler_dispatch.__doc__ = """Look for an attribute named "visit_" + self.__visit_name__
on the visitor, and call it with the same kw params.
"""
cls._compiler_dispatch = _compiler_dispatch
class Visitable(util.with_metaclass(VisitableType, object)):
"""Base class for visitable objects, applies the
:class:`.visitors.VisitableType` metaclass.
The :class:`.Visitable` class is essentially at the base of the
:class:`_expression.ClauseElement` hierarchy.
"""
class ClauseVisitor(object):
"""Base class for visitor objects which can traverse using
the :func:`.visitors.traverse` function.
Direct usage of the :func:`.visitors.traverse` function is usually
preferred.
"""
__traverse_options__ = {}
def traverse_single(self, obj, **kw):
for v in self.visitor_iterator:
meth = getattr(v, "visit_%s" % obj.__visit_name__, None)
if meth:
return meth(obj, **kw)
def iterate(self, obj):
"""traverse the given expression structure, returning an iterator
of all elements.
"""
return iterate(obj, self.__traverse_options__)
def traverse(self, obj):
"""traverse and visit the given expression structure."""
return traverse(obj, self.__traverse_options__, self._visitor_dict)
@util.memoized_property
def _visitor_dict(self):
visitors = {}
for name in dir(self):
if name.startswith("visit_"):
visitors[name[6:]] = getattr(self, name)
return visitors
@property
def visitor_iterator(self):
"""iterate through this visitor and each 'chained' visitor."""
v = self
while v:
yield v
v = getattr(v, "_next", None)
def chain(self, visitor):
"""'chain' an additional ClauseVisitor onto this ClauseVisitor.
The chained visitor will receive all visit events after this one.
"""
tail = list(self.visitor_iterator)[-1]
tail._next = visitor
return self
class CloningVisitor(ClauseVisitor):
"""Base class for visitor objects which can traverse using
the :func:`.visitors.cloned_traverse` function.
Direct usage of the :func:`.visitors.cloned_traverse` function is usually
preferred.
"""
def copy_and_process(self, list_):
"""Apply cloned traversal to the given list of elements, and return
the new list.
"""
return [self.traverse(x) for x in list_]
def traverse(self, obj):
"""traverse and visit the given expression structure."""
return cloned_traverse(
obj, self.__traverse_options__, self._visitor_dict
)
class ReplacingCloningVisitor(CloningVisitor):
"""Base class for visitor objects which can traverse using
the :func:`.visitors.replacement_traverse` function.
Direct usage of the :func:`.visitors.replacement_traverse` function is
usually preferred.
"""
def replace(self, elem):
"""receive pre-copied elements during a cloning traversal.
If the method returns a new element, the element is used
instead of creating a simple copy of the element. Traversal
will halt on the newly returned element if it is re-encountered.
"""
return None
def traverse(self, obj):
"""traverse and visit the given expression structure."""
def replace(elem):
for v in self.visitor_iterator:
e = v.replace(elem)
if e is not None:
return e
return replacement_traverse(obj, self.__traverse_options__, replace)
def iterate(obj, opts):
r"""traverse the given expression structure, returning an iterator.
traversal is configured to be breadth-first.
The central API feature used by the :func:`.visitors.iterate` and
:func:`.visitors.iterate_depthfirst` functions is the
:meth:`_expression.ClauseElement.get_children` method of
:class:`_expression.ClauseElement`
objects. This method should return all the
:class:`_expression.ClauseElement` objects
which are associated with a particular :class:`_expression.ClauseElement`
object.
For example, a :class:`.Case` structure will refer to a series of
:class:`_expression.ColumnElement`
objects within its "whens" and "else\_" member
variables.
:param obj: :class:`_expression.ClauseElement` structure to be traversed
:param opts: dictionary of iteration options. This dictionary is usually
empty in modern usage.
"""
# fasttrack for atomic elements like columns
children = obj.get_children(**opts)
if not children:
return [obj]
traversal = deque()
stack = deque([obj])
while stack:
t = stack.popleft()
traversal.append(t)
for c in t.get_children(**opts):
stack.append(c)
return iter(traversal)
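# Illustrative sketch (not part of the original module): breadth-first
# iteration over a small expression tree, showing each element's
# ``__visit_name__``. The column names are hypothetical.
def _example_iterate():
    from sqlalchemy.sql import column

    expr = column("a") + column("b") == column("c")
    for elem in iterate(expr, {}):
        # prints visit names such as "binary" and "column", parents first
        print(elem.__visit_name__)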
def iterate_depthfirst(obj, opts):
"""traverse the given expression structure, returning an iterator.
traversal is configured to be depth-first.
:param obj: :class:`_expression.ClauseElement` structure to be traversed
:param opts: dictionary of iteration options. This dictionary is usually
empty in modern usage.
.. seealso::
:func:`.visitors.iterate` - includes a general overview of iteration.
"""
# fasttrack for atomic elements like columns
children = obj.get_children(**opts)
if not children:
return [obj]
stack = deque([obj])
traversal = deque()
while stack:
t = stack.pop()
traversal.appendleft(t)
for c in t.get_children(**opts):
stack.append(c)
return iter(traversal)
def traverse_using(iterator, obj, visitors):
"""visit the given expression structure using the given iterator of
objects.
:func:`.visitors.traverse_using` is usually called internally as the result
of the :func:`.visitors.traverse` or :func:`.visitors.traverse_depthfirst`
functions.
:param iterator: an iterable or sequence which will yield
:class:`_expression.ClauseElement`
structures; the iterator is assumed to be the
product of the :func:`.visitors.iterate` or
:func:`.visitors.iterate_depthfirst` functions.
:param obj: the :class:`_expression.ClauseElement`
that was used as the target of the
:func:`.iterate` or :func:`.iterate_depthfirst` function.
:param visitors: dictionary of visit functions. See :func:`.traverse`
for details on this dictionary.
.. seealso::
:func:`.traverse`
:func:`.traverse_depthfirst`
"""
for target in iterator:
meth = visitors.get(target.__visit_name__, None)
if meth:
meth(target)
return obj
def traverse(obj, opts, visitors):
"""traverse and visit the given expression structure using the default
iterator.
e.g.::
from sqlalchemy.sql import visitors
stmt = select([some_table]).where(some_table.c.foo == 'bar')
def visit_bindparam(bind_param):
print("found bound value: %s" % bind_param.value)
visitors.traverse(stmt, {}, {"bindparam": visit_bindparam})
The iteration of objects uses the :func:`.visitors.iterate` function,
which does a breadth-first traversal using a stack.
:param obj: :class:`_expression.ClauseElement` structure to be traversed
:param opts: dictionary of iteration options. This dictionary is usually
empty in modern usage.
:param visitors: dictionary of visit functions. The dictionary should
have strings as keys, each of which would correspond to the
``__visit_name__`` of a particular kind of SQL expression object, and
callable functions as values, each of which represents a visitor function
for that kind of object.
"""
return traverse_using(iterate(obj, opts), obj, visitors)
def traverse_depthfirst(obj, opts, visitors):
"""traverse and visit the given expression structure using the
depth-first iterator.
The iteration of objects uses the :func:`.visitors.iterate_depthfirst`
function, which does a depth-first traversal using a stack.
Usage is the same as that of :func:`.visitors.traverse` function.
"""
return traverse_using(iterate_depthfirst(obj, opts), obj, visitors)
def cloned_traverse(obj, opts, visitors):
"""clone the given expression structure, allowing modifications by
visitors.
Traversal usage is the same as that of :func:`.visitors.traverse`.
The visitor functions present in the ``visitors`` dictionary may also
modify the internals of the given structure as the traversal proceeds.
The central API feature used by the :func:`.visitors.cloned_traverse`
and :func:`.visitors.replacement_traverse` functions, in addition to the
:meth:`_expression.ClauseElement.get_children`
function that is used to achieve
the iteration, is the :meth:`_expression.ClauseElement._copy_internals`
method.
For a :class:`_expression.ClauseElement`
structure to support cloning and replacement
traversals correctly, it needs to be able to pass a cloning function into
its internal members in order to make copies of them.
.. seealso::
:func:`.visitors.traverse`
:func:`.visitors.replacement_traverse`
"""
cloned = {}
stop_on = set(opts.get("stop_on", []))
def clone(elem, **kw):
if elem in stop_on:
return elem
else:
if id(elem) not in cloned:
cloned[id(elem)] = newelem = elem._clone()
newelem._copy_internals(clone=clone, **kw)
meth = visitors.get(newelem.__visit_name__, None)
if meth:
meth(newelem)
return cloned[id(elem)]
if obj is not None:
obj = clone(obj)
clone = None # remove gc cycles
return obj
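# Illustrative sketch (not part of the original module): using
# cloned_traverse() to rewrite a bound parameter value on a copy, leaving the
# original expression untouched. The parameter name "x" is hypothetical.
def _example_cloned_traverse():
    from sqlalchemy.sql import bindparam, column

    expr = column("x") == bindparam("x", value=1)

    def visit_bindparam(bind):
        bind.value = 99

    cloned = cloned_traverse(expr, {}, {"bindparam": visit_bindparam})
    print(expr.right.value, cloned.right.value)  # 1 99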
def replacement_traverse(obj, opts, replace):
"""clone the given expression structure, allowing element
replacement by a given replacement function.
This function is very similar to the :func:`.visitors.cloned_traverse`
function, except instead of being passed a dictionary of visitors, all
elements are unconditionally passed into the given replace function.
The replace function then has the option to return an entirely new object
which will replace the one given. If it returns ``None``, then the object
is kept in place.
The difference in usage between :func:`.visitors.cloned_traverse` and
:func:`.visitors.replacement_traverse` is that in the former case, an
already-cloned object is passed to the visitor function, and the visitor
function can then manipulate the internal state of the object.
In the case of the latter, the visitor function should only return an
entirely different object, or do nothing.
The use case for :func:`.visitors.replacement_traverse` is that of
replacing a FROM clause inside of a SQL structure with a different one,
as is a common use case within the ORM.
"""
cloned = {}
stop_on = {id(x) for x in opts.get("stop_on", [])}
def clone(elem, **kw):
if (
id(elem) in stop_on
or "no_replacement_traverse" in elem._annotations
):
return elem
else:
newelem = replace(elem)
if newelem is not None:
stop_on.add(id(newelem))
return newelem
else:
if elem not in cloned:
cloned[elem] = newelem = elem._clone()
newelem._copy_internals(clone=clone, **kw)
return cloned[elem]
if obj is not None:
obj = clone(obj, **opts)
clone = None # remove gc cycles
return obj
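# Illustrative sketch (not part of the original module): replacing one column
# element with another via replacement_traverse(). Names are hypothetical.
def _example_replacement_traverse():
    from sqlalchemy.sql import column

    x, y = column("x"), column("y")
    expr = x == 5

    def replace(elem):
        return y if elem is x else None

    # renders approximately: y = :x_1
    print(replacement_traverse(expr, {}, replace))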
# sql/default_comparator.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Default implementation of SQL comparison operations.
"""
from . import operators
from . import type_api
from .elements import _clause_element_as_expr
from .elements import _const_expr
from .elements import _is_literal
from .elements import _literal_as_text
from .elements import and_
from .elements import BinaryExpression
from .elements import BindParameter
from .elements import ClauseElement
from .elements import ClauseList
from .elements import collate
from .elements import CollectionAggregate
from .elements import ColumnElement
from .elements import False_
from .elements import Null
from .elements import or_
from .elements import TextClause
from .elements import True_
from .elements import Tuple
from .elements import UnaryExpression
from .elements import Visitable
from .selectable import Alias
from .selectable import ScalarSelect
from .selectable import Selectable
from .selectable import SelectBase
from .. import exc
from .. import util
def _boolean_compare(
expr,
op,
obj,
negate=None,
reverse=False,
_python_is_types=(util.NoneType, bool),
result_type=None,
**kwargs
):
if result_type is None:
result_type = type_api.BOOLEANTYPE
if isinstance(obj, _python_is_types + (Null, True_, False_)):
# allow x ==/!= True/False to be treated as a literal.
# this comes out to "== / != true/false" or "1/0" if those
# constants aren't supported and works on all platforms
if op in (operators.eq, operators.ne) and isinstance(
obj, (bool, True_, False_)
):
return BinaryExpression(
expr,
_literal_as_text(obj),
op,
type_=result_type,
negate=negate,
modifiers=kwargs,
)
elif op in (operators.is_distinct_from, operators.isnot_distinct_from):
return BinaryExpression(
expr,
_literal_as_text(obj),
op,
type_=result_type,
negate=negate,
modifiers=kwargs,
)
else:
# all other None/True/False uses IS, IS NOT
if op in (operators.eq, operators.is_):
return BinaryExpression(
expr,
_const_expr(obj),
operators.is_,
negate=operators.isnot,
type_=result_type,
)
elif op in (operators.ne, operators.isnot):
return BinaryExpression(
expr,
_const_expr(obj),
operators.isnot,
negate=operators.is_,
type_=result_type,
)
else:
raise exc.ArgumentError(
"Only '=', '!=', 'is_()', 'isnot()', "
"'is_distinct_from()', 'isnot_distinct_from()' "
"operators can be used with None/True/False"
)
else:
obj = _check_literal(expr, op, obj)
if reverse:
return BinaryExpression(
obj, expr, op, type_=result_type, negate=negate, modifiers=kwargs
)
else:
return BinaryExpression(
expr, obj, op, type_=result_type, negate=negate, modifiers=kwargs
)
def _custom_op_operate(expr, op, obj, reverse=False, result_type=None, **kw):
if result_type is None:
if op.return_type:
result_type = op.return_type
elif op.is_comparison:
result_type = type_api.BOOLEANTYPE
return _binary_operate(
expr, op, obj, reverse=reverse, result_type=result_type, **kw
)
def _binary_operate(expr, op, obj, reverse=False, result_type=None, **kw):
obj = _check_literal(expr, op, obj)
if reverse:
left, right = obj, expr
else:
left, right = expr, obj
if result_type is None:
op, result_type = left.comparator._adapt_expression(
op, right.comparator
)
return BinaryExpression(left, right, op, type_=result_type, modifiers=kw)
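# Illustrative sketch (not part of the original module): the coercion done by
# _binary_operate is what turns a plain Python value into a bound parameter
# when combined with a column expression; this exercises it through the
# public operator API. The column name is hypothetical.
def _example_binary_operate():
    from sqlalchemy.sql import column

    expr = column("q") + 5
    print(type(expr.right).__name__)  # BindParameter
    print(expr)  # q + :q_1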
def _conjunction_operate(expr, op, other, **kw):
if op is operators.and_:
return and_(expr, other)
elif op is operators.or_:
return or_(expr, other)
else:
raise NotImplementedError()
def _scalar(expr, op, fn, **kw):
return fn(expr)
def _in_impl(expr, op, seq_or_selectable, negate_op, **kw):
seq_or_selectable = _clause_element_as_expr(seq_or_selectable)
if isinstance(seq_or_selectable, ScalarSelect):
return _boolean_compare(expr, op, seq_or_selectable, negate=negate_op)
elif isinstance(seq_or_selectable, SelectBase):
# TODO: if we ever want to support (x, y, z) IN (select x,
# y, z from table), we would need a multi-column version of
# as_scalar() to produce a multi- column selectable that
# does not export itself as a FROM clause
return _boolean_compare(
expr, op, seq_or_selectable.as_scalar(), negate=negate_op, **kw
)
elif isinstance(seq_or_selectable, (Selectable, TextClause)):
return _boolean_compare(
expr, op, seq_or_selectable, negate=negate_op, **kw
)
elif isinstance(seq_or_selectable, ClauseElement):
if (
isinstance(seq_or_selectable, BindParameter)
and seq_or_selectable.expanding
):
if isinstance(expr, Tuple):
seq_or_selectable = seq_or_selectable._with_expanding_in_types(
[elem.type for elem in expr]
)
return _boolean_compare(
expr, op, seq_or_selectable, negate=negate_op
)
else:
raise exc.InvalidRequestError(
"in_() accepts"
" either a list of expressions, "
'a selectable, or an "expanding" bound parameter: %r'
% seq_or_selectable
)
# Handle non selectable arguments as sequences
args = []
for o in seq_or_selectable:
if not _is_literal(o):
if not isinstance(o, operators.ColumnOperators):
raise exc.InvalidRequestError(
"in_() accepts"
" either a list of expressions, "
'a selectable, or an "expanding" bound parameter: %r' % o
)
elif o is None:
o = Null()
else:
o = expr._bind_param(op, o)
args.append(o)
if len(args) == 0:
op, negate_op = (
(operators.empty_in_op, operators.empty_notin_op)
if op is operators.in_op
else (operators.empty_notin_op, operators.empty_in_op)
)
return _boolean_compare(
expr,
op,
ClauseList(_tuple_values=isinstance(expr, Tuple), *args).self_group(
against=op
),
negate=negate_op,
)
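# Illustrative sketch (not part of the original module): the three argument
# shapes accepted by _in_impl, reached through the public in_() API. Names
# are hypothetical.
def _example_in_impl():
    from sqlalchemy.sql import bindparam, column, select, table

    t = table("t", column("x"))
    print(t.c.x.in_([1, 2, 3]))        # t.x IN (:x_1, :x_2, :x_3)
    print(t.c.x.in_(select([t.c.x])))  # t.x IN (SELECT t.x FROM t)
    # an "expanding" parameter renders as a deferred placeholder
    print(t.c.x.in_(bindparam("xs", expanding=True)))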
def _getitem_impl(expr, op, other, **kw):
if isinstance(expr.type, type_api.INDEXABLE):
other = _check_literal(expr, op, other)
return _binary_operate(expr, op, other, **kw)
else:
_unsupported_impl(expr, op, other, **kw)
def _unsupported_impl(expr, op, *arg, **kw):
raise NotImplementedError(
"Operator '%s' is not supported on " "this expression" % op.__name__
)
def _inv_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.__inv__`."""
if hasattr(expr, "negation_clause"):
return expr.negation_clause
else:
return expr._negate()
def _neg_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.__neg__`."""
return UnaryExpression(expr, operator=operators.neg, type_=expr.type)
def _match_impl(expr, op, other, **kw):
"""See :meth:`.ColumnOperators.match`."""
return _boolean_compare(
expr,
operators.match_op,
_check_literal(expr, operators.match_op, other),
result_type=type_api.MATCHTYPE,
negate=operators.notmatch_op
if op is operators.match_op
else operators.match_op,
**kw
)
def _distinct_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.distinct`."""
return UnaryExpression(
expr, operator=operators.distinct_op, type_=expr.type
)
def _between_impl(expr, op, cleft, cright, **kw):
"""See :meth:`.ColumnOperators.between`."""
return BinaryExpression(
expr,
ClauseList(
_check_literal(expr, operators.and_, cleft),
_check_literal(expr, operators.and_, cright),
operator=operators.and_,
group=False,
group_contents=False,
),
op,
negate=operators.notbetween_op
if op is operators.between_op
else operators.between_op,
modifiers=kw,
)
def _collate_impl(expr, op, other, **kw):
return collate(expr, other)
# a mapping of operators with the method they use, along with
# their negated operator for comparison operators
operator_lookup = {
"and_": (_conjunction_operate,),
"or_": (_conjunction_operate,),
"inv": (_inv_impl,),
"add": (_binary_operate,),
"mul": (_binary_operate,),
"sub": (_binary_operate,),
"div": (_binary_operate,),
"mod": (_binary_operate,),
"truediv": (_binary_operate,),
"custom_op": (_custom_op_operate,),
"json_path_getitem_op": (_binary_operate,),
"json_getitem_op": (_binary_operate,),
"concat_op": (_binary_operate,),
"any_op": (_scalar, CollectionAggregate._create_any),
"all_op": (_scalar, CollectionAggregate._create_all),
"lt": (_boolean_compare, operators.ge),
"le": (_boolean_compare, operators.gt),
"ne": (_boolean_compare, operators.eq),
"gt": (_boolean_compare, operators.le),
"ge": (_boolean_compare, operators.lt),
"eq": (_boolean_compare, operators.ne),
"is_distinct_from": (_boolean_compare, operators.isnot_distinct_from),
"isnot_distinct_from": (_boolean_compare, operators.is_distinct_from),
"like_op": (_boolean_compare, operators.notlike_op),
"ilike_op": (_boolean_compare, operators.notilike_op),
"notlike_op": (_boolean_compare, operators.like_op),
"notilike_op": (_boolean_compare, operators.ilike_op),
"contains_op": (_boolean_compare, operators.notcontains_op),
"startswith_op": (_boolean_compare, operators.notstartswith_op),
"endswith_op": (_boolean_compare, operators.notendswith_op),
"desc_op": (_scalar, UnaryExpression._create_desc),
"asc_op": (_scalar, UnaryExpression._create_asc),
"nullsfirst_op": (_scalar, UnaryExpression._create_nullsfirst),
"nullslast_op": (_scalar, UnaryExpression._create_nullslast),
"in_op": (_in_impl, operators.notin_op),
"notin_op": (_in_impl, operators.in_op),
"is_": (_boolean_compare, operators.is_),
"isnot": (_boolean_compare, operators.isnot),
"collate": (_collate_impl,),
"match_op": (_match_impl,),
"notmatch_op": (_match_impl,),
"distinct_op": (_distinct_impl,),
"between_op": (_between_impl,),
"notbetween_op": (_between_impl,),
"neg": (_neg_impl,),
"getitem": (_getitem_impl,),
"lshift": (_unsupported_impl,),
"rshift": (_unsupported_impl,),
"contains": (_unsupported_impl,),
}
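# Illustrative sketch (not part of the original module): how the table above
# is consumed. Comparison operators map to an implementation function plus
# their negated counterpart, so ``eq`` builds expressions that know ``ne``
# is their negation.
def _example_operator_lookup():
    impl, negate = operator_lookup["eq"]
    assert impl is _boolean_compare
    assert negate is operators.ne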
def _check_literal(expr, operator, other, bindparam_type=None):
if isinstance(other, (ColumnElement, TextClause)):
if isinstance(other, BindParameter) and other.type._isnull:
other = other._clone()
other.type = expr.type
return other
elif hasattr(other, "__clause_element__"):
other = other.__clause_element__()
elif isinstance(other, type_api.TypeEngine.Comparator):
other = other.expr
if isinstance(other, (SelectBase, Alias)):
return other.as_scalar()
elif not isinstance(other, Visitable):
return expr._bind_param(operator, other, type_=bindparam_type)
else:
return other
# sql/elements.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core SQL expression elements, including :class:`_expression.ClauseElement`,
:class:`_expression.ColumnElement`, and derived classes.
"""
from __future__ import unicode_literals
import itertools
import numbers
import operator
import re
from . import operators
from . import type_api
from .annotation import Annotated
from .base import _generative
from .base import Executable
from .base import Immutable
from .base import NO_ARG
from .base import PARSE_AUTOCOMMIT
from .visitors import cloned_traverse
from .visitors import traverse
from .visitors import Visitable
from .. import exc
from .. import inspection
from .. import util
def _clone(element, **kw):
return element._clone()
def _document_text_coercion(paramname, meth_rst, param_rst):
return util.add_parameter_text(
paramname,
(
".. warning:: "
"The %s argument to %s can be passed as a Python string argument, "
"which will be treated "
"as **trusted SQL text** and rendered as given. **DO NOT PASS "
"UNTRUSTED INPUT TO THIS PARAMETER**."
)
% (param_rst, meth_rst),
)
def collate(expression, collation):
"""Return the clause ``expression COLLATE collation``.
e.g.::
collate(mycolumn, 'utf8_bin')
produces::
mycolumn COLLATE utf8_bin
The collation expression is also quoted if it is a case sensitive
identifier, e.g. contains uppercase characters.
.. versionchanged:: 1.2 quoting is automatically applied to COLLATE
expressions if they are case sensitive.
"""
expr = _literal_as_binds(expression)
return BinaryExpression(
expr, CollationClause(collation), operators.collate, type_=expr.type
)
def between(expr, lower_bound, upper_bound, symmetric=False):
"""Produce a ``BETWEEN`` predicate clause.
E.g.::
from sqlalchemy import between
stmt = select([users_table]).where(between(users_table.c.id, 5, 7))
Would produce SQL resembling::
SELECT id, name FROM user WHERE id BETWEEN :id_1 AND :id_2
The :func:`.between` function is a standalone version of the
:meth:`_expression.ColumnElement.between` method available on all
SQL expressions, as in::
stmt = select([users_table]).where(users_table.c.id.between(5, 7))
All arguments passed to :func:`.between`, including the left side
column expression, are coerced from Python scalar values if the
value is not a :class:`_expression.ColumnElement` subclass.
For example,
three fixed values can be compared as in::
print(between(5, 3, 7))
Which would produce::
:param_1 BETWEEN :param_2 AND :param_3
:param expr: a column expression, typically a
:class:`_expression.ColumnElement`
instance or alternatively a Python scalar expression to be coerced
into a column expression, serving as the left side of the ``BETWEEN``
expression.
:param lower_bound: a column or Python scalar expression serving as the
lower bound of the right side of the ``BETWEEN`` expression.
:param upper_bound: a column or Python scalar expression serving as the
upper bound of the right side of the ``BETWEEN`` expression.
:param symmetric: if True, will render " BETWEEN SYMMETRIC ". Note
that not all databases support this syntax.
.. versionadded:: 0.9.5
.. seealso::
:meth:`_expression.ColumnElement.between`
"""
expr = _literal_as_binds(expr)
return expr.between(lower_bound, upper_bound, symmetric=symmetric)
def literal(value, type_=None):
r"""Return a literal clause, bound to a bind parameter.
Literal clauses are created automatically when non-
:class:`_expression.ClauseElement` objects (such as strings, ints, dates,
etc.) are
used in a comparison operation with a :class:`_expression.ColumnElement`
subclass,
such as a :class:`~sqlalchemy.schema.Column` object. Use this function
to force the generation of a literal clause, which will be created as a
:class:`BindParameter` with a bound value.
:param value: the value to be bound. Can be any Python object supported by
the underlying DB-API, or is translatable via the given type argument.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which
will provide bind-parameter translation for this literal.
"""
return BindParameter(None, value, type_=type_, unique=True)
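# Illustrative sketch (not part of the original module): forcing the left
# side of a comparison to be a bound literal rather than a column. The
# values are hypothetical.
def _example_literal():
    from sqlalchemy import String

    expr = literal("phrase", String) == "phrase"
    # both sides render as bound parameters: :param_1 = :param_2
    print(expr)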
def outparam(key, type_=None):
"""Create an 'OUT' parameter for usage in functions (stored procedures),
for databases which support them.
The ``outparam`` can be used like a regular function parameter.
The "output" value will be available from the
:class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters``
attribute, which returns a dictionary containing the values.
"""
return BindParameter(key, None, type_=type_, unique=False, isoutparam=True)
def not_(clause):
"""Return a negation of the given clause, i.e. ``NOT(clause)``.
The ``~`` operator is also overloaded on all
:class:`_expression.ColumnElement` subclasses to produce the
same result.
"""
return operators.inv(_literal_as_binds(clause))
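# Illustrative sketch (not part of the original module): not_() and the
# overloaded ~ operator produce the same negation. The column name is
# hypothetical.
def _example_not():
    from sqlalchemy.sql import column

    crit = column("x") == 5
    print(not_(crit))  # x != :x_1
    print(~crit)       # x != :x_1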
@inspection._self_inspects
class ClauseElement(Visitable):
"""Base class for elements of a programmatically constructed SQL
expression.
"""
__visit_name__ = "clause"
_annotations = {}
supports_execution = False
_from_objects = []
bind = None
_is_clone_of = None
is_selectable = False
is_clause_element = True
description = None
_order_by_label_element = None
_is_from_container = False
def _clone(self):
"""Create a shallow copy of this ClauseElement.
This method may be used by a generative API. It's also used as
part of the "deep" copy afforded by a traversal that combines
the _copy_internals() method.
"""
c = self.__class__.__new__(self.__class__)
c.__dict__ = self.__dict__.copy()
ClauseElement._cloned_set._reset(c)
ColumnElement.comparator._reset(c)
# this is a marker that helps to "equate" clauses to each other
# when a Select returns its list of FROM clauses. the cloning
# process leaves around a lot of remnants of the previous clause
# typically in the form of column expressions still attached to the
# old table.
c._is_clone_of = self
return c
@property
def _constructor(self):
"""return the 'constructor' for this ClauseElement.
This is for the purposes of creating a new object of
this type. Usually, it's just the element's __class__.
However, the "Annotated" version of the object overrides
to return the class of its proxied element.
"""
return self.__class__
@util.memoized_property
def _cloned_set(self):
"""Return the set consisting all cloned ancestors of this
ClauseElement.
Includes this ClauseElement. This accessor tends to be used for
FromClause objects to identify 'equivalent' FROM clauses, regardless
of transformative operations.
"""
s = util.column_set()
f = self
# note this creates a cycle, asserted in test_memusage. however,
# turning this into a plain @property adds tens of thousands of method
# calls to Core / ORM performance tests, so the small overhead
# introduced by the relatively small amount of short term cycles
# produced here is preferable
while f is not None:
s.add(f)
f = f._is_clone_of
return s
def __getstate__(self):
d = self.__dict__.copy()
d.pop("_is_clone_of", None)
return d
def _annotate(self, values):
"""return a copy of this ClauseElement with annotations
updated by the given dictionary.
"""
return Annotated(self, values)
def _with_annotations(self, values):
"""return a copy of this ClauseElement with annotations
replaced by the given dictionary.
"""
return Annotated(self, values)
def _deannotate(self, values=None, clone=False):
"""return a copy of this :class:`_expression.ClauseElement`
with annotations
removed.
:param values: optional tuple of individual values
to remove.
"""
if clone:
# clone is used when we are also copying
# the expression for a deep deannotation
return self._clone()
else:
# if no clone, since we have no annotations we return
# self
return self
def _execute_on_connection(self, connection, multiparams, params):
if self.supports_execution:
return connection._execute_clauseelement(self, multiparams, params)
else:
raise exc.ObjectNotExecutableError(self)
def unique_params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elements replaced.
Same functionality as ``params()``, except adds `unique=True`
to affected bind parameters so that multiple statements can be
used.
"""
return self._params(True, optionaldict, kwargs)
def params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elements replaced.
Returns a copy of this ClauseElement with :func:`bindparam()`
elements replaced with values taken from the given dictionary::
>>> clause = column('x') + bindparam('foo')
>>> print(clause.compile().params)
{'foo':None}
>>> print(clause.params({'foo':7}).compile().params)
{'foo':7}
"""
return self._params(False, optionaldict, kwargs)
def _params(self, unique, optionaldict, kwargs):
if len(optionaldict) == 1:
kwargs.update(optionaldict[0])
elif len(optionaldict) > 1:
raise exc.ArgumentError(
"params() takes zero or one positional dictionary argument"
)
def visit_bindparam(bind):
if bind.key in kwargs:
bind.value = kwargs[bind.key]
bind.required = False
if unique:
bind._convert_to_unique()
return cloned_traverse(self, {}, {"bindparam": visit_bindparam})
def compare(self, other, **kw):
r"""Compare this ClauseElement to the given ClauseElement.
Subclasses should override the default behavior, which is a
straight identity comparison.
\**kw are arguments consumed by subclass compare() methods and
may be used to modify the criteria for comparison.
(see :class:`_expression.ColumnElement`)
"""
return self is other
def _copy_internals(self, clone=_clone, **kw):
"""Reassign internal elements to be clones of themselves.
Called during a copy-and-traverse operation on newly
shallow-copied elements to create a deep copy.
The given clone function should be used, which may be applying
additional transformations to the element (i.e. replacement
traversal, cloned traversal, annotations).
"""
pass
def get_children(self, **kwargs):
r"""Return immediate child elements of this
:class:`_expression.ClauseElement`.
This is used for visit traversal.
\**kwargs may contain flags that change the collection that is
returned, for example to return a subset of items in order to
cut down on larger traversals, or to return child items from a
different context (such as schema-level collections instead of
clause-level).
"""
return []
def self_group(self, against=None):
"""Apply a 'grouping' to this :class:`_expression.ClauseElement`.
This method is overridden by subclasses to return a
"grouping" construct, i.e. parenthesis. In particular
it's used by "binary" expressions to provide a grouping
around themselves when placed into a larger expression,
as well as by :func:`_expression.select` constructs when placed into
the FROM clause of another :func:`_expression.select`. (Note that
subqueries should normally be created using the
:meth:`_expression.Select.alias` method, as many platforms require
nested SELECT statements to be named).
As expressions are composed together, the application of
:meth:`self_group` is automatic - end-user code should never
need to use this method directly. Note that SQLAlchemy's
clause constructs take operator precedence into account -
so parenthesis might not be needed, for example, in
an expression like ``x OR (y AND z)`` - AND takes precedence
over OR.
The base :meth:`self_group` method of
:class:`_expression.ClauseElement`
just returns self.
"""
return self
@util.dependencies("sqlalchemy.engine.default")
def compile(self, default, bind=None, dialect=None, **kw):
"""Compile this SQL expression.
The return value is a :class:`~.Compiled` object.
Calling ``str()`` or ``unicode()`` on the returned value will yield a
string representation of the result. The
:class:`~.Compiled` object also can return a
dictionary of bind parameter names and values
using the ``params`` accessor.
:param bind: An ``Engine`` or ``Connection`` from which a
``Compiled`` will be acquired. This argument takes precedence over
this :class:`_expression.ClauseElement`'s bound engine, if any.
:param column_keys: Used for INSERT and UPDATE statements, a list of
column names which should be present in the VALUES clause of the
compiled statement. If ``None``, all columns from the target table
object are rendered.
:param dialect: A ``Dialect`` instance from which a ``Compiled``
will be acquired. This argument takes precedence over the `bind`
argument as well as this :class:`_expression.ClauseElement`
's bound engine,
if any.
:param inline: Used for INSERT statements, for a dialect which does
not support inline retrieval of newly generated primary key
columns, will force the expression used to create the new primary
key value to be rendered inline within the INSERT statement's
VALUES clause. This typically refers to Sequence execution but may
also refer to any server-side default generation function
associated with a primary key `Column`.
:param compile_kwargs: optional dictionary of additional parameters
that will be passed through to the compiler within all "visit"
methods. This allows any custom flag to be passed through to
a custom compilation construct, for example. It is also used
for the case of passing the ``literal_binds`` flag through::
from sqlalchemy.sql import table, column, select
t = table('t', column('x'))
s = select([t]).where(t.c.x == 5)
print(s.compile(compile_kwargs={"literal_binds": True}))
.. versionadded:: 0.9.0
.. seealso::
:ref:`faq_sql_expression_string`
"""
if not dialect:
if bind:
dialect = bind.dialect
elif self.bind:
dialect = self.bind.dialect
bind = self.bind
else:
dialect = default.StrCompileDialect()
return self._compiler(dialect, bind=bind, **kw)
def _compiler(self, dialect, **kw):
"""Return a compiler appropriate for this ClauseElement, given a
Dialect."""
return dialect.statement_compiler(dialect, self, **kw)
def __str__(self):
if util.py3k:
return str(self.compile())
else:
return unicode(self.compile()).encode( # noqa
"ascii", "backslashreplace"
) # noqa
@util.deprecated(
"0.9",
"The :meth:`_expression.ClauseElement.__and__` "
"method is deprecated and will "
"be removed in a future release. Conjunctions should only be "
"used from a :class:`_expression.ColumnElement` subclass, e.g. "
":meth:`_expression.ColumnElement.__and__`.",
)
def __and__(self, other):
"""'and' at the ClauseElement level.
"""
return and_(self, other)
@util.deprecated(
"0.9",
"The :meth:`_expression.ClauseElement.__or__` "
"method is deprecated and will "
"be removed in a future release. Conjunctions should only be "
"used from a :class:`_expression.ColumnElement` subclass, e.g. "
":meth:`_expression.ColumnElement.__or__`.",
)
def __or__(self, other):
"""'or' at the ClauseElement level.
"""
return or_(self, other)
def __invert__(self):
if hasattr(self, "negation_clause"):
return self.negation_clause
else:
return self._negate()
def _negate(self):
return UnaryExpression(
self.self_group(against=operators.inv),
operator=operators.inv,
negate=None,
)
def __bool__(self):
raise TypeError("Boolean value of this clause is not defined")
__nonzero__ = __bool__
def __repr__(self):
friendly = self.description
if friendly is None:
return object.__repr__(self)
else:
return "<%s.%s at 0x%x; %s>" % (
self.__module__,
self.__class__.__name__,
id(self),
friendly,
)
class ColumnElement(operators.ColumnOperators, ClauseElement):
"""Represent a column-oriented SQL expression suitable for usage in the
"columns" clause, WHERE clause etc. of a statement.
While the most familiar kind of :class:`_expression.ColumnElement` is the
:class:`_schema.Column` object, :class:`_expression.ColumnElement`
serves as the basis
for any unit that may be present in a SQL expression, including
the expressions themselves, SQL functions, bound parameters,
literal expressions, keywords such as ``NULL``, etc.
:class:`_expression.ColumnElement`
is the ultimate base class for all such elements.
A wide variety of SQLAlchemy Core functions work at the SQL expression
level, and are intended to accept instances of
:class:`_expression.ColumnElement` as
arguments. These functions will typically document that they accept a
"SQL expression" as an argument. What this means in terms of SQLAlchemy
usually refers to an input which is either already in the form of a
:class:`_expression.ColumnElement` object,
or a value which can be **coerced** into
one. The coercion rules followed by most, but not all, SQLAlchemy Core
functions with regards to SQL expressions are as follows:
* a literal Python value, such as a string, integer or floating
point value, boolean, datetime, ``Decimal`` object, or virtually
any other Python object, will be coerced into a "literal bound
value". This generally means that a :func:`.bindparam` will be
produced featuring the given value embedded into the construct; the
resulting :class:`.BindParameter` object is an instance of
:class:`_expression.ColumnElement`.
The Python value will ultimately be sent
to the DBAPI at execution time as a parameterized argument to the
``execute()`` or ``executemany()`` methods, after SQLAlchemy
type-specific converters (e.g. those provided by any associated
:class:`.TypeEngine` objects) are applied to the value.
* any special object value, typically ORM-level constructs, which
feature a method called ``__clause_element__()``. The Core
expression system looks for this method when an object of otherwise
unknown type is passed to a function that is looking to coerce the
argument into a :class:`_expression.ColumnElement` expression. The
``__clause_element__()`` method, if present, should return a
:class:`_expression.ColumnElement` instance. The primary use of
``__clause_element__()`` within SQLAlchemy is that of class-bound
attributes on ORM-mapped classes; a ``User`` class which contains a
mapped attribute named ``.name`` will have a method
``User.name.__clause_element__()`` which when invoked returns the
:class:`_schema.Column`
called ``name`` associated with the mapped table.
* The Python ``None`` value is typically interpreted as ``NULL``,
which in SQLAlchemy Core produces an instance of :func:`.null`.
A :class:`_expression.ColumnElement` provides the ability to generate new
:class:`_expression.ColumnElement`
objects using Python expressions. This means that Python operators
such as ``==``, ``!=`` and ``<`` are overloaded to mimic SQL operations,
and allow the instantiation of further :class:`_expression.ColumnElement`
instances
which are composed from other, more fundamental
:class:`_expression.ColumnElement`
objects. For example, two :class:`.ColumnClause` objects can be added
together with the addition operator ``+`` to produce
a :class:`.BinaryExpression`.
Both :class:`.ColumnClause` and :class:`.BinaryExpression` are subclasses
of :class:`_expression.ColumnElement`::
>>> from sqlalchemy.sql import column
>>> column('a') + column('b')
<sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
>>> print(column('a') + column('b'))
a + b
.. seealso::
:class:`_schema.Column`
:func:`_expression.column`
"""
__visit_name__ = "column_element"
primary_key = False
foreign_keys = []
_proxies = ()
_label = None
"""The named label that can be used to target
this column in a result set.
This label is almost always the label used when
rendering <expr> AS <label> in a SELECT statement. It also
refers to a name that this column expression can be located from
in a result set.
For a regular Column bound to a Table, this is typically the label
<tablename>_<columnname>. For other constructs, different rules
may apply, such as anonymized labels and others.
"""
key = None
"""the 'key' that in some circumstances refers to this object in a
Python namespace.
This typically refers to the "key" of the column as present in the
``.c`` collection of a selectable, e.g. sometable.c["somekey"] would
return a Column with a .key of "somekey".
"""
_key_label = None
"""A label-based version of 'key' that in some circumstances refers
to this object in a Python namespace.
_key_label comes into play when a select() statement is constructed with
apply_labels(); in this case, all Column objects in the ``.c`` collection
are rendered as <tablename>_<columnname> in SQL; this is essentially the
value of ._label. But to locate those columns in the ``.c`` collection,
the name is along the lines of <tablename>_<key>; that's the typical
value of .key_label.
"""
_render_label_in_columns_clause = True
"""A flag used by select._columns_plus_names that helps to determine
we are actually going to render in terms of "SELECT <col> AS <label>".
This flag can be returned as False for some Column objects that want
to be rendered as simple "SELECT <col>"; typically columns that don't have
any parent table and are named the same as what the label would be
in any case.
"""
_resolve_label = None
"""The name that should be used to identify this ColumnElement in a
select() object when "label resolution" logic is used; this refers
to using a string name in an expression like order_by() or group_by()
that wishes to target a labeled expression in the columns clause.
The name is distinct from that of .name or ._label to account for the case
where anonymizing logic may be used to change the name that's actually
rendered at compile time; this attribute should hold onto the original
name that was user-assigned when producing a .label() construct.
"""
_allow_label_resolve = True
"""A flag that can be flipped to prevent a column from being resolvable
by string label name."""
_is_implicitly_boolean = False
_alt_names = ()
def self_group(self, against=None):
if (
against in (operators.and_, operators.or_, operators._asbool)
and self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity
):
return AsBoolean(self, operators.istrue, operators.isfalse)
elif against in (operators.any_op, operators.all_op):
return Grouping(self)
else:
return self
def _negate(self):
if self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity:
return AsBoolean(self, operators.isfalse, operators.istrue)
else:
return super(ColumnElement, self)._negate()
@util.memoized_property
def type(self):
return type_api.NULLTYPE
@util.memoized_property
def comparator(self):
try:
comparator_factory = self.type.comparator_factory
except AttributeError as err:
util.raise_(
TypeError(
"Object %r associated with '.type' attribute "
"is not a TypeEngine class or object" % self.type
),
replace_context=err,
)
else:
return comparator_factory(self)
def __getattr__(self, key):
try:
return getattr(self.comparator, key)
except AttributeError as err:
util.raise_(
AttributeError(
"Neither %r object nor %r object has an attribute %r"
% (
type(self).__name__,
type(self.comparator).__name__,
key,
)
),
replace_context=err,
)
def operate(self, op, *other, **kwargs):
return op(self.comparator, *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
return op(other, self.comparator, **kwargs)
def _bind_param(self, operator, obj, type_=None):
return BindParameter(
None,
obj,
_compared_to_operator=operator,
type_=type_,
_compared_to_type=self.type,
unique=True,
)
@property
def expression(self):
"""Return a column expression.
Part of the inspection interface; returns self.
"""
return self
@property
def _select_iterable(self):
return (self,)
@util.memoized_property
def base_columns(self):
return util.column_set(c for c in self.proxy_set if not c._proxies)
@util.memoized_property
def proxy_set(self):
s = util.column_set([self])
for c in self._proxies:
s.update(c.proxy_set)
return s
def _uncached_proxy_set(self):
"""An 'uncached' version of proxy set.
This is so that we can read annotations from the list of columns
without breaking the caching of the above proxy_set.
"""
s = util.column_set([self])
for c in self._proxies:
s.update(c._uncached_proxy_set())
return s
def shares_lineage(self, othercolumn):
"""Return True if the given :class:`_expression.ColumnElement`
has a common ancestor to this :class:`_expression.ColumnElement`."""
return bool(self.proxy_set.intersection(othercolumn.proxy_set))
def _compare_name_for_result(self, other):
"""Return True if the given column element compares to this one
when targeting within a result row."""
return (
hasattr(other, "name")
and hasattr(self, "name")
and other.name == self.name
)
def _make_proxy(
self, selectable, name=None, name_is_truncatable=False, **kw
):
"""Create a new :class:`_expression.ColumnElement` representing this
:class:`_expression.ColumnElement`
as it appears in the select list of a
descending selectable.
"""
if name is None:
name = self.anon_label
if self.key:
key = self.key
else:
try:
key = str(self)
except exc.UnsupportedCompilationError:
key = self.anon_label
else:
key = name
co = ColumnClause(
_as_truncated(name) if name_is_truncatable else name,
type_=getattr(self, "type", None),
_selectable=selectable,
)
co._proxies = [self]
if selectable._is_clone_of is not None:
co._is_clone_of = selectable._is_clone_of.columns.get(key)
selectable._columns[key] = co
return co
def compare(self, other, use_proxies=False, equivalents=None, **kw):
"""Compare this ColumnElement to another.
Special arguments understood:
:param use_proxies: when True, consider two columns that
share a common base column as equivalent (i.e. shares_lineage())
:param equivalents: a dictionary of columns as keys mapped to sets
of columns. If the given "other" column is present in this
dictionary, if any of the columns in the corresponding set() pass
the comparison test, the result is True. This is used to expand the
comparison to other columns that may be known to be equivalent to
this one via foreign key or other criterion.
"""
to_compare = (other,)
if equivalents and other in equivalents:
to_compare = equivalents[other].union(to_compare)
for oth in to_compare:
if use_proxies and self.shares_lineage(oth):
return True
elif hash(oth) == hash(self):
return True
else:
return False
def cast(self, type_):
"""Produce a type cast, i.e. ``CAST(<expression> AS <type>)``.
This is a shortcut to the :func:`_expression.cast` function.
.. seealso::
:ref:`coretutorial_casts`
:func:`_expression.cast`
:func:`_expression.type_coerce`
.. versionadded:: 1.0.7
"""
return Cast(self, type_)
def label(self, name):
"""Produce a column label, i.e. ``<columnname> AS <name>``.
This is a shortcut to the :func:`_expression.label` function.
If ``name`` is None, an anonymous label name will be generated.
"""
return Label(name, self, self.type)
@util.memoized_property
def anon_label(self):
"""provides a constant 'anonymous label' for this ColumnElement.
This is a label() expression which will be named at compile time.
The same label() is returned each time anon_label is called so
that expressions can reference anon_label multiple times, producing
the same label name at compile time.
The compiler uses this function automatically at compile time
for expressions that are known to be 'unnamed', like binary
expressions and function calls.
"""
while self._is_clone_of is not None:
self = self._is_clone_of
return _anonymous_label(
"%%(%d %s)s" % (id(self), getattr(self, "name", "anon"))
)
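# Illustrative sketch: because anon_label is memoized, repeated access
# returns the identical label, which is what keeps the compiled name stable
# across multiple references to the same expression.
#
#     >>> from sqlalchemy import column
#     >>> expr = column("q") > 5
#     >>> expr.anon_label is expr.anon_label
#     True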
class BindParameter(ColumnElement):
r"""Represent a "bound expression".
:class:`.BindParameter` is invoked explicitly using the
:func:`.bindparam` function, as in::
from sqlalchemy import bindparam
stmt = select([users_table]).\
where(users_table.c.name == bindparam('username'))
Detailed discussion of how :class:`.BindParameter` is used is
at :func:`.bindparam`.
.. seealso::
:func:`.bindparam`
"""
__visit_name__ = "bindparam"
_is_crud = False
_expanding_in_types = ()
def __init__(
self,
key,
value=NO_ARG,
type_=None,
unique=False,
required=NO_ARG,
quote=None,
callable_=None,
expanding=False,
isoutparam=False,
_compared_to_operator=None,
_compared_to_type=None,
):
r"""Produce a "bound expression".
The return value is an instance of :class:`.BindParameter`; this
is a :class:`_expression.ColumnElement`
subclass which represents a so-called
"placeholder" value in a SQL expression, the value of which is
supplied at the point at which the statement is executed against a
database connection.
In SQLAlchemy, the :func:`.bindparam` construct has
the ability to carry along the actual value that will be ultimately
used at expression time. In this way, it serves not just as
a "placeholder" for eventual population, but also as a means of
representing so-called "unsafe" values which should not be rendered
directly in a SQL statement, but rather should be passed along
to the :term:`DBAPI` as values which need to be correctly escaped
and potentially handled for type-safety.
When using :func:`.bindparam` explicitly, the use case is typically
one of traditional deferment of parameters; the :func:`.bindparam`
construct accepts a name which can then be referred to at execution
time::
from sqlalchemy import bindparam
stmt = select([users_table]).\
where(users_table.c.name == bindparam('username'))
The above statement, when rendered, will produce SQL similar to::
SELECT id, name FROM user WHERE name = :username
In order to populate the value of ``:username`` above, the value
would typically be applied at execution time to a method
like :meth:`_engine.Connection.execute`::
result = connection.execute(stmt, username='wendy')
Explicit use of :func:`.bindparam` is also common when producing
UPDATE or DELETE statements that are to be invoked multiple times,
where the WHERE criterion of the statement is to change on each
invocation, such as::
stmt = (users_table.update().
where(user_table.c.name == bindparam('username')).
values(fullname=bindparam('fullname'))
)
connection.execute(
stmt, [{"username": "wendy", "fullname": "Wendy Smith"},
{"username": "jack", "fullname": "Jack Jones"},
]
)
SQLAlchemy's Core expression system makes wide use of
:func:`.bindparam` in an implicit sense. It is typical that Python
literal values passed to virtually all SQL expression functions are
coerced into fixed :func:`.bindparam` constructs. For example, given
a comparison operation such as::
expr = users_table.c.name == 'Wendy'
The above expression will produce a :class:`.BinaryExpression`
construct, where the left side is the :class:`_schema.Column` object
representing the ``name`` column, and the right side is a
:class:`.BindParameter` representing the literal value::
print(repr(expr.right))
BindParameter('%(4327771088 name)s', 'Wendy', type_=String())
The expression above will render SQL such as::
user.name = :name_1
Where the ``:name_1`` parameter name is an anonymous name. The
actual string ``Wendy`` is not in the rendered string, but is carried
along where it is later used within statement execution. If we
invoke a statement like the following::
stmt = select([users_table]).where(users_table.c.name == 'Wendy')
result = connection.execute(stmt)
We would see SQL logging output as::
SELECT "user".id, "user".name
FROM "user"
WHERE "user".name = %(name_1)s
{'name_1': 'Wendy'}
Above, we see that ``Wendy`` is passed as a parameter to the database,
while the placeholder ``:name_1`` is rendered in the appropriate form
for the target database, in this case the PostgreSQL database.
Similarly, :func:`.bindparam` is invoked automatically
when working with :term:`CRUD` statements as far as the "VALUES"
portion is concerned. The :func:`_expression.insert`
construct produces an
``INSERT`` expression which will, at statement execution time,
generate bound placeholders based on the arguments passed, as in::
stmt = users_table.insert()
result = connection.execute(stmt, name='Wendy')
The above will produce SQL output as::
INSERT INTO "user" (name) VALUES (%(name)s)
{'name': 'Wendy'}
The :class:`_expression.Insert` construct,
at compilation/execution time,
rendered a single :func:`.bindparam` mirroring the column
name ``name`` as a result of the single ``name`` parameter
we passed to the :meth:`_engine.Connection.execute` method.
:param key:
the key (e.g. the name) for this bind param.
Will be used in the generated
SQL statement for dialects that use named parameters. This
value may be modified when part of a compilation operation,
if other :class:`BindParameter` objects exist with the same
key, or if its length is too long and truncation is
required.
:param value:
Initial value for this bind param. Will be used at statement
execution time as the value for this parameter passed to the
DBAPI, if no other value is indicated to the statement execution
method for this particular parameter name. Defaults to ``None``.
:param callable\_:
A callable function that takes the place of "value". The function
will be called at statement execution time to determine the
ultimate value. Used for scenarios where the actual bind
value cannot be determined at the point at which the clause
construct is created, but embedded bind values are still desirable.
:param type\_:
A :class:`.TypeEngine` class or instance representing an optional
datatype for this :func:`.bindparam`. If not passed, a type
may be determined automatically for the bind, based on the given
value; for example, trivial Python types such as ``str``,
``int``, ``bool``
may result in the :class:`.String`, :class:`.Integer` or
:class:`.Boolean` types being automatically selected.
The type of a :func:`.bindparam` is significant especially in that
the type will apply pre-processing to the value before it is
passed to the database. For example, a :func:`.bindparam` which
refers to a datetime value, and is specified as holding the
:class:`.DateTime` type, may apply conversion needed to the
value (such as stringification on SQLite) before passing the value
to the database.
:param unique:
if True, the key name of this :class:`.BindParameter` will be
modified if another :class:`.BindParameter` of the same name
already has been located within the containing
expression. This flag is used generally by the internals
when producing so-called "anonymous" bound expressions; it
isn't generally applicable to explicitly-named :func:`.bindparam`
constructs.
:param required:
If ``True``, a value is required at execution time. If not passed,
it defaults to ``True`` if neither :paramref:`.bindparam.value`
nor :paramref:`.bindparam.callable` was passed. If either of these
parameters is present, then :paramref:`.bindparam.required`
defaults to ``False``.
:param quote:
True if this parameter name requires quoting and is not
currently known as a SQLAlchemy reserved word; this currently
only applies to the Oracle backend, where bound names must
sometimes be quoted.
:param isoutparam:
if True, the parameter should be treated like a stored procedure
"OUT" parameter. This applies to backends such as Oracle which
support OUT parameters.
:param expanding:
if True, this parameter will be treated as an "expanding" parameter
at execution time; the parameter value is expected to be a sequence,
rather than a scalar value, and the string SQL statement will
be transformed on a per-execution basis to accommodate the sequence
with a variable number of parameter slots passed to the DBAPI.
This is to allow statement caching to be used in conjunction with
an IN clause.
.. seealso::
:meth:`.ColumnOperators.in_`
:ref:`baked_in` - with baked queries
.. note:: The "expanding" feature does not support "executemany"-
style parameter sets.
.. versionadded:: 1.2
.. versionchanged:: 1.3 the "expanding" bound parameter feature now
supports empty lists.
.. seealso::
:ref:`coretutorial_bind_param`
:ref:`coretutorial_insert_expressions`
:func:`.outparam`
"""
if isinstance(key, ColumnClause):
type_ = key.type
key = key.key
if required is NO_ARG:
required = value is NO_ARG and callable_ is None
if value is NO_ARG:
value = None
if quote is not None:
key = quoted_name(key, quote)
if unique:
self.key = _anonymous_label(
"%%(%d %s)s"
% (
id(self),
re.sub(r"[%\(\) \$]+", "_", key).strip("_")
if key is not None
else "param",
)
)
else:
self.key = key or _anonymous_label("%%(%d param)s" % id(self))
# identifying key that won't change across
# clones, used to identify the bind's logical
# identity
self._identifying_key = self.key
# key that was passed in the first place, used to
# generate new keys
self._orig_key = key or "param"
self.unique = unique
self.value = value
self.callable = callable_
self.isoutparam = isoutparam
self.required = required
self.expanding = expanding
if type_ is None:
if _compared_to_type is not None:
self.type = _compared_to_type.coerce_compared_value(
_compared_to_operator, value
)
else:
self.type = type_api._resolve_value_to_type(value)
elif isinstance(type_, type):
self.type = type_()
else:
self.type = type_
def _with_expanding_in_types(self, types):
"""Return a copy of this :class:`.BindParameter` in
the context of an expanding IN against a tuple.
"""
cloned = self._clone()
cloned._expanding_in_types = types
return cloned
def _with_value(self, value):
"""Return a copy of this :class:`.BindParameter` with the given value
set.
"""
cloned = self._clone()
cloned.value = value
cloned.callable = None
cloned.required = False
if cloned.type is type_api.NULLTYPE:
cloned.type = type_api._resolve_value_to_type(value)
return cloned
@property
def effective_value(self):
"""Return the value of this bound parameter,
taking into account if the ``callable`` parameter
was set.
The ``callable`` value will be evaluated
and returned if present, else ``value``.
"""
if self.callable:
return self.callable()
else:
return self.value
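# Illustrative sketch: ``callable_`` defers computation of the value, and
# ``effective_value`` is the point where that deferral is resolved.
#
#     >>> from sqlalchemy import bindparam
#     >>> p = bindparam("when", callable_=lambda: 42)
#     >>> (p.value, p.effective_value)
#     (None, 42)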
def _clone(self):
c = ClauseElement._clone(self)
if self.unique:
c.key = _anonymous_label(
"%%(%d %s)s" % (id(c), c._orig_key or "param")
)
return c
def _convert_to_unique(self):
if not self.unique:
self.unique = True
self.key = _anonymous_label(
"%%(%d %s)s" % (id(self), self._orig_key or "param")
)
def compare(self, other, **kw):
"""Compare this :class:`BindParameter` to the given
clause."""
return (
isinstance(other, BindParameter)
and self.type._compare_type_affinity(other.type)
and self.value == other.value
and self.callable == other.callable
)
def __getstate__(self):
"""execute a deferred value for serialization purposes."""
d = self.__dict__.copy()
v = self.value
if self.callable:
v = self.callable()
d["callable"] = None
d["value"] = v
return d
def __setstate__(self, state):
if state.get("unique", False):
state["key"] = _anonymous_label(
"%%(%d %s)s" % (id(self), state.get("_orig_key", "param"))
)
self.__dict__.update(state)
def __repr__(self):
return "BindParameter(%r, %r, type_=%r)" % (
self.key,
self.value,
self.type,
)
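# Illustrative sketch of the ``unique=True`` path in __init__ above: two
# binds requested under the same name receive distinct anonymized keys,
# while ``_orig_key`` (private) remembers the requested name.
#
#     >>> from sqlalchemy import bindparam
#     >>> b1, b2 = bindparam("name", unique=True), bindparam("name", unique=True)
#     >>> b1.key == b2.key
#     False
#     >>> (b1._orig_key, b2._orig_key)
#     ('name', 'name')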
class TypeClause(ClauseElement):
"""Handle a type keyword in a SQL statement.
Used by the ``Case`` statement.
"""
__visit_name__ = "typeclause"
def __init__(self, type_):
self.type = type_
class TextClause(Executable, ClauseElement):
"""Represent a literal SQL text fragment.
E.g.::
from sqlalchemy import text
t = text("SELECT * FROM users")
result = connection.execute(t)
The :class:`_expression.TextClause` construct is produced using the
:func:`_expression.text`
function; see that function for full documentation.
.. seealso::
:func:`_expression.text`
"""
__visit_name__ = "textclause"
_bind_params_regex = re.compile(r"(?<![:\w\x5c]):(\w+)(?!:)", re.UNICODE)
_execution_options = Executable._execution_options.union(
{"autocommit": PARSE_AUTOCOMMIT}
)
_is_implicitly_boolean = False
def __and__(self, other):
# support use in select.where(), query.filter()
return and_(self, other)
@property
def _select_iterable(self):
return (self,)
@property
def selectable(self):
# allows text() to be considered by
# _interpret_as_from
return self
_hide_froms = []
# help in those cases where text() is
# interpreted in a column expression situation
key = _label = _resolve_label = None
_allow_label_resolve = False
def __init__(self, text, bind=None):
self._bind = bind
self._bindparams = {}
def repl(m):
self._bindparams[m.group(1)] = BindParameter(m.group(1))
return ":%s" % m.group(1)
# scan the string and search for bind parameter names, add them
# to the list of bindparams
self.text = self._bind_params_regex.sub(repl, text)
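# Illustrative sketch of the scan above: ``:name`` tokens become entries in
# the (private) ``_bindparams`` map, while escaped colons and double colons
# (e.g. PostgreSQL ``::`` casts) are skipped by the lookbehind/lookahead in
# ``_bind_params_regex``.
#
#     >>> from sqlalchemy import text
#     >>> t = text("SELECT * FROM users WHERE id=:id AND ts > now()::date")
#     >>> sorted(t._bindparams)
#     ['id']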
@classmethod
@util.deprecated_params(
autocommit=(
"0.6",
"The :paramref:`_expression.text.autocommit` "
"parameter is deprecated and "
"will be removed in a future release. Please use the "
":paramref:`.Connection.execution_options.autocommit` parameter "
"in conjunction with the :meth:`.Executable.execution_options` "
"method.",
),
bindparams=(
"0.9",
"The :paramref:`_expression.text.bindparams` parameter "
"is deprecated and will be removed in a future release. Please "
"refer to the :meth:`_expression.TextClause.bindparams` method.",
),
typemap=(
"0.9",
"The :paramref:`_expression.text.typemap` parameter is "
"deprecated and will be removed in a future release. Please "
"refer to the :meth:`_expression.TextClause.columns` method.",
),
)
@_document_text_coercion("text", ":func:`.text`", ":paramref:`.text.text`")
def _create_text(
self, text, bind=None, bindparams=None, typemap=None, autocommit=None
):
r"""Construct a new :class:`_expression.TextClause` clause,
representing
a textual SQL string directly.
E.g.::
from sqlalchemy import text
t = text("SELECT * FROM users")
result = connection.execute(t)
The advantages :func:`_expression.text`
provides over a plain string are
backend-neutral support for bind parameters, per-statement
execution options, as well as
bind parameter and result-column typing behavior, allowing
SQLAlchemy type constructs to play a role when executing
a statement that is specified literally. The construct can also
be provided with a ``.c`` collection of column elements, allowing
it to be embedded in other SQL expression constructs as a subquery.
Bind parameters are specified by name, using the format ``:name``.
E.g.::
t = text("SELECT * FROM users WHERE id=:user_id")
result = connection.execute(t, user_id=12)
For SQL statements where a colon is required verbatim, as within
an inline string, use a backslash to escape::
t = text("SELECT * FROM users WHERE name='\:username'")
The :class:`_expression.TextClause`
construct includes methods which can
provide information about the bound parameters as well as the column
values which would be returned from the textual statement, assuming
it's an executable SELECT type of statement. The
:meth:`_expression.TextClause.bindparams`
method is used to provide bound
parameter detail, and :meth:`_expression.TextClause.columns`
method allows
specification of return columns including names and types::
t = text("SELECT * FROM users WHERE id=:user_id").\
bindparams(user_id=7).\
columns(id=Integer, name=String)
for id, name in connection.execute(t):
print(id, name)
The :func:`_expression.text` construct is used in cases when
a literal string SQL fragment is specified as part of a larger query,
such as for the WHERE clause of a SELECT statement::
s = select([users.c.id, users.c.name]).where(text("id=:user_id"))
result = connection.execute(s, user_id=12)
:func:`_expression.text` is also used for the construction
of a full, standalone statement using plain text.
As such, SQLAlchemy refers
to it as an :class:`.Executable` object, and it supports
the :meth:`Executable.execution_options` method. For example,
a :func:`_expression.text`
construct that should be subject to "autocommit"
can be set explicitly so using the
:paramref:`.Connection.execution_options.autocommit` option::
t = text("EXEC my_procedural_thing()").\
execution_options(autocommit=True)
Note that SQLAlchemy's usual "autocommit" behavior applies to
:func:`_expression.text` constructs implicitly - that is,
statements which begin
with a phrase such as ``INSERT``, ``UPDATE``, ``DELETE``,
or a variety of other phrases specific to certain backends, will
be eligible for autocommit if no transaction is in progress.
:param text:
the text of the SQL statement to be created. use ``:<param>``
to specify bind parameters; they will be compiled to their
engine-specific format.
:param autocommit: whether or not to set the "autocommit" execution
option for this :class:`_expression.TextClause` object.
:param bind:
an optional connection or engine to be used for this text query.
:param bindparams:
A list of :func:`.bindparam` instances used to
provide information about parameters embedded in the statement.
E.g.::
stmt = text("SELECT * FROM table WHERE id=:id",
bindparams=[bindparam('id', value=5, type_=Integer)])
:param typemap:
A dictionary mapping the names of columns represented in the columns
clause of a ``SELECT`` statement to type objects.
E.g.::
stmt = text("SELECT * FROM table",
typemap={'id': Integer, 'name': String},
)
.. seealso::
:ref:`sqlexpression_text` - in the Core tutorial
:ref:`orm_tutorial_literal_sql` - in the ORM tutorial
"""
stmt = TextClause(text, bind=bind)
if bindparams:
stmt = stmt.bindparams(*bindparams)
if typemap:
stmt = stmt.columns(**typemap)
if autocommit is not None:
stmt = stmt.execution_options(autocommit=autocommit)
return stmt
@_generative
def bindparams(self, *binds, **names_to_values):
"""Establish the values and/or types of bound parameters within
this :class:`_expression.TextClause` construct.
Given a text construct such as::
from sqlalchemy import text
stmt = text("SELECT id, name FROM user WHERE name=:name "
"AND timestamp=:timestamp")
the :meth:`_expression.TextClause.bindparams`
method can be used to establish
the initial value of ``:name`` and ``:timestamp``,
using simple keyword arguments::
stmt = stmt.bindparams(name='jack',
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))
Where above, new :class:`.BindParameter` objects
will be generated with the names ``name`` and ``timestamp``, and
values of ``jack`` and ``datetime.datetime(2012, 10, 8, 15, 12, 5)``,
respectively. The types will be
inferred from the values given, in this case :class:`.String` and
:class:`.DateTime`.
When specific typing behavior is needed, the positional ``*binds``
argument can be used in which to specify :func:`.bindparam` constructs
directly. These constructs must include at least the ``key``
argument, then an optional value and type::
from sqlalchemy import bindparam
stmt = stmt.bindparams(
bindparam('name', value='jack', type_=String),
bindparam('timestamp', type_=DateTime)
)
Above, we specified the type of :class:`.DateTime` for the
``timestamp`` bind, and the type of :class:`.String` for the ``name``
bind. In the case of ``name`` we also set the default value of
``"jack"``.
Additional bound parameters can be supplied at statement execution
time, e.g.::
result = connection.execute(stmt,
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))
The :meth:`_expression.TextClause.bindparams`
method can be called repeatedly,
where it will re-use existing :class:`.BindParameter` objects to add
new information. For example, we can call
:meth:`_expression.TextClause.bindparams`
first with typing information, and a
second time with value information, and it will be combined::
stmt = text("SELECT id, name FROM user WHERE name=:name "
"AND timestamp=:timestamp")
stmt = stmt.bindparams(
bindparam('name', type_=String),
bindparam('timestamp', type_=DateTime)
)
stmt = stmt.bindparams(
name='jack',
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)
)
The :meth:`_expression.TextClause.bindparams`
method also supports the concept of
**unique** bound parameters. These are parameters that are
"uniquified" on name at statement compilation time, so that multiple
:func:`_expression.text`
constructs may be combined together without the names
conflicting. To use this feature, specify the
:paramref:`.BindParameter.unique` flag on each :func:`.bindparam`
object::
stmt1 = text("select id from table where name=:name").bindparams(
bindparam("name", value='name1', unique=True)
)
stmt2 = text("select id from table where name=:name").bindparams(
bindparam("name", value='name2', unique=True)
)
union = union_all(
stmt1.columns(column("id")),
stmt2.columns(column("id"))
)
The above statement will render as::
select id from table where name=:name_1
UNION ALL select id from table where name=:name_2
.. versionadded:: 1.3.11 Added support for the
:paramref:`.BindParameter.unique` flag to work with
:func:`_expression.text`
constructs.
"""
self._bindparams = new_params = self._bindparams.copy()
for bind in binds:
try:
# the regex used for text() currently will not match
# a unique/anonymous key in any case, so use the _orig_key
# so that a text() construct can support unique parameters
existing = new_params[bind._orig_key]
except KeyError as err:
util.raise_(
exc.ArgumentError(
"This text() construct doesn't define a "
"bound parameter named %r" % bind._orig_key
),
replace_context=err,
)
else:
new_params[existing._orig_key] = bind
for key, value in names_to_values.items():
try:
existing = new_params[key]
except KeyError as err:
util.raise_(
exc.ArgumentError(
"This text() construct doesn't define a "
"bound parameter named %r" % key
),
replace_context=err,
)
else:
new_params[key] = existing._with_value(value)
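# Illustrative sketch: keyword values are folded onto the already-scanned
# parameters via ``_with_value`` as implemented above; a name that was not
# present in the text raises ArgumentError.
#
#     >>> from sqlalchemy import text
#     >>> stmt = text("SELECT :x").bindparams(x=5)
#     >>> stmt._bindparams["x"].value   # private attribute, for illustration
#     5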
@util.dependencies("sqlalchemy.sql.selectable")
def columns(self, selectable, *cols, **types):
"""Turn this :class:`_expression.TextClause` object into a
:class:`.TextAsFrom`
object that can be embedded into another statement.
This function essentially bridges the gap between an entirely
textual SELECT statement and the SQL expression language concept
of a "selectable"::
from sqlalchemy.sql import column, text
stmt = text("SELECT id, name FROM some_table")
stmt = stmt.columns(column('id'), column('name')).alias('st')
stmt = select([mytable]).\
select_from(
mytable.join(stmt, mytable.c.name == stmt.c.name)
).where(stmt.c.id > 5)
Above, we pass a series of :func:`_expression.column` elements to the
:meth:`_expression.TextClause.columns` method positionally. These
:func:`_expression.column`
elements now become first-class elements upon the :attr:`.TextAsFrom.c`
column collection, just like any other selectable.
The column expressions we pass to
:meth:`_expression.TextClause.columns` may
also be typed; when we do so, these :class:`.TypeEngine` objects become
the effective return type of the column, so that SQLAlchemy's
result-set-processing systems may be used on the return values.
This is often needed for types such as date or boolean types, as well
as for unicode processing on some dialect configurations::
stmt = text("SELECT id, name, timestamp FROM some_table")
stmt = stmt.columns(
column('id', Integer),
column('name', Unicode),
column('timestamp', DateTime)
)
for id, name, timestamp in connection.execute(stmt):
print(id, name, timestamp)
As a shortcut to the above syntax, keyword arguments referring to
types alone may be used, if only type conversion is needed::
stmt = text("SELECT id, name, timestamp FROM some_table")
stmt = stmt.columns(
id=Integer,
name=Unicode,
timestamp=DateTime
)
for id, name, timestamp in connection.execute(stmt):
print(id, name, timestamp)
The positional form of :meth:`_expression.TextClause.columns`
also provides the
unique feature of **positional column targeting**, which is
particularly useful when using the ORM with complex textual queries. If
we specify the columns from our model to
:meth:`_expression.TextClause.columns`,
the result set will match to those columns positionally, meaning the
name or origin of the column in the textual SQL doesn't matter::
stmt = text("SELECT users.id, addresses.id, users.id, "
"users.name, addresses.email_address AS email "
"FROM users JOIN addresses ON users.id=addresses.user_id "
"WHERE users.id = 1").columns(
User.id,
Address.id,
Address.user_id,
User.name,
Address.email_address
)
query = session.query(User).from_statement(stmt).options(
contains_eager(User.addresses))
.. versionadded:: 1.1 the :meth:`_expression.TextClause.columns`
method now
offers positional column targeting in the result set when
the column expressions are passed purely positionally.
The :meth:`_expression.TextClause.columns` method provides a direct
route to calling :meth:`_expression.FromClause.alias` as well as
:meth:`_expression.SelectBase.cte`
against a textual SELECT statement::
stmt = stmt.columns(id=Integer, name=String).cte('st')
stmt = select([sometable]).where(sometable.c.id == stmt.c.id)
.. versionadded:: 0.9.0 :func:`_expression.text`
can now be converted into a
fully featured "selectable" construct using the
:meth:`_expression.TextClause.columns` method.
"""
positional_input_cols = [
ColumnClause(col.key, types.pop(col.key))
if col.key in types
else col
for col in cols
]
keyed_input_cols = [
ColumnClause(key, type_) for key, type_ in types.items()
]
return selectable.TextAsFrom(
self,
positional_input_cols + keyed_input_cols,
positional=bool(positional_input_cols) and not keyed_input_cols,
)
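# Illustrative sketch: keyword-only usage yields a non-positional
# TextAsFrom (per the ``positional`` flag computed above) whose ``.c``
# collection carries the given types.
#
#     >>> from sqlalchemy import text, Integer
#     >>> taf = text("SELECT id FROM t").columns(id=Integer)
#     >>> taf.c.id.type
#     Integer()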
@property
def type(self):
return type_api.NULLTYPE
@property
def comparator(self):
return self.type.comparator_factory(self)
def self_group(self, against=None):
if against is operators.in_op:
return Grouping(self)
else:
return self
def _copy_internals(self, clone=_clone, **kw):
self._bindparams = dict(
(b.key, clone(b, **kw)) for b in self._bindparams.values()
)
def get_children(self, **kwargs):
return list(self._bindparams.values())
def compare(self, other):
return isinstance(other, TextClause) and other.text == self.text
class Null(ColumnElement):
"""Represent the NULL keyword in a SQL statement.
:class:`.Null` is accessed as a constant via the
:func:`.null` function.
"""
__visit_name__ = "null"
@util.memoized_property
def type(self):
return type_api.NULLTYPE
@classmethod
def _instance(cls):
"""Return a constant :class:`.Null` construct."""
return Null()
def compare(self, other):
return isinstance(other, Null)
class False_(ColumnElement):
"""Represent the ``false`` keyword, or equivalent, in a SQL statement.
:class:`.False_` is accessed as a constant via the
:func:`.false` function.
"""
__visit_name__ = "false"
@util.memoized_property
def type(self):
return type_api.BOOLEANTYPE
def _negate(self):
return True_()
@classmethod
def _instance(cls):
"""Return a :class:`.False_` construct.
E.g.::
>>> from sqlalchemy import false
>>> print(select([t.c.x]).where(false()))
SELECT x FROM t WHERE false
A backend which does not support true/false constants will render as
an expression against 1 or 0::
>>> print(select([t.c.x]).where(false()))
SELECT x FROM t WHERE 0 = 1
The :func:`.true` and :func:`.false` constants also feature
"short circuit" operation within an :func:`.and_` or :func:`.or_`
conjunction::
>>> print(select([t.c.x]).where(or_(t.c.x > 5, true())))
SELECT x FROM t WHERE true
>>> print(select([t.c.x]).where(and_(t.c.x > 5, false())))
SELECT x FROM t WHERE false
.. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature
better integrated behavior within conjunctions and on dialects
that don't support true/false constants.
.. seealso::
:func:`.true`
"""
return False_()
def compare(self, other):
return isinstance(other, False_)
class True_(ColumnElement):
"""Represent the ``true`` keyword, or equivalent, in a SQL statement.
:class:`.True_` is accessed as a constant via the
:func:`.true` function.
"""
__visit_name__ = "true"
@util.memoized_property
def type(self):
return type_api.BOOLEANTYPE
def _negate(self):
return False_()
@classmethod
def _ifnone(cls, other):
if other is None:
return cls._instance()
else:
return other
@classmethod
def _instance(cls):
"""Return a constant :class:`.True_` construct.
E.g.::
>>> from sqlalchemy import true
>>> print(select([t.c.x]).where(true()))
SELECT x FROM t WHERE true
A backend which does not support true/false constants will render as
an expression against 1 or 0::
>>> print(select([t.c.x]).where(true()))
SELECT x FROM t WHERE 1 = 1
The :func:`.true` and :func:`.false` constants also feature
"short circuit" operation within an :func:`.and_` or :func:`.or_`
conjunction::
>>> print(select([t.c.x]).where(or_(t.c.x > 5, true())))
SELECT x FROM t WHERE true
>>> print(select([t.c.x]).where(and_(t.c.x > 5, false())))
SELECT x FROM t WHERE false
.. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature
better integrated behavior within conjunctions and on dialects
that don't support true/false constants.
.. seealso::
:func:`.false`
"""
return True_()
def compare(self, other):
return isinstance(other, True_)
class ClauseList(ClauseElement):
"""Describe a list of clauses, separated by an operator.
By default, is comma-separated, such as a column listing.
"""
__visit_name__ = "clauselist"
def __init__(self, *clauses, **kwargs):
self.operator = kwargs.pop("operator", operators.comma_op)
self.group = kwargs.pop("group", True)
self.group_contents = kwargs.pop("group_contents", True)
self._tuple_values = kwargs.pop("_tuple_values", False)
text_converter = kwargs.pop(
"_literal_as_text", _expression_literal_as_text
)
if self.group_contents:
self.clauses = [
text_converter(clause).self_group(against=self.operator)
for clause in clauses
]
else:
self.clauses = [text_converter(clause) for clause in clauses]
self._is_implicitly_boolean = operators.is_boolean(self.operator)
def __iter__(self):
return iter(self.clauses)
def __len__(self):
return len(self.clauses)
@property
def _select_iterable(self):
return iter(self)
def append(self, clause):
if self.group_contents:
self.clauses.append(
_literal_as_text(clause).self_group(against=self.operator)
)
else:
self.clauses.append(_literal_as_text(clause))
def _copy_internals(self, clone=_clone, **kw):
self.clauses = [clone(clause, **kw) for clause in self.clauses]
def get_children(self, **kwargs):
return self.clauses
@property
def _from_objects(self):
return list(itertools.chain(*[c._from_objects for c in self.clauses]))
def self_group(self, against=None):
if self.group and operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
def compare(self, other, **kw):
"""Compare this :class:`.ClauseList` to the given :class:`.ClauseList`,
including a comparison of all the clause items.
"""
if not isinstance(other, ClauseList) and len(self.clauses) == 1:
return self.clauses[0].compare(other, **kw)
elif (
isinstance(other, ClauseList)
and len(self.clauses) == len(other.clauses)
and self.operator is other.operator
):
if self.operator in (operators.and_, operators.or_):
completed = set()
for clause in self.clauses:
for other_clause in set(other.clauses).difference(
completed
):
if clause.compare(other_clause, **kw):
completed.add(other_clause)
break
return len(completed) == len(other.clauses)
else:
for i in range(0, len(self.clauses)):
if not self.clauses[i].compare(other.clauses[i], **kw):
return False
else:
return True
else:
return False
class BooleanClauseList(ClauseList, ColumnElement):
__visit_name__ = "clauselist"
_tuple_values = False
def __init__(self, *arg, **kw):
raise NotImplementedError(
"BooleanClauseList has a private constructor"
)
@classmethod
def _construct(cls, operator, continue_on, skip_on, *clauses, **kw):
convert_clauses = []
clauses = [
_expression_literal_as_text(clause)
for clause in util.coerce_generator_arg(clauses)
]
for clause in clauses:
if isinstance(clause, continue_on):
continue
elif isinstance(clause, skip_on):
return clause.self_group(against=operators._asbool)
convert_clauses.append(clause)
if len(convert_clauses) == 1:
return convert_clauses[0].self_group(against=operators._asbool)
elif not convert_clauses and clauses:
return clauses[0].self_group(against=operators._asbool)
convert_clauses = [
c.self_group(against=operator) for c in convert_clauses
]
self = cls.__new__(cls)
self.clauses = convert_clauses
self.group = True
self.operator = operator
self.group_contents = True
self.type = type_api.BOOLEANTYPE
self._is_implicitly_boolean = True
return self
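# Illustrative sketch of the continue_on/skip_on machinery above, using the
# default string compiler: for AND, ``true()`` elements are dropped, while
# a ``false()`` short-circuits the entire list.
#
#     >>> from sqlalchemy import and_, true, false, column
#     >>> print(and_(column("x") > 5, true()))
#     x > :x_1
#     >>> print(and_(column("x") > 5, false()))
#     false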
@classmethod
def and_(cls, *clauses):
"""Produce a conjunction of expressions joined by ``AND``.
E.g.::
from sqlalchemy import and_
stmt = select([users_table]).where(
and_(
users_table.c.name == 'wendy',
users_table.c.enrolled == True
)
)
The :func:`.and_` conjunction is also available using the
Python ``&`` operator (though note that compound expressions
need to be parenthesized in order to function with Python
operator precedence behavior)::
stmt = select([users_table]).where(
(users_table.c.name == 'wendy') &
(users_table.c.enrolled == True)
)
The :func:`.and_` operation is also implicit in some cases;
the :meth:`_expression.Select.where`
method for example can be invoked multiple
times against a statement, which will have the effect of each
clause being combined using :func:`.and_`::
stmt = select([users_table]).\
where(users_table.c.name == 'wendy').\
where(users_table.c.enrolled == True)
.. seealso::
:func:`.or_`
"""
return cls._construct(operators.and_, True_, False_, *clauses)
@classmethod
def or_(cls, *clauses):
"""Produce a conjunction of expressions joined by ``OR``.
E.g.::
from sqlalchemy import or_
stmt = select([users_table]).where(
or_(
users_table.c.name == 'wendy',
users_table.c.name == 'jack'
)
)
The :func:`.or_` disjunction is also available using the
Python ``|`` operator (though note that compound expressions
need to be parenthesized in order to function with Python
operator precedence behavior)::
stmt = select([users_table]).where(
(users_table.c.name == 'wendy') |
(users_table.c.name == 'jack')
)
.. seealso::
:func:`.and_`
"""
return cls._construct(operators.or_, False_, True_, *clauses)
@property
def _select_iterable(self):
return (self,)
def self_group(self, against=None):
if not self.clauses:
return self
else:
return super(BooleanClauseList, self).self_group(against=against)
def _negate(self):
return ClauseList._negate(self)
and_ = BooleanClauseList.and_
or_ = BooleanClauseList.or_
class Tuple(ClauseList, ColumnElement):
"""Represent a SQL tuple."""
def __init__(self, *clauses, **kw):
"""Return a :class:`.Tuple`.
Main usage is to produce a composite IN construct using
:meth:`.ColumnOperators.in_` ::
from sqlalchemy import tuple_
tuple_(table.c.col1, table.c.col2).in_(
[(1, 2), (5, 12), (10, 19)]
)
.. versionchanged:: 1.3.6 Added support for SQLite IN tuples.
.. warning::
The composite IN construct is not supported by all backends, and is
currently known to work on PostgreSQL, MySQL, and SQLite.
Unsupported backends will raise a subclass of
:class:`~sqlalchemy.exc.DBAPIError` when such an expression is
invoked.
"""
clauses = [_literal_as_binds(c) for c in clauses]
self._type_tuple = [arg.type for arg in clauses]
self.type = kw.pop(
"type_",
self._type_tuple[0] if self._type_tuple else type_api.NULLTYPE,
)
super(Tuple, self).__init__(*clauses, **kw)
@property
def _select_iterable(self):
return (self,)
def _bind_param(self, operator, obj, type_=None):
return Tuple(
*[
BindParameter(
None,
o,
_compared_to_operator=operator,
_compared_to_type=compared_to_type,
unique=True,
type_=type_,
)
for o, compared_to_type in zip(obj, self._type_tuple)
]
).self_group()
class Case(ColumnElement):
"""Represent a ``CASE`` expression.
:class:`.Case` is produced using the :func:`.case` factory function,
as in::
from sqlalchemy import case
stmt = select([users_table]).\
where(
case(
[
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
],
else_='E'
)
)
Details on :class:`.Case` usage are at :func:`.case`.
.. seealso::
:func:`.case`
"""
__visit_name__ = "case"
def __init__(self, whens, value=None, else_=None):
r"""Produce a ``CASE`` expression.
The ``CASE`` construct in SQL is a conditional object that
acts somewhat analogously to an "if/then" construct in other
languages. It returns an instance of :class:`.Case`.
:func:`.case` in its usual form is passed a list of "when"
constructs, that is, a list of conditions and results as tuples::
from sqlalchemy import case
stmt = select([users_table]).\
where(
case(
[
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
],
else_='E'
)
)
The above statement will produce SQL resembling::
SELECT id, name FROM user
WHERE CASE
WHEN (name = :name_1) THEN :param_1
WHEN (name = :name_2) THEN :param_2
ELSE :param_3
END
When simple equality expressions of several values against a single
parent column are needed, :func:`.case` also has a "shorthand" format
used via the
:paramref:`.case.value` parameter, which is passed a column
expression to be compared. In this form, the :paramref:`.case.whens`
parameter is passed as a dictionary containing expressions to be
compared against keyed to result expressions. The statement below is
equivalent to the preceding statement::
stmt = select([users_table]).\
where(
case(
{"wendy": "W", "jack": "J"},
value=users_table.c.name,
else_='E'
)
)
The values which are accepted as result values in
:paramref:`.case.whens` as well as with :paramref:`.case.else_` are
coerced from Python literals into :func:`.bindparam` constructs.
SQL expressions, e.g. :class:`_expression.ColumnElement` constructs,
are accepted
as well. To coerce a literal string expression into a constant
expression rendered inline, use the :func:`_expression.literal_column`
construct,
as in::
from sqlalchemy import case, literal_column
case(
[
(
orderline.c.qty > 100,
literal_column("'greaterthan100'")
),
(
orderline.c.qty > 10,
literal_column("'greaterthan10'")
)
],
else_=literal_column("'lessthan10'")
)
The above will render the given constants without using bound
parameters for the result values (but still for the comparison
values), as in::
CASE
WHEN (orderline.qty > :qty_1) THEN 'greaterthan100'
WHEN (orderline.qty > :qty_2) THEN 'greaterthan10'
ELSE 'lessthan10'
END
:param whens: The criteria to be compared against,
:paramref:`.case.whens` accepts two different forms, based on
whether or not :paramref:`.case.value` is used.
In the first form, it accepts a list of 2-tuples; each 2-tuple
consists of ``(<sql expression>, <value>)``, where the SQL
expression is a boolean expression and "value" is a resulting value,
e.g.::
case([
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
])
In the second form, it accepts a Python dictionary of comparison
values mapped to a resulting value; this form requires
:paramref:`.case.value` to be present, and values will be compared
using the ``==`` operator, e.g.::
case(
{"wendy": "W", "jack": "J"},
value=users_table.c.name
)
:param value: An optional SQL expression which will be used as a
fixed "comparison point" for candidate values within a dictionary
passed to :paramref:`.case.whens`.
:param else\_: An optional SQL expression which will be the evaluated
result of the ``CASE`` construct if all expressions within
:paramref:`.case.whens` evaluate to false. When omitted, most
databases will produce a result of NULL if none of the "when"
expressions evaluate to true.
"""
try:
whens = util.dictlike_iteritems(whens)
except TypeError:
pass
if value is not None:
whenlist = [
(_literal_as_binds(c).self_group(), _literal_as_binds(r))
for (c, r) in whens
]
else:
whenlist = [
(_no_literals(c).self_group(), _literal_as_binds(r))
for (c, r) in whens
]
if whenlist:
type_ = list(whenlist[-1])[-1].type
else:
type_ = None
if value is None:
self.value = None
else:
self.value = _literal_as_binds(value)
self.type = type_
self.whens = whenlist
if else_ is not None:
self.else_ = _literal_as_binds(else_)
else:
self.else_ = None
def _copy_internals(self, clone=_clone, **kw):
if self.value is not None:
self.value = clone(self.value, **kw)
self.whens = [(clone(x, **kw), clone(y, **kw)) for x, y in self.whens]
if self.else_ is not None:
self.else_ = clone(self.else_, **kw)
def get_children(self, **kwargs):
if self.value is not None:
yield self.value
for x, y in self.whens:
yield x
yield y
if self.else_ is not None:
yield self.else_
@property
def _from_objects(self):
return list(
itertools.chain(*[x._from_objects for x in self.get_children()])
)
def literal_column(text, type_=None):
r"""Produce a :class:`.ColumnClause` object that has the
:paramref:`_expression.column.is_literal` flag set to True.
:func:`_expression.literal_column` is similar to
:func:`_expression.column`, except that
it is more often used as a "standalone" column expression that renders
exactly as stated; while :func:`_expression.column`
stores a string name that
will be assumed to be part of a table and may be quoted as such,
:func:`_expression.literal_column` can be that,
or any other arbitrary column-oriented
expression.
:param text: the text of the expression; can be any SQL expression.
Quoting rules will not be applied. To specify a column-name expression
which should be subject to quoting rules, use the :func:`column`
function.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine`
object which will
provide result-set translation and additional expression semantics for
this column. If left as None the type will be NullType.
.. seealso::
:func:`_expression.column`
:func:`_expression.text`
:ref:`sqlexpression_literal_column`
"""
return ColumnClause(text, type_=type_, is_literal=True)
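# Illustrative sketch contrasting the two constructs: ``column`` treats its
# string as a name subject to quoting rules, while ``literal_column``
# renders the string exactly as given.
#
#     >>> from sqlalchemy import column, literal_column, select
#     >>> print(select([column("user name")]))
#     SELECT "user name"
#     >>> print(select([literal_column("'fixed'")]))
#     SELECT 'fixed'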
class Cast(ColumnElement):
"""Represent a ``CAST`` expression.
:class:`.Cast` is produced using the :func:`.cast` factory function,
as in::
from sqlalchemy import cast, Numeric
stmt = select([
cast(product_table.c.unit_price, Numeric(10, 4))
])
Details on :class:`.Cast` usage are at :func:`.cast`.
.. seealso::
:ref:`coretutorial_casts`
:func:`.cast`
:func:`.type_coerce` - an alternative to CAST that coerces the type
on the Python side only, which is often sufficient to generate the
correct SQL and data coercion.
"""
__visit_name__ = "cast"
def __init__(self, expression, type_):
r"""Produce a ``CAST`` expression.
:func:`.cast` returns an instance of :class:`.Cast`.
E.g.::
from sqlalchemy import cast, Numeric
stmt = select([
cast(product_table.c.unit_price, Numeric(10, 4))
])
The above statement will produce SQL resembling::
SELECT CAST(unit_price AS NUMERIC(10, 4)) FROM product
The :func:`.cast` function performs two distinct functions when
used. The first is that it renders the ``CAST`` expression within
the resulting SQL string. The second is that it associates the given
type (e.g. :class:`.TypeEngine` class or instance) with the column
expression on the Python side, which means the expression will take
on the expression operator behavior associated with that type,
as well as the bound-value handling and result-row-handling behavior
of the type.
.. versionchanged:: 0.9.0 :func:`.cast` now applies the given type
to the expression such that it takes effect on the bound-value,
e.g. the Python-to-database direction, in addition to the
result handling, e.g. database-to-Python, direction.
An alternative to :func:`.cast` is the :func:`.type_coerce` function.
This function performs the second task of associating an expression
with a specific type, but does not render the ``CAST`` expression
in SQL.
:param expression: A SQL expression, such as a
:class:`_expression.ColumnElement`
expression or a Python string which will be coerced into a bound
literal value.
:param type\_: A :class:`.TypeEngine` class or instance indicating
the type to which the ``CAST`` should apply.
.. seealso::
:ref:`coretutorial_casts`
:func:`.type_coerce` - an alternative to CAST that coerces the type
on the Python side only, which is often sufficient to generate the
correct SQL and data coercion.
"""
self.type = type_api.to_instance(type_)
self.clause = _literal_as_binds(expression, type_=self.type)
self.typeclause = TypeClause(self.type)
def _copy_internals(self, clone=_clone, **kw):
self.clause = clone(self.clause, **kw)
self.typeclause = clone(self.typeclause, **kw)
def get_children(self, **kwargs):
return self.clause, self.typeclause
@property
def _from_objects(self):
return self.clause._from_objects
class TypeCoerce(ColumnElement):
"""Represent a Python-side type-coercion wrapper.
:class:`.TypeCoerce` supplies the :func:`_expression.type_coerce`
function; see that function for usage details.
.. versionchanged:: 1.1 The :func:`.type_coerce` function now produces
a persistent :class:`.TypeCoerce` wrapper object rather than
translating the given object in place.
.. seealso::
:func:`_expression.type_coerce`
:func:`.cast`
"""
__visit_name__ = "type_coerce"
def __init__(self, expression, type_):
r"""Associate a SQL expression with a particular type, without rendering
``CAST``.
E.g.::
from sqlalchemy import type_coerce
stmt = select([type_coerce(log_table.date_string, StringDateTime())])
The above construct will produce a :class:`.TypeCoerce` object, which
does not modify the rendering in any way on the SQL side, with the
possible exception of a generated label if used in a columns clause
context::
SELECT date_string AS anon_1 FROM log
When result rows are fetched, the ``StringDateTime`` type processor
will be applied to result rows on behalf of the ``date_string`` column.
.. note:: the :func:`.type_coerce` construct does not render any
SQL syntax of its own, including that it does not imply
parenthesization. Please use :meth:`.TypeCoerce.self_group`
if explicit parenthesization is required.
In order to provide a named label for the expression, use
:meth:`_expression.ColumnElement.label`::
stmt = select([
type_coerce(log_table.date_string, StringDateTime()).label('date')
])
A type that features bound-value handling will also have that behavior
take effect when literal values or :func:`.bindparam` constructs are
passed to :func:`.type_coerce` as targets.
For example, if a type implements the
:meth:`.TypeEngine.bind_expression`
method or :meth:`.TypeEngine.bind_processor` method or equivalent,
these functions will take effect at statement compilation/execution
time when a literal value is passed, as in::
# bound-value handling of MyStringType will be applied to the
# literal value "some string"
stmt = select([type_coerce("some string", MyStringType)])
When using :func:`.type_coerce` with composed expressions, note that
**parentheses are not applied**. If :func:`.type_coerce` is being
used in an operator context where the parentheses normally present from
CAST are necessary, use the :meth:`.TypeCoerce.self_group` method::
>>> some_integer = column("someint", Integer)
>>> some_string = column("somestr", String)
>>> expr = type_coerce(some_integer + 5, String) + some_string
>>> print(expr)
someint + :someint_1 || somestr
>>> expr = type_coerce(some_integer + 5, String).self_group() + some_string
>>> print(expr)
(someint + :someint_1) || somestr
:param expression: A SQL expression, such as a
:class:`_expression.ColumnElement`
expression or a Python string which will be coerced into a bound
literal value.
:param type\_: A :class:`.TypeEngine` class or instance indicating
the type to which the expression is coerced.
.. seealso::
:ref:`coretutorial_casts`
:func:`.cast`
""" # noqa
self.type = type_api.to_instance(type_)
self.clause = _literal_as_binds(expression, type_=self.type)
def _copy_internals(self, clone=_clone, **kw):
self.clause = clone(self.clause, **kw)
self.__dict__.pop("typed_expression", None)
def get_children(self, **kwargs):
return (self.clause,)
@property
def _from_objects(self):
return self.clause._from_objects
@util.memoized_property
def typed_expression(self):
if isinstance(self.clause, BindParameter):
bp = self.clause._clone()
bp.type = self.type
return bp
else:
return self.clause
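# Illustrative sketch: when the wrapped clause is itself a bind, the
# memoized ``typed_expression`` above hands back a copy of that bind
# carrying the coerced type, so bound-value processing for the type applies.
#
#     >>> from sqlalchemy import type_coerce, String
#     >>> type_coerce(5, String).typed_expression.type
#     String()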
def self_group(self, against=None):
grouped = self.clause.self_group(against=against)
if grouped is not self.clause:
return TypeCoerce(grouped, self.type)
else:
return self
class Extract(ColumnElement):
"""Represent a SQL EXTRACT clause, ``extract(field FROM expr)``."""
__visit_name__ = "extract"
def __init__(self, field, expr, **kwargs):
"""Return a :class:`.Extract` construct.
This is typically available as :func:`.extract`
as well as ``func.extract`` from the
:data:`.func` namespace.
"""
self.type = type_api.INTEGERTYPE
self.field = field
self.expr = _literal_as_binds(expr, None)
def _copy_internals(self, clone=_clone, **kw):
self.expr = clone(self.expr, **kw)
def get_children(self, **kwargs):
return (self.expr,)
@property
def _from_objects(self):
return self.expr._from_objects
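# Illustrative sketch of EXTRACT via the standalone function:
#
#     >>> from sqlalchemy import extract, column
#     >>> print(extract("year", column("created_at")))
#     EXTRACT(year FROM created_at)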
class _label_reference(ColumnElement):
"""Wrap a column expression as it appears in a 'reference' context.
This expression is any that includes an _order_by_label_element,
which is a Label, or a DESC / ASC construct wrapping a Label.
The production of _label_reference() should occur when an expression
is added to this context; this includes the ORDER BY or GROUP BY of a
SELECT statement, as well as a few other places, such as the ORDER BY
within an OVER clause.
"""
__visit_name__ = "label_reference"
def __init__(self, element):
self.element = element
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
@property
def _from_objects(self):
return ()
class _textual_label_reference(ColumnElement):
__visit_name__ = "textual_label_reference"
def __init__(self, element):
self.element = element
@util.memoized_property
def _text_clause(self):
return TextClause._create_text(self.element)
class UnaryExpression(ColumnElement):
"""Define a 'unary' expression.
A unary expression has a single column expression
and an operator. The operator can be placed on the left
(where it is called the 'operator') or right (where it is called the
'modifier') of the column expression.
:class:`.UnaryExpression` is the basis for several unary operators
including those used by :func:`.desc`, :func:`.asc`, :func:`.distinct`,
:func:`.nullsfirst` and :func:`.nullslast`.
"""
__visit_name__ = "unary"
def __init__(
self,
element,
operator=None,
modifier=None,
type_=None,
negate=None,
wraps_column_expression=False,
):
self.operator = operator
self.modifier = modifier
self.element = element.self_group(
against=self.operator or self.modifier
)
self.type = type_api.to_instance(type_)
self.negate = negate
self.wraps_column_expression = wraps_column_expression
@classmethod
def _create_nullsfirst(cls, column):
"""Produce the ``NULLS FIRST`` modifier for an ``ORDER BY`` expression.
:func:`.nullsfirst` is intended to modify the expression produced
by :func:`.asc` or :func:`.desc`, and indicates how NULL values
should be handled when they are encountered during ordering::
from sqlalchemy import desc, nullsfirst
stmt = select([users_table]).\
order_by(nullsfirst(desc(users_table.c.name)))
The SQL expression from the above would resemble::
SELECT id, name FROM user ORDER BY name DESC NULLS FIRST
Like :func:`.asc` and :func:`.desc`, :func:`.nullsfirst` is typically
invoked from the column expression itself using
:meth:`_expression.ColumnElement.nullsfirst`,
rather than as its standalone
function version, as in::
stmt = (select([users_table]).
order_by(users_table.c.name.desc().nullsfirst())
)
.. seealso::
:func:`.asc`
:func:`.desc`
:func:`.nullslast`
:meth:`_expression.Select.order_by`
"""
return UnaryExpression(
_literal_as_label_reference(column),
modifier=operators.nullsfirst_op,
wraps_column_expression=False,
)
@classmethod
def _create_nullslast(cls, column):
"""Produce the ``NULLS LAST`` modifier for an ``ORDER BY`` expression.
:func:`.nullslast` is intended to modify the expression produced
by :func:`.asc` or :func:`.desc`, and indicates how NULL values
should be handled when they are encountered during ordering::
from sqlalchemy import desc, nullslast
stmt = select([users_table]).\
order_by(nullslast(desc(users_table.c.name)))
The SQL expression from the above would resemble::
SELECT id, name FROM user ORDER BY name DESC NULLS LAST
Like :func:`.asc` and :func:`.desc`, :func:`.nullslast` is typically
invoked from the column expression itself using
:meth:`_expression.ColumnElement.nullslast`,
rather than as its standalone
function version, as in::
stmt = select([users_table]).\
order_by(users_table.c.name.desc().nullslast())
.. seealso::
:func:`.asc`
:func:`.desc`
:func:`.nullsfirst`
:meth:`_expression.Select.order_by`
"""
return UnaryExpression(
_literal_as_label_reference(column),
modifier=operators.nullslast_op,
wraps_column_expression=False,
)
@classmethod
def _create_desc(cls, column):
"""Produce a descending ``ORDER BY`` clause element.
e.g.::
from sqlalchemy import desc
stmt = select([users_table]).order_by(desc(users_table.c.name))
will produce SQL as::
SELECT id, name FROM user ORDER BY name DESC
The :func:`.desc` function is a standalone version of the
:meth:`_expression.ColumnElement.desc`
method available on all SQL expressions,
e.g.::
stmt = select([users_table]).order_by(users_table.c.name.desc())
:param column: A :class:`_expression.ColumnElement` (e.g.
scalar SQL expression)
with which to apply the :func:`.desc` operation.
.. seealso::
:func:`.asc`
:func:`.nullsfirst`
:func:`.nullslast`
:meth:`_expression.Select.order_by`
"""
return UnaryExpression(
_literal_as_label_reference(column),
modifier=operators.desc_op,
wraps_column_expression=False,
)
@classmethod
def _create_asc(cls, column):
"""Produce an ascending ``ORDER BY`` clause element.
e.g.::
from sqlalchemy import asc
stmt = select([users_table]).order_by(asc(users_table.c.name))
will produce SQL as::
SELECT id, name FROM user ORDER BY name ASC
The :func:`.asc` function is a standalone version of the
:meth:`_expression.ColumnElement.asc`
method available on all SQL expressions,
e.g.::
stmt = select([users_table]).order_by(users_table.c.name.asc())
:param column: A :class:`_expression.ColumnElement` (e.g.
scalar SQL expression)
with which to apply the :func:`.asc` operation.
.. seealso::
:func:`.desc`
:func:`.nullsfirst`
:func:`.nullslast`
:meth:`_expression.Select.order_by`
"""
return UnaryExpression(
_literal_as_label_reference(column),
modifier=operators.asc_op,
wraps_column_expression=False,
)
@classmethod
def _create_distinct(cls, expr):
"""Produce an column-expression-level unary ``DISTINCT`` clause.
This applies the ``DISTINCT`` keyword to an individual column
expression, and is typically contained within an aggregate function,
as in::
from sqlalchemy import distinct, func
stmt = select([func.count(distinct(users_table.c.name))])
The above would produce an expression resembling::
SELECT COUNT(DISTINCT name) FROM user
The :func:`.distinct` function is also available as a column-level
method, e.g. :meth:`_expression.ColumnElement.distinct`, as in::
stmt = select([func.count(users_table.c.name.distinct())])
The :func:`.distinct` operator is different from the
:meth:`_expression.Select.distinct` method of
:class:`_expression.Select`,
which produces a ``SELECT`` statement
with ``DISTINCT`` applied to the result set as a whole,
e.g. a ``SELECT DISTINCT`` expression. See that method for further
information.
.. seealso::
:meth:`_expression.ColumnElement.distinct`
:meth:`_expression.Select.distinct`
:data:`.func`
"""
expr = _literal_as_binds(expr)
return UnaryExpression(
expr,
operator=operators.distinct_op,
type_=expr.type,
wraps_column_expression=False,
)
@property
def _order_by_label_element(self):
if self.modifier in (operators.desc_op, operators.asc_op):
return self.element._order_by_label_element
else:
return None
@property
def _from_objects(self):
return self.element._from_objects
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
def get_children(self, **kwargs):
return (self.element,)
def compare(self, other, **kw):
"""Compare this :class:`UnaryExpression` against the given
:class:`_expression.ClauseElement`."""
return (
isinstance(other, UnaryExpression)
and self.operator == other.operator
and self.modifier == other.modifier
and self.element.compare(other.element, **kw)
)
def _negate(self):
if self.negate is not None:
return UnaryExpression(
self.element,
operator=self.negate,
negate=self.operator,
modifier=self.modifier,
type_=self.type,
wraps_column_expression=self.wraps_column_expression,
)
elif self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity:
return UnaryExpression(
self.self_group(against=operators.inv),
operator=operators.inv,
type_=type_api.BOOLEANTYPE,
wraps_column_expression=self.wraps_column_expression,
negate=None,
)
else:
return ClauseElement._negate(self)
def self_group(self, against=None):
if self.operator and operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
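# --- Demo sketch (hypothetical, for illustration only) ----------------------
# Minimal usage of the ORDER BY modifier constructors above, assuming the
# 1.x ``select([...])`` calling style; the "users" table is made up.  Imports
# are kept local so the sketch has no import-time side effects in this module.
def _demo_order_by_modifiers():
    from sqlalchemy import Column, Integer, MetaData, String, Table, select

    metadata = MetaData()
    users = Table(
        "users",
        metadata,
        Column("id", Integer),
        Column("name", String),
    )
    # desc() wraps the column in a UnaryExpression with the desc_op modifier;
    # nullslast() wraps that result again, so the modifiers compose
    # left to right.
    stmt = select([users.c.id]).order_by(users.c.name.desc().nullslast())
    return str(stmt)  # ... ORDER BY users.name DESC NULLS LAST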
class CollectionAggregate(UnaryExpression):
"""Forms the basis for right-hand collection operator modifiers
ANY and ALL.
The ANY and ALL keywords are available in different ways on different
backends. On PostgreSQL, they only work for an ARRAY type. On
MySQL, they only work for subqueries.
"""
@classmethod
def _create_any(cls, expr):
"""Produce an ANY expression.
This may apply to an array type for some dialects (e.g. postgresql),
or to a subquery for others (e.g. mysql). e.g.::
# postgresql '5 = ANY (somearray)'
expr = 5 == any_(mytable.c.somearray)
# mysql '5 = ANY (SELECT value FROM table)'
expr = 5 == any_(select([table.c.value]))
.. versionadded:: 1.1
.. seealso::
:func:`_expression.all_`
"""
expr = _literal_as_binds(expr)
if expr.is_selectable and hasattr(expr, "as_scalar"):
expr = expr.as_scalar()
expr = expr.self_group()
return CollectionAggregate(
expr,
operator=operators.any_op,
type_=type_api.NULLTYPE,
wraps_column_expression=False,
)
@classmethod
def _create_all(cls, expr):
"""Produce an ALL expression.
This may apply to an array type for some dialects (e.g. postgresql),
or to a subquery for others (e.g. mysql). e.g.::
# postgresql '5 = ALL (somearray)'
expr = 5 == all_(mytable.c.somearray)
# mysql '5 = ALL (SELECT value FROM table)'
expr = 5 == all_(select([table.c.value]))
.. versionadded:: 1.1
.. seealso::
:func:`_expression.any_`
"""
expr = _literal_as_binds(expr)
if expr.is_selectable and hasattr(expr, "as_scalar"):
expr = expr.as_scalar()
expr = expr.self_group()
return CollectionAggregate(
expr,
operator=operators.all_op,
type_=type_api.NULLTYPE,
wraps_column_expression=False,
)
# operate and reverse_operate are hardwired to
# dispatch onto the type comparator directly, so that we can
# ensure "reversed" behavior.
def operate(self, op, *other, **kwargs):
if not operators.is_comparison(op):
raise exc.ArgumentError(
"Only comparison operators may be used with ANY/ALL"
)
kwargs["reverse"] = True
return self.comparator.operate(operators.mirror(op), *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
# comparison operators should never call reverse_operate
assert not operators.is_comparison(op)
raise exc.ArgumentError(
"Only comparison operators may be used with ANY/ALL"
)
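# --- Demo sketch (hypothetical, for illustration only) ----------------------
# ANY / ALL as built by CollectionAggregate above; the table and column names
# are made up.  Writing the aggregate on the right-hand side of the comparison
# exercises the mirrored operate() path.
def _demo_any_all():
    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy import all_, any_

    metadata = MetaData()
    t = Table("t", metadata, Column("vals", Integer))
    subq = select([t.c.vals])
    expr_any = 5 == any_(subq)  # renders roughly: :param_1 = ANY (SELECT ...)
    expr_all = 5 == all_(subq)  # renders roughly: :param_1 = ALL (SELECT ...)
    return str(expr_any), str(expr_all)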
class AsBoolean(UnaryExpression):
def __init__(self, element, operator, negate):
self.element = element
self.type = type_api.BOOLEANTYPE
self.operator = operator
self.negate = negate
self.modifier = None
self.wraps_column_expression = True
self._is_implicitly_boolean = element._is_implicitly_boolean
def self_group(self, against=None):
return self
def _negate(self):
if isinstance(self.element, (True_, False_)):
return self.element._negate()
else:
return AsBoolean(self.element, self.negate, self.operator)
class BinaryExpression(ColumnElement):
"""Represent an expression that is ``LEFT <operator> RIGHT``.
A :class:`.BinaryExpression` is generated automatically
whenever two column expressions are used in a Python binary expression::
>>> from sqlalchemy.sql import column
>>> column('a') + column('b')
<sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
>>> print(column('a') + column('b'))
a + b
"""
__visit_name__ = "binary"
_is_implicitly_boolean = True
"""Indicates that any database will know this is a boolean expression
even if the database does not have an explicit boolean datatype.
"""
def __init__(
self, left, right, operator, type_=None, negate=None, modifiers=None
):
# allow compatibility with libraries that
# refer to BinaryExpression directly and pass strings
if isinstance(operator, util.string_types):
operator = operators.custom_op(operator)
self._orig = (left, right)
self.left = left.self_group(against=operator)
self.right = right.self_group(against=operator)
self.operator = operator
self.type = type_api.to_instance(type_)
self.negate = negate
self._is_implicitly_boolean = operators.is_boolean(operator)
if modifiers is None:
self.modifiers = {}
else:
self.modifiers = modifiers
def __bool__(self):
if self.operator in (operator.eq, operator.ne):
return self.operator(hash(self._orig[0]), hash(self._orig[1]))
else:
raise TypeError("Boolean value of this clause is not defined")
__nonzero__ = __bool__
@property
def is_comparison(self):
return operators.is_comparison(self.operator)
@property
def _from_objects(self):
return self.left._from_objects + self.right._from_objects
def _copy_internals(self, clone=_clone, **kw):
self.left = clone(self.left, **kw)
self.right = clone(self.right, **kw)
def get_children(self, **kwargs):
return self.left, self.right
def compare(self, other, **kw):
"""Compare this :class:`BinaryExpression` against the
given :class:`BinaryExpression`."""
return (
isinstance(other, BinaryExpression)
and self.operator == other.operator
and (
self.left.compare(other.left, **kw)
and self.right.compare(other.right, **kw)
or (
operators.is_commutative(self.operator)
and self.left.compare(other.right, **kw)
and self.right.compare(other.left, **kw)
)
)
)
def self_group(self, against=None):
if operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
def _negate(self):
if self.negate is not None:
return BinaryExpression(
self.left,
self.right,
self.negate,
negate=self.operator,
type_=self.type,
modifiers=self.modifiers,
)
else:
return super(BinaryExpression, self)._negate()
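# --- Demo sketch (hypothetical, for illustration only) ----------------------
# Python operators on column expressions construct BinaryExpression
# instances; not_() uses the operator/negate pairing applied in _negate().
def _demo_binary_expression():
    from sqlalchemy import column, not_

    a, b = column("a"), column("b")
    expr = a == b         # BinaryExpression(a, b, eq, negate=ne)
    negated = not_(expr)  # renders as: a != b
    return str(expr), str(negated)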
class Slice(ColumnElement):
"""Represent SQL for a Python array-slice object.
This is not a specific SQL construct at this level, but
may be interpreted by specific dialects, e.g. PostgreSQL.
"""
__visit_name__ = "slice"
def __init__(self, start, stop, step):
self.start = start
self.stop = stop
self.step = step
self.type = type_api.NULLTYPE
def self_group(self, against=None):
assert against is operator.getitem
return self
class IndexExpression(BinaryExpression):
"""Represent the class of expressions that are like an "index" operation.
"""
pass
class Grouping(ColumnElement):
"""Represent a grouping within a column expression"""
__visit_name__ = "grouping"
def __init__(self, element):
self.element = element
self.type = getattr(element, "type", type_api.NULLTYPE)
def self_group(self, against=None):
return self
@util.memoized_property
def _is_implicitly_boolean(self):
return self.element._is_implicitly_boolean
@property
def _key_label(self):
return self._label
@property
def _label(self):
return getattr(self.element, "_label", None) or self.anon_label
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
def get_children(self, **kwargs):
return (self.element,)
@property
def _from_objects(self):
return self.element._from_objects
def __getattr__(self, attr):
return getattr(self.element, attr)
def __getstate__(self):
return {"element": self.element, "type": self.type}
def __setstate__(self, state):
self.element = state["element"]
self.type = state["type"]
def compare(self, other, **kw):
return isinstance(other, Grouping) and self.element.compare(
other.element
)
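# --- Demo sketch (hypothetical, for illustration only) ----------------------
# self_group() wraps a lower-precedence expression in Grouping, which is what
# renders the parentheses here.
def _demo_grouping():
    from sqlalchemy import column

    a, b, c = column("a"), column("b"), column("c")
    return str((a + b) * c)  # (a + b) * c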
RANGE_UNBOUNDED = util.symbol("RANGE_UNBOUNDED")
RANGE_CURRENT = util.symbol("RANGE_CURRENT")
class Over(ColumnElement):
"""Represent an OVER clause.
This is a special operator against a so-called
"window" function, as well as any aggregate function,
which produces results relative to the result set
itself. It's supported only by certain database
backends.
"""
__visit_name__ = "over"
order_by = None
partition_by = None
element = None
"""The underlying expression object to which this :class:`.Over`
    object refers."""
def __init__(
self, element, partition_by=None, order_by=None, range_=None, rows=None
):
r"""Produce an :class:`.Over` object against a function.
Used against aggregate or so-called "window" functions,
for database backends that support window functions.
:func:`_expression.over` is usually called using
the :meth:`.FunctionElement.over` method, e.g.::
func.row_number().over(order_by=mytable.c.some_column)
Would produce::
ROW_NUMBER() OVER(ORDER BY some_column)
Ranges are also possible using the :paramref:`.expression.over.range_`
and :paramref:`.expression.over.rows` parameters. These
mutually-exclusive parameters each accept a 2-tuple, which contains
a combination of integers and None::
func.row_number().over(
order_by=my_table.c.some_column, range_=(None, 0))
The above would produce::
ROW_NUMBER() OVER(ORDER BY some_column
RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)
A value of None indicates "unbounded", a
value of zero indicates "current row", and negative / positive
integers indicate "preceding" and "following":
* RANGE BETWEEN 5 PRECEDING AND 10 FOLLOWING::
func.row_number().over(order_by='x', range_=(-5, 10))
* ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW::
func.row_number().over(order_by='x', rows=(None, 0))
* RANGE BETWEEN 2 PRECEDING AND UNBOUNDED FOLLOWING::
func.row_number().over(order_by='x', range_=(-2, None))
* RANGE BETWEEN 1 FOLLOWING AND 3 FOLLOWING::
func.row_number().over(order_by='x', range_=(1, 3))
.. versionadded:: 1.1 support for RANGE / ROWS within a window
:param element: a :class:`.FunctionElement`, :class:`.WithinGroup`,
or other compatible construct.
:param partition_by: a column element or string, or a list
of such, that will be used as the PARTITION BY clause
of the OVER construct.
:param order_by: a column element or string, or a list
of such, that will be used as the ORDER BY clause
of the OVER construct.
:param range\_: optional range clause for the window. This is a
tuple value which can contain integer values or None, and will
render a RANGE BETWEEN PRECEDING / FOLLOWING clause
.. versionadded:: 1.1
:param rows: optional rows clause for the window. This is a tuple
value which can contain integer values or None, and will render
a ROWS BETWEEN PRECEDING / FOLLOWING clause.
.. versionadded:: 1.1
This function is also available from the :data:`~.expression.func`
construct itself via the :meth:`.FunctionElement.over` method.
.. seealso::
:data:`.expression.func`
:func:`_expression.within_group`
"""
self.element = element
if order_by is not None:
self.order_by = ClauseList(
*util.to_list(order_by),
_literal_as_text=_literal_as_label_reference
)
if partition_by is not None:
self.partition_by = ClauseList(
*util.to_list(partition_by),
_literal_as_text=_literal_as_label_reference
)
if range_:
self.range_ = self._interpret_range(range_)
if rows:
raise exc.ArgumentError(
"'range_' and 'rows' are mutually exclusive"
)
else:
self.rows = None
elif rows:
self.rows = self._interpret_range(rows)
self.range_ = None
else:
self.rows = self.range_ = None
def _interpret_range(self, range_):
if not isinstance(range_, tuple) or len(range_) != 2:
raise exc.ArgumentError("2-tuple expected for range/rows")
if range_[0] is None:
lower = RANGE_UNBOUNDED
else:
try:
lower = int(range_[0])
except ValueError as err:
util.raise_(
exc.ArgumentError(
"Integer or None expected for range value"
),
replace_context=err,
)
else:
if lower == 0:
lower = RANGE_CURRENT
if range_[1] is None:
upper = RANGE_UNBOUNDED
else:
try:
upper = int(range_[1])
except ValueError as err:
util.raise_(
exc.ArgumentError(
"Integer or None expected for range value"
),
replace_context=err,
)
else:
if upper == 0:
upper = RANGE_CURRENT
return lower, upper
@property
@util.deprecated(
"1.1",
"the :attr:`.Over.func` member of the :class:`.Over` "
"class is deprecated and will be removed in a future release. "
"Please refer to the :attr:`.Over.element` attribute.",
)
def func(self):
"""the element referred to by this :class:`.Over`
clause.
"""
return self.element
@util.memoized_property
def type(self):
return self.element.type
def get_children(self, **kwargs):
return [
c
for c in (self.element, self.partition_by, self.order_by)
if c is not None
]
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
if self.partition_by is not None:
self.partition_by = clone(self.partition_by, **kw)
if self.order_by is not None:
self.order_by = clone(self.order_by, **kw)
@property
def _from_objects(self):
return list(
itertools.chain(
*[
c._from_objects
for c in (self.element, self.partition_by, self.order_by)
if c is not None
]
)
)
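# --- Demo sketch (hypothetical, for illustration only) ----------------------
# Window usage of the Over construct above; the ordering column is made up.
# rows=(None, 0) maps to RANGE_UNBOUNDED / RANGE_CURRENT via
# _interpret_range().
def _demo_over():
    from sqlalchemy import column, func

    w = func.row_number().over(order_by=column("x"), rows=(None, 0))
    # row_number() OVER (ORDER BY x
    #     ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)
    return str(w)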
class WithinGroup(ColumnElement):
"""Represent a WITHIN GROUP (ORDER BY) clause.
This is a special operator against so-called
"ordered set aggregate" and "hypothetical
set aggregate" functions, including ``percentile_cont()``,
``rank()``, ``dense_rank()``, etc.
It's supported only by certain database backends, such as PostgreSQL,
Oracle and MS SQL Server.
The :class:`.WithinGroup` construct extracts its type from the
method :meth:`.FunctionElement.within_group_type`. If this returns
``None``, the function's ``.type`` is used.
"""
__visit_name__ = "withingroup"
order_by = None
def __init__(self, element, *order_by):
r"""Produce a :class:`.WithinGroup` object against a function.
Used against so-called "ordered set aggregate" and "hypothetical
set aggregate" functions, including :class:`.percentile_cont`,
:class:`.rank`, :class:`.dense_rank`, etc.
:func:`_expression.within_group` is usually called using
the :meth:`.FunctionElement.within_group` method, e.g.::
from sqlalchemy import within_group
stmt = select([
department.c.id,
func.percentile_cont(0.5).within_group(
department.c.salary.desc()
)
])
The above statement would produce SQL similar to
``SELECT department.id, percentile_cont(0.5)
WITHIN GROUP (ORDER BY department.salary DESC)``.
:param element: a :class:`.FunctionElement` construct, typically
generated by :data:`~.expression.func`.
:param \*order_by: one or more column elements that will be used
as the ORDER BY clause of the WITHIN GROUP construct.
.. versionadded:: 1.1
.. seealso::
:data:`.expression.func`
:func:`_expression.over`
"""
self.element = element
if order_by is not None:
self.order_by = ClauseList(
*util.to_list(order_by),
_literal_as_text=_literal_as_label_reference
)
def over(self, partition_by=None, order_by=None, range_=None, rows=None):
"""Produce an OVER clause against this :class:`.WithinGroup`
construct.
This function has the same signature as that of
:meth:`.FunctionElement.over`.
"""
return Over(
self,
partition_by=partition_by,
order_by=order_by,
range_=range_,
rows=rows,
)
@util.memoized_property
def type(self):
wgt = self.element.within_group_type(self)
if wgt is not None:
return wgt
else:
return self.element.type
def get_children(self, **kwargs):
return [c for c in (self.element, self.order_by) if c is not None]
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
if self.order_by is not None:
self.order_by = clone(self.order_by, **kw)
@property
def _from_objects(self):
return list(
itertools.chain(
*[
c._from_objects
for c in (self.element, self.order_by)
if c is not None
]
)
)
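# --- Demo sketch (hypothetical, for illustration only) ----------------------
# An ordered-set aggregate with WITHIN GROUP; the "salary" column is made up.
def _demo_within_group():
    from sqlalchemy import column, func

    expr = func.percentile_cont(0.5).within_group(column("salary").desc())
    # percentile_cont(:percentile_cont_1)
    #     WITHIN GROUP (ORDER BY salary DESC)
    return str(expr)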
class FunctionFilter(ColumnElement):
"""Represent a function FILTER clause.
This is a special operator against aggregate and window functions,
which controls which rows are passed to it.
It's supported only by certain database backends.
Invocation of :class:`.FunctionFilter` is via
:meth:`.FunctionElement.filter`::
func.count(1).filter(True)
.. versionadded:: 1.0.0
.. seealso::
:meth:`.FunctionElement.filter`
"""
__visit_name__ = "funcfilter"
criterion = None
def __init__(self, func, *criterion):
"""Produce a :class:`.FunctionFilter` object against a function.
Used against aggregate and window functions,
for database backends that support the "FILTER" clause.
E.g.::
from sqlalchemy import funcfilter
funcfilter(func.count(1), MyClass.name == 'some name')
Would produce "COUNT(1) FILTER (WHERE myclass.name = 'some name')".
This function is also available from the :data:`~.expression.func`
construct itself via the :meth:`.FunctionElement.filter` method.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.FunctionElement.filter`
"""
self.func = func
self.filter(*criterion)
def filter(self, *criterion):
"""Produce an additional FILTER against the function.
This method adds additional criteria to the initial criteria
set up by :meth:`.FunctionElement.filter`.
Multiple criteria are joined together at SQL render time
via ``AND``.
"""
for criterion in list(criterion):
criterion = _expression_literal_as_text(criterion)
if self.criterion is not None:
self.criterion = self.criterion & criterion
else:
self.criterion = criterion
return self
def over(self, partition_by=None, order_by=None, range_=None, rows=None):
"""Produce an OVER clause against this filtered function.
Used against aggregate or so-called "window" functions,
for database backends that support window functions.
The expression::
func.rank().filter(MyClass.y > 5).over(order_by='x')
is shorthand for::
from sqlalchemy import over, funcfilter
over(funcfilter(func.rank(), MyClass.y > 5), order_by='x')
See :func:`_expression.over` for a full description.
"""
return Over(
self,
partition_by=partition_by,
order_by=order_by,
range_=range_,
rows=rows,
)
def self_group(self, against=None):
if operators.is_precedent(operators.filter_op, against):
return Grouping(self)
else:
return self
@util.memoized_property
def type(self):
return self.func.type
def get_children(self, **kwargs):
return [c for c in (self.func, self.criterion) if c is not None]
def _copy_internals(self, clone=_clone, **kw):
self.func = clone(self.func, **kw)
if self.criterion is not None:
self.criterion = clone(self.criterion, **kw)
@property
def _from_objects(self):
return list(
itertools.chain(
*[
c._from_objects
for c in (self.func, self.criterion)
if c is not None
]
)
)
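# --- Demo sketch (hypothetical, for illustration only) ----------------------
# Chained .filter() calls on FunctionFilter AND the criteria together; the
# "y" column is made up.
def _demo_function_filter():
    from sqlalchemy import column, func

    y = column("y")
    expr = func.count(1).filter(y > 5).filter(y < 10)
    return str(expr)  # count(:count_1) FILTER (WHERE y > :y_1 AND y < :y_2)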
class Label(ColumnElement):
"""Represents a column label (AS).
Represent a label, as typically applied to any column-level
element using the ``AS`` sql keyword.
"""
__visit_name__ = "label"
def __init__(self, name, element, type_=None):
"""Return a :class:`Label` object for the
given :class:`_expression.ColumnElement`.
A label changes the name of an element in the columns clause of a
``SELECT`` statement, typically via the ``AS`` SQL keyword.
This functionality is more conveniently available via the
:meth:`_expression.ColumnElement.label` method on
:class:`_expression.ColumnElement`.
:param name: label name
:param obj: a :class:`_expression.ColumnElement`.
"""
if isinstance(element, Label):
self._resolve_label = element._label
while isinstance(element, Label):
element = element.element
if name:
self.name = name
self._resolve_label = self.name
else:
self.name = _anonymous_label(
"%%(%d %s)s" % (id(self), getattr(element, "name", "anon"))
)
self.key = self._label = self._key_label = self.name
self._element = element
self._type = type_
self._proxies = [element]
def __reduce__(self):
return self.__class__, (self.name, self._element, self._type)
@util.memoized_property
def _is_implicitly_boolean(self):
return self.element._is_implicitly_boolean
@util.memoized_property
def _allow_label_resolve(self):
return self.element._allow_label_resolve
@property
def _order_by_label_element(self):
return self
@util.memoized_property
def type(self):
return type_api.to_instance(
self._type or getattr(self._element, "type", None)
)
@util.memoized_property
def element(self):
return self._element.self_group(against=operators.as_)
def self_group(self, against=None):
return self._apply_to_inner(self._element.self_group, against=against)
def _negate(self):
return self._apply_to_inner(self._element._negate)
def _apply_to_inner(self, fn, *arg, **kw):
sub_element = fn(*arg, **kw)
if sub_element is not self._element:
return Label(self.name, sub_element, type_=self._type)
else:
return self
@property
def primary_key(self):
return self.element.primary_key
@property
def foreign_keys(self):
return self.element.foreign_keys
def get_children(self, **kwargs):
return (self.element,)
def _copy_internals(self, clone=_clone, anonymize_labels=False, **kw):
self._element = clone(self._element, **kw)
self.__dict__.pop("element", None)
self.__dict__.pop("_allow_label_resolve", None)
if anonymize_labels:
self.name = self._resolve_label = _anonymous_label(
"%%(%d %s)s"
% (id(self), getattr(self.element, "name", "anon"))
)
self.key = self._label = self._key_label = self.name
@property
def _from_objects(self):
return self.element._from_objects
def _make_proxy(self, selectable, name=None, **kw):
e = self.element._make_proxy(
selectable,
name=name if name else self.name,
disallow_is_literal=True,
)
e._proxies.append(self)
if self._type is not None:
e.type = self._type
return e
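# --- Demo sketch (hypothetical, for illustration only) ----------------------
# Label names an expression in the columns clause; the column names are
# made up.
def _demo_label():
    from sqlalchemy import column, select

    stmt = select([(column("n") + column("m")).label("total")])
    return str(stmt)  # SELECT n + m AS total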
class ColumnClause(Immutable, ColumnElement):
"""Represents a column expression from any textual string.
The :class:`.ColumnClause`, a lightweight analogue to the
:class:`_schema.Column` class, is typically invoked using the
:func:`_expression.column` function, as in::
from sqlalchemy import column
id, name = column("id"), column("name")
stmt = select([id, name]).select_from("user")
The above statement would produce SQL like::
SELECT id, name FROM user
:class:`.ColumnClause` is the immediate superclass of the schema-specific
:class:`_schema.Column` object. While the :class:`_schema.Column`
class has all the
same capabilities as :class:`.ColumnClause`, the :class:`.ColumnClause`
class is usable by itself in those cases where behavioral requirements
are limited to simple SQL expression generation. The object has none of
the associations with schema-level metadata or with execution-time
behavior that :class:`_schema.Column` does,
so in that sense is a "lightweight"
version of :class:`_schema.Column`.
    Full details on :class:`.ColumnClause` usage are at
:func:`_expression.column`.
.. seealso::
:func:`_expression.column`
:class:`_schema.Column`
"""
__visit_name__ = "column"
onupdate = default = server_default = server_onupdate = None
_is_multiparam_column = False
_memoized_property = util.group_expirable_memoized_property()
def __init__(self, text, type_=None, is_literal=False, _selectable=None):
"""Produce a :class:`.ColumnClause` object.
The :class:`.ColumnClause` is a lightweight analogue to the
:class:`_schema.Column` class. The :func:`_expression.column`
function can
be invoked with just a name alone, as in::
from sqlalchemy import column
id, name = column("id"), column("name")
stmt = select([id, name]).select_from("user")
The above statement would produce SQL like::
SELECT id, name FROM user
Once constructed, :func:`_expression.column`
may be used like any other SQL
expression element such as within :func:`_expression.select`
constructs::
from sqlalchemy.sql import column
id, name = column("id"), column("name")
stmt = select([id, name]).select_from("user")
The text handled by :func:`_expression.column`
is assumed to be handled
like the name of a database column; if the string contains mixed case,
special characters, or matches a known reserved word on the target
backend, the column expression will render using the quoting
behavior determined by the backend. To produce a textual SQL
expression that is rendered exactly without any quoting,
use :func:`_expression.literal_column` instead,
or pass ``True`` as the
value of :paramref:`_expression.column.is_literal`. Additionally,
full SQL
statements are best handled using the :func:`_expression.text`
construct.
:func:`_expression.column` can be used in a table-like
fashion by combining it with the :func:`.table` function
(which is the lightweight analogue to :class:`_schema.Table`
) to produce
a working table construct with minimal boilerplate::
from sqlalchemy import table, column, select
user = table("user",
column("id"),
column("name"),
column("description"),
)
stmt = select([user.c.description]).where(user.c.name == 'wendy')
A :func:`_expression.column` / :func:`.table`
construct like that illustrated
above can be created in an
ad-hoc fashion and is not associated with any
:class:`_schema.MetaData`, DDL, or events, unlike its
:class:`_schema.Table` counterpart.
.. versionchanged:: 1.0.0 :func:`_expression.column` can now
be imported from the plain ``sqlalchemy`` namespace like any
other SQL element.
:param text: the text of the element.
:param type: :class:`_types.TypeEngine` object which can associate
this :class:`.ColumnClause` with a type.
:param is_literal: if True, the :class:`.ColumnClause` is assumed to
be an exact expression that will be delivered to the output with no
          quoting rules applied regardless of case-sensitive settings. The
:func:`_expression.literal_column()` function essentially invokes
:func:`_expression.column` while passing ``is_literal=True``.
.. seealso::
:class:`_schema.Column`
:func:`_expression.literal_column`
:func:`.table`
:func:`_expression.text`
:ref:`sqlexpression_literal_column`
"""
self.key = self.name = text
self.table = _selectable
self.type = type_api.to_instance(type_)
self.is_literal = is_literal
def _compare_name_for_result(self, other):
if (
self.is_literal
or self.table is None
or self.table._textual
or not hasattr(other, "proxy_set")
or (
isinstance(other, ColumnClause)
and (
other.is_literal
or other.table is None
or other.table._textual
)
)
):
return (hasattr(other, "name") and self.name == other.name) or (
hasattr(other, "_label") and self._label == other._label
)
else:
return other.proxy_set.intersection(self.proxy_set)
def _get_table(self):
return self.__dict__["table"]
def _set_table(self, table):
self._memoized_property.expire_instance(self)
self.__dict__["table"] = table
table = property(_get_table, _set_table)
@_memoized_property
def _from_objects(self):
t = self.table
if t is not None:
return [t]
else:
return []
@util.memoized_property
def description(self):
if util.py3k:
return self.name
else:
return self.name.encode("ascii", "backslashreplace")
@_memoized_property
def _key_label(self):
if self.key != self.name:
return self._gen_label(self.key)
else:
return self._label
@_memoized_property
def _label(self):
return self._gen_label(self.name)
@_memoized_property
def _render_label_in_columns_clause(self):
return self.table is not None
@property
def _ddl_label(self):
return self._gen_label(self.name, dedupe_on_key=False)
def _gen_label(self, name, dedupe_on_key=True):
t = self.table
if self.is_literal:
return None
elif t is not None and t.named_with_column:
if getattr(t, "schema", None):
label = t.schema.replace(".", "_") + "_" + t.name + "_" + name
else:
label = t.name + "_" + name
# propagate name quoting rules for labels.
if getattr(name, "quote", None) is not None:
if isinstance(label, quoted_name):
label.quote = name.quote
else:
label = quoted_name(label, name.quote)
elif getattr(t.name, "quote", None) is not None:
# can't get this situation to occur, so let's
# assert false on it for now
assert not isinstance(label, quoted_name)
label = quoted_name(label, t.name.quote)
if dedupe_on_key:
# ensure the label name doesn't conflict with that of an
# existing column. note that this implies that any Column
# must **not** set up its _label before its parent table has
# all of its other Column objects set up. There are several
# tables in the test suite which will fail otherwise; example:
# table "owner" has columns "name" and "owner_name". Therefore
# column owner.name cannot use the label "owner_name", it has
# to be "owner_name_1".
if label in t.c:
_label = label
counter = 1
while _label in t.c:
_label = label + "_" + str(counter)
counter += 1
label = _label
return _as_truncated(label)
else:
return name
def _bind_param(self, operator, obj, type_=None):
return BindParameter(
self.key,
obj,
_compared_to_operator=operator,
_compared_to_type=self.type,
type_=type_,
unique=True,
)
def _make_proxy(
self,
selectable,
name=None,
attach=True,
name_is_truncatable=False,
disallow_is_literal=False,
**kw
):
# the "is_literal" flag normally should never be propagated; a proxied
# column is always a SQL identifier and never the actual expression
# being evaluated. however, there is a case where the "is_literal" flag
# might be used to allow the given identifier to have a fixed quoting
# pattern already, so maintain the flag for the proxy unless a
# :class:`.Label` object is creating the proxy. See [ticket:4730].
is_literal = (
not disallow_is_literal
and self.is_literal
and (
# note this does not accommodate for quoted_name differences
# right now
name is None
or name == self.name
)
)
c = self._constructor(
_as_truncated(name or self.name)
if name_is_truncatable
else (name or self.name),
type_=self.type,
_selectable=selectable,
is_literal=is_literal,
)
if name is None:
c.key = self.key
c._proxies = [self]
if selectable._is_clone_of is not None:
c._is_clone_of = selectable._is_clone_of.columns.get(c.key)
if attach:
selectable._columns[c.key] = c
return c
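# --- Demo sketch (hypothetical, for illustration only) ----------------------
# column() and table() assemble an ad-hoc, metadata-free selectable, per the
# ColumnClause docstring above.
def _demo_column_clause():
    from sqlalchemy import column, select, table

    user = table("user", column("id"), column("name"))
    stmt = select([user.c.id, user.c.name]).where(user.c.name == "wendy")
    return str(stmt)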
class CollationClause(ColumnElement):
__visit_name__ = "collation"
def __init__(self, collation):
self.collation = collation
class _IdentifiedClause(Executable, ClauseElement):
__visit_name__ = "identified"
_execution_options = Executable._execution_options.union(
{"autocommit": False}
)
def __init__(self, ident):
self.ident = ident
class SavepointClause(_IdentifiedClause):
__visit_name__ = "savepoint"
class RollbackToSavepointClause(_IdentifiedClause):
__visit_name__ = "rollback_to_savepoint"
class ReleaseSavepointClause(_IdentifiedClause):
__visit_name__ = "release_savepoint"
class quoted_name(util.MemoizedSlots, util.text_type):
"""Represent a SQL identifier combined with quoting preferences.
:class:`.quoted_name` is a Python unicode/str subclass which
represents a particular identifier name along with a
``quote`` flag. This ``quote`` flag, when set to
``True`` or ``False``, overrides automatic quoting behavior
for this identifier in order to either unconditionally quote
or to not quote the name. If left at its default of ``None``,
quoting behavior is applied to the identifier on a per-backend basis
based on an examination of the token itself.
A :class:`.quoted_name` object with ``quote=True`` is also
prevented from being modified in the case of a so-called
"name normalize" option. Certain database backends, such as
Oracle, Firebird, and DB2 "normalize" case-insensitive names
as uppercase. The SQLAlchemy dialects for these backends
convert from SQLAlchemy's lower-case-means-insensitive convention
to the upper-case-means-insensitive conventions of those backends.
The ``quote=True`` flag here will prevent this conversion from occurring
to support an identifier that's quoted as all lower case against
such a backend.
The :class:`.quoted_name` object is normally created automatically
when specifying the name for key schema constructs such as
:class:`_schema.Table`, :class:`_schema.Column`, and others.
The class can also be
passed explicitly as the name to any function that receives a name which
can be quoted. Such as to use the :meth:`_engine.Engine.has_table`
method with
an unconditionally quoted name::
from sqlalchemy import create_engine
from sqlalchemy.sql import quoted_name
engine = create_engine("oracle+cx_oracle://some_dsn")
engine.has_table(quoted_name("some_table", True))
    The above will run the "has table" logic against the Oracle backend,
passing the name exactly as ``"some_table"`` without converting to
upper case.
.. versionadded:: 0.9.0
.. versionchanged:: 1.2 The :class:`.quoted_name` construct is now
importable from ``sqlalchemy.sql``, in addition to the previous
location of ``sqlalchemy.sql.elements``.
"""
__slots__ = "quote", "lower", "upper"
def __new__(cls, value, quote):
if value is None:
return None
# experimental - don't bother with quoted_name
# if quote flag is None. doesn't seem to make any dent
# in performance however
# elif not sprcls and quote is None:
# return value
elif isinstance(value, cls) and (
quote is None or value.quote == quote
):
return value
self = super(quoted_name, cls).__new__(cls, value)
self.quote = quote
return self
def __reduce__(self):
return quoted_name, (util.text_type(self), self.quote)
def _memoized_method_lower(self):
if self.quote:
return self
else:
return util.text_type(self).lower()
def _memoized_method_upper(self):
if self.quote:
return self
else:
return util.text_type(self).upper()
    def __repr__(self):
        if util.py2k:
            backslashed = self.encode("ascii", "backslashreplace")
            return "'%s'" % backslashed
        else:
            return str.__repr__(self)
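# --- Demo sketch (hypothetical, for illustration only) ----------------------
# quoted_name with quote=True pins quoting for an identifier; the memoized
# lower()/upper() methods then return the name unchanged rather than
# normalizing its case.
def _demo_quoted_name():
    name = quoted_name("SomeTable", True)
    return name.lower() is name  # True: case normalization is suppressed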
class _truncated_label(quoted_name):
"""A unicode subclass used to identify symbolic "
"names that may require truncation."""
__slots__ = ()
def __new__(cls, value, quote=None):
quote = getattr(value, "quote", quote)
# return super(_truncated_label, cls).__new__(cls, value, quote, True)
return super(_truncated_label, cls).__new__(cls, value, quote)
def __reduce__(self):
return self.__class__, (util.text_type(self), self.quote)
def apply_map(self, map_):
return self
class conv(_truncated_label):
"""Mark a string indicating that a name has already been converted
by a naming convention.
This is a string subclass that indicates a name that should not be
subject to any further naming conventions.
E.g. when we create a :class:`.Constraint` using a naming convention
as follows::
m = MetaData(naming_convention={
"ck": "ck_%(table_name)s_%(constraint_name)s"
})
t = Table('t', m, Column('x', Integer),
CheckConstraint('x > 5', name='x5'))
The name of the above constraint will be rendered as ``"ck_t_x5"``.
That is, the existing name ``x5`` is used in the naming convention as the
``constraint_name`` token.
In some situations, such as in migration scripts, we may be rendering
the above :class:`.CheckConstraint` with a name that's already been
converted. In order to make sure the name isn't double-modified, the
new name is applied using the :func:`_schema.conv` marker. We can
use this explicitly as follows::
m = MetaData(naming_convention={
"ck": "ck_%(table_name)s_%(constraint_name)s"
})
t = Table('t', m, Column('x', Integer),
CheckConstraint('x > 5', name=conv('ck_t_x5')))
Where above, the :func:`_schema.conv` marker indicates that the constraint
name here is final, and the name will render as ``"ck_t_x5"`` and not
``"ck_t_ck_t_x5"``
.. versionadded:: 0.9.4
.. seealso::
:ref:`constraint_naming_conventions`
"""
__slots__ = ()
class _defer_name(_truncated_label):
"""mark a name as 'deferred' for the purposes of automated name
generation.
"""
__slots__ = ()
def __new__(cls, value):
if value is None:
return _NONE_NAME
elif isinstance(value, conv):
return value
else:
return super(_defer_name, cls).__new__(cls, value)
def __reduce__(self):
return self.__class__, (util.text_type(self),)
class _defer_none_name(_defer_name):
"""indicate a 'deferred' name that was ultimately the value None."""
__slots__ = ()
_NONE_NAME = _defer_none_name("_unnamed_")
# for backwards compatibility in case
# someone is re-implementing the
# _truncated_identifier() sequence in a custom
# compiler
_generated_label = _truncated_label
class _anonymous_label(_truncated_label):
"""A unicode subclass used to identify anonymously
generated names."""
__slots__ = ()
def __add__(self, other):
return _anonymous_label(
quoted_name(
util.text_type.__add__(self, util.text_type(other)), self.quote
)
)
def __radd__(self, other):
return _anonymous_label(
quoted_name(
util.text_type.__add__(util.text_type(other), self), self.quote
)
)
def apply_map(self, map_):
if self.quote is not None:
# preserve quoting only if necessary
return quoted_name(self % map_, self.quote)
else:
# else skip the constructor call
return self % map_
def _as_truncated(value):
"""coerce the given value to :class:`._truncated_label`.
Existing :class:`._truncated_label` and
:class:`._anonymous_label` objects are passed
unchanged.
"""
if isinstance(value, _truncated_label):
return value
else:
return _truncated_label(value)
def _string_or_unprintable(element):
if isinstance(element, util.string_types):
return element
else:
try:
return str(element)
except Exception:
return "unprintable element %r" % element
def _expand_cloned(elements):
"""expand the given set of ClauseElements to be the set of all 'cloned'
predecessors.
"""
return itertools.chain(*[x._cloned_set for x in elements])
def _select_iterables(elements):
"""expand tables into individual columns in the
given list of column expressions.
"""
return itertools.chain(*[c._select_iterable for c in elements])
def _cloned_intersection(a, b):
"""return the intersection of sets a and b, counting
any overlap between 'cloned' predecessors.
The returned set is in terms of the entities present within 'a'.
"""
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
return set(
elem for elem in a if all_overlap.intersection(elem._cloned_set)
)
def _cloned_difference(a, b):
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
return set(
elem for elem in a if not all_overlap.intersection(elem._cloned_set)
)
@util.dependencies("sqlalchemy.sql.functions")
def _labeled(functions, element):
if not hasattr(element, "name") or isinstance(
element, functions.FunctionElement
):
return element.label(None)
else:
return element
def _is_column(col):
"""True if ``col`` is an instance of
:class:`_expression.ColumnElement`. """
return isinstance(col, ColumnElement)
def _find_columns(clause):
"""locate Column objects within the given expression."""
cols = util.column_set()
traverse(clause, {}, {"column": cols.add})
return cols
# there is some inconsistency here between the usage of
# inspect() vs. checking for Visitable and __clause_element__.
# Ideally all functions here would derive from inspect(),
# however the inspect() versions add significant callcount
# overhead for critical functions like _interpret_as_column_or_from().
# Generally, the column-based functions are more performance critical
# and are fine just checking for __clause_element__(). It is only
# _interpret_as_from() where we'd like to be able to receive ORM entities
# that have no defined namespace, hence inspect() is needed there.
def _column_as_key(element):
if isinstance(element, util.string_types):
return element
if hasattr(element, "__clause_element__"):
element = element.__clause_element__()
try:
return element.key
except AttributeError:
return None
def _clause_element_as_expr(element):
if hasattr(element, "__clause_element__"):
return element.__clause_element__()
else:
return element
def _literal_as_label_reference(element):
if isinstance(element, util.string_types):
return _textual_label_reference(element)
elif hasattr(element, "__clause_element__"):
element = element.__clause_element__()
return _literal_as_text(element)
def _literal_and_labels_as_label_reference(element):
if isinstance(element, util.string_types):
return _textual_label_reference(element)
elif hasattr(element, "__clause_element__"):
element = element.__clause_element__()
if (
isinstance(element, ColumnElement)
and element._order_by_label_element is not None
):
return _label_reference(element)
else:
return _literal_as_text(element)
def _expression_literal_as_text(element):
return _literal_as_text(element)
def _literal_as(element, text_fallback):
if isinstance(element, Visitable):
return element
elif hasattr(element, "__clause_element__"):
return element.__clause_element__()
elif isinstance(element, util.string_types):
return text_fallback(element)
elif isinstance(element, (util.NoneType, bool)):
return _const_expr(element)
else:
raise exc.ArgumentError(
"SQL expression object expected, got object of type %r "
"instead" % type(element)
)
def _literal_as_text(element, allow_coercion_to_text=False):
if allow_coercion_to_text:
return _literal_as(element, TextClause)
else:
return _literal_as(element, _no_text_coercion)
def _literal_as_column(element):
return _literal_as(element, ColumnClause)
def _no_column_coercion(element):
element = str(element)
guess_is_literal = not _guess_straight_column.match(element)
raise exc.ArgumentError(
"Textual column expression %(column)r should be "
"explicitly declared with text(%(column)r), "
"or use %(literal_column)s(%(column)r) "
"for more specificity"
% {
"column": util.ellipses_string(element),
"literal_column": "literal_column"
if guess_is_literal
else "column",
}
)
def _no_text_coercion(
element, exc_cls=exc.ArgumentError, extra=None, err=None
):
util.raise_(
exc_cls(
"%(extra)sTextual SQL expression %(expr)r should be "
"explicitly declared as text(%(expr)r)"
% {
"expr": util.ellipses_string(element),
"extra": "%s " % extra if extra else "",
}
),
replace_context=err,
)
def _no_literals(element):
if hasattr(element, "__clause_element__"):
return element.__clause_element__()
elif not isinstance(element, Visitable):
raise exc.ArgumentError(
"Ambiguous literal: %r. Use the 'text()' "
"function to indicate a SQL expression "
"literal, or 'literal()' to indicate a "
"bound value." % (element,)
)
else:
return element
def _is_literal(element):
return not isinstance(element, Visitable) and not hasattr(
element, "__clause_element__"
)
def _only_column_elements_or_none(element, name):
if element is None:
return None
else:
return _only_column_elements(element, name)
def _only_column_elements(element, name):
if hasattr(element, "__clause_element__"):
element = element.__clause_element__()
if not isinstance(element, ColumnElement):
raise exc.ArgumentError(
"Column-based expression object expected for argument "
"'%s'; got: '%s', type %s" % (name, element, type(element))
)
return element
def _literal_as_binds(element, name=None, type_=None):
if hasattr(element, "__clause_element__"):
return element.__clause_element__()
elif not isinstance(element, Visitable):
if element is None:
return Null()
else:
return BindParameter(name, element, type_=type_, unique=True)
else:
return element
_guess_straight_column = re.compile(r"^\w\S*$", re.I)
def _interpret_as_column_or_from(element):
if isinstance(element, Visitable):
return element
elif hasattr(element, "__clause_element__"):
return element.__clause_element__()
insp = inspection.inspect(element, raiseerr=False)
if insp is None:
if isinstance(element, (util.NoneType, bool)):
return _const_expr(element)
elif hasattr(insp, "selectable"):
return insp.selectable
# be forgiving as this is an extremely common
# and known expression
if element == "*":
guess_is_literal = True
    elif isinstance(element, numbers.Number):
return ColumnClause(str(element), is_literal=True)
else:
_no_column_coercion(element)
return ColumnClause(element, is_literal=guess_is_literal)
def _const_expr(element):
if isinstance(element, (Null, False_, True_)):
return element
elif element is None:
return Null()
elif element is False:
return False_()
elif element is True:
return True_()
else:
raise exc.ArgumentError("Expected None, False, or True")
def _type_from_args(args):
for a in args:
if not a.type._isnull:
return a.type
else:
return type_api.NULLTYPE
def _corresponding_column_or_error(fromclause, column, require_embedded=False):
c = fromclause.corresponding_column(
column, require_embedded=require_embedded
)
if c is None:
raise exc.InvalidRequestError(
"Given column '%s', attached to table '%s', "
"failed to locate a corresponding column from table '%s'"
% (column, getattr(column, "table", None), fromclause.description)
)
return c
class AnnotatedColumnElement(Annotated):
def __init__(self, element, values):
Annotated.__init__(self, element, values)
ColumnElement.comparator._reset(self)
for attr in ("name", "key", "table"):
if self.__dict__.get(attr, False) is None:
self.__dict__.pop(attr)
def _with_annotations(self, values):
clone = super(AnnotatedColumnElement, self)._with_annotations(values)
ColumnElement.comparator._reset(clone)
return clone
@util.memoized_property
def name(self):
"""pull 'name' from parent, if not present"""
return self._Annotated__element.name
@util.memoized_property
def table(self):
"""pull 'table' from parent, if not present"""
return self._Annotated__element.table
@util.memoized_property
def key(self):
"""pull 'key' from parent, if not present"""
return self._Annotated__element.key
@util.memoized_property
def info(self):
return self._Annotated__element.info
@util.memoized_property
def anon_label(self):
return self._Annotated__element.anon_label
| 0 | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/sql/__init__.py |
# sql/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .expression import Alias # noqa
from .expression import alias # noqa
from .expression import all_ # noqa
from .expression import and_ # noqa
from .expression import any_ # noqa
from .expression import asc # noqa
from .expression import between # noqa
from .expression import bindparam # noqa
from .expression import case # noqa
from .expression import cast # noqa
from .expression import ClauseElement # noqa
from .expression import collate # noqa
from .expression import column # noqa
from .expression import ColumnCollection # noqa
from .expression import ColumnElement # noqa
from .expression import CompoundSelect # noqa
from .expression import cte # noqa
from .expression import Delete # noqa
from .expression import delete # noqa
from .expression import desc # noqa
from .expression import distinct # noqa
from .expression import except_ # noqa
from .expression import except_all # noqa
from .expression import exists # noqa
from .expression import extract # noqa
from .expression import false # noqa
from .expression import False_ # noqa
from .expression import FromClause # noqa
from .expression import func # noqa
from .expression import funcfilter # noqa
from .expression import Insert # noqa
from .expression import insert # noqa
from .expression import intersect # noqa
from .expression import intersect_all # noqa
from .expression import Join # noqa
from .expression import join # noqa
from .expression import label # noqa
from .expression import lateral # noqa
from .expression import literal # noqa
from .expression import literal_column # noqa
from .expression import modifier # noqa
from .expression import not_ # noqa
from .expression import null # noqa
from .expression import nullsfirst # noqa
from .expression import nullslast # noqa
from .expression import or_ # noqa
from .expression import outerjoin # noqa
from .expression import outparam # noqa
from .expression import over # noqa
from .expression import quoted_name # noqa
from .expression import Select # noqa
from .expression import select # noqa
from .expression import Selectable # noqa
from .expression import subquery # noqa
from .expression import table # noqa
from .expression import TableClause # noqa
from .expression import TableSample # noqa
from .expression import tablesample # noqa
from .expression import text # noqa
from .expression import true # noqa
from .expression import True_ # noqa
from .expression import tuple_ # noqa
from .expression import type_coerce # noqa
from .expression import union # noqa
from .expression import union_all # noqa
from .expression import Update # noqa
from .expression import update # noqa
from .expression import within_group # noqa
from .visitors import ClauseVisitor # noqa
def __go(lcls):
global __all__
from .. import util as _sa_util
import inspect as _inspect
__all__ = sorted(
name
for name, obj in lcls.items()
if not (name.startswith("_") or _inspect.ismodule(obj))
)
from .annotation import _prepare_annotations
from .annotation import Annotated # noqa
from .elements import AnnotatedColumnElement
from .elements import ClauseList # noqa
from .selectable import AnnotatedFromClause # noqa
_prepare_annotations(ColumnElement, AnnotatedColumnElement)
_prepare_annotations(FromClause, AnnotatedFromClause)
_prepare_annotations(ClauseList, Annotated)
_sa_util.dependencies.resolve_all("sqlalchemy.sql")
from . import naming # noqa
__go(locals())
| 0 | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/sql/sqltypes.py |
# sql/sqltypes.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQL specific types.
"""
import codecs
import datetime as dt
import decimal
import json
from . import elements
from . import operators
from . import type_api
from .base import _bind_or_error
from .base import NO_ARG
from .base import SchemaEventTarget
from .elements import _defer_name
from .elements import _literal_as_binds
from .elements import quoted_name
from .elements import Slice
from .elements import TypeCoerce as type_coerce # noqa
from .type_api import Emulated
from .type_api import NativeForEmulated # noqa
from .type_api import to_instance
from .type_api import TypeDecorator
from .type_api import TypeEngine
from .type_api import Variant
from .. import event
from .. import exc
from .. import inspection
from .. import processors
from .. import util
from ..util import compat
from ..util import pickle
if util.jython:
import array
class _LookupExpressionAdapter(object):
"""Mixin expression adaptations based on lookup tables.
These rules are currently used by the numeric, integer and date types
which have detailed cross-expression coercion rules.
"""
@property
def _expression_adaptations(self):
raise NotImplementedError()
class Comparator(TypeEngine.Comparator):
_blank_dict = util.immutabledict()
def _adapt_expression(self, op, other_comparator):
othertype = other_comparator.type._type_affinity
lookup = self.type._expression_adaptations.get(
op, self._blank_dict
).get(othertype, self.type)
if lookup is othertype:
return (op, other_comparator.type)
elif lookup is self.type._type_affinity:
return (op, self.type)
else:
return (op, to_instance(lookup))
comparator_factory = Comparator
class Concatenable(object):
"""A mixin that marks a type as supporting 'concatenation',
typically strings."""
class Comparator(TypeEngine.Comparator):
def _adapt_expression(self, op, other_comparator):
if op is operators.add and isinstance(
other_comparator,
(Concatenable.Comparator, NullType.Comparator),
):
return operators.concat_op, self.expr.type
else:
return super(Concatenable.Comparator, self)._adapt_expression(
op, other_comparator
)
comparator_factory = Comparator
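# --- Demo sketch (hypothetical, for illustration only) ----------------------
# The Concatenable comparator swaps ``+`` for the dialect's string
# concatenation operator; the "a" column is made up.
def _demo_concatenable():
    from sqlalchemy import String, column

    expr = column("a", String) + "suffix"
    return str(expr)  # a || :a_1 on the default dialect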
class Indexable(object):
"""A mixin that marks a type as supporting indexing operations,
such as array or JSON structures.
.. versionadded:: 1.1.0
"""
class Comparator(TypeEngine.Comparator):
def _setup_getitem(self, index):
raise NotImplementedError()
def __getitem__(self, index):
(
adjusted_op,
adjusted_right_expr,
result_type,
) = self._setup_getitem(index)
return self.operate(
adjusted_op, adjusted_right_expr, result_type=result_type
)
comparator_factory = Comparator
class String(Concatenable, TypeEngine):
"""The base for all string and character types.
In SQL, corresponds to VARCHAR. Can also take Python unicode objects
and encode to the database's encoding in bind params (and the reverse for
result sets.)
The `length` field is usually required when the `String` type is
used within a CREATE TABLE statement, as VARCHAR requires a length
on most databases.
"""
__visit_name__ = "string"
@util.deprecated_params(
convert_unicode=(
"1.3",
"The :paramref:`.String.convert_unicode` parameter is deprecated "
"and will be removed in a future release. All modern DBAPIs "
"now support Python Unicode directly and this parameter is "
"unnecessary.",
),
unicode_error=(
"1.3",
"The :paramref:`.String.unicode_errors` parameter is deprecated "
"and will be removed in a future release. This parameter is "
"unnecessary for modern Python DBAPIs and degrades performance "
"significantly.",
),
)
def __init__(
self,
length=None,
collation=None,
convert_unicode=False,
unicode_error=None,
_warn_on_bytestring=False,
_expect_unicode=False,
):
"""
Create a string-holding type.
:param length: optional, a length for the column for use in
DDL and CAST expressions. May be safely omitted if no ``CREATE
TABLE`` will be issued. Certain databases may require a
``length`` for use in DDL, and will raise an exception when
the ``CREATE TABLE`` DDL is issued if a ``VARCHAR``
with no length is included. Whether the value is
interpreted as bytes or characters is database specific.
:param collation: Optional, a column-level collation for
use in DDL and CAST expressions. Renders using the
COLLATE keyword supported by SQLite, MySQL, and PostgreSQL.
E.g.::
>>> from sqlalchemy import cast, select, String
>>> print(select([cast('some string', String(collation='utf8'))]))
SELECT CAST(:param_1 AS VARCHAR COLLATE utf8) AS anon_1
:param convert_unicode: When set to ``True``, the
:class:`.String` type will assume that
input is to be passed as Python Unicode objects under Python 2,
and results returned as Python Unicode objects.
In the rare circumstance that the DBAPI does not support
Python unicode under Python 2, SQLAlchemy will use its own
encoder/decoder functionality on strings, referring to the
          value of the :paramref:`_sa.create_engine.encoding` parameter
          passed to :func:`_sa.create_engine` as the encoding.
For the extremely rare case that Python Unicode
is to be encoded/decoded by SQLAlchemy on a backend
that *does* natively support Python Unicode,
the string value ``"force"`` can be passed here which will
cause SQLAlchemy's encode/decode services to be
used unconditionally.
.. note::
SQLAlchemy's unicode-conversion flags and features only apply
to Python 2; in Python 3, all string objects are Unicode objects.
For this reason, as well as the fact that virtually all modern
DBAPIs now support Unicode natively even under Python 2,
the :paramref:`.String.convert_unicode` flag is inherently a
legacy feature.
.. note::
In the vast majority of cases, the :class:`.Unicode` or
:class:`.UnicodeText` datatypes should be used for a
:class:`_schema.Column` that expects to store non-ascii data.
These
datatypes will ensure that the correct types are used on the
database side as well as set up the correct Unicode behaviors
under Python 2.
.. seealso::
:paramref:`_sa.create_engine.convert_unicode` -
:class:`_engine.Engine`-wide parameter
:param unicode_error: Optional, a method to use to handle Unicode
conversion errors. Behaves like the ``errors`` keyword argument to
the standard library's ``string.decode()`` functions, requires
that :paramref:`.String.convert_unicode` is set to
``"force"``
"""
if unicode_error is not None and convert_unicode != "force":
raise exc.ArgumentError(
"convert_unicode must be 'force' " "when unicode_error is set."
)
self.length = length
self.collation = collation
self._expect_unicode = convert_unicode or _expect_unicode
self._expect_unicode_error = unicode_error
self._warn_on_bytestring = _warn_on_bytestring
def literal_processor(self, dialect):
def process(value):
value = value.replace("'", "''")
if dialect.identifier_preparer._double_percents:
value = value.replace("%", "%%")
return "'%s'" % value
return process
def bind_processor(self, dialect):
if self._expect_unicode or dialect.convert_unicode:
if (
dialect.supports_unicode_binds
and self._expect_unicode != "force"
):
if self._warn_on_bytestring:
def process(value):
if isinstance(value, util.binary_type):
util.warn_limited(
"Unicode type received non-unicode "
"bind param value %r.",
(util.ellipses_string(value),),
)
return value
return process
else:
return None
else:
encoder = codecs.getencoder(dialect.encoding)
warn_on_bytestring = self._warn_on_bytestring
def process(value):
if isinstance(value, util.text_type):
return encoder(value, self._expect_unicode_error)[0]
elif warn_on_bytestring and value is not None:
util.warn_limited(
"Unicode type received non-unicode bind "
"param value %r.",
(util.ellipses_string(value),),
)
return value
return process
else:
return None
def result_processor(self, dialect, coltype):
wants_unicode = self._expect_unicode or dialect.convert_unicode
needs_convert = wants_unicode and (
dialect.returns_unicode_strings is not True
or self._expect_unicode in ("force", "force_nocheck")
)
needs_isinstance = (
needs_convert
and dialect.returns_unicode_strings
and self._expect_unicode != "force_nocheck"
)
if needs_convert:
if needs_isinstance:
return processors.to_conditional_unicode_processor_factory(
dialect.encoding, self._expect_unicode_error
)
else:
return processors.to_unicode_processor_factory(
dialect.encoding, self._expect_unicode_error
)
else:
return None
@property
def python_type(self):
if self._expect_unicode:
return util.text_type
else:
return str
def get_dbapi_type(self, dbapi):
return dbapi.STRING
@classmethod
def _warn_deprecated_unicode(cls):
util.warn_deprecated(
"The convert_unicode on Engine and String as well as the "
"unicode_error flag on String are deprecated. All modern "
"DBAPIs now support Python Unicode natively under Python 2, and "
"under Python 3 all strings are inherently Unicode. These flags "
"will be removed in a future release."
)
class Text(String):
"""A variably sized string type.
In SQL, usually corresponds to CLOB or TEXT. Can also take Python
unicode objects and encode to the database's encoding in bind
params (and the reverse for result sets.) In general, TEXT objects
do not have a length; while some databases will accept a length
argument here, it will be rejected by others.
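As a minimal declaration sketch (the table and column names here are
hypothetical)::

    from sqlalchemy import Column, Integer, MetaData, Table, Text

    articles = Table(
        "articles", MetaData(),
        Column("id", Integer, primary_key=True),
        # typically renders TEXT or CLOB; no length is required
        Column("body", Text),
    )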
"""
__visit_name__ = "text"
class Unicode(String):
"""A variable length Unicode string type.
The :class:`.Unicode` type is a :class:`.String` subclass
that assumes input and output as Python ``unicode`` data,
and in that regard is equivalent to the usage of the
``convert_unicode`` flag with the :class:`.String` type.
However, unlike plain :class:`.String`, it also implies an
underlying column type that explicitly supports non-ASCII
data, such as ``NVARCHAR`` on Oracle and SQL Server.
This can impact the output of ``CREATE TABLE`` statements
and ``CAST`` functions at the dialect level, and can
also affect the handling of bound parameters in some
specific DBAPI scenarios.
The encoding used by the :class:`.Unicode` type is usually
determined by the DBAPI itself; most modern DBAPIs
feature support for Python ``unicode`` objects as bound
values and result set values, and the encoding should
be configured as detailed in the notes for the target
DBAPI in the :ref:`dialect_toplevel` section.
For those DBAPIs which do not support, or are not configured
to accommodate Python ``unicode`` objects
directly, SQLAlchemy does the encoding and decoding
outside of the DBAPI. The encoding in this scenario
is determined by the ``encoding`` flag passed to
:func:`_sa.create_engine`.
When using the :class:`.Unicode` type, it is only appropriate
to pass Python ``unicode`` objects, and not plain ``str``.
If a plain ``str`` is passed under Python 2, a warning
is emitted. If you notice your application emitting these warnings but
you're not sure of the source of them, the Python
``warnings`` filter, documented at
http://docs.python.org/library/warnings.html,
can be used to turn these warnings into exceptions
which will illustrate a stack trace::
import warnings
warnings.simplefilter('error')
For an application that wishes to pass plain bytestrings
and Python ``unicode`` objects to the ``Unicode`` type
equally, the bytestrings must first be decoded into
unicode. The recipe at :ref:`coerce_to_unicode` illustrates
how this is done.
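A minimal sketch of that decode step under Python 2, assuming ``table``
and ``connection`` are an existing :class:`_schema.Table` and
connection (the encoding and names are illustrative only)::

    raw = b"caf\xc3\xa9"          # a bytestring from an external source
    value = raw.decode("utf-8")   # decode to unicode before binding
    connection.execute(table.insert(), {"name": value})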
.. seealso::
:class:`.UnicodeText` - unlengthed textual counterpart
to :class:`.Unicode`.
"""
__visit_name__ = "unicode"
def __init__(self, length=None, **kwargs):
"""
Create a :class:`.Unicode` object.
Parameters are the same as that of :class:`.String`,
with the exception that ``convert_unicode``
defaults to ``True``.
"""
kwargs.setdefault("_expect_unicode", True)
kwargs.setdefault("_warn_on_bytestring", True)
super(Unicode, self).__init__(length=length, **kwargs)
class UnicodeText(Text):
"""An unbounded-length Unicode string type.
See :class:`.Unicode` for details on the unicode
behavior of this object.
Like :class:`.Unicode`, usage of the :class:`.UnicodeText` type implies a
unicode-capable type being used on the backend, such as
``NCLOB``, ``NTEXT``.
"""
__visit_name__ = "unicode_text"
def __init__(self, length=None, **kwargs):
"""
Create a Unicode-converting Text type.
Parameters are the same as that of :class:`_expression.TextClause`,
with the exception that ``convert_unicode``
defaults to ``True``.
"""
kwargs.setdefault("_expect_unicode", True)
kwargs.setdefault("_warn_on_bytestring", True)
super(UnicodeText, self).__init__(length=length, **kwargs)
def _warn_deprecated_unicode(self):
pass
class Integer(_LookupExpressionAdapter, TypeEngine):
"""A type for ``int`` integers."""
__visit_name__ = "integer"
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
@property
def python_type(self):
return int
def literal_processor(self, dialect):
def process(value):
return str(value)
return process
@util.memoized_property
def _expression_adaptations(self):
# TODO: need a dictionary object that will
# handle operators generically here, this is incomplete
return {
operators.add: {
Date: Date,
Integer: self.__class__,
Numeric: Numeric,
},
operators.mul: {
Interval: Interval,
Integer: self.__class__,
Numeric: Numeric,
},
operators.div: {Integer: self.__class__, Numeric: Numeric},
operators.truediv: {Integer: self.__class__, Numeric: Numeric},
operators.sub: {Integer: self.__class__, Numeric: Numeric},
}
class SmallInteger(Integer):
"""A type for smaller ``int`` integers.
Typically generates a ``SMALLINT`` in DDL, and otherwise acts like
a normal :class:`.Integer` on the Python side.
"""
__visit_name__ = "small_integer"
class BigInteger(Integer):
"""A type for bigger ``int`` integers.
Typically generates a ``BIGINT`` in DDL, and otherwise acts like
a normal :class:`.Integer` on the Python side.
"""
__visit_name__ = "big_integer"
class Numeric(_LookupExpressionAdapter, TypeEngine):
"""A type for fixed precision numbers, such as ``NUMERIC`` or ``DECIMAL``.
This type returns Python ``decimal.Decimal`` objects by default, unless
the :paramref:`.Numeric.asdecimal` flag is set to False, in which case
they are coerced to Python ``float`` objects.
.. note::
The :class:`.Numeric` type is designed to receive data from a database
type that is explicitly known to be a decimal type
(e.g. ``DECIMAL``, ``NUMERIC``, others) and not a floating point
type (e.g. ``FLOAT``, ``REAL``, others).
If the database column on the server is in fact a floating-point type
type, such as ``FLOAT`` or ``REAL``, use the :class:`.Float`
type or a subclass, otherwise numeric coercion between
``float``/``Decimal`` may or may not function as expected.
.. note::
The Python ``decimal.Decimal`` class is generally slow
performing; CPython 3.3 has now switched to use the `cdecimal
<http://pypi.python.org/pypi/cdecimal/>`_ library natively. For
older Python versions, the ``cdecimal`` library can be patched
into any application where it will replace the ``decimal``
library fully, however this needs to be applied globally and
before any other modules have been imported, as follows::
import sys
import cdecimal
sys.modules["decimal"] = cdecimal
Note that the ``cdecimal`` and ``decimal`` libraries are **not
compatible with each other**, so patching ``cdecimal`` at the
global level is the only way it can be used effectively with
various DBAPIs that hardcode to import the ``decimal`` library.
"""
__visit_name__ = "numeric"
_default_decimal_return_scale = 10
def __init__(
self,
precision=None,
scale=None,
decimal_return_scale=None,
asdecimal=True,
):
"""
Construct a Numeric.
:param precision: the numeric precision for use in DDL ``CREATE
TABLE``.
:param scale: the numeric scale for use in DDL ``CREATE TABLE``.
:param asdecimal: default True. Return whether or not
values should be sent as Python Decimal objects, or
as floats. Different DBAPIs send one or the other based on
datatypes - the Numeric type will ensure that return values
are one or the other across DBAPIs consistently.
:param decimal_return_scale: Default scale to use when converting
from floats to Python decimals. Floating point values will typically
be much longer due to decimal inaccuracy, and most floating point
database types don't have a notion of "scale", so by default the
float type looks for the first ten decimal places when converting.
Specifying this value will override that length. Types which
do include an explicit ".scale" value, such as the base
:class:`.Numeric` as well as the MySQL float types, will use the
value of ".scale" as the default for decimal_return_scale, if not
otherwise specified.
.. versionadded:: 0.9.0
When using the ``Numeric`` type, care should be taken to ensure
that the asdecimal setting is appropriate for the DBAPI in use -
when Numeric applies a conversion from Decimal->float or float->
Decimal, this conversion incurs an additional performance overhead
for all result columns received.
DBAPIs that return Decimal natively (e.g. psycopg2) will have
better accuracy and higher performance with a setting of ``True``,
as the native translation to Decimal reduces the amount of floating-
point issues at play, and the Numeric type itself doesn't need
to apply any further conversions. However, another DBAPI which
returns floats natively *will* incur an additional conversion
overhead, and is still subject to floating point data loss - in
which case ``asdecimal=False`` will at least remove the extra
conversion overhead.
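As a minimal declaration sketch (the table and column names are
hypothetical)::

    from sqlalchemy import Column, Integer, MetaData, Numeric, Table

    invoices = Table(
        "invoices", MetaData(),
        Column("id", Integer, primary_key=True),
        # typically renders NUMERIC(10, 2); values return as
        # decimal.Decimal by default
        Column("amount", Numeric(precision=10, scale=2)),
    )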
"""
self.precision = precision
self.scale = scale
self.decimal_return_scale = decimal_return_scale
self.asdecimal = asdecimal
@property
def _effective_decimal_return_scale(self):
if self.decimal_return_scale is not None:
return self.decimal_return_scale
elif getattr(self, "scale", None) is not None:
return self.scale
else:
return self._default_decimal_return_scale
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
def literal_processor(self, dialect):
def process(value):
return str(value)
return process
@property
def python_type(self):
if self.asdecimal:
return decimal.Decimal
else:
return float
def bind_processor(self, dialect):
if dialect.supports_native_decimal:
return None
else:
return processors.to_float
def result_processor(self, dialect, coltype):
if self.asdecimal:
if dialect.supports_native_decimal:
# we're a "numeric", DBAPI will give us Decimal directly
return None
else:
util.warn(
"Dialect %s+%s does *not* support Decimal "
"objects natively, and SQLAlchemy must "
"convert from floating point - rounding "
"errors and other issues may occur. Please "
"consider storing Decimal numbers as strings "
"or integers on this platform for lossless "
"storage." % (dialect.name, dialect.driver)
)
# we're a "numeric", DBAPI returns floats, convert.
return processors.to_decimal_processor_factory(
decimal.Decimal,
self.scale
if self.scale is not None
else self._default_decimal_return_scale,
)
else:
if dialect.supports_native_decimal:
return processors.to_float
else:
return None
@util.memoized_property
def _expression_adaptations(self):
return {
operators.mul: {
Interval: Interval,
Numeric: self.__class__,
Integer: self.__class__,
},
operators.div: {Numeric: self.__class__, Integer: self.__class__},
operators.truediv: {
Numeric: self.__class__,
Integer: self.__class__,
},
operators.add: {Numeric: self.__class__, Integer: self.__class__},
operators.sub: {Numeric: self.__class__, Integer: self.__class__},
}
class Float(Numeric):
"""Type representing floating point types, such as ``FLOAT`` or ``REAL``.
This type returns Python ``float`` objects by default, unless the
:paramref:`.Float.asdecimal` flag is set to True, in which case they
are coerced to ``decimal.Decimal`` objects.
.. note::
The :class:`.Float` type is designed to receive data from a database
type that is explicitly known to be a floating point type
(e.g. ``FLOAT``, ``REAL``, others)
and not a decimal type (e.g. ``DECIMAL``, ``NUMERIC``, others).
If the database column on the server is in fact a Numeric
type, such as ``DECIMAL`` or ``NUMERIC``, use the :class:`.Numeric`
type or a subclass, otherwise numeric coercion between
``float``/``Decimal`` may or may not function as expected.
"""
__visit_name__ = "float"
scale = None
def __init__(
self, precision=None, asdecimal=False, decimal_return_scale=None
):
r"""
Construct a Float.
:param precision: the numeric precision for use in DDL ``CREATE
TABLE``.
:param asdecimal: the same flag as that of :class:`.Numeric`, but
defaults to ``False``. Note that setting this flag to ``True``
results in floating point conversion.
:param decimal_return_scale: Default scale to use when converting
from floats to Python decimals. Floating point values will typically
be much longer due to decimal inaccuracy, and most floating point
database types don't have a notion of "scale", so by default the
float type looks for the first ten decimal places when converting.
Specifying this value will override that length. Note that the
MySQL float types, which do include "scale", will use "scale"
as the default for decimal_return_scale, if not otherwise specified.
.. versionadded:: 0.9.0
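A minimal usage sketch (the column name is hypothetical)::

    from sqlalchemy import Column, Float

    # returns Python floats; pass asdecimal=True to coerce to Decimal
    measurement = Column("measurement", Float(precision=53))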
"""
self.precision = precision
self.asdecimal = asdecimal
self.decimal_return_scale = decimal_return_scale
def result_processor(self, dialect, coltype):
if self.asdecimal:
return processors.to_decimal_processor_factory(
decimal.Decimal, self._effective_decimal_return_scale
)
elif dialect.supports_native_decimal:
return processors.to_float
else:
return None
class DateTime(_LookupExpressionAdapter, TypeEngine):
"""A type for ``datetime.datetime()`` objects.
Date and time types return objects from the Python ``datetime``
module. Most DBAPIs have built in support for the datetime
module, with the noted exception of SQLite. In the case of
SQLite, date and time types are stored as strings which are then
converted back to datetime objects when rows are returned.
For the time representation within the datetime type, some
backends include additional options, such as timezone support and
fractional seconds support. For fractional seconds, use the
dialect-specific datatype, such as :class:`.mysql.TIME`. For
timezone support, use at least the :class:`_types.TIMESTAMP` datatype,
if not the dialect-specific datatype object.
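As a brief sketch of a timezone-enabled column; per the note above,
:class:`_types.TIMESTAMP` may be the better choice on some backends
(the column name is hypothetical)::

    from sqlalchemy import Column, DateTime

    created_at = Column("created_at", DateTime(timezone=True))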
"""
__visit_name__ = "datetime"
def __init__(self, timezone=False):
"""Construct a new :class:`.DateTime`.
:param timezone: boolean. Indicates that the datetime type should
enable timezone support, if available on the
**base date/time-holding type only**. It is recommended
to make use of the :class:`_types.TIMESTAMP` datatype directly when
using this flag, as some databases include separate generic
date/time-holding types distinct from the timezone-capable
TIMESTAMP datatype, such as Oracle.
"""
self.timezone = timezone
def get_dbapi_type(self, dbapi):
return dbapi.DATETIME
@property
def python_type(self):
return dt.datetime
@util.memoized_property
def _expression_adaptations(self):
# Based on http://www.postgresql.org/docs/current/\
# static/functions-datetime.html.
return {
operators.add: {Interval: self.__class__},
operators.sub: {Interval: self.__class__, DateTime: Interval},
}
class Date(_LookupExpressionAdapter, TypeEngine):
"""A type for ``datetime.date()`` objects."""
__visit_name__ = "date"
def get_dbapi_type(self, dbapi):
return dbapi.DATETIME
@property
def python_type(self):
return dt.date
@util.memoized_property
def _expression_adaptations(self):
# Based on http://www.postgresql.org/docs/current/\
# static/functions-datetime.html.
return {
operators.add: {
Integer: self.__class__,
Interval: DateTime,
Time: DateTime,
},
operators.sub: {
# date - integer = date
Integer: self.__class__,
# date - date = integer.
Date: Integer,
Interval: DateTime,
# date - datetime = interval,
# this one is not in the PG docs
# but works
DateTime: Interval,
},
}
class Time(_LookupExpressionAdapter, TypeEngine):
"""A type for ``datetime.time()`` objects."""
__visit_name__ = "time"
def __init__(self, timezone=False):
self.timezone = timezone
def get_dbapi_type(self, dbapi):
return dbapi.DATETIME
@property
def python_type(self):
return dt.time
@util.memoized_property
def _expression_adaptations(self):
# Based on http://www.postgresql.org/docs/current/\
# static/functions-datetime.html.
return {
operators.add: {Date: DateTime, Interval: self.__class__},
operators.sub: {Time: Interval, Interval: self.__class__},
}
class _Binary(TypeEngine):
"""Define base behavior for binary types."""
def __init__(self, length=None):
self.length = length
def literal_processor(self, dialect):
def process(value):
value = value.decode(dialect.encoding).replace("'", "''")
return "'%s'" % value
return process
@property
def python_type(self):
return util.binary_type
# Python 3 - sqlite3 doesn't need the `Binary` conversion
# here, though pg8000 does to indicate "bytea"
def bind_processor(self, dialect):
if dialect.dbapi is None:
return None
DBAPIBinary = dialect.dbapi.Binary
def process(value):
if value is not None:
return DBAPIBinary(value)
else:
return None
return process
# Python 3 has native bytes() type
# both sqlite3 and pg8000 seem to return it,
# psycopg2 as of 2.5 returns 'memoryview'
if util.py2k:
def result_processor(self, dialect, coltype):
if util.jython:
def process(value):
if value is not None:
if isinstance(value, array.array):
return value.tostring()
return str(value)
else:
return None
else:
process = processors.to_str
return process
else:
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
value = bytes(value)
return value
return process
def coerce_compared_value(self, op, value):
"""See :meth:`.TypeEngine.coerce_compared_value` for a description."""
if isinstance(value, util.string_types):
return self
else:
return super(_Binary, self).coerce_compared_value(op, value)
def get_dbapi_type(self, dbapi):
return dbapi.BINARY
class LargeBinary(_Binary):
"""A type for large binary byte data.
The :class:`.LargeBinary` type corresponds to a large and/or unlengthed
binary type for the target platform, such as BLOB on MySQL and BYTEA for
PostgreSQL. It also handles the necessary conversions for the DBAPI.
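A minimal declaration sketch (the table and column names are
hypothetical)::

    from sqlalchemy import Column, Integer, LargeBinary, MetaData, Table

    files = Table(
        "files", MetaData(),
        Column("id", Integer, primary_key=True),
        # renders BLOB on MySQL, BYTEA on PostgreSQL
        Column("content", LargeBinary),
    )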
"""
__visit_name__ = "large_binary"
def __init__(self, length=None):
"""
Construct a LargeBinary type.
:param length: optional, a length for the column for use in
DDL statements, for those binary types that accept a length,
such as the MySQL BLOB type.
"""
_Binary.__init__(self, length=length)
@util.deprecated_cls(
"0.6",
"The :class:`.Binary` class is deprecated and will be removed "
"in a future relase. Please use :class:`.LargeBinary`.",
)
class Binary(LargeBinary):
def __init__(self, *arg, **kw):
LargeBinary.__init__(self, *arg, **kw)
class SchemaType(SchemaEventTarget):
"""Mark a type as possibly requiring schema-level DDL for usage.
Supports types that must be explicitly created/dropped (i.e. PG ENUM type)
as well as types that are complemented by table or schema level
constraints, triggers, and other rules.
:class:`.SchemaType` classes can also be targets for the
:meth:`.DDLEvents.before_parent_attach` and
:meth:`.DDLEvents.after_parent_attach` events, where the events fire off
surrounding the association of the type object with a parent
:class:`_schema.Column`.
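As one illustration, an :class:`.Enum` (a :class:`.SchemaType` subclass)
associated with a ``MetaData`` object emits its schema-level DDL within
``create_all()`` / ``drop_all()``; a hedged sketch, where the engine URL
and names are hypothetical::

    from sqlalchemy import Enum, MetaData, create_engine

    metadata = MetaData()
    status = Enum("draft", "published", name="status_enum",
                  metadata=metadata)

    engine = create_engine("postgresql://scott:tiger@localhost/test")
    metadata.create_all(engine)   # emits CREATE TYPE on PostgreSQL
    metadata.drop_all(engine)     # emits DROP TYPE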
.. seealso::
:class:`.Enum`
:class:`.Boolean`
"""
def __init__(
self,
name=None,
schema=None,
metadata=None,
inherit_schema=False,
quote=None,
_create_events=True,
):
if name is not None:
self.name = quoted_name(name, quote)
else:
self.name = None
self.schema = schema
self.metadata = metadata
self.inherit_schema = inherit_schema
self._create_events = _create_events
if _create_events and self.metadata:
event.listen(
self.metadata,
"before_create",
util.portable_instancemethod(self._on_metadata_create),
)
event.listen(
self.metadata,
"after_drop",
util.portable_instancemethod(self._on_metadata_drop),
)
def _translate_schema(self, effective_schema, map_):
return map_.get(effective_schema, effective_schema)
def _set_parent(self, column):
column._on_table_attach(util.portable_instancemethod(self._set_table))
def _variant_mapping_for_set_table(self, column):
if isinstance(column.type, Variant):
variant_mapping = column.type.mapping.copy()
variant_mapping["_default"] = column.type.impl
else:
variant_mapping = None
return variant_mapping
def _set_table(self, column, table):
if self.inherit_schema:
self.schema = table.schema
if not self._create_events:
return
variant_mapping = self._variant_mapping_for_set_table(column)
event.listen(
table,
"before_create",
util.portable_instancemethod(
self._on_table_create, {"variant_mapping": variant_mapping}
),
)
event.listen(
table,
"after_drop",
util.portable_instancemethod(
self._on_table_drop, {"variant_mapping": variant_mapping}
),
)
if self.metadata is None:
# TODO: what's the difference between self.metadata
# and table.metadata here ?
event.listen(
table.metadata,
"before_create",
util.portable_instancemethod(
self._on_metadata_create,
{"variant_mapping": variant_mapping},
),
)
event.listen(
table.metadata,
"after_drop",
util.portable_instancemethod(
self._on_metadata_drop,
{"variant_mapping": variant_mapping},
),
)
def copy(self, **kw):
return self.adapt(self.__class__, _create_events=True)
def adapt(self, impltype, **kw):
schema = kw.pop("schema", self.schema)
metadata = kw.pop("metadata", self.metadata)
_create_events = kw.pop("_create_events", False)
return impltype(
name=self.name,
schema=schema,
inherit_schema=self.inherit_schema,
metadata=metadata,
_create_events=_create_events,
**kw
)
@property
def bind(self):
return self.metadata and self.metadata.bind or None
def create(self, bind=None, checkfirst=False):
"""Issue CREATE ddl for this type, if applicable."""
if bind is None:
bind = _bind_or_error(self)
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t.create(bind=bind, checkfirst=checkfirst)
def drop(self, bind=None, checkfirst=False):
"""Issue DROP ddl for this type, if applicable."""
if bind is None:
bind = _bind_or_error(self)
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t.drop(bind=bind, checkfirst=checkfirst)
def _on_table_create(self, target, bind, **kw):
if not self._is_impl_for_variant(bind.dialect, kw):
return
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t._on_table_create(target, bind, **kw)
def _on_table_drop(self, target, bind, **kw):
if not self._is_impl_for_variant(bind.dialect, kw):
return
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t._on_table_drop(target, bind, **kw)
def _on_metadata_create(self, target, bind, **kw):
if not self._is_impl_for_variant(bind.dialect, kw):
return
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t._on_metadata_create(target, bind, **kw)
def _on_metadata_drop(self, target, bind, **kw):
if not self._is_impl_for_variant(bind.dialect, kw):
return
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t._on_metadata_drop(target, bind, **kw)
def _is_impl_for_variant(self, dialect, kw):
variant_mapping = kw.pop("variant_mapping", None)
if variant_mapping is None:
return True
if (
dialect.name in variant_mapping
and variant_mapping[dialect.name] is self
):
return True
elif dialect.name not in variant_mapping:
return variant_mapping["_default"] is self
class Enum(Emulated, String, SchemaType):
"""Generic Enum Type.
The :class:`.Enum` type provides a set of possible string values
which the column is constrained towards.
The :class:`.Enum` type will make use of the backend's native "ENUM"
type if one is available; otherwise, it uses a VARCHAR datatype and
produces a CHECK constraint. Use of the backend-native enum type
can be disabled using the :paramref:`.Enum.native_enum` flag, and
the production of the CHECK constraint is configurable using the
:paramref:`.Enum.create_constraint` flag.
The :class:`.Enum` type also provides in-Python validation of string
values during both read and write operations. When reading a value
from the database in a result set, the string value is always checked
against the list of possible values and a ``LookupError`` is raised
if no match is found. When passing a value to the database as a
plain string within a SQL statement, if the
:paramref:`.Enum.validate_strings` parameter is
set to True, a ``LookupError`` is raised for any string value that's
not located in the given list of possible values; note that this
impacts usage of LIKE expressions with enumerated values (an unusual
use case).
.. versionchanged:: 1.1 the :class:`.Enum` type now provides in-Python
validation of input values as well as on data being returned by
the database.
The source of enumerated values may be a list of string values, or
alternatively a PEP-435-compliant enumerated class. For the purposes
of the :class:`.Enum` datatype, this class need only provide a
``__members__`` attribute.
When using an enumerated class, the enumerated objects are used
both for input and output, rather than strings as is the case with
a plain-string enumerated type::
import enum
class MyEnum(enum.Enum):
one = 1
two = 2
three = 3
t = Table(
'data', MetaData(),
Column('value', Enum(MyEnum))
)
connection.execute(t.insert(), {"value": MyEnum.two})
assert connection.scalar(t.select()) is MyEnum.two
Above, the string names of each element, e.g. "one", "two", "three",
are persisted to the database; the values of the Python Enum, here
indicated as integers, are **not** used; the value of each enum can
therefore be any kind of Python object whether or not it is persistable.
In order to persist the values and not the names, the
:paramref:`.Enum.values_callable` parameter may be used. The value of
this parameter is a user-supplied callable, which is intended to be used
with a PEP-435-compliant enumerated class and returns a list of string
values to be persisted. For a simple enumeration that uses string values,
a callable such as ``lambda x: [e.value for e in x]`` is sufficient.
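A hedged sketch of persisting enum *values* rather than names, where the
``Color`` class is hypothetical::

    import enum

    from sqlalchemy import Column, Enum

    class Color(enum.Enum):
        red = "r"
        green = "g"

    # persists "r" / "g" rather than "red" / "green"
    color = Column(
        "color",
        Enum(Color, values_callable=lambda x: [e.value for e in x]),
    )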
.. versionadded:: 1.1 - support for PEP-435-style enumerated
classes.
.. seealso::
:class:`_postgresql.ENUM` - PostgreSQL-specific type,
which has additional functionality.
:class:`.mysql.ENUM` - MySQL-specific type
"""
__visit_name__ = "enum"
@util.deprecated_params(
convert_unicode=(
"1.3",
"The :paramref:`.Enum.convert_unicode` parameter is deprecated "
"and will be removed in a future release. All modern DBAPIs "
"now support Python Unicode directly and this parameter is "
"unnecessary.",
)
)
def __init__(self, *enums, **kw):
r"""Construct an enum.
Keyword arguments which don't apply to a specific backend are ignored
by that backend.
:param \*enums: either exactly one PEP-435 compliant enumerated type
or one or more string or unicode enumeration labels. If unicode
labels are present, the ``convert_unicode`` flag is auto-enabled.
.. versionadded:: 1.1 a PEP-435 style enumerated class may be
passed.
:param convert_unicode: Enable unicode-aware bind parameter and
result-set processing for this Enum's data. This is set
automatically based on the presence of unicode label strings.
:param create_constraint: defaults to True. When creating a non-native
enumerated type, also build a CHECK constraint on the database
against the valid values.
.. versionadded:: 1.1 - added :paramref:`.Enum.create_constraint`
which provides the option to disable the production of the
CHECK constraint for a non-native enumerated type.
:param metadata: Associate this type directly with a ``MetaData``
object. For types that exist on the target database as an
independent schema construct (PostgreSQL), this type will be
created and dropped within ``create_all()`` and ``drop_all()``
operations. If the type is not associated with any ``MetaData``
object, it will associate itself with each ``Table`` in which it is
used, and will be created when any of those individual tables are
created, after a check is performed for its existence. The type is
only dropped when ``drop_all()`` is called for that ``Table``
object's metadata, however.
:param name: The name of this type. This is required for PostgreSQL
and any future supported database which requires an explicitly
named type, or an explicitly named constraint in order to generate
the type and/or a table that uses it. If a PEP-435 enumerated
class was used, its name (converted to lower case) is used by
default.
:param native_enum: Use the database's native ENUM type when
available. Defaults to True. When False, uses VARCHAR + check
constraint for all backends. The VARCHAR length can be controlled
with :paramref:`.Enum.length`
:param length: Allows specifying a custom length for the VARCHAR
when :paramref:`.Enum.native_enum` is False. By default it uses the
length of the longest value.
.. versionadded:: 1.3.16
:param schema: Schema name of this type. For types that exist on the
target database as an independent schema construct (PostgreSQL),
this parameter specifies the named schema in which the type is
present.
.. note::
The ``schema`` of the :class:`.Enum` type does not
by default make use of the ``schema`` established on the
owning :class:`_schema.Table`. If this behavior is desired,
set the ``inherit_schema`` flag to ``True``.
:param quote: Set explicit quoting preferences for the type's name.
:param inherit_schema: When ``True``, the "schema" from the owning
:class:`_schema.Table`
will be copied to the "schema" attribute of this
:class:`.Enum`, replacing whatever value was passed for the
``schema`` attribute. This also takes effect when using the
:meth:`_schema.Table.tometadata` operation.
:param validate_strings: when True, string values that are being
passed to the database in a SQL statement will be checked
for validity against the list of enumerated values. Unrecognized
values will result in a ``LookupError`` being raised.
.. versionadded:: 1.1.0b2
:param values_callable: A callable which will be passed the PEP-435
compliant enumerated type, which should then return a list of string
values to be persisted. This allows for alternate usages such as
using the string value of an enum to be persisted to the database
instead of its name.
.. versionadded:: 1.2.3
:param sort_key_function: a Python callable which may be used as the
"key" argument in the Python ``sorted()`` built-in. The SQLAlchemy
ORM requires that primary key columns which are mapped must
be sortable in some way. When using an unsortable enumeration
object such as a Python 3 ``Enum`` object, this parameter may be
used to set a default sort key function for the objects. By
default, the database value of the enumeration is used as the
sorting function.
.. versionadded:: 1.3.8
"""
self._enum_init(enums, kw)
@property
def _enums_argument(self):
if self.enum_class is not None:
return [self.enum_class]
else:
return self.enums
def _enum_init(self, enums, kw):
"""internal init for :class:`.Enum` and subclasses.
friendly init helper used by subclasses to remove
all the Enum-specific keyword arguments from kw. Allows all
other arguments in kw to pass through.
"""
self.native_enum = kw.pop("native_enum", True)
self.create_constraint = kw.pop("create_constraint", True)
self.values_callable = kw.pop("values_callable", None)
self._sort_key_function = kw.pop("sort_key_function", NO_ARG)
length_arg = kw.pop("length", NO_ARG)
values, objects = self._parse_into_values(enums, kw)
self._setup_for_values(values, objects, kw)
convert_unicode = kw.pop("convert_unicode", None)
self.validate_strings = kw.pop("validate_strings", False)
if convert_unicode is None:
for e in self.enums:
# this is all py2k logic that can go away for py3k only,
# "expect unicode" will always be implicitly true
if isinstance(e, util.text_type):
_expect_unicode = True
break
else:
_expect_unicode = False
else:
_expect_unicode = convert_unicode
if self.enums:
length = max(len(x) for x in self.enums)
else:
length = 0
if not self.native_enum and length_arg is not NO_ARG:
if length_arg < length:
raise ValueError(
"When provided, length must be larger or equal"
" than the length of the longest enum value. %s < %s"
% (length_arg, length)
)
length = length_arg
self._valid_lookup[None] = self._object_lookup[None] = None
super(Enum, self).__init__(
length=length, _expect_unicode=_expect_unicode
)
if self.enum_class:
kw.setdefault("name", self.enum_class.__name__.lower())
SchemaType.__init__(
self,
name=kw.pop("name", None),
schema=kw.pop("schema", None),
metadata=kw.pop("metadata", None),
inherit_schema=kw.pop("inherit_schema", False),
quote=kw.pop("quote", None),
_create_events=kw.pop("_create_events", True),
)
def _parse_into_values(self, enums, kw):
if not enums and "_enums" in kw:
enums = kw.pop("_enums")
if len(enums) == 1 and hasattr(enums[0], "__members__"):
self.enum_class = enums[0]
members = self.enum_class.__members__
if self.values_callable:
values = self.values_callable(self.enum_class)
else:
values = list(members)
objects = [members[k] for k in members]
return values, objects
else:
self.enum_class = None
return enums, enums
def _setup_for_values(self, values, objects, kw):
self.enums = list(values)
self._valid_lookup = dict(zip(reversed(objects), reversed(values)))
self._object_lookup = dict(zip(values, objects))
self._valid_lookup.update(
[
(value, self._valid_lookup[self._object_lookup[value]])
for value in values
]
)
@property
def sort_key_function(self):
if self._sort_key_function is NO_ARG:
return self._db_value_for_elem
else:
return self._sort_key_function
@property
def native(self):
return self.native_enum
def _db_value_for_elem(self, elem):
try:
return self._valid_lookup[elem]
except KeyError as err:
# for unknown string values, we return as is. While we can
# validate these if we wanted, that does not allow for lesser-used
# end-user use cases, such as using a LIKE comparison with an enum,
# or for an application that wishes to apply string tests to an
# ENUM (see [ticket:3725]). While we can decide to differentiate
# here between an INSERT statement and a criteria used in a SELECT,
# for now we're staying conservative w/ behavioral changes (perhaps
# someone has a trigger that handles strings on INSERT)
if not self.validate_strings and isinstance(
elem, compat.string_types
):
return elem
else:
util.raise_(
LookupError(
'"%s" is not among the defined enum values' % elem
),
replace_context=err,
)
class Comparator(String.Comparator):
def _adapt_expression(self, op, other_comparator):
op, typ = super(Enum.Comparator, self)._adapt_expression(
op, other_comparator
)
if op is operators.concat_op:
typ = String(
self.type.length, _expect_unicode=self.type._expect_unicode
)
return op, typ
comparator_factory = Comparator
def _object_value_for_elem(self, elem):
try:
return self._object_lookup[elem]
except KeyError as err:
util.raise_(
LookupError(
'"%s" is not among the defined enum values' % elem
),
replace_context=err,
)
def __repr__(self):
return util.generic_repr(
self,
additional_kw=[("native_enum", True)],
to_inspect=[Enum, SchemaType],
)
def adapt_to_emulated(self, impltype, **kw):
kw.setdefault("_expect_unicode", self._expect_unicode)
kw.setdefault("validate_strings", self.validate_strings)
kw.setdefault("name", self.name)
kw.setdefault("schema", self.schema)
kw.setdefault("inherit_schema", self.inherit_schema)
kw.setdefault("metadata", self.metadata)
kw.setdefault("_create_events", False)
kw.setdefault("native_enum", self.native_enum)
kw.setdefault("values_callable", self.values_callable)
kw.setdefault("create_constraint", self.create_constraint)
kw.setdefault("length", self.length)
assert "_enums" in kw
return impltype(**kw)
def adapt(self, impltype, **kw):
kw["_enums"] = self._enums_argument
return super(Enum, self).adapt(impltype, **kw)
def _should_create_constraint(self, compiler, **kw):
if not self._is_impl_for_variant(compiler.dialect, kw):
return False
return (
not self.native_enum or not compiler.dialect.supports_native_enum
)
@util.dependencies("sqlalchemy.sql.schema")
def _set_table(self, schema, column, table):
SchemaType._set_table(self, column, table)
if not self.create_constraint:
return
variant_mapping = self._variant_mapping_for_set_table(column)
e = schema.CheckConstraint(
type_coerce(column, self).in_(self.enums),
name=_defer_name(self.name),
_create_rule=util.portable_instancemethod(
self._should_create_constraint,
{"variant_mapping": variant_mapping},
),
_type_bound=True,
)
assert e.table is table
def literal_processor(self, dialect):
parent_processor = super(Enum, self).literal_processor(dialect)
def process(value):
value = self._db_value_for_elem(value)
if parent_processor:
value = parent_processor(value)
return value
return process
def bind_processor(self, dialect):
parent_processor = super(Enum, self).bind_processor(dialect)
def process(value):
value = self._db_value_for_elem(value)
if parent_processor:
value = parent_processor(value)
return value
return process
def result_processor(self, dialect, coltype):
parent_processor = super(Enum, self).result_processor(dialect, coltype)
def process(value):
if parent_processor:
value = parent_processor(value)
value = self._object_value_for_elem(value)
return value
return process
def copy(self, **kw):
return SchemaType.copy(self, **kw)
@property
def python_type(self):
if self.enum_class:
return self.enum_class
else:
return super(Enum, self).python_type
class PickleType(TypeDecorator):
"""Holds Python objects, which are serialized using pickle.
PickleType builds upon the Binary type to apply Python's
``pickle.dumps()`` to incoming objects, and ``pickle.loads()`` on
the way out, allowing any pickleable Python object to be stored as
a serialized binary field.
To allow ORM change events to propagate for elements associated
with :class:`.PickleType`, see :ref:`mutable_toplevel`.
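A minimal usage sketch (the table and column names are hypothetical)::

    from sqlalchemy import Column, Integer, MetaData, PickleType, Table

    jobs = Table(
        "jobs", MetaData(),
        Column("id", Integer, primary_key=True),
        # any pickleable object, e.g. a dict, round-trips through pickle
        Column("payload", PickleType),
    )

    # connection.execute(jobs.insert(), {"payload": {"retries": 3}})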
"""
impl = LargeBinary
def __init__(
self, protocol=pickle.HIGHEST_PROTOCOL, pickler=None, comparator=None
):
"""
Construct a PickleType.
:param protocol: defaults to ``pickle.HIGHEST_PROTOCOL``.
:param pickler: defaults to cPickle.pickle or pickle.pickle if
cPickle is not available. May be any object with
pickle-compatible ``dumps`` and ``loads`` methods.
:param comparator: a 2-arg callable predicate used
to compare values of this type. If left as ``None``,
the Python "equals" operator is used to compare values.
"""
self.protocol = protocol
self.pickler = pickler or pickle
self.comparator = comparator
super(PickleType, self).__init__()
def __reduce__(self):
return PickleType, (self.protocol, None, self.comparator)
def bind_processor(self, dialect):
impl_processor = self.impl.bind_processor(dialect)
dumps = self.pickler.dumps
protocol = self.protocol
if impl_processor:
def process(value):
if value is not None:
value = dumps(value, protocol)
return impl_processor(value)
else:
def process(value):
if value is not None:
value = dumps(value, protocol)
return value
return process
def result_processor(self, dialect, coltype):
impl_processor = self.impl.result_processor(dialect, coltype)
loads = self.pickler.loads
if impl_processor:
def process(value):
value = impl_processor(value)
if value is None:
return None
return loads(value)
else:
def process(value):
if value is None:
return None
return loads(value)
return process
def compare_values(self, x, y):
if self.comparator:
return self.comparator(x, y)
else:
return x == y
class Boolean(Emulated, TypeEngine, SchemaType):
"""A bool datatype.
:class:`.Boolean` typically uses BOOLEAN or SMALLINT on the DDL side,
and on the Python side deals in ``True`` or ``False``.
The :class:`.Boolean` datatype currently has two levels of assertion
that the values persisted are simple true/false values. For all
backends, only the Python values ``None``, ``True``, ``False``, ``1``
or ``0`` are accepted as parameter values. For those backends that
don't support a "native boolean" datatype, a CHECK constraint is also
created on the target column. Production of the CHECK constraint
can be disabled by passing the :paramref:`.Boolean.create_constraint`
flag set to ``False``.
.. versionchanged:: 1.2 the :class:`.Boolean` datatype now asserts that
incoming Python values are already in pure boolean form.
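A minimal usage sketch (the column and constraint names are
hypothetical)::

    from sqlalchemy import Boolean, Column

    # on backends without a native boolean, renders SMALLINT plus a
    # CHECK (is_active IN (0, 1)) constraint by default
    is_active = Column("is_active", Boolean(name="is_active_bool"))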
"""
__visit_name__ = "boolean"
native = True
def __init__(self, create_constraint=True, name=None, _create_events=True):
"""Construct a Boolean.
:param create_constraint: defaults to True. If the boolean
is generated as an int/smallint, also create a CHECK constraint
on the table that ensures 1 or 0 as a value.
:param name: if a CHECK constraint is generated, specify
the name of the constraint.
"""
self.create_constraint = create_constraint
self.name = name
self._create_events = _create_events
def _should_create_constraint(self, compiler, **kw):
if not self._is_impl_for_variant(compiler.dialect, kw):
return False
return (
not compiler.dialect.supports_native_boolean
and compiler.dialect.non_native_boolean_check_constraint
)
@util.dependencies("sqlalchemy.sql.schema")
def _set_table(self, schema, column, table):
if not self.create_constraint:
return
variant_mapping = self._variant_mapping_for_set_table(column)
e = schema.CheckConstraint(
type_coerce(column, self).in_([0, 1]),
name=_defer_name(self.name),
_create_rule=util.portable_instancemethod(
self._should_create_constraint,
{"variant_mapping": variant_mapping},
),
_type_bound=True,
)
assert e.table is table
@property
def python_type(self):
return bool
_strict_bools = frozenset([None, True, False])
def _strict_as_bool(self, value):
if value not in self._strict_bools:
if not isinstance(value, int):
raise TypeError("Not a boolean value: %r" % value)
else:
raise ValueError(
"Value %r is not None, True, or False" % value
)
return value
def literal_processor(self, dialect):
compiler = dialect.statement_compiler(dialect, None)
true = compiler.visit_true(None)
false = compiler.visit_false(None)
def process(value):
return true if self._strict_as_bool(value) else false
return process
def bind_processor(self, dialect):
_strict_as_bool = self._strict_as_bool
if dialect.supports_native_boolean:
_coerce = bool
else:
_coerce = int
def process(value):
value = _strict_as_bool(value)
if value is not None:
value = _coerce(value)
return value
return process
def result_processor(self, dialect, coltype):
if dialect.supports_native_boolean:
return None
else:
return processors.int_to_boolean
class _AbstractInterval(_LookupExpressionAdapter, TypeEngine):
@util.memoized_property
def _expression_adaptations(self):
# Based on http://www.postgresql.org/docs/current/\
# static/functions-datetime.html.
return {
operators.add: {
Date: DateTime,
Interval: self.__class__,
DateTime: DateTime,
Time: Time,
},
operators.sub: {Interval: self.__class__},
operators.mul: {Numeric: self.__class__},
operators.truediv: {Numeric: self.__class__},
operators.div: {Numeric: self.__class__},
}
@property
def _type_affinity(self):
return Interval
def coerce_compared_value(self, op, value):
"""See :meth:`.TypeEngine.coerce_compared_value` for a description."""
return self.impl.coerce_compared_value(op, value)
class Interval(Emulated, _AbstractInterval, TypeDecorator):
"""A type for ``datetime.timedelta()`` objects.
The Interval type deals with ``datetime.timedelta`` objects. In
PostgreSQL, the native ``INTERVAL`` type is used; for others, the
value is stored as a date which is relative to the "epoch"
(Jan. 1, 1970).
Note that the ``Interval`` type does not currently provide date arithmetic
operations on platforms which do not support interval types natively. Such
operations usually require transformation of both sides of the expression
(such as, conversion of both sides into integer epoch values first) which
currently is a manual procedure (such as via
:attr:`~sqlalchemy.sql.expression.func`).
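A minimal round-trip sketch, assuming ``conn`` and ``t`` are an existing
connection and table with a single ``Interval`` column named
``duration``::

    import datetime

    conn.execute(t.insert(), {"duration": datetime.timedelta(hours=2)})
    value = conn.scalar(t.select())   # returns a datetime.timedelta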
"""
impl = DateTime
epoch = dt.datetime.utcfromtimestamp(0)
def __init__(self, native=True, second_precision=None, day_precision=None):
"""Construct an Interval object.
:param native: when True, use the actual
INTERVAL type provided by the database, if
supported (currently PostgreSQL, Oracle).
Otherwise, represent the interval data as
an epoch value regardless.
:param second_precision: For native interval types
which support a "fractional seconds precision" parameter,
i.e. Oracle and PostgreSQL
:param day_precision: for native interval types which
support a "day precision" parameter, i.e. Oracle.
"""
super(Interval, self).__init__()
self.native = native
self.second_precision = second_precision
self.day_precision = day_precision
@property
def python_type(self):
return dt.timedelta
def adapt_to_emulated(self, impltype, **kw):
return _AbstractInterval.adapt(self, impltype, **kw)
def bind_processor(self, dialect):
impl_processor = self.impl.bind_processor(dialect)
epoch = self.epoch
if impl_processor:
def process(value):
if value is not None:
value = epoch + value
return impl_processor(value)
else:
def process(value):
if value is not None:
value = epoch + value
return value
return process
def result_processor(self, dialect, coltype):
impl_processor = self.impl.result_processor(dialect, coltype)
epoch = self.epoch
if impl_processor:
def process(value):
value = impl_processor(value)
if value is None:
return None
return value - epoch
else:
def process(value):
if value is None:
return None
return value - epoch
return process
class JSON(Indexable, TypeEngine):
"""Represent a SQL JSON type.
.. note:: :class:`_types.JSON`
is provided as a facade for vendor-specific
JSON types. Since it supports JSON SQL operations, it only
works on backends that have an actual JSON type, currently:
* PostgreSQL
* MySQL as of version 5.7 (MariaDB as of the 10.2 series does not)
* SQLite as of version 3.9
:class:`_types.JSON` is part of the Core in support of the growing
popularity of native JSON datatypes.
The :class:`_types.JSON` type stores arbitrary JSON format data, e.g.::
data_table = Table('data_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', JSON)
)
with engine.connect() as conn:
conn.execute(
data_table.insert(),
data={"key1": "value1", "key2": "value2"}
)
**JSON-Specific Expression Operators**
The :class:`_types.JSON`
datatype provides these additional SQL operations:
* Keyed index operations::
data_table.c.data['some key']
* Integer index operations::
data_table.c.data[3]
* Path index operations::
data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')]
* Data casters for specific JSON element types, subsequent to an index
or path operation being invoked::
data_table.c.data["some key"].as_integer()
.. versionadded:: 1.3.11
Additional operations may be available from the dialect-specific versions
of :class:`_types.JSON`, such as :class:`_postgresql.JSON` and
:class:`_postgresql.JSONB` which both offer additional PostgreSQL-specific
operations.
**Casting JSON Elements to Other Types**
Index operations, i.e. those invoked by calling upon the expression using
the Python bracket operator as in ``some_column['some key']``, return an
expression object whose type defaults to :class:`_types.JSON` by default,
so that
further JSON-oriented instructions may be called upon the result type.
However, it is likely more common that an index operation is expected
to return a specific scalar element, such as a string or integer. In
order to provide access to these elements in a backend-agnostic way,
a series of data casters are provided:
* :meth:`.JSON.Comparator.as_string` - return the element as a string
* :meth:`.JSON.Comparator.as_boolean` - return the element as a boolean
* :meth:`.JSON.Comparator.as_float` - return the element as a float
* :meth:`.JSON.Comparator.as_integer` - return the element as an integer
These data casters are implemented by supporting dialects in order to
assure that comparisons to the above types will work as expected, such as::
# integer comparison
data_table.c.data["some_integer_key"].as_integer() == 5
# boolean comparison
data_table.c.data["some_boolean"].as_boolean() == True
.. versionadded:: 1.3.11 Added type-specific casters for the basic JSON
data element types.
.. note::
The data caster functions are new in version 1.3.11, and supersede
the previous documented approaches of using CAST; for reference,
this looked like::
from sqlalchemy import cast, type_coerce
from sqlalchemy import String, JSON
cast(
data_table.c.data['some_key'], String
) == type_coerce(55, JSON)
The above case now works directly as::
data_table.c.data['some_key'].as_integer() == 5
For details on the previous comparison approach within the 1.3.x
series, see the documentation for SQLAlchemy 1.2 or the included HTML
files in the doc/ directory of the version's distribution.
**Detecting Changes in JSON columns when using the ORM**
The :class:`_types.JSON` type, when used with the SQLAlchemy ORM, does not
detect in-place mutations to the structure. In order to detect these, the
:mod:`sqlalchemy.ext.mutable` extension must be used. This extension will
allow "in-place" changes to the datastructure to produce events which
will be detected by the unit of work. See the example at :class:`.HSTORE`
for a simple example involving a dictionary.
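A hedged sketch of that mutable-extension pattern (the column name is
hypothetical)::

    from sqlalchemy import Column, JSON
    from sqlalchemy.ext.mutable import MutableDict

    # in-place changes to the dict now flag the attribute as dirty
    data = Column("data", MutableDict.as_mutable(JSON))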
**Support for JSON null vs. SQL NULL**
When working with NULL values, the :class:`_types.JSON`
type recommends the
use of two specific constants in order to differentiate between a column
that evaluates to SQL NULL, e.g. no value, vs. the JSON-encoded string
of ``"null"``. To insert or select against a value that is SQL NULL,
use the constant :func:`.null`::
from sqlalchemy import null
conn.execute(table.insert(), json_value=null())
To insert or select against a value that is JSON ``"null"``, use the
constant :attr:`_types.JSON.NULL`::
conn.execute(table.insert(), json_value=JSON.NULL)
The :class:`_types.JSON` type supports a flag
:paramref:`_types.JSON.none_as_null` which when set to True will result
in the Python constant ``None`` evaluating to the value of SQL
NULL, and when set to False results in the Python constant
``None`` evaluating to the value of JSON ``"null"``. The Python
value ``None`` may be used in conjunction with either
:attr:`_types.JSON.NULL` and :func:`.null` in order to indicate NULL
values, but care must be taken as to the value of the
:paramref:`_types.JSON.none_as_null` in these cases.
**Customizing the JSON Serializer**
The JSON serializer and deserializer used by :class:`_types.JSON`
defaults to
Python's ``json.dumps`` and ``json.loads`` functions; in the case of the
psycopg2 dialect, psycopg2 may be using its own custom loader function.
In order to affect the serializer / deserializer, they are currently
configurable at the :func:`_sa.create_engine` level via the
:paramref:`_sa.create_engine.json_serializer` and
:paramref:`_sa.create_engine.json_deserializer` parameters. For example,
to turn off ``ensure_ascii``::
engine = create_engine(
"sqlite://",
json_serializer=lambda obj: json.dumps(obj, ensure_ascii=False))
.. versionchanged:: 1.3.7
SQLite dialect's ``json_serializer`` and ``json_deserializer``
parameters renamed from ``_json_serializer`` and
``_json_deserializer``.
.. seealso::
:class:`_postgresql.JSON`
:class:`_postgresql.JSONB`
:class:`.mysql.JSON`
:class:`_sqlite.JSON`
.. versionadded:: 1.1
"""
__visit_name__ = "JSON"
hashable = False
NULL = util.symbol("JSON_NULL")
"""Describe the json value of NULL.
This value is used to force the JSON value of ``"null"`` to be
used as the value. A value of Python ``None`` will be recognized
either as SQL NULL or JSON ``"null"``, based on the setting
of the :paramref:`_types.JSON.none_as_null` flag; the
:attr:`_types.JSON.NULL`
constant can be used to always resolve to JSON ``"null"`` regardless
of this setting. This is in contrast to the :func:`_expression.null`
construct,
which always resolves to SQL NULL. E.g.::
from sqlalchemy import null
from sqlalchemy.dialects.postgresql import JSON
# will *always* insert SQL NULL
obj1 = MyObject(json_value=null())
# will *always* insert JSON string "null"
obj2 = MyObject(json_value=JSON.NULL)
session.add_all([obj1, obj2])
session.commit()
In order to set JSON NULL as a default value for a column, the most
transparent method is to use :func:`_expression.text`::
Table(
'my_table', metadata,
Column('json_data', JSON, default=text("'null'"))
)
While it is possible to use :attr:`_types.JSON.NULL` in this context, the
:attr:`_types.JSON.NULL` value will be returned as the value of the
column, which in the context of the ORM or other repurposing of the default
value, may not be desirable. Using a SQL expression means the value
will be re-fetched from the database within the context of retrieving
generated defaults.
"""
def __init__(self, none_as_null=False):
"""Construct a :class:`_types.JSON` type.
:param none_as_null=False: if True, persist the value ``None`` as a
SQL NULL value, not the JSON encoding of ``null``. Note that
when this flag is False, the :func:`.null` construct can still
be used to persist a NULL value::
from sqlalchemy import null
conn.execute(table.insert(), data=null())
.. note::
:paramref:`_types.JSON.none_as_null` does **not** apply to the
values passed to :paramref:`_schema.Column.default` and
:paramref:`_schema.Column.server_default`; a value of ``None``
passed for these parameters means "no default present".
.. seealso::
:attr:`.types.JSON.NULL`
"""
self.none_as_null = none_as_null
class JSONElementType(TypeEngine):
"""common function for index / path elements in a JSON expression."""
_integer = Integer()
_string = String()
def string_bind_processor(self, dialect):
return self._string._cached_bind_processor(dialect)
def string_literal_processor(self, dialect):
return self._string._cached_literal_processor(dialect)
def bind_processor(self, dialect):
int_processor = self._integer._cached_bind_processor(dialect)
string_processor = self.string_bind_processor(dialect)
def process(value):
if int_processor and isinstance(value, int):
value = int_processor(value)
elif string_processor and isinstance(value, util.string_types):
value = string_processor(value)
return value
return process
def literal_processor(self, dialect):
int_processor = self._integer._cached_literal_processor(dialect)
string_processor = self.string_literal_processor(dialect)
def process(value):
if int_processor and isinstance(value, int):
value = int_processor(value)
elif string_processor and isinstance(value, util.string_types):
value = string_processor(value)
return value
return process
class JSONIndexType(JSONElementType):
"""Placeholder for the datatype of a JSON index value.
This allows execution-time processing of JSON index values
for special syntaxes.
"""
class JSONPathType(JSONElementType):
"""Placeholder type for JSON path operations.
This allows execution-time processing of a path-based
index value into a specific SQL syntax.
"""
class Comparator(Indexable.Comparator, Concatenable.Comparator):
"""Define comparison operations for :class:`_types.JSON`."""
@util.dependencies("sqlalchemy.sql.default_comparator")
def _setup_getitem(self, default_comparator, index):
if not isinstance(index, util.string_types) and isinstance(
index, compat.collections_abc.Sequence
):
index = default_comparator._check_literal(
self.expr,
operators.json_path_getitem_op,
index,
bindparam_type=JSON.JSONPathType,
)
operator = operators.json_path_getitem_op
else:
index = default_comparator._check_literal(
self.expr,
operators.json_getitem_op,
index,
bindparam_type=JSON.JSONIndexType,
)
operator = operators.json_getitem_op
return operator, index, self.type
def as_boolean(self):
"""Cast an indexed value as boolean.
e.g.::
stmt = select([
mytable.c.json_column['some_data'].as_boolean()
]).where(
mytable.c.json_column['some_data'].as_boolean() == True
)
.. versionadded:: 1.3.11
"""
return self._binary_w_type(Boolean(), "as_boolean")
def as_string(self):
"""Cast an indexed value as string.
e.g.::
stmt = select([
mytable.c.json_column['some_data'].as_string()
]).where(
mytable.c.json_column['some_data'].as_string() ==
'some string'
)
.. versionadded:: 1.3.11
"""
return self._binary_w_type(String(), "as_string")
def as_integer(self):
"""Cast an indexed value as integer.
e.g.::
stmt = select([
mytable.c.json_column['some_data'].as_integer()
]).where(
mytable.c.json_column['some_data'].as_integer() == 5
)
.. versionadded:: 1.3.11
"""
return self._binary_w_type(Integer(), "as_integer")
def as_float(self):
"""Cast an indexed value as float.
e.g.::
stmt = select([
mytable.c.json_column['some_data'].as_float()
]).where(
mytable.c.json_column['some_data'].as_float() == 29.75
)
.. versionadded:: 1.3.11
"""
# note there's no Numeric or Decimal support here yet
return self._binary_w_type(Float(), "as_float")
def as_json(self):
"""Cast an indexed value as JSON.
This is the default behavior of indexed elements in any case.
Note that comparison of full JSON structures may not be
supported by all backends.
.. versionadded:: 1.3.11
"""
return self.expr
def _binary_w_type(self, typ, method_name):
if not isinstance(
self.expr, elements.BinaryExpression
) or self.expr.operator not in (
operators.json_getitem_op,
operators.json_path_getitem_op,
):
raise exc.InvalidRequestError(
"The JSON cast operator JSON.%s() only works with a JSON "
"index expression e.g. col['q'].%s()"
% (method_name, method_name)
)
expr = self.expr._clone()
expr.type = typ
return expr
comparator_factory = Comparator
@property
def python_type(self):
return dict
@property
def should_evaluate_none(self):
"""Alias of :attr:`_types.JSON.none_as_null`"""
return not self.none_as_null
@should_evaluate_none.setter
def should_evaluate_none(self, value):
self.none_as_null = not value
@util.memoized_property
def _str_impl(self):
return String(_expect_unicode=True)
def bind_processor(self, dialect):
string_process = self._str_impl.bind_processor(dialect)
json_serializer = dialect._json_serializer or json.dumps
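# Note on the conversion below: the JSON.NULL sentinel is serialized
# as the JSON 'null' value, while sql.null() or a plain Python None
# (when none_as_null=True) is persisted as SQL NULL.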
def process(value):
if value is self.NULL:
value = None
elif isinstance(value, elements.Null) or (
value is None and self.none_as_null
):
return None
serialized = json_serializer(value)
if string_process:
serialized = string_process(serialized)
return serialized
return process
def result_processor(self, dialect, coltype):
string_process = self._str_impl.result_processor(dialect, coltype)
json_deserializer = dialect._json_deserializer or json.loads
def process(value):
if value is None:
return None
if string_process:
value = string_process(value)
return json_deserializer(value)
return process
class ARRAY(SchemaEventTarget, Indexable, Concatenable, TypeEngine):
"""Represent a SQL Array type.
.. note:: This type serves as the basis for all ARRAY operations.
However, currently **only the PostgreSQL backend has support
for SQL arrays in SQLAlchemy**. It is recommended to use the
:class:`_postgresql.ARRAY` type directly when using ARRAY types
with PostgreSQL, as it provides additional operators specific
to that backend.
:class:`_types.ARRAY` is part of the Core in support of various SQL
standard functions such as :class:`_functions.array_agg`
which explicitly involve
arrays; however, with the exception of the PostgreSQL backend and possibly
some third-party dialects, no other SQLAlchemy built-in dialect has support
for this type.
An :class:`_types.ARRAY` type is constructed given the "type"
of element::
mytable = Table("mytable", metadata,
Column("data", ARRAY(Integer))
)
The above type represents an N-dimensional array,
meaning a supporting backend such as PostgreSQL will interpret values
with any number of dimensions automatically. To produce an INSERT
construct that passes in a 1-dimensional array of integers::
connection.execute(
mytable.insert(),
data=[1,2,3]
)
The :class:`_types.ARRAY` type can be constructed given a fixed number
of dimensions::
mytable = Table("mytable", metadata,
Column("data", ARRAY(Integer, dimensions=2))
)
Sending a number of dimensions is optional, but recommended if the
datatype is to represent arrays of more than one dimension. This number
is used:
* When emitting the type declaration itself to the database, e.g.
``INTEGER[][]``
* When translating Python values to database values, and vice versa, e.g.
an ARRAY of :class:`.Unicode` objects uses this number to efficiently
access the string values inside of array structures without resorting
to per-row type inspection
* When used with the Python ``getitem`` accessor, the number of dimensions
serves to define the kind of type that the ``[]`` operator should
return, e.g. for an ARRAY of INTEGER with two dimensions::
>>> expr = table.c.column[5] # returns ARRAY(Integer, dimensions=1)
>>> expr = expr[6] # returns Integer
For 1-dimensional arrays, an :class:`_types.ARRAY` instance with no
dimension parameter will generally assume single-dimensional behaviors.
SQL expressions of type :class:`_types.ARRAY` have support for "index" and
"slice" behavior. The Python ``[]`` operator works normally here, given
integer indexes or slices. Arrays default to 1-based indexing.
The operator produces binary expression
constructs which will produce the appropriate SQL, both for
SELECT statements::
select([mytable.c.data[5], mytable.c.data[2:7]])
as well as UPDATE statements when the :meth:`_expression.Update.values`
method
is used::
mytable.update().values({
mytable.c.data[5]: 7,
mytable.c.data[2:7]: [1, 2, 3]
})
The :class:`_types.ARRAY` type also provides for the operators
:meth:`.types.ARRAY.Comparator.any` and
:meth:`.types.ARRAY.Comparator.all`. The PostgreSQL-specific version of
:class:`_types.ARRAY` also provides additional operators.
.. versionadded:: 1.1.0
.. seealso::
:class:`_postgresql.ARRAY`
"""
__visit_name__ = "ARRAY"
zero_indexes = False
"""if True, Python zero-based indexes should be interpreted as one-based
on the SQL expression side."""
class Comparator(Indexable.Comparator, Concatenable.Comparator):
"""Define comparison operations for :class:`_types.ARRAY`.
More operators are available on the dialect-specific form
of this type. See :class:`.postgresql.ARRAY.Comparator`.
"""
def _setup_getitem(self, index):
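# A Python slice becomes a Slice construct of bound integer
# parameters and keeps the ARRAY type; a scalar index peels off one
# dimension, resolving to the item type once the declared dimensions
# are exhausted. zero_indexes shifts Python indexes to 1-based SQL.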
if isinstance(index, slice):
return_type = self.type
if self.type.zero_indexes:
index = slice(index.start + 1, index.stop + 1, index.step)
index = Slice(
_literal_as_binds(
index.start,
name=self.expr.key,
type_=type_api.INTEGERTYPE,
),
_literal_as_binds(
index.stop,
name=self.expr.key,
type_=type_api.INTEGERTYPE,
),
_literal_as_binds(
index.step,
name=self.expr.key,
type_=type_api.INTEGERTYPE,
),
)
else:
if self.type.zero_indexes:
index += 1
if self.type.dimensions is None or self.type.dimensions == 1:
return_type = self.type.item_type
else:
adapt_kw = {"dimensions": self.type.dimensions - 1}
return_type = self.type.adapt(
self.type.__class__, **adapt_kw
)
return operators.getitem, index, return_type
def contains(self, *arg, **kw):
raise NotImplementedError(
"ARRAY.contains() not implemented for the base "
"ARRAY type; please use the dialect-specific ARRAY type"
)
@util.dependencies("sqlalchemy.sql.elements")
def any(self, elements, other, operator=None):
"""Return ``other operator ANY (array)`` clause.
Argument places are switched, because ANY requires the array
expression to be on the right-hand side.
E.g.::
from sqlalchemy.sql import operators
conn.execute(
select([table.c.data]).where(
table.c.data.any(7, operator=operators.lt)
)
)
:param other: expression to be compared
:param operator: an operator object from the
:mod:`sqlalchemy.sql.operators`
package, defaults to :func:`.operators.eq`.
.. seealso::
:func:`_expression.any_`
:meth:`.types.ARRAY.Comparator.all`
"""
operator = operator if operator else operators.eq
return operator(
elements._literal_as_binds(other),
elements.CollectionAggregate._create_any(self.expr),
)
@util.dependencies("sqlalchemy.sql.elements")
def all(self, elements, other, operator=None):
"""Return ``other operator ALL (array)`` clause.
Argument places are switched, because ALL requires the array
expression to be on the right-hand side.
E.g.::
from sqlalchemy.sql import operators
conn.execute(
select([table.c.data]).where(
table.c.data.all(7, operator=operators.lt)
)
)
:param other: expression to be compared
:param operator: an operator object from the
:mod:`sqlalchemy.sql.operators`
package, defaults to :func:`.operators.eq`.
.. seealso::
:func:`_expression.all_`
:meth:`.types.ARRAY.Comparator.any`
"""
operator = operator if operator else operators.eq
return operator(
elements._literal_as_binds(other),
elements.CollectionAggregate._create_all(self.expr),
)
comparator_factory = Comparator
def __init__(
self, item_type, as_tuple=False, dimensions=None, zero_indexes=False
):
"""Construct an :class:`_types.ARRAY`.
E.g.::
Column('myarray', ARRAY(Integer))
Arguments are:
:param item_type: The data type of items of this array. Note that
dimensionality is irrelevant here, so multi-dimensional arrays like
``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as
``ARRAY(ARRAY(Integer))`` or such.
:param as_tuple=False: Specify whether return results
should be converted to tuples from lists. This parameter is
not generally needed as a Python list corresponds well
to a SQL array.
:param dimensions: if non-None, the ARRAY will assume a fixed
number of dimensions. This impacts how the array is declared
on the database, how it goes about interpreting Python and
result values, as well as how expression behavior in conjunction
with the "getitem" operator works. See the description at
:class:`_types.ARRAY` for additional detail.
:param zero_indexes=False: when True, index values will be converted
between Python zero-based and SQL one-based indexes, e.g.
a value of one will be added to all index values before passing
to the database.
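For illustration, a short sketch (the table and column names are
hypothetical)::
mytable = Table("mytable", metadata,
Column("data", ARRAY(Integer, zero_indexes=True))
)
expr = mytable.c.data[0]  # the Python index 0 is sent to the database as 1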
"""
if isinstance(item_type, ARRAY):
raise ValueError(
"Do not nest ARRAY types; ARRAY(basetype) "
"handles multi-dimensional arrays of basetype"
)
if isinstance(item_type, type):
item_type = item_type()
self.item_type = item_type
self.as_tuple = as_tuple
self.dimensions = dimensions
self.zero_indexes = zero_indexes
@property
def hashable(self):
return self.as_tuple
@property
def python_type(self):
return list
def compare_values(self, x, y):
return x == y
def _set_parent(self, column):
"""Support SchemaEventTarget"""
if isinstance(self.item_type, SchemaEventTarget):
self.item_type._set_parent(column)
def _set_parent_with_dispatch(self, parent):
"""Support SchemaEventTarget"""
super(ARRAY, self)._set_parent_with_dispatch(parent)
if isinstance(self.item_type, SchemaEventTarget):
self.item_type._set_parent_with_dispatch(parent)
class REAL(Float):
"""The SQL REAL type."""
__visit_name__ = "REAL"
class FLOAT(Float):
"""The SQL FLOAT type."""
__visit_name__ = "FLOAT"
class NUMERIC(Numeric):
"""The SQL NUMERIC type."""
__visit_name__ = "NUMERIC"
class DECIMAL(Numeric):
"""The SQL DECIMAL type."""
__visit_name__ = "DECIMAL"
class INTEGER(Integer):
"""The SQL INT or INTEGER type."""
__visit_name__ = "INTEGER"
INT = INTEGER
class SMALLINT(SmallInteger):
"""The SQL SMALLINT type."""
__visit_name__ = "SMALLINT"
class BIGINT(BigInteger):
"""The SQL BIGINT type."""
__visit_name__ = "BIGINT"
class TIMESTAMP(DateTime):
"""The SQL TIMESTAMP type.
:class:`_types.TIMESTAMP` datatypes have support for timezone
storage on some backends, such as PostgreSQL and Oracle. Use the
:paramref:`~types.TIMESTAMP.timezone` argument in order to enable
"TIMESTAMP WITH TIMEZONE" for these backends.
"""
__visit_name__ = "TIMESTAMP"
def __init__(self, timezone=False):
"""Construct a new :class:`_types.TIMESTAMP`.
:param timezone: boolean. Indicates that the TIMESTAMP type should
enable timezone support, if available on the target database.
On a per-dialect basis, this is similar to "TIMESTAMP WITH TIMEZONE".
If the target database does not support timezones, this flag is
ignored.
"""
super(TIMESTAMP, self).__init__(timezone=timezone)
def get_dbapi_type(self, dbapi):
return dbapi.TIMESTAMP
class DATETIME(DateTime):
"""The SQL DATETIME type."""
__visit_name__ = "DATETIME"
class DATE(Date):
"""The SQL DATE type."""
__visit_name__ = "DATE"
class TIME(Time):
"""The SQL TIME type."""
__visit_name__ = "TIME"
class TEXT(Text):
"""The SQL TEXT type."""
__visit_name__ = "TEXT"
class CLOB(Text):
"""The CLOB type.
This type is found in Oracle and Informix.
"""
__visit_name__ = "CLOB"
class VARCHAR(String):
"""The SQL VARCHAR type."""
__visit_name__ = "VARCHAR"
class NVARCHAR(Unicode):
"""The SQL NVARCHAR type."""
__visit_name__ = "NVARCHAR"
class CHAR(String):
"""The SQL CHAR type."""
__visit_name__ = "CHAR"
class NCHAR(Unicode):
"""The SQL NCHAR type."""
__visit_name__ = "NCHAR"
class BLOB(LargeBinary):
"""The SQL BLOB type."""
__visit_name__ = "BLOB"
class BINARY(_Binary):
"""The SQL BINARY type."""
__visit_name__ = "BINARY"
class VARBINARY(_Binary):
"""The SQL VARBINARY type."""
__visit_name__ = "VARBINARY"
class BOOLEAN(Boolean):
"""The SQL BOOLEAN type."""
__visit_name__ = "BOOLEAN"
class NullType(TypeEngine):
"""An unknown type.
:class:`.NullType` is used as a default type for those cases where
a type cannot be determined, including:
* During table reflection, when the type of a column is not recognized
by the :class:`.Dialect`
* When constructing SQL expressions using plain Python objects of
unknown types (e.g. ``somecolumn == my_special_object``)
* When a new :class:`_schema.Column` is created,
and the given type is passed
as ``None`` or is not passed at all.
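For example, a value of unknown type passed to :func:`.literal`
produces a bind parameter typed as :class:`.NullType` (a sketch; the
name ``my_special_object`` is illustrative)::
expr = literal(my_special_object)  # expr.type is NullType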
The :class:`.NullType` can be used within SQL expression invocation
without issue; it simply has no behavior either at the expression
construction level or at the bind-parameter/result processing level.
:class:`.NullType` will result in a :exc:`.CompileError` if the compiler
is asked to render the type itself, such as if it is used in a
:func:`.cast` operation or within a schema creation operation such as that
invoked by :meth:`_schema.MetaData.create_all` or the
:class:`.CreateTable`
construct.
"""
__visit_name__ = "null"
_isnull = True
hashable = False
def literal_processor(self, dialect):
def process(value):
return "NULL"
return process
class Comparator(TypeEngine.Comparator):
def _adapt_expression(self, op, other_comparator):
if isinstance(
other_comparator, NullType.Comparator
) or not operators.is_commutative(op):
return op, self.expr.type
else:
return other_comparator._adapt_expression(op, self)
comparator_factory = Comparator
class MatchType(Boolean):
"""Refers to the return type of the MATCH operator.
As the :meth:`.ColumnOperators.match` is probably the most open-ended
operator in generic SQLAlchemy Core, we can't assume the return type
at SQL evaluation time, as MySQL returns a floating point, not a boolean,
and other backends might do something different. So this type
acts as a placeholder, currently subclassing :class:`.Boolean`.
The type allows dialects to inject result-processing functionality
if needed, and on MySQL will return floating-point values.
.. versionadded:: 1.0.0
"""
NULLTYPE = NullType()
BOOLEANTYPE = Boolean()
STRINGTYPE = String()
INTEGERTYPE = Integer()
MATCHTYPE = MatchType()
_type_map = {
int: Integer(),
float: Float(),
bool: BOOLEANTYPE,
decimal.Decimal: Numeric(),
dt.date: Date(),
dt.datetime: DateTime(),
dt.time: Time(),
dt.timedelta: Interval(),
util.NoneType: NULLTYPE,
}
if util.py3k:
_type_map[bytes] = LargeBinary() # noqa
_type_map[str] = Unicode()
else:
_type_map[unicode] = Unicode() # noqa
_type_map[str] = String()
_type_map_get = _type_map.get
def _resolve_value_to_type(value):
_result_type = _type_map_get(type(value), False)
if _result_type is False:
# use inspect() to detect SQLAlchemy built-in
# objects.
insp = inspection.inspect(value, False)
if (
insp is not None
and
# foil mock.Mock() and other impostors by ensuring
# the inspection target itself self-inspects
insp.__class__ in inspection._registrars
):
raise exc.ArgumentError(
"Object %r is not legal as a SQL literal value" % value
)
return NULLTYPE
else:
return _result_type
# back-assign to type_api
type_api.BOOLEANTYPE = BOOLEANTYPE
type_api.STRINGTYPE = STRINGTYPE
type_api.INTEGERTYPE = INTEGERTYPE
type_api.NULLTYPE = NULLTYPE
type_api.MATCHTYPE = MATCHTYPE
type_api.INDEXABLE = Indexable
type_api._resolve_value_to_type = _resolve_value_to_type
TypeEngine.Comparator.BOOLEANTYPE = BOOLEANTYPE
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/sql/operators.py
|
# sql/operators.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines operators used in SQL expressions."""
from operator import add
from operator import and_
from operator import contains
from operator import eq
from operator import ge
from operator import getitem
from operator import gt
from operator import inv
from operator import le
from operator import lshift
from operator import lt
from operator import mod
from operator import mul
from operator import ne
from operator import neg
from operator import or_
from operator import rshift
from operator import sub
from operator import truediv
from .. import util
if util.py2k:
from operator import div
else:
div = truediv
class Operators(object):
"""Base of comparison and logical operators.
Implements base methods
:meth:`~sqlalchemy.sql.operators.Operators.operate` and
:meth:`~sqlalchemy.sql.operators.Operators.reverse_operate`, as well as
:meth:`~sqlalchemy.sql.operators.Operators.__and__`,
:meth:`~sqlalchemy.sql.operators.Operators.__or__`,
:meth:`~sqlalchemy.sql.operators.Operators.__invert__`.
Usually is used via its most common subclass
:class:`.ColumnOperators`.
"""
__slots__ = ()
def __and__(self, other):
"""Implement the ``&`` operator.
When used with SQL expressions, results in an
AND operation, equivalent to
:func:`_expression.and_`, that is::
a & b
is equivalent to::
from sqlalchemy import and_
and_(a, b)
Care should be taken when using ``&`` regarding
operator precedence; the ``&`` operator binds more tightly than
comparison operators such as ``==``.
The operands should be enclosed in parentheses if they contain
further sub expressions::
(a == 2) & (b == 4)
"""
return self.operate(and_, other)
def __or__(self, other):
"""Implement the ``|`` operator.
When used with SQL expressions, results in an
OR operation, equivalent to
:func:`_expression.or_`, that is::
a | b
is equivalent to::
from sqlalchemy import or_
or_(a, b)
Care should be taken when using ``|`` regarding
operator precedence; the ``|`` operator binds more tightly than
comparison operators such as ``==``.
The operands should be enclosed in parentheses if they contain
further sub expressions::
(a == 2) | (b == 4)
"""
return self.operate(or_, other)
def __invert__(self):
"""Implement the ``~`` operator.
When used with SQL expressions, results in a
NOT operation, equivalent to
:func:`_expression.not_`, that is::
~a
is equivalent to::
from sqlalchemy import not_
not_(a)
"""
return self.operate(inv)
def op(
self, opstring, precedence=0, is_comparison=False, return_type=None
):
"""produce a generic operator function.
e.g.::
somecolumn.op("*")(5)
produces::
somecolumn * 5
This function can also be used to make bitwise operators explicit. For
example::
somecolumn.op('&')(0xff)
is a bitwise AND of the value in ``somecolumn``.
:param opstring: a string which will be output as the infix operator
between this element and the expression passed to the
generated function.
:param precedence: precedence to apply to the operator, when
parenthesizing expressions. A lower number will cause the expression
to be parenthesized when applied against another operator with
higher precedence. The default value of ``0`` is lower than all
operators except for the comma (``,``) and ``AS`` operators.
A value of 100 will be higher or equal to all operators, and -100
will be lower than or equal to all operators.
:param is_comparison: if True, the operator will be considered as a
"comparison" operator, that is which evaluates to a boolean
true/false value, like ``==``, ``>``, etc. This flag should be set
so that ORM relationships can establish that the operator is a
comparison operator when used in a custom join condition.
.. versionadded:: 0.9.2 - added the
:paramref:`.Operators.op.is_comparison` flag.
:param return_type: a :class:`.TypeEngine` class or object that will
force the return type of an expression produced by this operator
to be of that type. By default, operators that specify
:paramref:`.Operators.op.is_comparison` will resolve to
:class:`.Boolean`, and those that do not will be of the same
type as the left-hand operand.
.. versionadded:: 1.2.0b3 - added the
:paramref:`.Operators.op.return_type` argument.
.. seealso::
:ref:`types_operators`
:ref:`relationship_custom_operator`
"""
operator = custom_op(opstring, precedence, is_comparison, return_type)
def against(other):
return operator(self, other)
return against
def bool_op(self, opstring, precedence=0):
"""Return a custom boolean operator.
This method is shorthand for calling
:meth:`.Operators.op` and passing the
:paramref:`.Operators.op.is_comparison`
flag with True.
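E.g., a sketch using the PostgreSQL regular expression match
operator ``~``, which returns a boolean (``somecolumn`` is
illustrative)::
somecolumn.bool_op('~')('^foo')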
.. versionadded:: 1.2.0b3
.. seealso::
:meth:`.Operators.op`
"""
return self.op(opstring, precedence=precedence, is_comparison=True)
def operate(self, op, *other, **kwargs):
r"""Operate on an argument.
This is the lowest level of operation, raises
:class:`NotImplementedError` by default.
Overriding this on a subclass can allow common
behavior to be applied to all operations.
For example, overriding :class:`.ColumnOperators`
to apply ``func.lower()`` to the left and right
side::
class MyComparator(ColumnOperators):
def operate(self, op, other):
return op(func.lower(self), func.lower(other))
:param op: Operator callable.
:param \*other: the 'other' side of the operation. Will
be a single scalar for most operations.
:param \**kwargs: modifiers. These may be passed by special
operators such as :meth:`ColumnOperators.contains`.
"""
raise NotImplementedError(str(op))
def reverse_operate(self, op, other, **kwargs):
"""Reverse operate on an argument.
Usage is the same as :meth:`operate`.
"""
raise NotImplementedError(str(op))
class custom_op(object):
"""Represent a 'custom' operator.
:class:`.custom_op` is normally instantiated when the
:meth:`.Operators.op` or :meth:`.Operators.bool_op` methods
are used to create a custom operator callable. The class can also be
used directly when programmatically constructing expressions. E.g.
to represent the "factorial" operation::
from sqlalchemy.sql import UnaryExpression
from sqlalchemy.sql import operators
from sqlalchemy import Numeric
unary = UnaryExpression(table.c.somecolumn,
modifier=operators.custom_op("!"),
type_=Numeric)
.. seealso::
:meth:`.Operators.op`
:meth:`.Operators.bool_op`
"""
__name__ = "custom_op"
def __init__(
self,
opstring,
precedence=0,
is_comparison=False,
return_type=None,
natural_self_precedent=False,
eager_grouping=False,
):
self.opstring = opstring
self.precedence = precedence
self.is_comparison = is_comparison
self.natural_self_precedent = natural_self_precedent
self.eager_grouping = eager_grouping
self.return_type = (
return_type._to_instance(return_type) if return_type else None
)
def __eq__(self, other):
return isinstance(other, custom_op) and other.opstring == self.opstring
def __hash__(self):
return id(self)
def __call__(self, left, right, **kw):
return left.operate(self, right, **kw)
class ColumnOperators(Operators):
"""Defines boolean, comparison, and other operators for
:class:`_expression.ColumnElement` expressions.
By default, all methods call down to
:meth:`.operate` or :meth:`.reverse_operate`,
passing in the appropriate operator function from the
Python builtin ``operator`` module or
a SQLAlchemy-specific operator function from
:mod:`sqlalchemy.sql.operators`. For example
the ``__eq__`` function::
def __eq__(self, other):
return self.operate(operators.eq, other)
Where ``operators.eq`` is essentially::
def eq(a, b):
return a == b
The core column expression unit :class:`_expression.ColumnElement`
overrides :meth:`.Operators.operate` and others
to return further :class:`_expression.ColumnElement` constructs,
so that the ``==`` operation above is replaced by a clause
construct.
.. seealso::
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
:class:`.ColumnOperators`
:class:`.PropComparator`
"""
__slots__ = ()
timetuple = None
"""Hack, allows datetime objects to be compared on the LHS."""
def __lt__(self, other):
"""Implement the ``<`` operator.
In a column context, produces the clause ``a < b``.
"""
return self.operate(lt, other)
def __le__(self, other):
"""Implement the ``<=`` operator.
In a column context, produces the clause ``a <= b``.
"""
return self.operate(le, other)
__hash__ = Operators.__hash__
def __eq__(self, other):
"""Implement the ``==`` operator.
In a column context, produces the clause ``a = b``.
If the target is ``None``, produces ``a IS NULL``.
"""
return self.operate(eq, other)
def __ne__(self, other):
"""Implement the ``!=`` operator.
In a column context, produces the clause ``a != b``.
If the target is ``None``, produces ``a IS NOT NULL``.
"""
return self.operate(ne, other)
def is_distinct_from(self, other):
"""Implement the ``IS DISTINCT FROM`` operator.
Renders "a IS DISTINCT FROM b" on most platforms;
on some such as SQLite may render "a IS NOT b".
.. versionadded:: 1.1
"""
return self.operate(is_distinct_from, other)
def isnot_distinct_from(self, other):
"""Implement the ``IS NOT DISTINCT FROM`` operator.
Renders "a IS NOT DISTINCT FROM b" on most platforms;
on some such as SQLite may render "a IS b".
.. versionadded:: 1.1
"""
return self.operate(isnot_distinct_from, other)
def __gt__(self, other):
"""Implement the ``>`` operator.
In a column context, produces the clause ``a > b``.
"""
return self.operate(gt, other)
def __ge__(self, other):
"""Implement the ``>=`` operator.
In a column context, produces the clause ``a >= b``.
"""
return self.operate(ge, other)
def __neg__(self):
"""Implement the ``-`` operator.
In a column context, produces the clause ``-a``.
"""
return self.operate(neg)
def __contains__(self, other):
return self.operate(contains, other)
def __getitem__(self, index):
"""Implement the [] operator.
This can be used by some database-specific types
such as PostgreSQL ARRAY and HSTORE.
"""
return self.operate(getitem, index)
def __lshift__(self, other):
"""implement the << operator.
Not used by SQLAlchemy core, this is provided
for custom operator systems which want to use
<< as an extension point.
"""
return self.operate(lshift, other)
def __rshift__(self, other):
"""implement the >> operator.
Not used by SQLAlchemy core, this is provided
for custom operator systems which want to use
>> as an extension point.
"""
return self.operate(rshift, other)
def concat(self, other):
"""Implement the 'concat' operator.
In a column context, produces the clause ``a || b``,
or uses the ``concat()`` operator on MySQL.
"""
return self.operate(concat_op, other)
def like(self, other, escape=None):
r"""Implement the ``like`` operator.
In a column context, produces the expression::
a LIKE other
E.g.::
stmt = select([sometable]).\
where(sometable.c.column.like("%foobar%"))
:param other: expression to be compared
:param escape: optional escape character, renders the ``ESCAPE``
keyword, e.g.::
somecolumn.like("foo/%bar", escape="/")
.. seealso::
:meth:`.ColumnOperators.ilike`
"""
return self.operate(like_op, other, escape=escape)
def ilike(self, other, escape=None):
r"""Implement the ``ilike`` operator, e.g. case insensitive LIKE.
In a column context, produces an expression either of the form::
lower(a) LIKE lower(other)
Or on backends that support the ILIKE operator::
a ILIKE other
E.g.::
stmt = select([sometable]).\
where(sometable.c.column.ilike("%foobar%"))
:param other: expression to be compared
:param escape: optional escape character, renders the ``ESCAPE``
keyword, e.g.::
somecolumn.ilike("foo/%bar", escape="/")
.. seealso::
:meth:`.ColumnOperators.like`
"""
return self.operate(ilike_op, other, escape=escape)
def in_(self, other):
"""Implement the ``in`` operator.
In a column context, produces the clause ``column IN <other>``.
The given parameter ``other`` may be:
* A list of literal values, e.g.::
stmt.where(column.in_([1, 2, 3]))
In this calling form, the list of items is converted to a set of
bound parameters the same length as the list given::
WHERE COL IN (?, ?, ?)
* A list of tuples may be provided if the comparison is against a
:func:`.tuple_` containing multiple expressions::
from sqlalchemy import tuple_
stmt.where(tuple_(col1, col2).in_([(1, 10), (2, 20), (3, 30)]))
* An empty list, e.g.::
stmt.where(column.in_([]))
In this calling form, the expression renders a "false" expression,
e.g.::
WHERE 1 != 1
This "false" expression has historically had different behaviors
in older SQLAlchemy versions, see
:paramref:`_sa.create_engine.empty_in_strategy`
for behavioral options.
.. versionchanged:: 1.2 simplified the behavior of "empty in"
expressions
* A bound parameter, e.g. :func:`.bindparam`, may be used if it
includes the :paramref:`.bindparam.expanding` flag::
stmt.where(column.in_(bindparam('value', expanding=True)))
In this calling form, the expression renders a special non-SQL
placeholder expression that looks like::
WHERE COL IN ([EXPANDING_value])
This placeholder expression is intercepted at statement execution
time to be converted into the variable number of bound parameter
form illustrated earlier. If the statement were executed as::
connection.execute(stmt, {"value": [1, 2, 3]})
The database would be passed a bound parameter for each value::
WHERE COL IN (?, ?, ?)
.. versionadded:: 1.2 added "expanding" bound parameters
If an empty list is passed, a special "empty list" expression,
which is specific to the database in use, is rendered. On
SQLite this would be::
WHERE COL IN (SELECT 1 FROM (SELECT 1) WHERE 1!=1)
.. versionadded:: 1.3 "expanding" bound parameters now support
empty lists
* a :func:`_expression.select` construct,
which is usually a correlated
scalar select::
stmt.where(
column.in_(
select([othertable.c.y]).
where(table.c.x == othertable.c.x)
)
)
In this calling form, :meth:`.ColumnOperators.in_` renders as given::
WHERE COL IN (SELECT othertable.y
FROM othertable WHERE othertable.x = table.x)
:param other: a list of literals, a :func:`_expression.select`
construct,
or a :func:`.bindparam` construct that includes the
:paramref:`.bindparam.expanding` flag set to True.
"""
return self.operate(in_op, other)
def notin_(self, other):
"""implement the ``NOT IN`` operator.
This is equivalent to using negation with
:meth:`.ColumnOperators.in_`, i.e. ``~x.in_(y)``.
In the case that ``other`` is an empty sequence, the compiler
produces an "empty not in" expression. This defaults to the
expression "1 = 1" to produce true in all cases. The
:paramref:`_sa.create_engine.empty_in_strategy` may be used to
alter this behavior.
.. versionchanged:: 1.2 The :meth:`.ColumnOperators.in_` and
:meth:`.ColumnOperators.notin_` operators
now produce a "static" expression for an empty IN sequence
by default.
.. seealso::
:meth:`.ColumnOperators.in_`
"""
return self.operate(notin_op, other)
def notlike(self, other, escape=None):
"""implement the ``NOT LIKE`` operator.
This is equivalent to using negation with
:meth:`.ColumnOperators.like`, i.e. ``~x.like(y)``.
.. seealso::
:meth:`.ColumnOperators.like`
"""
return self.operate(notlike_op, other, escape=escape)
def notilike(self, other, escape=None):
"""implement the ``NOT ILIKE`` operator.
This is equivalent to using negation with
:meth:`.ColumnOperators.ilike`, i.e. ``~x.ilike(y)``.
.. seealso::
:meth:`.ColumnOperators.ilike`
"""
return self.operate(notilike_op, other, escape=escape)
def is_(self, other):
"""Implement the ``IS`` operator.
Normally, ``IS`` is generated automatically when comparing to a
value of ``None``, which resolves to ``NULL``. However, explicit
usage of ``IS`` may be desirable if comparing to boolean values
on certain platforms.
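E.g.::
somecolumn.is_(None)
renders ``somecolumn IS NULL``.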
.. seealso:: :meth:`.ColumnOperators.isnot`
"""
return self.operate(is_, other)
def isnot(self, other):
"""Implement the ``IS NOT`` operator.
Normally, ``IS NOT`` is generated automatically when comparing to a
value of ``None``, which resolves to ``NULL``. However, explicit
usage of ``IS NOT`` may be desirable if comparing to boolean values
on certain platforms.
.. seealso:: :meth:`.ColumnOperators.is_`
"""
return self.operate(isnot, other)
def startswith(self, other, **kwargs):
r"""Implement the ``startswith`` operator.
Produces a LIKE expression that tests against a match for the start
of a string value::
column LIKE <other> || '%'
E.g.::
stmt = select([sometable]).\
where(sometable.c.column.startswith("foobar"))
Since the operator uses ``LIKE``, wildcard characters
``"%"`` and ``"_"`` that are present inside the <other> expression
will behave like wildcards as well. For literal string
values, the :paramref:`.ColumnOperators.startswith.autoescape` flag
may be set to ``True`` to apply escaping to occurrences of these
characters within the string value so that they match as themselves
and not as wildcard characters. Alternatively, the
:paramref:`.ColumnOperators.startswith.escape` parameter will establish
a given character as an escape character which can be of use when
the target expression is not a literal string.
:param other: expression to be compared. This is usually a plain
string value, but can also be an arbitrary SQL expression. LIKE
wildcard characters ``%`` and ``_`` are not escaped by default unless
the :paramref:`.ColumnOperators.startswith.autoescape` flag is
set to True.
:param autoescape: boolean; when True, establishes an escape character
within the LIKE expression, then applies it to all occurrences of
``"%"``, ``"_"`` and the escape character itself within the
comparison value, which is assumed to be a literal string and not a
SQL expression.
An expression such as::
somecolumn.startswith("foo%bar", autoescape=True)
Will render as::
somecolumn LIKE :param || '%' ESCAPE '/'
With the value of :param as ``"foo/%bar"``.
.. versionadded:: 1.2
.. versionchanged:: 1.2.0 The
:paramref:`.ColumnOperators.startswith.autoescape` parameter is
now a simple boolean rather than a character; the escape
character itself is also escaped, and defaults to a forward
slash, which itself can be customized using the
:paramref:`.ColumnOperators.startswith.escape` parameter.
:param escape: a character which when given will render with the
``ESCAPE`` keyword to establish that character as the escape
character. This character can then be placed preceding occurrences
of ``%`` and ``_`` to allow them to act as themselves and not
wildcard characters.
An expression such as::
somecolumn.startswith("foo/%bar", escape="^")
Will render as::
somecolumn LIKE :param || '%' ESCAPE '^'
The parameter may also be combined with
:paramref:`.ColumnOperators.startswith.autoescape`::
somecolumn.startswith("foo%bar^bat", escape="^", autoescape=True)
Where above, the given literal parameter will be converted to
``"foo^%bar^^bat"`` before being passed to the database.
.. seealso::
:meth:`.ColumnOperators.endswith`
:meth:`.ColumnOperators.contains`
:meth:`.ColumnOperators.like`
"""
return self.operate(startswith_op, other, **kwargs)
def endswith(self, other, **kwargs):
r"""Implement the 'endswith' operator.
Produces a LIKE expression that tests against a match for the end
of a string value::
column LIKE '%' || <other>
E.g.::
stmt = select([sometable]).\
where(sometable.c.column.endswith("foobar"))
Since the operator uses ``LIKE``, wildcard characters
``"%"`` and ``"_"`` that are present inside the <other> expression
will behave like wildcards as well. For literal string
values, the :paramref:`.ColumnOperators.endswith.autoescape` flag
may be set to ``True`` to apply escaping to occurrences of these
characters within the string value so that they match as themselves
and not as wildcard characters. Alternatively, the
:paramref:`.ColumnOperators.endswith.escape` parameter will establish
a given character as an escape character which can be of use when
the target expression is not a literal string.
:param other: expression to be compared. This is usually a plain
string value, but can also be an arbitrary SQL expression. LIKE
wildcard characters ``%`` and ``_`` are not escaped by default unless
the :paramref:`.ColumnOperators.endswith.autoescape` flag is
set to True.
:param autoescape: boolean; when True, establishes an escape character
within the LIKE expression, then applies it to all occurrences of
``"%"``, ``"_"`` and the escape character itself within the
comparison value, which is assumed to be a literal string and not a
SQL expression.
An expression such as::
somecolumn.endswith("foo%bar", autoescape=True)
Will render as::
somecolumn LIKE '%' || :param ESCAPE '/'
With the value of :param as ``"foo/%bar"``.
.. versionadded:: 1.2
.. versionchanged:: 1.2.0 The
:paramref:`.ColumnOperators.endswith.autoescape` parameter is
now a simple boolean rather than a character; the escape
character itself is also escaped, and defaults to a forward
slash, which itself can be customized using the
:paramref:`.ColumnOperators.endswith.escape` parameter.
:param escape: a character which when given will render with the
``ESCAPE`` keyword to establish that character as the escape
character. This character can then be placed preceding occurrences
of ``%`` and ``_`` to allow them to act as themselves and not
wildcard characters.
An expression such as::
somecolumn.endswith("foo/%bar", escape="^")
Will render as::
somecolumn LIKE '%' || :param ESCAPE '^'
The parameter may also be combined with
:paramref:`.ColumnOperators.endswith.autoescape`::
somecolumn.endswith("foo%bar^bat", escape="^", autoescape=True)
Where above, the given literal parameter will be converted to
``"foo^%bar^^bat"`` before being passed to the database.
.. seealso::
:meth:`.ColumnOperators.startswith`
:meth:`.ColumnOperators.contains`
:meth:`.ColumnOperators.like`
"""
return self.operate(endswith_op, other, **kwargs)
def contains(self, other, **kwargs):
r"""Implement the 'contains' operator.
Produces a LIKE expression that tests against a match for the middle
of a string value::
column LIKE '%' || <other> || '%'
E.g.::
stmt = select([sometable]).\
where(sometable.c.column.contains("foobar"))
Since the operator uses ``LIKE``, wildcard characters
``"%"`` and ``"_"`` that are present inside the <other> expression
will behave like wildcards as well. For literal string
values, the :paramref:`.ColumnOperators.contains.autoescape` flag
may be set to ``True`` to apply escaping to occurrences of these
characters within the string value so that they match as themselves
and not as wildcard characters. Alternatively, the
:paramref:`.ColumnOperators.contains.escape` parameter will establish
a given character as an escape character which can be of use when
the target expression is not a literal string.
:param other: expression to be compared. This is usually a plain
string value, but can also be an arbitrary SQL expression. LIKE
wildcard characters ``%`` and ``_`` are not escaped by default unless
the :paramref:`.ColumnOperators.contains.autoescape` flag is
set to True.
:param autoescape: boolean; when True, establishes an escape character
within the LIKE expression, then applies it to all occurrences of
``"%"``, ``"_"`` and the escape character itself within the
comparison value, which is assumed to be a literal string and not a
SQL expression.
An expression such as::
somecolumn.contains("foo%bar", autoescape=True)
Will render as::
somecolumn LIKE '%' || :param || '%' ESCAPE '/'
With the value of :param as ``"foo/%bar"``.
.. versionadded:: 1.2
.. versionchanged:: 1.2.0 The
:paramref:`.ColumnOperators.contains.autoescape` parameter is
now a simple boolean rather than a character; the escape
character itself is also escaped, and defaults to a forward
slash, which itself can be customized using the
:paramref:`.ColumnOperators.contains.escape` parameter.
:param escape: a character which when given will render with the
``ESCAPE`` keyword to establish that character as the escape
character. This character can then be placed preceding occurrences
of ``%`` and ``_`` to allow them to act as themselves and not
wildcard characters.
An expression such as::
somecolumn.contains("foo/%bar", escape="^")
Will render as::
somecolumn LIKE '%' || :param || '%' ESCAPE '^'
The parameter may also be combined with
:paramref:`.ColumnOperators.contains.autoescape`::
somecolumn.contains("foo%bar^bat", escape="^", autoescape=True)
Where above, the given literal parameter will be converted to
``"foo^%bar^^bat"`` before being passed to the database.
.. seealso::
:meth:`.ColumnOperators.startswith`
:meth:`.ColumnOperators.endswith`
:meth:`.ColumnOperators.like`
"""
return self.operate(contains_op, other, **kwargs)
def match(self, other, **kwargs):
"""Implements a database-specific 'match' operator.
:meth:`~.ColumnOperators.match` attempts to resolve to
a MATCH-like function or operator provided by the backend.
Examples include:
* PostgreSQL - renders ``x @@ to_tsquery(y)``
* MySQL - renders ``MATCH (x) AGAINST (y IN BOOLEAN MODE)``
* Oracle - renders ``CONTAINS(x, y)``
* other backends may provide special implementations.
* Backends without any special implementation will emit
the operator as "MATCH". This is compatible with SQLite, for
example.
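E.g., a sketch with illustrative names::
stmt = select([sometable]).\
where(sometable.c.column.match("searchstring"))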
"""
return self.operate(match_op, other, **kwargs)
def desc(self):
"""Produce a :func:`_expression.desc` clause against the
parent object."""
return self.operate(desc_op)
def asc(self):
"""Produce a :func:`_expression.asc` clause against the
parent object."""
return self.operate(asc_op)
def nullsfirst(self):
"""Produce a :func:`_expression.nullsfirst` clause against the
parent object."""
return self.operate(nullsfirst_op)
def nullslast(self):
"""Produce a :func:`_expression.nullslast` clause against the
parent object."""
return self.operate(nullslast_op)
def collate(self, collation):
"""Produce a :func:`_expression.collate` clause against
the parent object, given the collation string.
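E.g.::
somecolumn.collate("utf8_bin")
renders ``somecolumn COLLATE utf8_bin`` (the collation name shown is
backend-specific and illustrative only).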
.. seealso::
:func:`_expression.collate`
"""
return self.operate(collate, collation)
def __radd__(self, other):
"""Implement the ``+`` operator in reverse.
See :meth:`.ColumnOperators.__add__`.
"""
return self.reverse_operate(add, other)
def __rsub__(self, other):
"""Implement the ``-`` operator in reverse.
See :meth:`.ColumnOperators.__sub__`.
"""
return self.reverse_operate(sub, other)
def __rmul__(self, other):
"""Implement the ``*`` operator in reverse.
See :meth:`.ColumnOperators.__mul__`.
"""
return self.reverse_operate(mul, other)
def __rdiv__(self, other):
"""Implement the ``/`` operator in reverse.
See :meth:`.ColumnOperators.__div__`.
"""
return self.reverse_operate(div, other)
def __rmod__(self, other):
"""Implement the ``%`` operator in reverse.
See :meth:`.ColumnOperators.__mod__`.
"""
return self.reverse_operate(mod, other)
def between(self, cleft, cright, symmetric=False):
"""Produce a :func:`_expression.between` clause against
the parent object, given the lower and upper range.
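E.g., a sketch with illustrative names::
stmt = select([sometable]).\
where(sometable.c.column.between(5, 10))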
"""
return self.operate(between_op, cleft, cright, symmetric=symmetric)
def distinct(self):
"""Produce a :func:`_expression.distinct` clause against the
parent object.
"""
return self.operate(distinct_op)
def any_(self):
"""Produce a :func:`_expression.any_` clause against the
parent object.
This operator is only appropriate against a scalar subquery
object, or for some backends a column expression that is
against the ARRAY type, e.g.::
# postgresql '5 = ANY (somearray)'
expr = 5 == mytable.c.somearray.any_()
# mysql '5 = ANY (SELECT value FROM table)'
expr = 5 == select([table.c.value]).as_scalar().any_()
.. seealso::
:func:`_expression.any_` - standalone version
:func:`_expression.all_` - ALL operator
.. versionadded:: 1.1
"""
return self.operate(any_op)
def all_(self):
"""Produce a :func:`_expression.all_` clause against the
parent object.
This operator is only appropriate against a scalar subquery
object, or for some backends a column expression that is
against the ARRAY type, e.g.::
# postgresql '5 = ALL (somearray)'
expr = 5 == mytable.c.somearray.all_()
# mysql '5 = ALL (SELECT value FROM table)'
expr = 5 == select([table.c.value]).as_scalar().all_()
.. seealso::
:func:`_expression.all_` - standalone version
:func:`_expression.any_` - ANY operator
.. versionadded:: 1.1
"""
return self.operate(all_op)
def __add__(self, other):
"""Implement the ``+`` operator.
In a column context, produces the clause ``a + b``
if the parent object has non-string affinity.
If the parent object has a string affinity,
produces the concatenation operator, ``a || b`` -
see :meth:`.ColumnOperators.concat`.
"""
return self.operate(add, other)
def __sub__(self, other):
"""Implement the ``-`` operator.
In a column context, produces the clause ``a - b``.
"""
return self.operate(sub, other)
def __mul__(self, other):
"""Implement the ``*`` operator.
In a column context, produces the clause ``a * b``.
"""
return self.operate(mul, other)
def __div__(self, other):
"""Implement the ``/`` operator.
In a column context, produces the clause ``a / b``.
"""
return self.operate(div, other)
def __mod__(self, other):
"""Implement the ``%`` operator.
In a column context, produces the clause ``a % b``.
"""
return self.operate(mod, other)
def __truediv__(self, other):
"""Implement the ``//`` operator.
In a column context, produces the clause ``a / b``.
"""
return self.operate(truediv, other)
def __rtruediv__(self, other):
"""Implement the ``//`` operator in reverse.
See :meth:`.ColumnOperators.__truediv__`.
"""
return self.reverse_operate(truediv, other)
_commutative = {eq, ne, add, mul}
_comparison = {eq, ne, lt, gt, ge, le}
def commutative_op(fn):
_commutative.add(fn)
return fn
def comparison_op(fn):
_comparison.add(fn)
return fn
def from_():
raise NotImplementedError()
@comparison_op
def function_as_comparison_op():
raise NotImplementedError()
def as_():
raise NotImplementedError()
def exists():
raise NotImplementedError()
def istrue(a):
raise NotImplementedError()
def isfalse(a):
raise NotImplementedError()
@comparison_op
def is_distinct_from(a, b):
return a.is_distinct_from(b)
@comparison_op
def isnot_distinct_from(a, b):
return a.isnot_distinct_from(b)
@comparison_op
def is_(a, b):
return a.is_(b)
@comparison_op
def isnot(a, b):
return a.isnot(b)
def collate(a, b):
return a.collate(b)
def op(a, opstring, b):
return a.op(opstring)(b)
@comparison_op
def like_op(a, b, escape=None):
return a.like(b, escape=escape)
@comparison_op
def notlike_op(a, b, escape=None):
return a.notlike(b, escape=escape)
@comparison_op
def ilike_op(a, b, escape=None):
return a.ilike(b, escape=escape)
@comparison_op
def notilike_op(a, b, escape=None):
return a.notilike(b, escape=escape)
@comparison_op
def between_op(a, b, c, symmetric=False):
return a.between(b, c, symmetric=symmetric)
@comparison_op
def notbetween_op(a, b, c, symmetric=False):
return a.notbetween(b, c, symmetric=symmetric)
@comparison_op
def in_op(a, b):
return a.in_(b)
@comparison_op
def notin_op(a, b):
return a.notin_(b)
def distinct_op(a):
return a.distinct()
def any_op(a):
return a.any_()
def all_op(a):
return a.all_()
def _escaped_like_impl(fn, other, escape, autoescape):
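# Shared implementation for startswith/endswith/contains: when
# autoescape is set, default the escape character to "/", double any
# literal occurrences of the escape character itself, then prefix "%"
# and "_" with it so they match literally rather than as LIKE
# wildcards.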
if autoescape:
if autoescape is not True:
util.warn(
"The autoescape parameter is now a simple boolean True/False"
)
if escape is None:
escape = "/"
if not isinstance(other, util.compat.string_types):
raise TypeError("String value expected when autoescape=True")
if escape not in ("%", "_"):
other = other.replace(escape, escape + escape)
other = other.replace("%", escape + "%").replace("_", escape + "_")
return fn(other, escape=escape)
@comparison_op
def startswith_op(a, b, escape=None, autoescape=False):
return _escaped_like_impl(a.startswith, b, escape, autoescape)
@comparison_op
def notstartswith_op(a, b, escape=None, autoescape=False):
return ~_escaped_like_impl(a.startswith, b, escape, autoescape)
@comparison_op
def endswith_op(a, b, escape=None, autoescape=False):
return _escaped_like_impl(a.endswith, b, escape, autoescape)
@comparison_op
def notendswith_op(a, b, escape=None, autoescape=False):
return ~_escaped_like_impl(a.endswith, b, escape, autoescape)
@comparison_op
def contains_op(a, b, escape=None, autoescape=False):
return _escaped_like_impl(a.contains, b, escape, autoescape)
@comparison_op
def notcontains_op(a, b, escape=None, autoescape=False):
return ~_escaped_like_impl(a.contains, b, escape, autoescape)
@comparison_op
def match_op(a, b, **kw):
return a.match(b, **kw)
@comparison_op
def notmatch_op(a, b, **kw):
return a.notmatch(b, **kw)
def comma_op(a, b):
raise NotImplementedError()
@comparison_op
def empty_in_op(a, b):
raise NotImplementedError()
@comparison_op
def empty_notin_op(a, b):
raise NotImplementedError()
def filter_op(a, b):
raise NotImplementedError()
def concat_op(a, b):
return a.concat(b)
def desc_op(a):
return a.desc()
def asc_op(a):
return a.asc()
def nullsfirst_op(a):
return a.nullsfirst()
def nullslast_op(a):
return a.nullslast()
def json_getitem_op(a, b):
raise NotImplementedError()
def json_path_getitem_op(a, b):
raise NotImplementedError()
def is_comparison(op):
return op in _comparison or (isinstance(op, custom_op) and op.is_comparison)
def is_commutative(op):
return op in _commutative
def is_ordering_modifier(op):
return op in (asc_op, desc_op, nullsfirst_op, nullslast_op)
def is_natural_self_precedent(op):
return op in _natural_self_precedent or (
isinstance(op, custom_op) and op.natural_self_precedent
)
_booleans = (inv, istrue, isfalse, and_, or_)
def is_boolean(op):
return is_comparison(op) or op in _booleans
_mirror = {gt: lt, ge: le, lt: gt, le: ge}
def mirror(op):
"""rotate a comparison operator 180 degrees.
Note this is not the same as negation.
"""
return _mirror.get(op, op)
_associative = _commutative.union([concat_op, and_, or_]).difference([eq, ne])
_natural_self_precedent = _associative.union(
[getitem, json_getitem_op, json_path_getitem_op]
)
"""Operators where if we have (a op b) op c, we don't want to
parenthesize (a op b).
"""
_asbool = util.symbol("_asbool", canonical=-10)
_smallest = util.symbol("_smallest", canonical=-100)
_largest = util.symbol("_largest", canonical=100)
_PRECEDENCE = {
from_: 15,
function_as_comparison_op: 15,
any_op: 15,
all_op: 15,
getitem: 15,
json_getitem_op: 15,
json_path_getitem_op: 15,
mul: 8,
truediv: 8,
div: 8,
mod: 8,
neg: 8,
add: 7,
sub: 7,
concat_op: 6,
filter_op: 6,
match_op: 5,
notmatch_op: 5,
ilike_op: 5,
notilike_op: 5,
like_op: 5,
notlike_op: 5,
in_op: 5,
notin_op: 5,
is_: 5,
isnot: 5,
eq: 5,
ne: 5,
is_distinct_from: 5,
isnot_distinct_from: 5,
empty_in_op: 5,
empty_notin_op: 5,
gt: 5,
lt: 5,
ge: 5,
le: 5,
between_op: 5,
notbetween_op: 5,
distinct_op: 5,
inv: 5,
istrue: 5,
isfalse: 5,
and_: 3,
or_: 2,
comma_op: -1,
desc_op: 3,
asc_op: 3,
collate: 4,
as_: -1,
exists: 0,
_asbool: -10,
_smallest: _smallest,
_largest: _largest,
}
def is_precedent(operator, against):
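# No parenthesization is needed when an operator is "natural self
# precedent" against itself (e.g. string concatenation); otherwise
# compare table precedence, falling back to a custom_op's own
# .precedence attribute, treating unknown operators as lowest on the
# left and highest on the right so that grouping errs on the safe
# side.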
if operator is against and is_natural_self_precedent(operator):
return False
else:
return _PRECEDENCE.get(
operator, getattr(operator, "precedence", _smallest)
) <= _PRECEDENCE.get(against, getattr(against, "precedence", _largest))
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/sql/selectable.py
|
# sql/selectable.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The :class:`_expression.FromClause` class of SQL expression elements,
representing
SQL tables and derived rowsets.
"""
import collections
import itertools
import operator
from operator import attrgetter
from sqlalchemy.sql.visitors import Visitable
from . import operators
from . import type_api
from .annotation import Annotated
from .base import _from_objects
from .base import _generative
from .base import ColumnCollection
from .base import ColumnSet
from .base import Executable
from .base import Generative
from .base import Immutable
from .elements import _anonymous_label
from .elements import _clause_element_as_expr
from .elements import _clone
from .elements import _cloned_difference
from .elements import _cloned_intersection
from .elements import _document_text_coercion
from .elements import _expand_cloned
from .elements import _interpret_as_column_or_from
from .elements import _literal_and_labels_as_label_reference
from .elements import _literal_as_label_reference
from .elements import _literal_as_text
from .elements import _no_text_coercion
from .elements import _select_iterables
from .elements import and_
from .elements import BindParameter
from .elements import ClauseElement
from .elements import ClauseList
from .elements import Grouping
from .elements import literal_column
from .elements import True_
from .elements import UnaryExpression
from .. import exc
from .. import inspection
from .. import util
def _interpret_as_from(element):
insp = inspection.inspect(element, raiseerr=False)
if insp is None:
if isinstance(element, util.string_types):
_no_text_coercion(element)
try:
return insp.selectable
except AttributeError as err:
util.raise_(
exc.ArgumentError("FROM expression expected"), replace_context=err
)
def _interpret_as_select(element):
element = _interpret_as_from(element)
if isinstance(element, Alias):
element = element.original
if not isinstance(element, SelectBase):
element = element.select()
return element
class _OffsetLimitParam(BindParameter):
@property
def _limit_offset_value(self):
return self.effective_value
def _offset_or_limit_clause(element, name=None, type_=None):
"""Convert the given value to an "offset or limit" clause.
This handles incoming integers and converts to an expression; if
an expression is already given, it is passed through.
"""
if element is None:
return None
elif hasattr(element, "__clause_element__"):
return element.__clause_element__()
elif isinstance(element, Visitable):
return element
else:
value = util.asint(element)
return _OffsetLimitParam(name, value, type_=type_, unique=True)
def _offset_or_limit_clause_asint(clause, attrname):
"""Convert the "offset or limit" clause of a select construct to an
integer.
This is only possible if the value is stored as a simple bound parameter.
Otherwise, a compilation error is raised.
"""
if clause is None:
return None
try:
value = clause._limit_offset_value
except AttributeError as err:
util.raise_(
exc.CompileError(
"This SELECT structure does not use a simple "
"integer value for %s" % attrname
),
replace_context=err,
)
else:
return util.asint(value)
def subquery(alias, *args, **kwargs):
r"""Return an :class:`_expression.Alias` object derived
from a :class:`_expression.Select`.
:param alias: the alias name to apply to the generated
:class:`_expression.Alias` object.
:param \*args, \**kwargs: all other arguments are delivered to the
:func:`select` function.
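E.g., a sketch assuming a hypothetical ``users`` table::
sq = subquery("sq", [users.c.id], users.c.name == "jack")
which is equivalent to
``select([users.c.id], users.c.name == "jack").alias("sq")``.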
"""
return Select(*args, **kwargs).alias(alias)
class Selectable(ClauseElement):
"""mark a class as being selectable"""
__visit_name__ = "selectable"
is_selectable = True
@property
def selectable(self):
return self
class HasPrefixes(object):
_prefixes = ()
@_generative
@_document_text_coercion(
"expr",
":meth:`_expression.HasPrefixes.prefix_with`",
":paramref:`.HasPrefixes.prefix_with.*expr`",
)
def prefix_with(self, *expr, **kw):
r"""Add one or more expressions following the statement keyword, i.e.
SELECT, INSERT, UPDATE, or DELETE. Generative.
This is used to support backend-specific prefix keywords such as those
provided by MySQL.
E.g.::
stmt = table.insert().prefix_with("LOW_PRIORITY", dialect="mysql")
# MySQL 5.7 optimizer hints
stmt = select([table]).prefix_with(
"/*+ BKA(t1) */", dialect="mysql")
Multiple prefixes can be specified by multiple calls
to :meth:`_expression.HasPrefixes.prefix_with`.
:param \*expr: textual or :class:`_expression.ClauseElement`
construct which
will be rendered following the INSERT, UPDATE, or DELETE
keyword.
:param \**kw: A single keyword 'dialect' is accepted. This is an
optional string dialect name which will
limit rendering of this prefix to only that dialect.
"""
dialect = kw.pop("dialect", None)
if kw:
raise exc.ArgumentError(
"Unsupported argument(s): %s" % ",".join(kw)
)
self._setup_prefixes(expr, dialect)
def _setup_prefixes(self, prefixes, dialect=None):
self._prefixes = self._prefixes + tuple(
[
(_literal_as_text(p, allow_coercion_to_text=True), dialect)
for p in prefixes
]
)
class HasSuffixes(object):
_suffixes = ()
@_generative
@_document_text_coercion(
"expr",
":meth:`_expression.HasSuffixes.suffix_with`",
":paramref:`.HasSuffixes.suffix_with.*expr`",
)
def suffix_with(self, *expr, **kw):
r"""Add one or more expressions following the statement as a whole.
This is used to support backend-specific suffix keywords on
certain constructs.
E.g.::
stmt = select([col1, col2]).cte().suffix_with(
"cycle empno set y_cycle to 1 default 0", dialect="oracle")
Multiple suffixes can be specified by multiple calls
to :meth:`_expression.HasSuffixes.suffix_with`.
:param \*expr: textual or :class:`_expression.ClauseElement`
construct which
will be rendered following the target clause.
:param \**kw: A single keyword 'dialect' is accepted. This is an
optional string dialect name which will
limit rendering of this suffix to only that dialect.
"""
dialect = kw.pop("dialect", None)
if kw:
raise exc.ArgumentError(
"Unsupported argument(s): %s" % ",".join(kw)
)
self._setup_suffixes(expr, dialect)
def _setup_suffixes(self, suffixes, dialect=None):
self._suffixes = self._suffixes + tuple(
[
(_literal_as_text(p, allow_coercion_to_text=True), dialect)
for p in suffixes
]
)
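# Illustrative sketch (not part of the library): suffix_with() mirrors
# prefix_with() but renders after the target clause, e.g. Oracle's
# CYCLE clause on a CTE.  Names are hypothetical.
def _demo_suffix_with():  # pragma: no cover
    from sqlalchemy import column, select, table

    t = table("t", column("x"))
    cte = select([t.c.x]).cte("c").suffix_with(
        "CYCLE x SET y_cycle TO 1 DEFAULT 0", dialect="oracle"
    )
    return select([cte.c.x])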
class FromClause(Selectable):
"""Represent an element that can be used within the ``FROM``
clause of a ``SELECT`` statement.
The most common forms of :class:`_expression.FromClause` are the
:class:`_schema.Table` and the :func:`_expression.select` constructs. Key
features common to all :class:`_expression.FromClause` objects include:
* a :attr:`.c` collection, which provides per-name access to a collection
of :class:`_expression.ColumnElement` objects.
* a :attr:`.primary_key` attribute, which is a collection of all those
:class:`_expression.ColumnElement`
objects that indicate the ``primary_key`` flag.
* Methods to generate various derivations of a "from" clause, including
:meth:`_expression.FromClause.alias`,
:meth:`_expression.FromClause.join`,
:meth:`_expression.FromClause.select`.
"""
__visit_name__ = "fromclause"
named_with_column = False
_hide_froms = []
_is_join = False
_is_select = False
_is_from_container = False
_is_lateral = False
_textual = False
"""a marker that allows us to easily distinguish a :class:`.TextAsFrom`
or similar object from other kinds of :class:`_expression.FromClause`
objects."""
schema = None
"""Define the 'schema' attribute for this :class:`_expression.FromClause`.
This is typically ``None`` for most objects except that of
:class:`_schema.Table`, where it is taken as the value of the
:paramref:`_schema.Table.schema` argument.
"""
def _translate_schema(self, effective_schema, map_):
return effective_schema
_memoized_property = util.group_expirable_memoized_property(["_columns"])
@util.deprecated(
"1.1",
message="The :meth:`.FromClause.count` method is deprecated, "
"and will be removed in a future release. Please use the "
":class:`_functions.count` function available from the "
":attr:`.func` namespace.",
)
@util.dependencies("sqlalchemy.sql.functions")
def count(self, functions, whereclause=None, **params):
"""return a SELECT COUNT generated against this
:class:`_expression.FromClause`.
.. seealso::
:class:`_functions.count`
"""
if self.primary_key:
col = list(self.primary_key)[0]
else:
col = list(self.columns)[0]
return Select(
[functions.func.count(col).label("tbl_row_count")],
whereclause,
from_obj=[self],
**params
)
def select(self, whereclause=None, **params):
"""return a SELECT of this :class:`_expression.FromClause`.
.. seealso::
:func:`_expression.select` - general purpose
method which allows for arbitrary column lists.
"""
return Select([self], whereclause, **params)
def join(self, right, onclause=None, isouter=False, full=False):
"""Return a :class:`_expression.Join` from this
:class:`_expression.FromClause`
to another :class:`FromClause`.
E.g.::
from sqlalchemy import join
j = user_table.join(address_table,
user_table.c.id == address_table.c.user_id)
stmt = select([user_table]).select_from(j)
would emit SQL along the lines of::
SELECT user.id, user.name FROM user
JOIN address ON user.id = address.user_id
:param right: the right side of the join; this is any
:class:`_expression.FromClause` object such as a
:class:`_schema.Table` object, and
may also be a selectable-compatible object such as an ORM-mapped
class.
:param onclause: a SQL expression representing the ON clause of the
join. If left at ``None``, :meth:`_expression.FromClause.join`
will attempt to
join the two tables based on a foreign key relationship.
:param isouter: if True, render a LEFT OUTER JOIN, instead of JOIN.
:param full: if True, render a FULL OUTER JOIN, instead of LEFT OUTER
JOIN. Implies :paramref:`.FromClause.join.isouter`.
.. versionadded:: 1.1
.. seealso::
:func:`_expression.join` - standalone function
:class:`_expression.Join` - the type of object produced
"""
return Join(self, right, onclause, isouter, full)
def outerjoin(self, right, onclause=None, full=False):
"""Return a :class:`_expression.Join` from this
:class:`_expression.FromClause`
to another :class:`FromClause`, with the "isouter" flag set to
True.
E.g.::
from sqlalchemy import outerjoin
j = user_table.outerjoin(address_table,
user_table.c.id == address_table.c.user_id)
The above is equivalent to::
j = user_table.join(
address_table,
user_table.c.id == address_table.c.user_id,
isouter=True)
:param right: the right side of the join; this is any
:class:`_expression.FromClause` object such as a
:class:`_schema.Table` object, and
may also be a selectable-compatible object such as an ORM-mapped
class.
:param onclause: a SQL expression representing the ON clause of the
join. If left at ``None``, :meth:`_expression.FromClause.join`
will attempt to
join the two tables based on a foreign key relationship.
:param full: if True, render a FULL OUTER JOIN, instead of
LEFT OUTER JOIN.
.. versionadded:: 1.1
.. seealso::
:meth:`_expression.FromClause.join`
:class:`_expression.Join`
"""
return Join(self, right, onclause, True, full)
def alias(self, name=None, flat=False):
"""return an alias of this :class:`_expression.FromClause`.
E.g.::
a2 = some_table.alias('a2')
The above code creates an :class:`_expression.Alias`
object which can be used
as a FROM clause in any SELECT statement.
.. seealso::
:ref:`core_tutorial_aliases`
:func:`_expression.alias`
"""
return Alias._construct(self, name)
def lateral(self, name=None):
"""Return a LATERAL alias of this :class:`_expression.FromClause`.
The return value is the :class:`_expression.Lateral` construct also
provided by the top-level :func:`_expression.lateral` function.
.. versionadded:: 1.1
.. seealso::
:ref:`lateral_selects` - overview of usage.
"""
return Lateral._construct(self, name)
def tablesample(self, sampling, name=None, seed=None):
"""Return a TABLESAMPLE alias of this :class:`_expression.FromClause`.
The return value is the :class:`_expression.TableSample`
construct also
provided by the top-level :func:`_expression.tablesample` function.
.. versionadded:: 1.1
.. seealso::
:func:`_expression.tablesample` - usage guidelines and parameters
"""
return TableSample._construct(self, sampling, name, seed)
def is_derived_from(self, fromclause):
"""Return True if this FromClause is 'derived' from the given
FromClause.
        For example, an Alias of a Table is derived from that Table.
"""
# this is essentially an "identity" check in the base class.
# Other constructs override this to traverse through
# contained elements.
return fromclause in self._cloned_set
def _is_lexical_equivalent(self, other):
"""Return True if this FromClause and the other represent
the same lexical identity.
This tests if either one is a copy of the other, or
if they are the same via annotation identity.
"""
return self._cloned_set.intersection(other._cloned_set)
@util.dependencies("sqlalchemy.sql.util")
def replace_selectable(self, sqlutil, old, alias):
"""replace all occurrences of FromClause 'old' with the given Alias
object, returning a copy of this :class:`_expression.FromClause`.
"""
return sqlutil.ClauseAdapter(alias).traverse(self)
def correspond_on_equivalents(self, column, equivalents):
"""Return corresponding_column for the given column, or if None
search for a match in the given dictionary.
"""
col = self.corresponding_column(column, require_embedded=True)
        if col is None and column in equivalents:
            for equiv in equivalents[column]:
nc = self.corresponding_column(equiv, require_embedded=True)
if nc:
return nc
return col
def corresponding_column(self, column, require_embedded=False):
"""Given a :class:`_expression.ColumnElement`, return the exported
:class:`_expression.ColumnElement` object from this
:class:`expression.Selectable`
which corresponds to that original
:class:`~sqlalchemy.schema.Column` via a common ancestor
column.
:param column: the target :class:`_expression.ColumnElement`
to be matched
:param require_embedded: only return corresponding columns for
the given :class:`_expression.ColumnElement`, if the given
:class:`_expression.ColumnElement`
is actually present within a sub-element
of this :class:`_expression.FromClause`.
Normally the column will match if
it merely shares a common ancestor with one of the exported
columns of this :class:`_expression.FromClause`.
"""
def embedded(expanded_proxy_set, target_set):
for t in target_set.difference(expanded_proxy_set):
if not set(_expand_cloned([t])).intersection(
expanded_proxy_set
):
return False
return True
# don't dig around if the column is locally present
if self.c.contains_column(column):
return column
col, intersect = None, None
target_set = column.proxy_set
cols = self.c._all_columns
for c in cols:
expanded_proxy_set = set(_expand_cloned(c.proxy_set))
i = target_set.intersection(expanded_proxy_set)
if i and (
not require_embedded
or embedded(expanded_proxy_set, target_set)
):
if col is None:
# no corresponding column yet, pick this one.
col, intersect = c, i
elif len(i) > len(intersect):
# 'c' has a larger field of correspondence than
# 'col'. i.e. selectable.c.a1_x->a1.c.x->table.c.x
# matches a1.c.x->table.c.x better than
# selectable.c.x->table.c.x does.
col, intersect = c, i
elif i == intersect:
# they have the same field of correspondence. see
# which proxy_set has fewer columns in it, which
# indicates a closer relationship with the root
# column. Also take into account the "weight"
# attribute which CompoundSelect() uses to give
# higher precedence to columns based on vertical
# position in the compound statement, and discard
# columns that have no reference to the target
# column (also occurs with CompoundSelect)
col_distance = util.reduce(
operator.add,
[
sc._annotations.get("weight", 1)
for sc in col._uncached_proxy_set()
if sc.shares_lineage(column)
],
)
c_distance = util.reduce(
operator.add,
[
sc._annotations.get("weight", 1)
for sc in c._uncached_proxy_set()
if sc.shares_lineage(column)
],
)
if c_distance < col_distance:
col, intersect = c, i
return col
@property
def description(self):
"""a brief description of this FromClause.
Used primarily for error message formatting.
"""
return getattr(self, "name", self.__class__.__name__ + " object")
def _reset_exported(self):
"""delete memoized collections when a FromClause is cloned."""
self._memoized_property.expire_instance(self)
@_memoized_property
def columns(self):
"""A named-based collection of :class:`_expression.ColumnElement`
objects
maintained by this :class:`_expression.FromClause`.
The :attr:`.columns`, or :attr:`.c` collection, is the gateway
to the construction of SQL expressions using table-bound or
other selectable-bound columns::
select([mytable]).where(mytable.c.somecolumn == 5)
"""
if "_columns" not in self.__dict__:
self._init_collections()
self._populate_column_collection()
return self._columns.as_immutable()
@_memoized_property
def primary_key(self):
"""Return the collection of Column objects which comprise the
primary key of this FromClause."""
self._init_collections()
self._populate_column_collection()
return self.primary_key
@_memoized_property
def foreign_keys(self):
"""Return the collection of ForeignKey objects which this
FromClause references."""
self._init_collections()
self._populate_column_collection()
return self.foreign_keys
c = property(
attrgetter("columns"),
doc="An alias for the :attr:`.columns` attribute.",
)
_select_iterable = property(attrgetter("columns"))
def _init_collections(self):
assert "_columns" not in self.__dict__
assert "primary_key" not in self.__dict__
assert "foreign_keys" not in self.__dict__
self._columns = ColumnCollection()
self.primary_key = ColumnSet()
self.foreign_keys = set()
@property
def _cols_populated(self):
return "_columns" in self.__dict__
def _populate_column_collection(self):
"""Called on subclasses to establish the .c collection.
Each implementation has a different way of establishing
this collection.
"""
def _refresh_for_new_column(self, column):
"""Given a column added to the .c collection of an underlying
selectable, produce the local version of that column, assuming this
selectable ultimately should proxy this column.
        This is used to "ping" a derived selectable to add a new column
        to its .c. collection when a Column has been added to one of the
        Table objects it ultimately derives from.
If the given selectable hasn't populated its .c. collection yet,
it should at least pass on the message to the contained selectables,
but it will return None.
This method is currently used by Declarative to allow Table
columns to be added to a partially constructed inheritance
mapping that may have already produced joins. The method
isn't public right now, as the full span of implications
and/or caveats aren't yet clear.
It's also possible that this functionality could be invoked by
default via an event, which would require that
selectables maintain a weak referencing collection of all
derivations.
"""
if not self._cols_populated:
return None
elif column.key in self.columns and self.columns[column.key] is column:
return column
else:
return None
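# Illustrative sketch (not part of the library): corresponding_column()
# as implemented above maps a column to its proxy in a derived
# selectable through shared lineage.  Names are hypothetical.
def _demo_corresponding_column():  # pragma: no cover
    from sqlalchemy import column, table

    t = table("t", column("x"))
    a = t.alias("a")
    # a.c.x is a proxy for t.c.x, so the original column "corresponds"
    # to the alias's exported column.
    assert a.corresponding_column(t.c.x) is a.c.x
    return a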
class Join(FromClause):
"""represent a ``JOIN`` construct between two
:class:`_expression.FromClause`
elements.
The public constructor function for :class:`_expression.Join`
is the module-level
:func:`_expression.join()` function, as well as the
:meth:`_expression.FromClause.join` method
of any :class:`_expression.FromClause` (e.g. such as
:class:`_schema.Table`).
.. seealso::
:func:`_expression.join`
:meth:`_expression.FromClause.join`
"""
__visit_name__ = "join"
_is_join = True
def __init__(self, left, right, onclause=None, isouter=False, full=False):
"""Construct a new :class:`_expression.Join`.
The usual entrypoint here is the :func:`_expression.join`
function or the :meth:`_expression.FromClause.join` method of any
:class:`_expression.FromClause` object.
"""
self.left = _interpret_as_from(left)
self.right = _interpret_as_from(right).self_group()
if onclause is None:
self.onclause = self._match_primaries(self.left, self.right)
else:
self.onclause = onclause
self.isouter = isouter
self.full = full
@classmethod
def _create_outerjoin(cls, left, right, onclause=None, full=False):
"""Return an ``OUTER JOIN`` clause element.
The returned object is an instance of :class:`_expression.Join`.
Similar functionality is also available via the
:meth:`_expression.FromClause.outerjoin()` method on any
:class:`_expression.FromClause`.
:param left: The left side of the join.
:param right: The right side of the join.
:param onclause: Optional criterion for the ``ON`` clause, is
derived from foreign key relationships established between
left and right otherwise.
To chain joins together, use the :meth:`_expression.FromClause.join`
or
:meth:`_expression.FromClause.outerjoin` methods on the resulting
:class:`_expression.Join` object.
"""
return cls(left, right, onclause, isouter=True, full=full)
@classmethod
def _create_join(
cls, left, right, onclause=None, isouter=False, full=False
):
"""Produce a :class:`_expression.Join` object, given two
:class:`_expression.FromClause`
expressions.
E.g.::
j = join(user_table, address_table,
user_table.c.id == address_table.c.user_id)
stmt = select([user_table]).select_from(j)
would emit SQL along the lines of::
SELECT user.id, user.name FROM user
JOIN address ON user.id = address.user_id
Similar functionality is available given any
:class:`_expression.FromClause` object (e.g. such as a
:class:`_schema.Table`) using
the :meth:`_expression.FromClause.join` method.
:param left: The left side of the join.
:param right: the right side of the join; this is any
:class:`_expression.FromClause` object such as a
:class:`_schema.Table` object, and
may also be a selectable-compatible object such as an ORM-mapped
class.
:param onclause: a SQL expression representing the ON clause of the
join. If left at ``None``, :meth:`_expression.FromClause.join`
will attempt to
join the two tables based on a foreign key relationship.
:param isouter: if True, render a LEFT OUTER JOIN, instead of JOIN.
:param full: if True, render a FULL OUTER JOIN, instead of JOIN.
.. versionadded:: 1.1
.. seealso::
:meth:`_expression.FromClause.join` - method form,
based on a given left side
:class:`_expression.Join` - the type of object produced
"""
return cls(left, right, onclause, isouter, full)
@property
def description(self):
return "Join object on %s(%d) and %s(%d)" % (
self.left.description,
id(self.left),
self.right.description,
id(self.right),
)
def is_derived_from(self, fromclause):
return (
fromclause is self
or self.left.is_derived_from(fromclause)
or self.right.is_derived_from(fromclause)
)
def self_group(self, against=None):
return FromGrouping(self)
@util.dependencies("sqlalchemy.sql.util")
def _populate_column_collection(self, sqlutil):
columns = [c for c in self.left.columns] + [
c for c in self.right.columns
]
self.primary_key.extend(
sqlutil.reduce_columns(
(c for c in columns if c.primary_key), self.onclause
)
)
self._columns.update((col._label, col) for col in columns)
self.foreign_keys.update(
itertools.chain(*[col.foreign_keys for col in columns])
)
def _refresh_for_new_column(self, column):
col = self.left._refresh_for_new_column(column)
if col is None:
col = self.right._refresh_for_new_column(column)
if col is not None:
if self._cols_populated:
self._columns[col._label] = col
self.foreign_keys.update(col.foreign_keys)
if col.primary_key:
self.primary_key.add(col)
return col
return None
def _copy_internals(self, clone=_clone, **kw):
self._reset_exported()
self.left = clone(self.left, **kw)
self.right = clone(self.right, **kw)
self.onclause = clone(self.onclause, **kw)
def get_children(self, **kwargs):
return self.left, self.right, self.onclause
def _match_primaries(self, left, right):
if isinstance(left, Join):
left_right = left.right
else:
left_right = None
return self._join_condition(left, right, a_subset=left_right)
@classmethod
@util.deprecated_params(
ignore_nonexistent_tables=(
"0.9",
"The :paramref:`.join_condition.ignore_nonexistent_tables` "
"parameter is deprecated and will be removed in a future "
"release. Tables outside of the two tables being handled "
"are no longer considered.",
)
)
def _join_condition(
cls,
a,
b,
ignore_nonexistent_tables=False,
a_subset=None,
consider_as_foreign_keys=None,
):
"""create a join condition between two tables or selectables.
e.g.::
join_condition(tablea, tableb)
would produce an expression along the lines of::
tablea.c.id==tableb.c.tablea_id
The join is determined based on the foreign key relationships
between the two selectables. If there are multiple ways
to join, or no way to join, an error is raised.
:param ignore_nonexistent_tables: unused - tables outside of the
two tables being handled are not considered.
:param a_subset: An optional expression that is a sub-component
of ``a``. An attempt will be made to join to just this sub-component
first before looking at the full ``a`` construct, and if found
will be successful even if there are other ways to join to ``a``.
This allows the "right side" of a join to be passed thereby
providing a "natural join".
"""
constraints = cls._joincond_scan_left_right(
a, a_subset, b, consider_as_foreign_keys
)
if len(constraints) > 1:
cls._joincond_trim_constraints(
a, b, constraints, consider_as_foreign_keys
)
if len(constraints) == 0:
if isinstance(b, FromGrouping):
hint = (
" Perhaps you meant to convert the right side to a "
"subquery using alias()?"
)
else:
hint = ""
raise exc.NoForeignKeysError(
"Can't find any foreign key relationships "
"between '%s' and '%s'.%s"
% (a.description, b.description, hint)
)
crit = [(x == y) for x, y in list(constraints.values())[0]]
if len(crit) == 1:
return crit[0]
else:
return and_(*crit)
@classmethod
def _can_join(cls, left, right, consider_as_foreign_keys=None):
if isinstance(left, Join):
left_right = left.right
else:
left_right = None
constraints = cls._joincond_scan_left_right(
a=left,
b=right,
a_subset=left_right,
consider_as_foreign_keys=consider_as_foreign_keys,
)
return bool(constraints)
@classmethod
def _joincond_scan_left_right(
cls, a, a_subset, b, consider_as_foreign_keys
):
constraints = collections.defaultdict(list)
for left in (a_subset, a):
if left is None:
continue
for fk in sorted(
b.foreign_keys, key=lambda fk: fk.parent._creation_order
):
if (
consider_as_foreign_keys is not None
and fk.parent not in consider_as_foreign_keys
):
continue
try:
col = fk.get_referent(left)
except exc.NoReferenceError as nrte:
if nrte.table_name == left.name:
raise
else:
continue
if col is not None:
constraints[fk.constraint].append((col, fk.parent))
if left is not b:
for fk in sorted(
left.foreign_keys, key=lambda fk: fk.parent._creation_order
):
if (
consider_as_foreign_keys is not None
and fk.parent not in consider_as_foreign_keys
):
continue
try:
col = fk.get_referent(b)
except exc.NoReferenceError as nrte:
if nrte.table_name == b.name:
raise
else:
continue
if col is not None:
constraints[fk.constraint].append((col, fk.parent))
if constraints:
break
return constraints
@classmethod
def _joincond_trim_constraints(
cls, a, b, constraints, consider_as_foreign_keys
):
# more than one constraint matched. narrow down the list
# to include just those FKCs that match exactly to
# "consider_as_foreign_keys".
if consider_as_foreign_keys:
for const in list(constraints):
if set(f.parent for f in const.elements) != set(
consider_as_foreign_keys
):
del constraints[const]
# if still multiple constraints, but
# they all refer to the exact same end result, use it.
if len(constraints) > 1:
dedupe = set(tuple(crit) for crit in constraints.values())
if len(dedupe) == 1:
key = list(constraints)[0]
constraints = {key: constraints[key]}
if len(constraints) != 1:
raise exc.AmbiguousForeignKeysError(
"Can't determine join between '%s' and '%s'; "
"tables have more than one foreign key "
"constraint relationship between them. "
"Please specify the 'onclause' of this "
"join explicitly." % (a.description, b.description)
)
def select(self, whereclause=None, **kwargs):
r"""Create a :class:`_expression.Select` from this
:class:`_expression.Join`.
The equivalent long-hand form, given a :class:`_expression.Join`
object
``j``, is::
from sqlalchemy import select
j = select([j.left, j.right], **kw).\
where(whereclause).\
select_from(j)
:param whereclause: the WHERE criterion that will be sent to
the :func:`select()` function
:param \**kwargs: all other kwargs are sent to the
underlying :func:`select()` function.
"""
collist = [self.left, self.right]
return Select(collist, whereclause, from_obj=[self], **kwargs)
@property
def bind(self):
return self.left.bind or self.right.bind
@util.dependencies("sqlalchemy.sql.util")
def alias(self, sqlutil, name=None, flat=False):
r"""return an alias of this :class:`_expression.Join`.
The default behavior here is to first produce a SELECT
construct from this :class:`_expression.Join`, then to produce an
:class:`_expression.Alias` from that. So given a join of the form::
j = table_a.join(table_b, table_a.c.id == table_b.c.a_id)
The JOIN by itself would look like::
table_a JOIN table_b ON table_a.id = table_b.a_id
Whereas the alias of the above, ``j.alias()``, would in a
SELECT context look like::
(SELECT table_a.id AS table_a_id, table_b.id AS table_b_id,
table_b.a_id AS table_b_a_id
FROM table_a
JOIN table_b ON table_a.id = table_b.a_id) AS anon_1
The equivalent long-hand form, given a :class:`_expression.Join`
object
``j``, is::
from sqlalchemy import select, alias
j = alias(
select([j.left, j.right]).\
select_from(j).\
with_labels(True).\
correlate(False),
name=name
)
The selectable produced by :meth:`_expression.Join.alias`
features the same
columns as that of the two individual selectables presented under
a single name - the individual columns are "auto-labeled", meaning
the ``.c.`` collection of the resulting :class:`_expression.Alias`
represents
the names of the individual columns using a
        ``<tablename>_<columnname>`` scheme::
j.c.table_a_id
j.c.table_b_a_id
:meth:`_expression.Join.alias` also features an alternate
option for aliasing joins which produces no enclosing SELECT and
does not normally apply labels to the column names. The
``flat=True`` option will call :meth:`_expression.FromClause.alias`
against the left and right sides individually.
        Using this option, no new ``SELECT`` is produced;
        instead, starting from a construct such as::
j = table_a.join(table_b, table_a.c.id == table_b.c.a_id)
j = j.alias(flat=True)
we get a result like this::
table_a AS table_a_1 JOIN table_b AS table_b_1 ON
table_a_1.id = table_b_1.a_id
The ``flat=True`` argument is also propagated to the contained
selectables, so that a composite join such as::
j = table_a.join(
table_b.join(table_c,
table_b.c.id == table_c.c.b_id),
table_b.c.a_id == table_a.c.id
).alias(flat=True)
Will produce an expression like::
table_a AS table_a_1 JOIN (
table_b AS table_b_1 JOIN table_c AS table_c_1
ON table_b_1.id = table_c_1.b_id
) ON table_a_1.id = table_b_1.a_id
The standalone :func:`_expression.alias` function as well as the
base :meth:`_expression.FromClause.alias`
method also support the ``flat=True``
argument as a no-op, so that the argument can be passed to the
``alias()`` method of any selectable.
.. versionadded:: 0.9.0 Added the ``flat=True`` option to create
"aliases" of joins without enclosing inside of a SELECT
subquery.
:param name: name given to the alias.
:param flat: if True, produce an alias of the left and right
sides of this :class:`_expression.Join` and return the join of those
two selectables. This produces join expression that does not
include an enclosing SELECT.
.. versionadded:: 0.9.0
.. seealso::
:ref:`core_tutorial_aliases`
:func:`_expression.alias`
"""
if flat:
assert name is None, "Can't send name argument with flat"
left_a, right_a = (
self.left.alias(flat=True),
self.right.alias(flat=True),
)
adapter = sqlutil.ClauseAdapter(left_a).chain(
sqlutil.ClauseAdapter(right_a)
)
return left_a.join(
right_a,
adapter.traverse(self.onclause),
isouter=self.isouter,
full=self.full,
)
else:
return self.select(use_labels=True, correlate=False).alias(name)
@property
def _hide_froms(self):
return itertools.chain(
*[_from_objects(x.left, x.right) for x in self._cloned_set]
)
@property
def _from_objects(self):
return (
[self]
+ self.onclause._from_objects
+ self.left._from_objects
+ self.right._from_objects
)
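# Illustrative sketch (not part of the library): Join.alias(flat=True)
# as documented above aliases each side of the join individually
# instead of wrapping the join in an enclosing SELECT subquery.  The
# table names are hypothetical.
def _demo_join_alias_flat():  # pragma: no cover
    from sqlalchemy import column, table

    a = table("a", column("id"))
    b = table("b", column("id"), column("a_id"))
    j = a.join(b, a.c.id == b.c.a_id)
    # Renders roughly "a AS a_1 JOIN b AS b_1 ON a_1.id = b_1.a_id".
    return j.alias(flat=True)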
class Alias(FromClause):
"""Represents an table or selectable alias (AS).
Represents an alias, as typically applied to any table or
sub-select within a SQL statement using the ``AS`` keyword (or
without the keyword on certain databases such as Oracle).
This object is constructed from the :func:`_expression.alias` module
level function as well as the :meth:`_expression.FromClause.alias`
method available
on all :class:`_expression.FromClause` subclasses.
"""
__visit_name__ = "alias"
named_with_column = True
_is_from_container = True
def __init__(self, *arg, **kw):
raise NotImplementedError(
"The %s class is not intended to be constructed "
"directly. Please use the %s() standalone "
"function or the %s() method available from appropriate "
"selectable objects."
% (
self.__class__.__name__,
self.__class__.__name__.lower(),
self.__class__.__name__.lower(),
)
)
@classmethod
def _construct(cls, *arg, **kw):
obj = cls.__new__(cls)
obj._init(*arg, **kw)
return obj
@classmethod
def _factory(cls, selectable, name=None, flat=False):
"""Return an :class:`_expression.Alias` object.
An :class:`_expression.Alias` represents any
:class:`_expression.FromClause`
with an alternate name assigned within SQL, typically using the ``AS``
clause when generated, e.g. ``SELECT * FROM table AS aliasname``.
Similar functionality is available via the
:meth:`_expression.FromClause.alias` method
available on all :class:`_expression.FromClause` subclasses.
In terms of a
SELECT object as generated from the :func:`_expression.select`
function, the
:meth:`_expression.SelectBase.alias` method returns an
:class:`_expression.Alias` or
similar object which represents a named, parenthesized subquery.
When an :class:`_expression.Alias` is created from a
:class:`_schema.Table` object,
this has the effect of the table being rendered
as ``tablename AS aliasname`` in a SELECT statement.
For :func:`_expression.select` objects,
the effect is that of creating a named
subquery, i.e. ``(select ...) AS aliasname``.
The ``name`` parameter is optional, and provides the name
to use in the rendered SQL. If blank, an "anonymous" name
will be deterministically generated at compile time.
Deterministic means the name is guaranteed to be unique against
other constructs used in the same statement, and will also be the
same name for each successive compilation of the same statement
object.
:param selectable: any :class:`_expression.FromClause` subclass,
such as a table, select statement, etc.
:param name: string name to be assigned as the alias.
If ``None``, a name will be deterministically generated
at compile time.
:param flat: Will be passed through to if the given selectable
is an instance of :class:`_expression.Join` - see
:meth:`_expression.Join.alias`
for details.
.. versionadded:: 0.9.0
"""
return _interpret_as_from(selectable).alias(name=name, flat=flat)
def _init(self, selectable, name=None):
baseselectable = selectable
while isinstance(baseselectable, Alias):
baseselectable = baseselectable.element
self.original = baseselectable
self.supports_execution = baseselectable.supports_execution
if self.supports_execution:
self._execution_options = baseselectable._execution_options
self.element = selectable
if name is None:
if self.original.named_with_column:
name = getattr(self.original, "name", None)
name = _anonymous_label("%%(%d %s)s" % (id(self), name or "anon"))
self.name = name
def self_group(self, against=None):
if (
isinstance(against, CompoundSelect)
and isinstance(self.original, Select)
and self.original._needs_parens_for_grouping()
):
return FromGrouping(self)
return super(Alias, self).self_group(against=against)
@property
def description(self):
if util.py3k:
return self.name
else:
return self.name.encode("ascii", "backslashreplace")
def as_scalar(self):
try:
return self.element.as_scalar()
except AttributeError as err:
util.raise_(
AttributeError(
"Element %s does not support "
"'as_scalar()'" % self.element
),
replace_context=err,
)
def is_derived_from(self, fromclause):
if fromclause in self._cloned_set:
return True
return self.element.is_derived_from(fromclause)
def _populate_column_collection(self):
for col in self.element.columns._all_columns:
col._make_proxy(self)
def _refresh_for_new_column(self, column):
col = self.element._refresh_for_new_column(column)
if col is not None:
if not self._cols_populated:
return None
else:
return col._make_proxy(self)
else:
return None
def _copy_internals(self, clone=_clone, **kw):
# don't apply anything to an aliased Table
# for now. May want to drive this from
# the given **kw.
if isinstance(self.element, TableClause):
return
self._reset_exported()
self.element = clone(self.element, **kw)
baseselectable = self.element
while isinstance(baseselectable, Alias):
baseselectable = baseselectable.element
self.original = baseselectable
def get_children(self, column_collections=True, **kw):
if column_collections:
for c in self.c:
yield c
yield self.element
@property
def _from_objects(self):
return [self]
@property
def bind(self):
return self.element.bind
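# Illustrative sketch (not part of the library): anonymous versus named
# aliases produced by the Alias machinery above.  The table is
# hypothetical.
def _demo_alias_naming():  # pragma: no cover
    from sqlalchemy import column, table

    t = table("t", column("x"))
    named = t.alias("t1")  # renders "t AS t1"
    anon = t.alias()  # deterministic anonymous name at compile time
    return named, anon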
class Lateral(Alias):
"""Represent a LATERAL subquery.
This object is constructed from the :func:`_expression.lateral` module
level function as well as the :meth:`_expression.FromClause.lateral`
method available
on all :class:`_expression.FromClause` subclasses.
While LATERAL is part of the SQL standard, currently only more recent
PostgreSQL versions provide support for this keyword.
.. versionadded:: 1.1
.. seealso::
:ref:`lateral_selects` - overview of usage.
"""
__visit_name__ = "lateral"
_is_lateral = True
@classmethod
def _factory(cls, selectable, name=None):
"""Return a :class:`_expression.Lateral` object.
:class:`_expression.Lateral` is an :class:`_expression.Alias`
subclass that represents
a subquery with the LATERAL keyword applied to it.
The special behavior of a LATERAL subquery is that it appears in the
FROM clause of an enclosing SELECT, but may correlate to other
FROM clauses of that SELECT. It is a special case of subquery
only supported by a small number of backends, currently more recent
PostgreSQL versions.
.. versionadded:: 1.1
.. seealso::
:ref:`lateral_selects` - overview of usage.
"""
return _interpret_as_from(selectable).lateral(name=name)
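# Illustrative sketch (not part of the library): a LATERAL subquery may
# refer to other FROM elements of the enclosing SELECT (PostgreSQL).
# All names below are hypothetical.
def _demo_lateral():  # pragma: no cover
    from sqlalchemy import column, select, table, true

    people = table("people", column("id"), column("name"))
    books = table("books", column("people_id"), column("title"))
    sub = (
        select([books.c.title])
        .where(books.c.people_id == people.c.id)
        .lateral("person_books")
    )
    return select([people.c.name, sub.c.title]).select_from(
        people.join(sub, true())
    )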
class TableSample(Alias):
"""Represent a TABLESAMPLE clause.
This object is constructed from the :func:`_expression.tablesample` module
level function as well as the :meth:`_expression.FromClause.tablesample`
method
available on all :class:`_expression.FromClause` subclasses.
.. versionadded:: 1.1
.. seealso::
:func:`_expression.tablesample`
"""
__visit_name__ = "tablesample"
@classmethod
def _factory(cls, selectable, sampling, name=None, seed=None):
"""Return a :class:`_expression.TableSample` object.
:class:`_expression.TableSample` is an :class:`_expression.Alias`
subclass that represents
a table with the TABLESAMPLE clause applied to it.
:func:`_expression.tablesample`
is also available from the :class:`_expression.FromClause`
class via the
:meth:`_expression.FromClause.tablesample` method.
        The TABLESAMPLE clause allows selecting an approximate percentage
        of rows from a table at random. It supports multiple sampling
        methods, most commonly BERNOULLI and SYSTEM.
e.g.::
from sqlalchemy import func
selectable = people.tablesample(
func.bernoulli(1),
name='alias',
seed=func.random())
stmt = select([selectable.c.people_id])
Assuming ``people`` with a column ``people_id``, the above
statement would render as::
SELECT alias.people_id FROM
people AS alias TABLESAMPLE bernoulli(:bernoulli_1)
REPEATABLE (random())
.. versionadded:: 1.1
:param sampling: a ``float`` percentage between 0 and 100 or
:class:`_functions.Function`.
:param name: optional alias name
:param seed: any real-valued SQL expression. When specified, the
REPEATABLE sub-clause is also rendered.
"""
return _interpret_as_from(selectable).tablesample(
sampling, name=name, seed=seed
)
def _init(self, selectable, sampling, name=None, seed=None):
self.sampling = sampling
self.seed = seed
super(TableSample, self)._init(selectable, name=name)
@util.dependencies("sqlalchemy.sql.functions")
def _get_method(self, functions):
if isinstance(self.sampling, functions.Function):
return self.sampling
else:
return functions.func.system(self.sampling)
class CTE(Generative, HasPrefixes, HasSuffixes, Alias):
"""Represent a Common Table Expression.
The :class:`_expression.CTE` object is obtained using the
:meth:`_expression.SelectBase.cte` method from any selectable.
See that method for complete examples.
"""
__visit_name__ = "cte"
@classmethod
def _factory(cls, selectable, name=None, recursive=False):
r"""Return a new :class:`_expression.CTE`,
or Common Table Expression instance.
Please see :meth:`_expression.HasCTE.cte` for detail on CTE usage.
"""
return _interpret_as_from(selectable).cte(
name=name, recursive=recursive
)
def _init(
self,
selectable,
name=None,
recursive=False,
_cte_alias=None,
_restates=frozenset(),
_prefixes=None,
_suffixes=None,
):
self.recursive = recursive
self._cte_alias = _cte_alias
self._restates = _restates
if _prefixes:
self._prefixes = _prefixes
if _suffixes:
self._suffixes = _suffixes
super(CTE, self)._init(selectable, name=name)
def _copy_internals(self, clone=_clone, **kw):
super(CTE, self)._copy_internals(clone, **kw)
if self._cte_alias is not None:
self._cte_alias = clone(self._cte_alias, **kw)
self._restates = frozenset(
[clone(elem, **kw) for elem in self._restates]
)
@util.dependencies("sqlalchemy.sql.dml")
def _populate_column_collection(self, dml):
if isinstance(self.element, dml.UpdateBase):
for col in self.element._returning:
col._make_proxy(self)
else:
for col in self.element.columns._all_columns:
col._make_proxy(self)
def alias(self, name=None, flat=False):
"""Return an :class:`_expression.Alias` of this
:class:`_expression.CTE`.
This method is a CTE-specific specialization of the
:class:`_expression.FromClause.alias` method.
.. seealso::
:ref:`core_tutorial_aliases`
:func:`_expression.alias`
"""
return CTE._construct(
self.original,
name=name,
recursive=self.recursive,
_cte_alias=self,
_prefixes=self._prefixes,
_suffixes=self._suffixes,
)
def union(self, other):
return CTE._construct(
self.original.union(other),
name=self.name,
recursive=self.recursive,
_restates=self._restates.union([self]),
_prefixes=self._prefixes,
_suffixes=self._suffixes,
)
def union_all(self, other):
return CTE._construct(
self.original.union_all(other),
name=self.name,
recursive=self.recursive,
_restates=self._restates.union([self]),
_prefixes=self._prefixes,
_suffixes=self._suffixes,
)
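# Illustrative sketch (not part of the library): since CTE mixes in
# HasPrefixes, the PostgreSQL MATERIALIZED hint (1.3.13+) can be
# attached via prefix_with().  Names are hypothetical.
def _demo_cte_materialized():  # pragma: no cover
    from sqlalchemy import column, select, table

    t = table("t", column("x"))
    cte = select([t.c.x]).cte("c").prefix_with(
        "MATERIALIZED", dialect="postgresql"
    )
    # Renders "WITH c AS MATERIALIZED (SELECT t.x FROM t) ..."
    return select([cte.c.x])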
class HasCTE(object):
"""Mixin that declares a class to include CTE support.
.. versionadded:: 1.1
"""
def cte(self, name=None, recursive=False):
r"""Return a new :class:`_expression.CTE`,
or Common Table Expression instance.
Common table expressions are a SQL standard whereby SELECT
statements can draw upon secondary statements specified along
with the primary statement, using a clause called "WITH".
Special semantics regarding UNION can also be employed to
allow "recursive" queries, where a SELECT statement can draw
upon the set of rows that have previously been selected.
CTEs can also be applied to DML constructs UPDATE, INSERT
and DELETE on some databases, both as a source of CTE rows
when combined with RETURNING, as well as a consumer of
CTE rows.
.. versionchanged:: 1.1 Added support for UPDATE/INSERT/DELETE as
CTE, CTEs added to UPDATE/INSERT/DELETE.
SQLAlchemy detects :class:`_expression.CTE` objects, which are treated
similarly to :class:`_expression.Alias` objects, as special elements
to be delivered to the FROM clause of the statement as well
as to a WITH clause at the top of the statement.
For special prefixes such as PostgreSQL "MATERIALIZED" and
"NOT MATERIALIZED", the :meth:`_expression.CTE.prefix_with`
method may be
used to establish these.
.. versionchanged:: 1.3.13 Added support for prefixes.
In particular - MATERIALIZED and NOT MATERIALIZED.
:param name: name given to the common table expression. Like
        :meth:`_expression.FromClause.alias`, the name can be left as ``None``
in which case an anonymous symbol will be used at query
compile time.
:param recursive: if ``True``, will render ``WITH RECURSIVE``.
A recursive common table expression is intended to be used in
conjunction with UNION ALL in order to derive rows
from those already selected.
The following examples include two from PostgreSQL's documentation at
http://www.postgresql.org/docs/current/static/queries-with.html,
as well as additional examples.
Example 1, non recursive::
from sqlalchemy import (Table, Column, String, Integer,
MetaData, select, func)
metadata = MetaData()
orders = Table('orders', metadata,
Column('region', String),
Column('amount', Integer),
Column('product', String),
Column('quantity', Integer)
)
regional_sales = select([
orders.c.region,
func.sum(orders.c.amount).label('total_sales')
]).group_by(orders.c.region).cte("regional_sales")
top_regions = select([regional_sales.c.region]).\
where(
regional_sales.c.total_sales >
select([
func.sum(regional_sales.c.total_sales)/10
])
).cte("top_regions")
statement = select([
orders.c.region,
orders.c.product,
func.sum(orders.c.quantity).label("product_units"),
func.sum(orders.c.amount).label("product_sales")
]).where(orders.c.region.in_(
select([top_regions.c.region])
)).group_by(orders.c.region, orders.c.product)
result = conn.execute(statement).fetchall()
Example 2, WITH RECURSIVE::
from sqlalchemy import (Table, Column, String, Integer,
MetaData, select, func)
metadata = MetaData()
parts = Table('parts', metadata,
Column('part', String),
Column('sub_part', String),
Column('quantity', Integer),
)
included_parts = select([
parts.c.sub_part,
parts.c.part,
parts.c.quantity]).\
where(parts.c.part=='our part').\
cte(recursive=True)
incl_alias = included_parts.alias()
parts_alias = parts.alias()
included_parts = included_parts.union_all(
select([
parts_alias.c.sub_part,
parts_alias.c.part,
parts_alias.c.quantity
]).
where(parts_alias.c.part==incl_alias.c.sub_part)
)
statement = select([
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).
label('total_quantity')
]).\
group_by(included_parts.c.sub_part)
result = conn.execute(statement).fetchall()
Example 3, an upsert using UPDATE and INSERT with CTEs::
from datetime import date
from sqlalchemy import (MetaData, Table, Column, Integer,
Date, select, literal, and_, exists)
metadata = MetaData()
visitors = Table('visitors', metadata,
Column('product_id', Integer, primary_key=True),
Column('date', Date, primary_key=True),
Column('count', Integer),
)
# add 5 visitors for the product_id == 1
product_id = 1
day = date.today()
count = 5
update_cte = (
visitors.update()
.where(and_(visitors.c.product_id == product_id,
visitors.c.date == day))
.values(count=visitors.c.count + count)
.returning(literal(1))
.cte('update_cte')
)
upsert = visitors.insert().from_select(
[visitors.c.product_id, visitors.c.date, visitors.c.count],
select([literal(product_id), literal(day), literal(count)])
.where(~exists(update_cte.select()))
)
connection.execute(upsert)
.. seealso::
:meth:`.orm.query.Query.cte` - ORM version of
:meth:`_expression.HasCTE.cte`.
"""
return CTE._construct(self, name=name, recursive=recursive)
class FromGrouping(FromClause):
"""Represent a grouping of a FROM clause"""
__visit_name__ = "grouping"
def __init__(self, element):
self.element = element
def _init_collections(self):
pass
@property
def columns(self):
return self.element.columns
@property
def primary_key(self):
return self.element.primary_key
@property
def foreign_keys(self):
return self.element.foreign_keys
def is_derived_from(self, element):
return self.element.is_derived_from(element)
def alias(self, **kw):
return FromGrouping(self.element.alias(**kw))
@property
def _hide_froms(self):
return self.element._hide_froms
def get_children(self, **kwargs):
return (self.element,)
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
@property
def _from_objects(self):
return self.element._from_objects
def __getattr__(self, attr):
return getattr(self.element, attr)
def __getstate__(self):
return {"element": self.element}
def __setstate__(self, state):
self.element = state["element"]
class TableClause(Immutable, FromClause):
"""Represents a minimal "table" construct.
This is a lightweight table object that has only a name, a
collection of columns, which are typically produced
by the :func:`_expression.column` function, and a schema::
from sqlalchemy import table, column
user = table("user",
column("id"),
column("name"),
column("description"),
)
The :class:`_expression.TableClause` construct serves as the base for
the more commonly used :class:`_schema.Table` object, providing
the usual set of :class:`_expression.FromClause` services including
the ``.c.`` collection and statement generation methods.
It does **not** provide all the additional schema-level services
of :class:`_schema.Table`, including constraints, references to other
tables, or support for :class:`_schema.MetaData`-level services.
It's useful
on its own as an ad-hoc construct used to generate quick SQL
statements when a more fully fledged :class:`_schema.Table`
is not on hand.
"""
__visit_name__ = "table"
named_with_column = True
implicit_returning = False
""":class:`_expression.TableClause`
    doesn't support having a primary key or column-level
    defaults, so implicit returning doesn't apply."""
_autoincrement_column = None
"""No PK or default support so no autoincrement column."""
def __init__(self, name, *columns, **kw):
"""Produce a new :class:`_expression.TableClause`.
The object returned is an instance of :class:`_expression.TableClause`
, which
represents the "syntactical" portion of the schema-level
:class:`_schema.Table` object.
It may be used to construct lightweight table constructs.
.. versionchanged:: 1.0.0 :func:`_expression.table` can now
be imported from the plain ``sqlalchemy`` namespace like any
other SQL element.
:param name: Name of the table.
:param columns: A collection of :func:`_expression.column` constructs.
:param schema: The schema name for this table.
.. versionadded:: 1.3.18 :func:`_expression.table` can now
accept a ``schema`` argument.
"""
super(TableClause, self).__init__()
self.name = self.fullname = name
self._columns = ColumnCollection()
self.primary_key = ColumnSet()
self.foreign_keys = set()
for c in columns:
self.append_column(c)
schema = kw.pop("schema", None)
if schema is not None:
self.schema = schema
if kw:
raise exc.ArgumentError("Unsupported argument(s): %s" % list(kw))
def _init_collections(self):
pass
@util.memoized_property
def description(self):
if util.py3k:
return self.name
else:
return self.name.encode("ascii", "backslashreplace")
def append_column(self, c):
self._columns[c.key] = c
c.table = self
def get_children(self, column_collections=True, **kwargs):
if column_collections:
return [c for c in self.c]
else:
return []
@util.dependencies("sqlalchemy.sql.dml")
def insert(self, dml, values=None, inline=False, **kwargs):
"""Generate an :func:`_expression.insert` construct against this
:class:`_expression.TableClause`.
E.g.::
table.insert().values(name='foo')
See :func:`_expression.insert` for argument and usage information.
"""
return dml.Insert(self, values=values, inline=inline, **kwargs)
@util.dependencies("sqlalchemy.sql.dml")
def update(
self, dml, whereclause=None, values=None, inline=False, **kwargs
):
"""Generate an :func:`_expression.update` construct against this
:class:`_expression.TableClause`.
E.g.::
table.update().where(table.c.id==7).values(name='foo')
See :func:`_expression.update` for argument and usage information.
"""
return dml.Update(
self,
whereclause=whereclause,
values=values,
inline=inline,
**kwargs
)
@util.dependencies("sqlalchemy.sql.dml")
def delete(self, dml, whereclause=None, **kwargs):
"""Generate a :func:`_expression.delete` construct against this
:class:`_expression.TableClause`.
E.g.::
table.delete().where(table.c.id==7)
See :func:`_expression.delete` for argument and usage information.
"""
return dml.Delete(self, whereclause, **kwargs)
@property
def _from_objects(self):
return [self]
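# Illustrative sketch (not part of the library): the lightweight
# table()/column() constructs are enough to drive the DML methods
# defined above, without a MetaData-backed Table.  Names are
# hypothetical.
def _demo_table_clause_dml():  # pragma: no cover
    from sqlalchemy import column, table

    user = table("user", column("id"), column("name"))
    ins = user.insert().values(name="foo")
    upd = user.update().where(user.c.id == 7).values(name="bar")
    return ins, upd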
class ForUpdateArg(ClauseElement):
@classmethod
def parse_legacy_select(self, arg):
"""Parse the for_update argument of :func:`_expression.select`.
:param mode: Defines the lockmode to use.
``None`` - translates to no lockmode
``'update'`` - translates to ``FOR UPDATE``
(standard SQL, supported by most dialects)
``'nowait'`` - translates to ``FOR UPDATE NOWAIT``
(supported by Oracle, PostgreSQL 8.1 upwards)
``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL),
and ``FOR SHARE`` (for PostgreSQL)
            ``'read_nowait'`` - translates to ``FOR SHARE NOWAIT``
            (supported by PostgreSQL)
"""
if arg in (None, False):
return None
nowait = read = False
if arg == "nowait":
nowait = True
elif arg == "read":
read = True
elif arg == "read_nowait":
read = nowait = True
elif arg is not True:
raise exc.ArgumentError("Unknown for_update argument: %r" % arg)
return ForUpdateArg(read=read, nowait=nowait)
@property
def legacy_for_update_value(self):
if self.read and not self.nowait:
return "read"
elif self.read and self.nowait:
return "read_nowait"
elif self.nowait:
return "nowait"
else:
return True
def __eq__(self, other):
return (
isinstance(other, ForUpdateArg)
and other.nowait == self.nowait
and other.read == self.read
and other.skip_locked == self.skip_locked
and other.key_share == self.key_share
and other.of is self.of
)
def __hash__(self):
return id(self)
def _copy_internals(self, clone=_clone, **kw):
if self.of is not None:
self.of = [clone(col, **kw) for col in self.of]
def __init__(
self,
nowait=False,
read=False,
of=None,
skip_locked=False,
key_share=False,
):
"""Represents arguments specified to
:meth:`_expression.Select.for_update`.
.. versionadded:: 0.9.0
"""
self.nowait = nowait
self.read = read
self.skip_locked = skip_locked
self.key_share = key_share
if of is not None:
self.of = [
_interpret_as_column_or_from(elem) for elem in util.to_list(of)
]
else:
self.of = None
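# Illustrative sketch (not part of the library): how the legacy string
# arguments accepted by select(..., for_update=...) map onto
# ForUpdateArg flags.  This demo function is hypothetical.
def _demo_for_update_arg():  # pragma: no cover
    arg = ForUpdateArg.parse_legacy_select("read_nowait")
    assert arg.read and arg.nowait
    assert arg.legacy_for_update_value == "read_nowait"
    return arg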
class SelectBase(HasCTE, Executable, FromClause):
"""Base class for SELECT statements.
This includes :class:`_expression.Select`,
:class:`_selectable.CompoundSelect` and
:class:`.TextAsFrom`.
"""
def as_scalar(self):
"""return a 'scalar' representation of this selectable, which can be
used as a column expression.
Typically, a select statement which has only one column in its columns
clause is eligible to be used as a scalar expression.
The returned object is an instance of
:class:`ScalarSelect`.
"""
return ScalarSelect(self)
def label(self, name):
"""return a 'scalar' representation of this selectable, embedded as a
subquery with a label.
.. seealso::
:meth:`_expression.SelectBase.as_scalar`.
"""
return self.as_scalar().label(name)
@_generative
@util.deprecated(
"0.6",
message="The :meth:`.SelectBase.autocommit` method is deprecated, "
"and will be removed in a future release. Please use the "
"the :paramref:`.Connection.execution_options.autocommit` "
"parameter in conjunction with the "
":meth:`.Executable.execution_options` method.",
)
def autocommit(self):
"""return a new selectable with the 'autocommit' flag set to
True.
"""
self._execution_options = self._execution_options.union(
{"autocommit": True}
)
def _generate(self):
"""Override the default _generate() method to also clear out
exported collections."""
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
s._reset_exported()
return s
@property
def _from_objects(self):
return [self]
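# Illustrative sketch (not part of the library): as_scalar()/label()
# from SelectBase turn a one-column SELECT into a column expression
# usable inside another statement.  Names are hypothetical.
def _demo_scalar_select():  # pragma: no cover
    from sqlalchemy import column, func, select, table

    t = table("t", column("x"))
    max_x = select([func.max(t.c.x)]).as_scalar().label("max_x")
    # The labeled scalar subquery renders inside the columns clause.
    return select([t.c.x, max_x])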
class GenerativeSelect(SelectBase):
"""Base class for SELECT statements where additional elements can be
added.
This serves as the base for :class:`_expression.Select` and
:class:`_selectable.CompoundSelect`
where elements such as ORDER BY, GROUP BY can be added and column
rendering can be controlled. Compare to :class:`.TextAsFrom`, which,
while it subclasses :class:`_expression.SelectBase`
and is also a SELECT construct,
represents a fixed textual string which cannot be altered at this level,
only wrapped as a subquery.
.. versionadded:: 0.9.0 :class:`_expression.GenerativeSelect` was added to
provide functionality specific to :class:`_expression.Select` and
:class:`_selectable.CompoundSelect` while allowing
:class:`_expression.SelectBase` to be
used for other SELECT-like objects, e.g. :class:`.TextAsFrom`.
"""
_order_by_clause = ClauseList()
_group_by_clause = ClauseList()
_limit_clause = None
_offset_clause = None
_for_update_arg = None
def __init__(
self,
use_labels=False,
for_update=False,
limit=None,
offset=None,
order_by=None,
group_by=None,
bind=None,
autocommit=None,
):
self.use_labels = use_labels
if for_update is not False:
self._for_update_arg = ForUpdateArg.parse_legacy_select(for_update)
if autocommit is not None:
util.warn_deprecated(
"The select.autocommit parameter is deprecated and will be "
"removed in a future release. Please refer to the "
"Select.execution_options.autocommit` parameter."
)
self._execution_options = self._execution_options.union(
{"autocommit": autocommit}
)
if limit is not None:
self._limit_clause = _offset_or_limit_clause(limit)
if offset is not None:
self._offset_clause = _offset_or_limit_clause(offset)
self._bind = bind
if order_by is not None:
self._order_by_clause = ClauseList(
*util.to_list(order_by),
_literal_as_text=_literal_and_labels_as_label_reference
)
if group_by is not None:
self._group_by_clause = ClauseList(
*util.to_list(group_by),
_literal_as_text=_literal_as_label_reference
)
@property
def for_update(self):
"""Provide legacy dialect support for the ``for_update`` attribute.
"""
if self._for_update_arg is not None:
return self._for_update_arg.legacy_for_update_value
else:
return None
@for_update.setter
def for_update(self, value):
self._for_update_arg = ForUpdateArg.parse_legacy_select(value)
@_generative
def with_for_update(
self,
nowait=False,
read=False,
of=None,
skip_locked=False,
key_share=False,
):
"""Specify a ``FOR UPDATE`` clause for this
:class:`_expression.GenerativeSelect`.
E.g.::
stmt = select([table]).with_for_update(nowait=True)
On a database like PostgreSQL or Oracle, the above would render a
statement like::
SELECT table.a, table.b FROM table FOR UPDATE NOWAIT
on other backends, the ``nowait`` option is ignored and instead
would produce::
SELECT table.a, table.b FROM table FOR UPDATE
When called with no arguments, the statement will render with
the suffix ``FOR UPDATE``. Additional arguments can then be
provided which allow for common database-specific
variants.
:param nowait: boolean; will render ``FOR UPDATE NOWAIT`` on Oracle
and PostgreSQL dialects.
:param read: boolean; will render ``LOCK IN SHARE MODE`` on MySQL,
``FOR SHARE`` on PostgreSQL. On PostgreSQL, when combined with
``nowait``, will render ``FOR SHARE NOWAIT``.
:param of: SQL expression or list of SQL expression elements
(typically :class:`_schema.Column`
objects or a compatible expression) which
will render into a ``FOR UPDATE OF`` clause; supported by PostgreSQL
and Oracle. May render as a table or as a column depending on
backend.
:param skip_locked: boolean, will render ``FOR UPDATE SKIP LOCKED``
on Oracle and PostgreSQL dialects or ``FOR SHARE SKIP LOCKED`` if
``read=True`` is also specified.
.. versionadded:: 1.1.0
:param key_share: boolean, will render ``FOR NO KEY UPDATE``,
or if combined with ``read=True`` will render ``FOR KEY SHARE``,
on the PostgreSQL dialect.
.. versionadded:: 1.1.0
"""
self._for_update_arg = ForUpdateArg(
nowait=nowait,
read=read,
of=of,
skip_locked=skip_locked,
key_share=key_share,
)
@_generative
def apply_labels(self):
"""return a new selectable with the 'use_labels' flag set to True.
This will result in column expressions being generated using labels
against their table name, such as "SELECT somecolumn AS
tablename_somecolumn". This allows selectables which contain multiple
FROM clauses to produce a unique set of column names regardless of
name conflicts among the individual FROM clauses.
"""
self.use_labels = True
@property
def _limit(self):
"""Get an integer value for the limit. This should only be used
by code that cannot support a limit as a BindParameter or
other custom clause as it will throw an exception if the limit
isn't currently set to an integer.
"""
return _offset_or_limit_clause_asint(self._limit_clause, "limit")
@property
def _simple_int_limit(self):
"""True if the LIMIT clause is a simple integer, False
if it is not present or is a SQL expression.
"""
return isinstance(self._limit_clause, _OffsetLimitParam)
@property
def _simple_int_offset(self):
"""True if the OFFSET clause is a simple integer, False
if it is not present or is a SQL expression.
"""
return isinstance(self._offset_clause, _OffsetLimitParam)
@property
def _offset(self):
"""Get an integer value for the offset. This should only be used
by code that cannot support an offset as a BindParameter or
other custom clause as it will throw an exception if the
offset isn't currently set to an integer.
"""
return _offset_or_limit_clause_asint(self._offset_clause, "offset")
@_generative
def limit(self, limit):
"""return a new selectable with the given LIMIT criterion
applied.
This is a numerical value which usually renders as a ``LIMIT``
expression in the resulting select. Backends that don't
support ``LIMIT`` will attempt to provide similar
functionality.
.. versionchanged:: 1.0.0 - :meth:`_expression.Select.limit` can now
accept arbitrary SQL expressions as well as integer values.
:param limit: an integer LIMIT parameter, or a SQL expression
that provides an integer result.
"""
self._limit_clause = _offset_or_limit_clause(limit)
@_generative
def offset(self, offset):
"""return a new selectable with the given OFFSET criterion
applied.
This is a numeric value which usually renders as an ``OFFSET``
expression in the resulting select. Backends that don't
support ``OFFSET`` will attempt to provide similar
functionality.
.. versionchanged:: 1.0.0 - :meth:`_expression.Select.offset` can now
accept arbitrary SQL expressions as well as integer values.
:param offset: an integer OFFSET parameter, or a SQL expression
that provides an integer result.
"""
self._offset_clause = _offset_or_limit_clause(offset)
@_generative
def order_by(self, *clauses):
r"""return a new selectable with the given list of ORDER BY
criterion applied.
e.g.::
stmt = select([table]).order_by(table.c.id, table.c.name)
:param \*order_by: a series of :class:`_expression.ColumnElement`
constructs
which will be used to generate an ORDER BY clause.
.. seealso::
:ref:`core_tutorial_ordering`
"""
self.append_order_by(*clauses)
@_generative
def group_by(self, *clauses):
r"""return a new selectable with the given list of GROUP BY
criterion applied.
e.g.::
stmt = select([table.c.name, func.max(table.c.stat)]).\
group_by(table.c.name)
:param \*group_by: a series of :class:`_expression.ColumnElement`
constructs
        which will be used to generate a GROUP BY clause.
.. seealso::
:ref:`core_tutorial_ordering`
"""
self.append_group_by(*clauses)
def append_order_by(self, *clauses):
"""Append the given ORDER BY criterion applied to this selectable.
The criterion will be appended to any pre-existing ORDER BY criterion.
This is an **in-place** mutation method; the
:meth:`_expression.GenerativeSelect.order_by` method is preferred,
as it
provides standard :term:`method chaining`.
.. seealso::
:meth:`_expression.GenerativeSelect.order_by`
"""
if len(clauses) == 1 and clauses[0] is None:
self._order_by_clause = ClauseList()
else:
if getattr(self, "_order_by_clause", None) is not None:
clauses = list(self._order_by_clause) + list(clauses)
self._order_by_clause = ClauseList(
*clauses,
_literal_as_text=_literal_and_labels_as_label_reference
)
def append_group_by(self, *clauses):
"""Append the given GROUP BY criterion applied to this selectable.
The criterion will be appended to any pre-existing GROUP BY criterion.
This is an **in-place** mutation method; the
:meth:`_expression.GenerativeSelect.group_by` method is preferred,
as it
provides standard :term:`method chaining`.
.. seealso::
:meth:`_expression.GenerativeSelect.group_by`
"""
if len(clauses) == 1 and clauses[0] is None:
self._group_by_clause = ClauseList()
else:
if getattr(self, "_group_by_clause", None) is not None:
clauses = list(self._group_by_clause) + list(clauses)
self._group_by_clause = ClauseList(
*clauses, _literal_as_text=_literal_as_label_reference
)
@property
def _label_resolve_dict(self):
raise NotImplementedError()
def _copy_internals(self, clone=_clone, **kw):
if self._limit_clause is not None:
self._limit_clause = clone(self._limit_clause, **kw)
if self._offset_clause is not None:
self._offset_clause = clone(self._offset_clause, **kw)
class CompoundSelect(GenerativeSelect):
"""Forms the basis of ``UNION``, ``UNION ALL``, and other
SELECT-based set operations.
.. seealso::
:func:`_expression.union`
:func:`_expression.union_all`
:func:`_expression.intersect`
:func:`_expression.intersect_all`
        :func:`_expression.except_`
:func:`_expression.except_all`
"""
__visit_name__ = "compound_select"
UNION = util.symbol("UNION")
UNION_ALL = util.symbol("UNION ALL")
EXCEPT = util.symbol("EXCEPT")
EXCEPT_ALL = util.symbol("EXCEPT ALL")
INTERSECT = util.symbol("INTERSECT")
INTERSECT_ALL = util.symbol("INTERSECT ALL")
_is_from_container = True
def __init__(self, keyword, *selects, **kwargs):
self._auto_correlate = kwargs.pop("correlate", False)
self.keyword = keyword
self.selects = []
numcols = None
# some DBs do not like ORDER BY in the inner queries of a UNION, etc.
for n, s in enumerate(selects):
s = _clause_element_as_expr(s)
if not numcols:
numcols = len(s.c._all_columns)
elif len(s.c._all_columns) != numcols:
raise exc.ArgumentError(
"All selectables passed to "
"CompoundSelect must have identical numbers of "
"columns; select #%d has %d columns, select "
"#%d has %d"
% (
1,
len(self.selects[0].c._all_columns),
n + 1,
len(s.c._all_columns),
)
)
self.selects.append(s.self_group(against=self))
GenerativeSelect.__init__(self, **kwargs)
@property
def _label_resolve_dict(self):
d = dict((c.key, c) for c in self.c)
return d, d, d
@classmethod
def _create_union(cls, *selects, **kwargs):
r"""Return a ``UNION`` of multiple selectables.
The returned object is an instance of
:class:`_selectable.CompoundSelect`.
A similar :func:`union()` method is available on all
:class:`_expression.FromClause` subclasses.
\*selects
a list of :class:`_expression.Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
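        A brief sketch (``users`` and ``customers`` are placeholder tables
        sharing a ``name`` column)::
            stmt = union(
                select([users.c.name]),
                select([customers.c.name]),
            )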
"""
return CompoundSelect(CompoundSelect.UNION, *selects, **kwargs)
@classmethod
def _create_union_all(cls, *selects, **kwargs):
r"""Return a ``UNION ALL`` of multiple selectables.
The returned object is an instance of
:class:`_selectable.CompoundSelect`.
A similar :func:`union_all()` method is available on all
:class:`_expression.FromClause` subclasses.
\*selects
a list of :class:`_expression.Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.UNION_ALL, *selects, **kwargs)
@classmethod
def _create_except(cls, *selects, **kwargs):
r"""Return an ``EXCEPT`` of multiple selectables.
The returned object is an instance of
:class:`_selectable.CompoundSelect`.
\*selects
a list of :class:`_expression.Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.EXCEPT, *selects, **kwargs)
@classmethod
def _create_except_all(cls, *selects, **kwargs):
r"""Return an ``EXCEPT ALL`` of multiple selectables.
The returned object is an instance of
:class:`_selectable.CompoundSelect`.
\*selects
a list of :class:`_expression.Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.EXCEPT_ALL, *selects, **kwargs)
@classmethod
def _create_intersect(cls, *selects, **kwargs):
r"""Return an ``INTERSECT`` of multiple selectables.
The returned object is an instance of
:class:`_selectable.CompoundSelect`.
\*selects
a list of :class:`_expression.Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.INTERSECT, *selects, **kwargs)
@classmethod
def _create_intersect_all(cls, *selects, **kwargs):
r"""Return an ``INTERSECT ALL`` of multiple selectables.
The returned object is an instance of
:class:`_selectable.CompoundSelect`.
\*selects
a list of :class:`_expression.Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.INTERSECT_ALL, *selects, **kwargs)
def _scalar_type(self):
return self.selects[0]._scalar_type()
def self_group(self, against=None):
return FromGrouping(self)
def is_derived_from(self, fromclause):
for s in self.selects:
if s.is_derived_from(fromclause):
return True
return False
def _populate_column_collection(self):
for cols in zip(*[s.c._all_columns for s in self.selects]):
# this is a slightly hacky thing - the union exports a
# column that resembles just that of the *first* selectable.
# to get at a "composite" column, particularly foreign keys,
# you have to dig through the proxies collection which we
# generate below. We may want to improve upon this, such as
# perhaps _make_proxy can accept a list of other columns
# that are "shared" - schema.column can then copy all the
# ForeignKeys in. this would allow the union() to have all
# those fks too.
proxy = cols[0]._make_proxy(
self,
name=cols[0]._label if self.use_labels else None,
key=cols[0]._key_label if self.use_labels else None,
)
# hand-construct the "_proxies" collection to include all
# derived columns place a 'weight' annotation corresponding
# to how low in the list of select()s the column occurs, so
# that the corresponding_column() operation can resolve
# conflicts
proxy._proxies = [
c._annotate({"weight": i + 1}) for (i, c) in enumerate(cols)
]
def _refresh_for_new_column(self, column):
for s in self.selects:
s._refresh_for_new_column(column)
if not self._cols_populated:
return None
raise NotImplementedError(
"CompoundSelect constructs don't support "
"addition of columns to underlying "
"selectables"
)
def _copy_internals(self, clone=_clone, **kw):
super(CompoundSelect, self)._copy_internals(clone, **kw)
self._reset_exported()
self.selects = [clone(s, **kw) for s in self.selects]
if hasattr(self, "_col_map"):
del self._col_map
for attr in (
"_order_by_clause",
"_group_by_clause",
"_for_update_arg",
):
if getattr(self, attr) is not None:
setattr(self, attr, clone(getattr(self, attr), **kw))
def get_children(self, column_collections=True, **kwargs):
return (
(column_collections and list(self.c) or [])
+ [self._order_by_clause, self._group_by_clause]
+ list(self.selects)
)
def bind(self):
if self._bind:
return self._bind
for s in self.selects:
e = s.bind
if e:
return e
else:
return None
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
class Select(HasPrefixes, HasSuffixes, GenerativeSelect):
"""Represents a ``SELECT`` statement.
"""
__visit_name__ = "select"
_prefixes = ()
_suffixes = ()
_hints = util.immutabledict()
_statement_hints = ()
_distinct = False
_from_cloned = None
_correlate = ()
_correlate_except = None
_memoized_property = SelectBase._memoized_property
_is_select = True
@util.deprecated_params(
autocommit=(
"0.6",
"The :paramref:`_expression.select.autocommit` "
"parameter is deprecated "
"and will be removed in a future release. Please refer to "
"the :paramref:`.Connection.execution_options.autocommit` "
"parameter in conjunction with the the "
":meth:`.Executable.execution_options` method in order to "
"affect the autocommit behavior for a statement.",
),
for_update=(
"0.9",
"The :paramref:`_expression.select.for_update` "
"parameter is deprecated and "
"will be removed in a future release. Please refer to the "
":meth:`_expression.Select.with_for_update` to specify the "
"structure of the ``FOR UPDATE`` clause.",
),
)
def __init__(
self,
columns=None,
whereclause=None,
from_obj=None,
distinct=False,
having=None,
correlate=True,
prefixes=None,
suffixes=None,
**kwargs
):
"""Construct a new :class:`_expression.Select`.
Similar functionality is also available via the
:meth:`_expression.FromClause.select` method on any
:class:`_expression.FromClause`.
All arguments which accept :class:`_expression.ClauseElement`
arguments also
accept string arguments, which will be converted as appropriate into
either :func:`_expression.text()` or
:func:`_expression.literal_column()` constructs.
.. seealso::
:ref:`coretutorial_selecting` - Core Tutorial description of
:func:`_expression.select`.
:param columns:
A list of :class:`_expression.ColumnElement` or
:class:`_expression.FromClause`
objects which will form the columns clause of the resulting
statement. For those objects that are instances of
:class:`_expression.FromClause` (typically :class:`_schema.Table`
or :class:`_expression.Alias`
objects), the :attr:`_expression.FromClause.c`
collection is extracted
to form a collection of :class:`_expression.ColumnElement` objects.
This parameter will also accept :class:`_expression.TextClause`
constructs as
given, as well as ORM-mapped classes.
.. note::
The :paramref:`_expression.select.columns`
parameter is not available
in the method form of :func:`_expression.select`, e.g.
:meth:`_expression.FromClause.select`.
.. seealso::
:meth:`_expression.Select.column`
:meth:`_expression.Select.with_only_columns`
:param whereclause:
A :class:`_expression.ClauseElement`
expression which will be used to form the
``WHERE`` clause. It is typically preferable to add WHERE
criterion to an existing :class:`_expression.Select`
using method chaining
with :meth:`_expression.Select.where`.
.. seealso::
:meth:`_expression.Select.where`
:param from_obj:
A list of :class:`_expression.ClauseElement`
objects which will be added to the
``FROM`` clause of the resulting statement. This is equivalent
to calling :meth:`_expression.Select.select_from`
using method chaining on
an existing :class:`_expression.Select` object.
.. seealso::
:meth:`_expression.Select.select_from`
- full description of explicit
FROM clause specification.
:param autocommit: legacy autocommit parameter.
:param bind=None:
an :class:`_engine.Engine` or :class:`_engine.Connection` instance
to which the
resulting :class:`_expression.Select` object will be bound. The
:class:`_expression.Select`
object will otherwise automatically bind to
whatever :class:`~.base.Connectable` instances can be located within
its contained :class:`_expression.ClauseElement` members.
:param correlate=True:
indicates that this :class:`_expression.Select`
object should have its
contained :class:`_expression.FromClause`
elements "correlated" to an enclosing
:class:`_expression.Select` object.
It is typically preferable to specify
correlations on an existing :class:`_expression.Select`
construct using
:meth:`_expression.Select.correlate`.
.. seealso::
:meth:`_expression.Select.correlate`
- full description of correlation.
:param distinct=False:
when ``True``, applies a ``DISTINCT`` qualifier to the columns
clause of the resulting statement.
The boolean argument may also be a column expression or list
of column expressions - this is a special calling form which
is understood by the PostgreSQL dialect to render the
``DISTINCT ON (<columns>)`` syntax.
``distinct`` is also available on an existing
:class:`_expression.Select`
object via the :meth:`_expression.Select.distinct` method.
.. seealso::
:meth:`_expression.Select.distinct`
:param for_update=False:
when ``True``, applies ``FOR UPDATE`` to the end of the
resulting statement.
``for_update`` accepts various string values interpreted by
specific backends, including:
* ``"read"`` - on MySQL, translates to ``LOCK IN SHARE MODE``;
on PostgreSQL, translates to ``FOR SHARE``.
* ``"nowait"`` - on PostgreSQL and Oracle, translates to
``FOR UPDATE NOWAIT``.
* ``"read_nowait"`` - on PostgreSQL, translates to
``FOR SHARE NOWAIT``.
.. seealso::
:meth:`_expression.Select.with_for_update` - improved API for
specifying the ``FOR UPDATE`` clause.
:param group_by:
a list of :class:`_expression.ClauseElement`
objects which will comprise the
``GROUP BY`` clause of the resulting select. This parameter
is typically specified more naturally using the
:meth:`_expression.Select.group_by` method on an existing
:class:`_expression.Select`.
.. seealso::
:meth:`_expression.Select.group_by`
:param having:
a :class:`_expression.ClauseElement`
that will comprise the ``HAVING`` clause
of the resulting select when ``GROUP BY`` is used. This parameter
is typically specified more naturally using the
:meth:`_expression.Select.having` method on an existing
:class:`_expression.Select`.
.. seealso::
:meth:`_expression.Select.having`
:param limit=None:
a numerical value which usually renders as a ``LIMIT``
expression in the resulting select. Backends that don't
support ``LIMIT`` will attempt to provide similar
functionality. This parameter is typically specified more
naturally using the :meth:`_expression.Select.limit`
method on an existing
:class:`_expression.Select`.
.. seealso::
:meth:`_expression.Select.limit`
:param offset=None:
a numeric value which usually renders as an ``OFFSET``
expression in the resulting select. Backends that don't
support ``OFFSET`` will attempt to provide similar
functionality. This parameter is typically specified more naturally
using the :meth:`_expression.Select.offset` method on an existing
:class:`_expression.Select`.
.. seealso::
:meth:`_expression.Select.offset`
:param order_by:
a scalar or list of :class:`_expression.ClauseElement`
objects which will
comprise the ``ORDER BY`` clause of the resulting select.
This parameter is typically specified more naturally using the
:meth:`_expression.Select.order_by` method on an existing
:class:`_expression.Select`.
.. seealso::
:meth:`_expression.Select.order_by`
:param use_labels=False:
when ``True``, the statement will be generated using labels
for each column in the columns clause, which qualify each
column with its parent table's (or aliases) name so that name
conflicts between columns in different tables don't occur.
The format of the label is <tablename>_<column>. The "c"
collection of the resulting :class:`_expression.Select`
object will use these
names as well for targeting column members.
This parameter can also be specified on an existing
:class:`_expression.Select` object using the
:meth:`_expression.Select.apply_labels`
method.
.. seealso::
:meth:`_expression.Select.apply_labels`
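        As an illustrative sketch combining several of the above parameters
        (``users`` is a placeholder :class:`_schema.Table`)::
            stmt = select(
                [users.c.id, users.c.name],
                whereclause=users.c.name.isnot(None),
                order_by=[users.c.id],
                limit=10,
            )
        The equivalent generative form, e.g.
        ``select([...]).where(...).order_by(...).limit(10)``, is typically
        preferred in application code.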
"""
self._auto_correlate = correlate
if distinct is not False:
if distinct is True:
self._distinct = True
else:
self._distinct = [
_literal_as_label_reference(e)
for e in util.to_list(distinct)
]
if from_obj is not None:
self._from_obj = util.OrderedSet(
_interpret_as_from(f) for f in util.to_list(from_obj)
)
else:
self._from_obj = util.OrderedSet()
try:
cols_present = bool(columns)
except TypeError as err:
util.raise_(
exc.ArgumentError(
"columns argument to select() must "
"be a Python list or other iterable"
),
replace_context=err,
)
if cols_present:
self._raw_columns = []
for c in columns:
c = _interpret_as_column_or_from(c)
if isinstance(c, ScalarSelect):
c = c.self_group(against=operators.comma_op)
self._raw_columns.append(c)
else:
self._raw_columns = []
if whereclause is not None:
self._whereclause = _literal_as_text(whereclause).self_group(
against=operators._asbool
)
else:
self._whereclause = None
if having is not None:
self._having = _literal_as_text(having).self_group(
against=operators._asbool
)
else:
self._having = None
if prefixes:
self._setup_prefixes(prefixes)
if suffixes:
self._setup_suffixes(suffixes)
GenerativeSelect.__init__(self, **kwargs)
@property
def _froms(self):
# would love to cache this,
# but there's just enough edge cases, particularly now that
# declarative encourages construction of SQL expressions
# without tables present, to just regen this each time.
froms = []
seen = set()
translate = self._from_cloned
for item in itertools.chain(
_from_objects(*self._raw_columns),
_from_objects(self._whereclause)
if self._whereclause is not None
else (),
self._from_obj,
):
if item is self:
raise exc.InvalidRequestError(
"select() construct refers to itself as a FROM"
)
if translate and item in translate:
item = translate[item]
if not seen.intersection(item._cloned_set):
froms.append(item)
seen.update(item._cloned_set)
return froms
def _get_display_froms(
self, explicit_correlate_froms=None, implicit_correlate_froms=None
):
"""Return the full list of 'from' clauses to be displayed.
Takes into account a set of existing froms which may be
rendered in the FROM clause of enclosing selects; this Select
may want to leave those absent if it is automatically
correlating.
"""
froms = self._froms
toremove = set(
itertools.chain(*[_expand_cloned(f._hide_froms) for f in froms])
)
if toremove:
# if we're maintaining clones of froms,
# add the copies out to the toremove list. only include
# clones that are lexical equivalents.
if self._from_cloned:
toremove.update(
self._from_cloned[f]
for f in toremove.intersection(self._from_cloned)
if self._from_cloned[f]._is_lexical_equivalent(f)
)
# filter out to FROM clauses not in the list,
# using a list to maintain ordering
froms = [f for f in froms if f not in toremove]
if self._correlate:
to_correlate = self._correlate
if to_correlate:
froms = [
f
for f in froms
if f
not in _cloned_intersection(
_cloned_intersection(
froms, explicit_correlate_froms or ()
),
to_correlate,
)
]
if self._correlate_except is not None:
froms = [
f
for f in froms
if f
not in _cloned_difference(
_cloned_intersection(
froms, explicit_correlate_froms or ()
),
self._correlate_except,
)
]
if (
self._auto_correlate
and implicit_correlate_froms
and len(froms) > 1
):
froms = [
f
for f in froms
if f
not in _cloned_intersection(froms, implicit_correlate_froms)
]
if not len(froms):
raise exc.InvalidRequestError(
"Select statement '%s"
"' returned no FROM clauses "
"due to auto-correlation; "
"specify correlate(<tables>) "
"to control correlation "
"manually." % self
)
return froms
def _scalar_type(self):
elem = self._raw_columns[0]
cols = list(elem._select_iterable)
return cols[0].type
@property
def froms(self):
"""Return the displayed list of FromClause elements."""
return self._get_display_froms()
def with_statement_hint(self, text, dialect_name="*"):
"""add a statement hint to this :class:`_expression.Select`.
This method is similar to :meth:`_expression.Select.with_hint`
except that
it does not require an individual table, and instead applies to the
statement as a whole.
Hints here are specific to the backend database and may include
directives such as isolation levels, file directives, fetch directives,
etc.
.. versionadded:: 1.0.0
.. seealso::
:meth:`_expression.Select.with_hint`
:meth:`.Select.prefix_with` - generic SELECT prefixing which also
can suit some database-specific HINT syntaxes such as MySQL
optimizer hints
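        A short sketch; the hint text below is illustrative only, as the
        accepted hints and their rendered placement are entirely
        backend-specific::
            stmt = select([mytable]).with_statement_hint(
                "MAX_EXECUTION_TIME(1000)", dialect_name="mysql")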
"""
return self.with_hint(None, text, dialect_name)
@_generative
def with_hint(self, selectable, text, dialect_name="*"):
r"""Add an indexing or other executional context hint for the given
selectable to this :class:`_expression.Select`.
The text of the hint is rendered in the appropriate
location for the database backend in use, relative
to the given :class:`_schema.Table` or :class:`_expression.Alias`
passed as the
``selectable`` argument. The dialect implementation
typically uses Python string substitution syntax
with the token ``%(name)s`` to render the name of
the table or alias. E.g. when using Oracle, the
following::
select([mytable]).\
with_hint(mytable, "index(%(name)s ix_mytable)")
Would render SQL as::
select /*+ index(mytable ix_mytable) */ ... from mytable
The ``dialect_name`` option will limit the rendering of a particular
hint to a particular backend. Such as, to add hints for both Oracle
and Sybase simultaneously::
select([mytable]).\
with_hint(mytable, "index(%(name)s ix_mytable)", 'oracle').\
with_hint(mytable, "WITH INDEX ix_mytable", 'sybase')
.. seealso::
:meth:`_expression.Select.with_statement_hint`
"""
if selectable is None:
self._statement_hints += ((dialect_name, text),)
else:
self._hints = self._hints.union({(selectable, dialect_name): text})
@property
def type(self):
raise exc.InvalidRequestError(
"Select objects don't have a type. "
"Call as_scalar() on this Select "
"object to return a 'scalar' version "
"of this Select."
)
@_memoized_property.method
def locate_all_froms(self):
"""return a Set of all FromClause elements referenced by this Select.
This set is a superset of that returned by the ``froms`` property,
which is specifically for those FromClause elements that would
actually be rendered.
"""
froms = self._froms
return froms + list(_from_objects(*froms))
@property
def inner_columns(self):
"""an iterator of all ColumnElement expressions which would
be rendered into the columns clause of the resulting SELECT statement.
"""
return _select_iterables(self._raw_columns)
@_memoized_property
def _label_resolve_dict(self):
with_cols = dict(
(c._resolve_label or c._label or c.key, c)
for c in _select_iterables(self._raw_columns)
if c._allow_label_resolve
)
only_froms = dict(
(c.key, c)
for c in _select_iterables(self.froms)
if c._allow_label_resolve
)
only_cols = with_cols.copy()
for key, value in only_froms.items():
with_cols.setdefault(key, value)
return with_cols, only_froms, only_cols
def is_derived_from(self, fromclause):
if self in fromclause._cloned_set:
return True
for f in self.locate_all_froms():
if f.is_derived_from(fromclause):
return True
return False
def _copy_internals(self, clone=_clone, **kw):
super(Select, self)._copy_internals(clone, **kw)
# Select() object has been cloned and probably adapted by the
# given clone function. Apply the cloning function to internal
# objects
# 1. keep a dictionary of the froms we've cloned, and what
# they've become. This is consulted later when we derive
# additional froms from "whereclause" and the columns clause,
# which may still reference the uncloned parent table.
# as of 0.7.4 we also put the current version of _froms, which
# gets cleared on each generation. previously we were "baking"
# _froms into self._from_obj.
self._from_cloned = from_cloned = dict(
(f, clone(f, **kw)) for f in self._from_obj.union(self._froms)
)
# 3. update persistent _from_obj with the cloned versions.
self._from_obj = util.OrderedSet(
from_cloned[f] for f in self._from_obj
)
# the _correlate collection is done separately, what can happen
# here is the same item is _correlate as in _from_obj but the
# _correlate version has an annotation on it - (specifically
# RelationshipProperty.Comparator._criterion_exists() does
# this). Also keep _correlate liberally open with its previous
# contents, as this set is used for matching, not rendering.
self._correlate = set(clone(f, **kw) for f in self._correlate).union(
self._correlate
)
# do something similar for _correlate_except - this is a more
# unusual case but same idea applies
if self._correlate_except:
self._correlate_except = set(
clone(f, **kw) for f in self._correlate_except
).union(self._correlate_except)
# 4. clone other things. The difficulty here is that Column
# objects are not actually cloned, and refer to their original
# .table, resulting in the wrong "from" parent after a clone
# operation. Hence _from_cloned and _from_obj supersede what is
# present here.
self._raw_columns = [clone(c, **kw) for c in self._raw_columns]
for attr in (
"_whereclause",
"_having",
"_order_by_clause",
"_group_by_clause",
"_for_update_arg",
):
if getattr(self, attr) is not None:
setattr(self, attr, clone(getattr(self, attr), **kw))
# erase exported column list, _froms collection,
# etc.
self._reset_exported()
def get_children(self, column_collections=True, **kwargs):
"""return child elements as per the ClauseElement specification."""
return (
(column_collections and list(self.columns) or [])
+ self._raw_columns
+ list(self._froms)
+ [
x
for x in (
self._whereclause,
self._having,
self._order_by_clause,
self._group_by_clause,
)
if x is not None
]
)
@_generative
def column(self, column):
"""return a new select() construct with the given column expression
added to its columns clause.
E.g.::
my_select = my_select.column(table.c.new_column)
See the documentation for
:meth:`_expression.Select.with_only_columns`
        for guidelines on adding/replacing the columns of a
:class:`_expression.Select` object.
"""
self.append_column(column)
@util.dependencies("sqlalchemy.sql.util")
def reduce_columns(self, sqlutil, only_synonyms=True):
"""Return a new :func`.select` construct with redundantly
named, equivalently-valued columns removed from the columns clause.
"Redundant" here means two columns where one refers to the
other either based on foreign key, or via a simple equality
comparison in the WHERE clause of the statement. The primary purpose
of this method is to automatically construct a select statement
with all uniquely-named columns, without the need to use
        table-qualified labels as :meth:`_expression.Select.apply_labels`
        does.
When columns are omitted based on foreign key, the referred-to
column is the one that's kept. When columns are omitted based on
WHERE equivalence, the first column in the columns clause is the
one that's kept.
:param only_synonyms: when True, limit the removal of columns
to those which have the same name as the equivalent. Otherwise,
all columns that are equivalent to another are removed.
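        A brief sketch, assuming a hypothetical ``engineer`` table whose
        ``id`` column is a foreign key out to ``person.id``::
            stmt = select([person, engineer]).where(
                person.c.id == engineer.c.id).reduce_columns()
        Above, only ``person.c.id`` of the two same-named ``id`` columns
        would remain in the columns clause.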
"""
return self.with_only_columns(
sqlutil.reduce_columns(
self.inner_columns,
only_synonyms=only_synonyms,
*(self._whereclause,) + tuple(self._from_obj)
)
)
@_generative
def with_only_columns(self, columns):
r"""Return a new :func:`_expression.select` construct with its columns
clause replaced with the given columns.
This method is exactly equivalent to as if the original
:func:`_expression.select` had been called with the given columns
clause. I.e. a statement::
s = select([table1.c.a, table1.c.b])
s = s.with_only_columns([table1.c.b])
should be exactly equivalent to::
s = select([table1.c.b])
This means that FROM clauses which are only derived
from the column list will be discarded if the new column
list no longer contains that FROM::
>>> table1 = table('t1', column('a'), column('b'))
>>> table2 = table('t2', column('a'), column('b'))
>>> s1 = select([table1.c.a, table2.c.b])
>>> print(s1)
SELECT t1.a, t2.b FROM t1, t2
>>> s2 = s1.with_only_columns([table2.c.b])
>>> print(s2)
SELECT t2.b FROM t1
The preferred way to maintain a specific FROM clause
in the construct, assuming it won't be represented anywhere
else (i.e. not in the WHERE clause, etc.) is to set it using
:meth:`_expression.Select.select_from`::
>>> s1 = select([table1.c.a, table2.c.b]).\
... select_from(table1.join(table2,
... table1.c.a==table2.c.a))
>>> s2 = s1.with_only_columns([table2.c.b])
>>> print(s2)
SELECT t2.b FROM t1 JOIN t2 ON t1.a=t2.a
Care should also be taken to use the correct
set of column objects passed to
:meth:`_expression.Select.with_only_columns`.
Since the method is essentially equivalent to calling the
:func:`_expression.select` construct in the first place with the given
columns, the columns passed to
:meth:`_expression.Select.with_only_columns`
should usually be a subset of those which were passed
to the :func:`_expression.select` construct,
not those which are available
from the ``.c`` collection of that :func:`_expression.select`. That
is::
s = select([table1.c.a, table1.c.b]).select_from(table1)
s = s.with_only_columns([table1.c.b])
and **not**::
# usually incorrect
s = s.with_only_columns([s.c.b])
The latter would produce the SQL::
SELECT b
FROM (SELECT t1.a AS a, t1.b AS b
FROM t1), t1
Since the :func:`_expression.select` construct is essentially being
asked to select both from ``table1`` as well as itself.
"""
self._reset_exported()
rc = []
for c in columns:
c = _interpret_as_column_or_from(c)
if isinstance(c, ScalarSelect):
c = c.self_group(against=operators.comma_op)
rc.append(c)
self._raw_columns = rc
@_generative
def where(self, whereclause):
"""return a new select() construct with the given expression added to
its WHERE clause, joined to the existing clause via AND, if any.
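        A minimal sketch (``users`` is a placeholder table); the two
        criteria below are joined via AND::
            stmt = select([users]).where(users.c.id > 5).\
                where(users.c.name != 'ed')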
"""
self.append_whereclause(whereclause)
@_generative
def having(self, having):
"""return a new select() construct with the given expression added to
its HAVING clause, joined to the existing clause via AND, if any.
"""
self.append_having(having)
@_generative
def distinct(self, *expr):
r"""Return a new select() construct which will apply DISTINCT to its
columns clause.
:param \*expr: optional column expressions. When present,
        the PostgreSQL dialect will render a ``DISTINCT ON (<expressions>)``
construct.
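        A brief sketch of the PostgreSQL-only calling form (``users`` is a
        placeholder table)::
            stmt = select([users]).distinct(users.c.name)
            # on PostgreSQL: SELECT DISTINCT ON (users.name) ...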
"""
if expr:
expr = [_literal_as_label_reference(e) for e in expr]
if isinstance(self._distinct, list):
self._distinct = self._distinct + expr
else:
self._distinct = expr
else:
self._distinct = True
@_generative
def select_from(self, fromclause):
r"""return a new :func:`_expression.select` construct with the
given FROM expression
merged into its list of FROM objects.
E.g.::
table1 = table('t1', column('a'))
table2 = table('t2', column('b'))
s = select([table1.c.a]).\
select_from(
table1.join(table2, table1.c.a==table2.c.b)
)
The "from" list is a unique set on the identity of each element,
so adding an already present :class:`_schema.Table`
or other selectable
will have no effect. Passing a :class:`_expression.Join` that refers
to an already present :class:`_schema.Table`
or other selectable will have
the effect of concealing the presence of that selectable as
an individual element in the rendered FROM list, instead
rendering it into a JOIN clause.
While the typical purpose of :meth:`_expression.Select.select_from`
is to
replace the default, derived FROM clause with a join, it can
also be called with individual table elements, multiple times
if desired, in the case that the FROM clause cannot be fully
derived from the columns clause::
select([func.count('*')]).select_from(table1)
"""
self.append_from(fromclause)
@_generative
def correlate(self, *fromclauses):
r"""return a new :class:`_expression.Select`
which will correlate the given FROM
clauses to that of an enclosing :class:`_expression.Select`.
Calling this method turns off the :class:`_expression.Select` object's
default behavior of "auto-correlation". Normally, FROM elements
which appear in a :class:`_expression.Select`
that encloses this one via
its :term:`WHERE clause`, ORDER BY, HAVING or
:term:`columns clause` will be omitted from this
:class:`_expression.Select`
object's :term:`FROM clause`.
Setting an explicit correlation collection using the
:meth:`_expression.Select.correlate`
method provides a fixed list of FROM objects
that can potentially take place in this process.
When :meth:`_expression.Select.correlate`
is used to apply specific FROM clauses
for correlation, the FROM elements become candidates for
correlation regardless of how deeply nested this
:class:`_expression.Select`
object is, relative to an enclosing :class:`_expression.Select`
which refers to
the same FROM object. This is in contrast to the behavior of
"auto-correlation" which only correlates to an immediate enclosing
:class:`_expression.Select`.
Multi-level correlation ensures that the link
between enclosed and enclosing :class:`_expression.Select`
is always via
at least one WHERE/ORDER BY/HAVING/columns clause in order for
correlation to take place.
If ``None`` is passed, the :class:`_expression.Select`
object will correlate
none of its FROM entries, and all will render unconditionally
in the local FROM clause.
:param \*fromclauses: a list of one or more
:class:`_expression.FromClause`
constructs, or other compatible constructs (i.e. ORM-mapped
classes) to become part of the correlate collection.
.. seealso::
:meth:`_expression.Select.correlate_except`
:ref:`correlated_subqueries`
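        A brief sketch of an explicitly correlated subquery (``users`` and
        ``addresses`` are placeholder tables)::
            inner = select([addresses.c.id]).\
                where(addresses.c.user_id == users.c.id).\
                correlate(users)
            stmt = select([users.c.name, inner.as_scalar()])
        Here ``users`` is omitted from the inner statement's FROM clause
        and is instead resolved against the enclosing SELECT.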
"""
self._auto_correlate = False
if fromclauses and fromclauses[0] is None:
self._correlate = ()
else:
self._correlate = set(self._correlate).union(
_interpret_as_from(f) for f in fromclauses
)
@_generative
def correlate_except(self, *fromclauses):
r"""return a new :class:`_expression.Select`
which will omit the given FROM
clauses from the auto-correlation process.
Calling :meth:`_expression.Select.correlate_except` turns off the
:class:`_expression.Select` object's default behavior of
"auto-correlation" for the given FROM elements. An element
specified here will unconditionally appear in the FROM list, while
all other FROM elements remain subject to normal auto-correlation
behaviors.
If ``None`` is passed, the :class:`_expression.Select`
object will correlate
all of its FROM entries.
:param \*fromclauses: a list of one or more
:class:`_expression.FromClause`
constructs, or other compatible constructs (i.e. ORM-mapped
classes) to become part of the correlate-exception collection.
.. seealso::
:meth:`_expression.Select.correlate`
:ref:`correlated_subqueries`
"""
self._auto_correlate = False
if fromclauses and fromclauses[0] is None:
self._correlate_except = ()
else:
self._correlate_except = set(self._correlate_except or ()).union(
_interpret_as_from(f) for f in fromclauses
)
def append_correlation(self, fromclause):
"""append the given correlation expression to this select()
construct.
This is an **in-place** mutation method; the
:meth:`_expression.Select.correlate` method is preferred,
as it provides
standard :term:`method chaining`.
"""
self._auto_correlate = False
self._correlate = set(self._correlate).union(
_interpret_as_from(f) for f in fromclause
)
def append_column(self, column):
"""append the given column expression to the columns clause of this
select() construct.
E.g.::
my_select.append_column(some_table.c.new_column)
This is an **in-place** mutation method; the
:meth:`_expression.Select.column` method is preferred,
as it provides standard
:term:`method chaining`.
See the documentation for :meth:`_expression.Select.with_only_columns`
        for guidelines on adding/replacing the columns of a
:class:`_expression.Select` object.
"""
self._reset_exported()
column = _interpret_as_column_or_from(column)
if isinstance(column, ScalarSelect):
column = column.self_group(against=operators.comma_op)
self._raw_columns = self._raw_columns + [column]
def append_prefix(self, clause):
"""append the given columns clause prefix expression to this select()
construct.
This is an **in-place** mutation method; the
:meth:`_expression.Select.prefix_with` method is preferred,
as it provides
standard :term:`method chaining`.
"""
clause = _literal_as_text(clause)
self._prefixes = self._prefixes + (clause,)
def append_whereclause(self, whereclause):
"""append the given expression to this select() construct's WHERE
criterion.
The expression will be joined to existing WHERE criterion via AND.
This is an **in-place** mutation method; the
:meth:`_expression.Select.where` method is preferred,
as it provides standard
:term:`method chaining`.
"""
self._reset_exported()
self._whereclause = and_(True_._ifnone(self._whereclause), whereclause)
def append_having(self, having):
"""append the given expression to this select() construct's HAVING
criterion.
The expression will be joined to existing HAVING criterion via AND.
This is an **in-place** mutation method; the
:meth:`_expression.Select.having` method is preferred,
as it provides standard
:term:`method chaining`.
"""
self._reset_exported()
self._having = and_(True_._ifnone(self._having), having)
def append_from(self, fromclause):
"""append the given FromClause expression to this select() construct's
FROM clause.
This is an **in-place** mutation method; the
:meth:`_expression.Select.select_from` method is preferred,
as it provides
standard :term:`method chaining`.
"""
self._reset_exported()
fromclause = _interpret_as_from(fromclause)
self._from_obj = self._from_obj.union([fromclause])
@_memoized_property
def _columns_plus_names(self):
if self.use_labels:
names = set()
def name_for_col(c):
if c._label is None or not c._render_label_in_columns_clause:
return (None, c)
name = c._label
if name in names:
name = c.anon_label
else:
names.add(name)
return name, c
return [
name_for_col(c)
for c in util.unique_list(_select_iterables(self._raw_columns))
]
else:
return [
(None, c)
for c in util.unique_list(_select_iterables(self._raw_columns))
]
def _populate_column_collection(self):
for name, c in self._columns_plus_names:
if not hasattr(c, "_make_proxy"):
continue
if name is None:
key = None
elif self.use_labels:
key = c._key_label
if key is not None and key in self.c:
key = c.anon_label
else:
key = None
c._make_proxy(self, key=key, name=name, name_is_truncatable=True)
def _refresh_for_new_column(self, column):
for fromclause in self._froms:
col = fromclause._refresh_for_new_column(column)
if col is not None:
if col in self.inner_columns and self._cols_populated:
our_label = col._key_label if self.use_labels else col.key
if our_label not in self.c:
return col._make_proxy(
self,
name=col._label if self.use_labels else None,
key=col._key_label if self.use_labels else None,
name_is_truncatable=True,
)
return None
return None
def _needs_parens_for_grouping(self):
return (
self._limit_clause is not None
or self._offset_clause is not None
or bool(self._order_by_clause.clauses)
)
def self_group(self, against=None):
"""return a 'grouping' construct as per the ClauseElement
specification.
This produces an element that can be embedded in an expression. Note
that this method is called automatically as needed when constructing
expressions and should not require explicit use.
"""
if (
isinstance(against, CompoundSelect)
and not self._needs_parens_for_grouping()
):
return self
return FromGrouping(self)
def union(self, other, **kwargs):
"""return a SQL UNION of this select() construct against the given
selectable."""
return CompoundSelect._create_union(self, other, **kwargs)
def union_all(self, other, **kwargs):
"""return a SQL UNION ALL of this select() construct against the given
selectable.
"""
return CompoundSelect._create_union_all(self, other, **kwargs)
def except_(self, other, **kwargs):
"""return a SQL EXCEPT of this select() construct against the given
selectable."""
return CompoundSelect._create_except(self, other, **kwargs)
def except_all(self, other, **kwargs):
"""return a SQL EXCEPT ALL of this select() construct against the
given selectable.
"""
return CompoundSelect._create_except_all(self, other, **kwargs)
def intersect(self, other, **kwargs):
"""return a SQL INTERSECT of this select() construct against the given
selectable.
"""
return CompoundSelect._create_intersect(self, other, **kwargs)
def intersect_all(self, other, **kwargs):
"""return a SQL INTERSECT ALL of this select() construct against the
given selectable.
"""
return CompoundSelect._create_intersect_all(self, other, **kwargs)
def bind(self):
if self._bind:
return self._bind
froms = self._froms
if not froms:
for c in self._raw_columns:
e = c.bind
if e:
self._bind = e
return e
else:
e = list(froms)[0].bind
if e:
self._bind = e
return e
return None
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
class ScalarSelect(Generative, Grouping):
_from_objects = []
_is_from_container = True
_is_implicitly_boolean = False
def __init__(self, element):
self.element = element
self.type = element._scalar_type()
@property
def columns(self):
raise exc.InvalidRequestError(
"Scalar Select expression has no "
"columns; use this object directly "
"within a column-level expression."
)
c = columns
@_generative
def where(self, crit):
"""Apply a WHERE clause to the SELECT statement referred to
by this :class:`_expression.ScalarSelect`.
"""
self.element = self.element.where(crit)
def self_group(self, **kwargs):
return self
class Exists(UnaryExpression):
"""Represent an ``EXISTS`` clause.
"""
__visit_name__ = UnaryExpression.__visit_name__
_from_objects = []
def __init__(self, *args, **kwargs):
"""Construct a new :class:`_expression.Exists` against an existing
:class:`_expression.Select` object.
Calling styles are of the following forms::
# use on an existing select()
s = select([table.c.col1]).where(table.c.col2==5)
s = exists(s)
# construct a select() at once
exists(['*'], **select_arguments).where(criterion)
# columns argument is optional, generates "EXISTS (SELECT *)"
# by default.
exists().where(table.c.col2==5)
"""
if args and isinstance(args[0], (SelectBase, ScalarSelect)):
s = args[0]
else:
if not args:
args = ([literal_column("*")],)
s = Select(*args, **kwargs).as_scalar().self_group()
UnaryExpression.__init__(
self,
s,
operator=operators.exists,
type_=type_api.BOOLEANTYPE,
wraps_column_expression=True,
)
def select(self, whereclause=None, **params):
return Select([self], whereclause, **params)
def correlate(self, *fromclause):
e = self._clone()
e.element = self.element.correlate(*fromclause).self_group()
return e
def correlate_except(self, *fromclause):
e = self._clone()
e.element = self.element.correlate_except(*fromclause).self_group()
return e
def select_from(self, clause):
"""return a new :class:`_expression.Exists` construct,
applying the given
expression to the :meth:`_expression.Select.select_from`
method of the select
statement contained.
"""
e = self._clone()
e.element = self.element.select_from(clause).self_group()
return e
def where(self, clause):
"""return a new exists() construct with the given expression added to
its WHERE clause, joined to the existing clause via AND, if any.
"""
e = self._clone()
e.element = self.element.where(clause).self_group()
return e
class TextAsFrom(SelectBase):
"""Wrap a :class:`_expression.TextClause` construct within a
:class:`_expression.SelectBase`
interface.
    This allows the :class:`_expression.TextClause` object to gain a
    ``.c`` collection
and other FROM-like capabilities such as
:meth:`_expression.FromClause.alias`,
:meth:`_expression.SelectBase.cte`, etc.
The :class:`.TextAsFrom` construct is produced via the
:meth:`_expression.TextClause.columns`
method - see that method for details.
.. versionadded:: 0.9.0
.. seealso::
:func:`_expression.text`
:meth:`_expression.TextClause.columns`
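    A brief sketch (table and column names are illustrative)::
        t = text("SELECT id, name FROM users").columns(
            column("id"), column("name")).alias("t")
        stmt = select([t.c.id, t.c.name])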
"""
__visit_name__ = "text_as_from"
_textual = True
def __init__(self, text, columns, positional=False):
self.element = text
self.column_args = columns
self.positional = positional
@property
def _bind(self):
return self.element._bind
@_generative
def bindparams(self, *binds, **bind_as_values):
self.element = self.element.bindparams(*binds, **bind_as_values)
def _populate_column_collection(self):
for c in self.column_args:
c._make_proxy(self)
def _copy_internals(self, clone=_clone, **kw):
self._reset_exported()
self.element = clone(self.element, **kw)
def _scalar_type(self):
return self.column_args[0].type
class AnnotatedFromClause(Annotated):
def __init__(self, element, values):
# force FromClause to generate their internal
# collections into __dict__
element.c
Annotated.__init__(self, element, values)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/sql/dml.py
|
# sql/dml.py
# Copyright (C) 2009-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Provide :class:`_expression.Insert`, :class:`_expression.Update` and
:class:`_expression.Delete`.
"""
from .base import _from_objects
from .base import _generative
from .base import DialectKWArgs
from .base import Executable
from .elements import _clone
from .elements import _column_as_key
from .elements import _literal_as_text
from .elements import and_
from .elements import ClauseElement
from .elements import Null
from .selectable import _interpret_as_from
from .selectable import _interpret_as_select
from .selectable import HasCTE
from .selectable import HasPrefixes
from .. import exc
from .. import util
class UpdateBase(
HasCTE, DialectKWArgs, HasPrefixes, Executable, ClauseElement
):
"""Form the base for ``INSERT``, ``UPDATE``, and ``DELETE`` statements.
"""
__visit_name__ = "update_base"
_execution_options = Executable._execution_options.union(
{"autocommit": True}
)
_hints = util.immutabledict()
_parameter_ordering = None
_prefixes = ()
named_with_column = False
def _process_colparams(self, parameters):
def process_single(p):
if isinstance(p, (list, tuple)):
return dict((c.key, pval) for c, pval in zip(self.table.c, p))
else:
return p
if self._preserve_parameter_order and parameters is not None:
if not isinstance(parameters, list) or (
parameters and not isinstance(parameters[0], tuple)
):
raise ValueError(
"When preserve_parameter_order is True, "
"values() only accepts a list of 2-tuples"
)
self._parameter_ordering = [key for key, value in parameters]
return dict(parameters), False
if (
isinstance(parameters, (list, tuple))
and parameters
and isinstance(parameters[0], (list, tuple, dict))
):
if not self._supports_multi_parameters:
raise exc.InvalidRequestError(
"This construct does not support "
"multiple parameter sets."
)
return [process_single(p) for p in parameters], True
else:
return process_single(parameters), False
def params(self, *arg, **kw):
"""Set the parameters for the statement.
This method raises ``NotImplementedError`` on the base class,
and is overridden by :class:`.ValuesBase` to provide the
SET/VALUES clause of UPDATE and INSERT.
"""
raise NotImplementedError(
"params() is not supported for INSERT/UPDATE/DELETE statements."
" To set the values for an INSERT or UPDATE statement, use"
" stmt.values(**parameters)."
)
def bind(self):
"""Return a 'bind' linked to this :class:`.UpdateBase`
or a :class:`_schema.Table` associated with it.
"""
return self._bind or self.table.bind
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
@_generative
def returning(self, *cols):
r"""Add a :term:`RETURNING` or equivalent clause to this statement.
e.g.::
stmt = table.update().\
where(table.c.data == 'value').\
values(status='X').\
returning(table.c.server_flag,
table.c.updated_timestamp)
for server_flag, updated_timestamp in connection.execute(stmt):
print(server_flag, updated_timestamp)
The given collection of column expressions should be derived from
the table that is
the target of the INSERT, UPDATE, or DELETE. While
:class:`_schema.Column`
objects are typical, the elements can also be expressions::
stmt = table.insert().returning(
(table.c.first_name + " " + table.c.last_name).
label('fullname'))
Upon compilation, a RETURNING clause, or database equivalent,
will be rendered within the statement. For INSERT and UPDATE,
the values are the newly inserted/updated values. For DELETE,
the values are those of the rows which were deleted.
Upon execution, the values of the columns to be returned are made
available via the result set and can be iterated using
:meth:`_engine.ResultProxy.fetchone` and similar.
For DBAPIs which do not
natively support returning values (i.e. cx_oracle), SQLAlchemy will
approximate this behavior at the result level so that a reasonable
amount of behavioral neutrality is provided.
Note that not all databases/DBAPIs
support RETURNING. For those backends with no support,
an exception is raised upon compilation and/or execution.
For those who do support it, the functionality across backends
varies greatly, including restrictions on executemany()
and other statements which return multiple rows. Please
read the documentation notes for the database in use in
order to determine the availability of RETURNING.
.. seealso::
:meth:`.ValuesBase.return_defaults` - an alternative method tailored
towards efficient fetching of server-side defaults and triggers
for single-row INSERTs or UPDATEs.
"""
self._returning = cols
@_generative
def with_hint(self, text, selectable=None, dialect_name="*"):
"""Add a table hint for a single table to this
INSERT/UPDATE/DELETE statement.
.. note::
:meth:`.UpdateBase.with_hint` currently applies only to
Microsoft SQL Server. For MySQL INSERT/UPDATE/DELETE hints, use
:meth:`.UpdateBase.prefix_with`.
The text of the hint is rendered in the appropriate
location for the database backend in use, relative
to the :class:`_schema.Table` that is the subject of this
statement, or optionally to that of the given
:class:`_schema.Table` passed as the ``selectable`` argument.
The ``dialect_name`` option will limit the rendering of a particular
hint to a particular backend. Such as, to add a hint
that only takes effect for SQL Server::
mytable.insert().with_hint("WITH (PAGLOCK)", dialect_name="mssql")
:param text: Text of the hint.
:param selectable: optional :class:`_schema.Table` that specifies
an element of the FROM clause within an UPDATE or DELETE
to be the subject of the hint - applies only to certain backends.
:param dialect_name: defaults to ``*``, if specified as the name
of a particular dialect, will apply these hints only when
that dialect is in use.
"""
if selectable is None:
selectable = self.table
self._hints = self._hints.union({(selectable, dialect_name): text})
class ValuesBase(UpdateBase):
"""Supplies support for :meth:`.ValuesBase.values` to
INSERT and UPDATE constructs."""
__visit_name__ = "values_base"
_supports_multi_parameters = False
_has_multi_parameters = False
_preserve_parameter_order = False
select = None
_post_values_clause = None
def __init__(self, table, values, prefixes):
self.table = _interpret_as_from(table)
self.parameters, self._has_multi_parameters = self._process_colparams(
values
)
if prefixes:
self._setup_prefixes(prefixes)
@_generative
def values(self, *args, **kwargs):
r"""specify a fixed VALUES clause for an INSERT statement, or the SET
clause for an UPDATE.
Note that the :class:`_expression.Insert` and
:class:`_expression.Update` constructs support
per-execution time formatting of the VALUES and/or SET clauses,
based on the arguments passed to :meth:`_engine.Connection.execute`.
However, the :meth:`.ValuesBase.values` method can be used to "fix" a
particular set of parameters into the statement.
Multiple calls to :meth:`.ValuesBase.values` will produce a new
construct, each one with the parameter list modified to include
the new parameters sent. In the typical case of a single
dictionary of parameters, the newly passed keys will replace
the same keys in the previous construct. In the case of a list-based
"multiple values" construct, each new list of values is extended
onto the existing list of values.
:param \**kwargs: key value pairs representing the string key
of a :class:`_schema.Column`
mapped to the value to be rendered into the
VALUES or SET clause::
users.insert().values(name="some name")
users.update().where(users.c.id==5).values(name="some name")
:param \*args: As an alternative to passing key/value parameters,
a dictionary, tuple, or list of dictionaries or tuples can be passed
as a single positional argument in order to form the VALUES or
SET clause of the statement. The forms that are accepted vary
based on whether this is an :class:`_expression.Insert` or an
:class:`_expression.Update`
construct.
For either an :class:`_expression.Insert` or
:class:`_expression.Update` construct, a
single dictionary can be passed, which works the same as that of
the kwargs form::
users.insert().values({"name": "some name"})
users.update().values({"name": "some new name"})
Also for either form but more typically for the
:class:`_expression.Insert`
construct, a tuple that contains an entry for every column in the
table is also accepted::
users.insert().values((5, "some name"))
The :class:`_expression.Insert`
construct also supports being passed a list
of dictionaries or full-table-tuples, which on the server will
render the less common SQL syntax of "multiple values" - this
syntax is supported on backends such as SQLite, PostgreSQL, MySQL,
but not necessarily others::
users.insert().values([
{"name": "some name"},
{"name": "some other name"},
{"name": "yet another name"},
])
The above form would render a multiple VALUES statement similar to::
INSERT INTO users (name) VALUES
(:name_1),
(:name_2),
(:name_3)
It is essential to note that **passing multiple values is
NOT the same as using traditional executemany() form**. The above
syntax is a **special** syntax not typically used. To emit an
INSERT statement against multiple rows, the normal method is
to pass a multiple values list to the
:meth:`_engine.Connection.execute`
method, which is supported by all database backends and is generally
more efficient for a very large number of parameters.
.. seealso::
:ref:`execute_multiple` - an introduction to
the traditional Core method of multiple parameter set
invocation for INSERTs and other statements.
.. versionchanged:: 1.0.0 an INSERT that uses a multiple-VALUES
clause, even a list of length one,
implies that the :paramref:`_expression.Insert.inline`
flag is set to
True, indicating that the statement will not attempt to fetch
the "last inserted primary key" or other defaults. The
statement deals with an arbitrary number of rows, so the
:attr:`_engine.ResultProxy.inserted_primary_key`
accessor does not
apply.
.. versionchanged:: 1.0.0 A multiple-VALUES INSERT now supports
columns with Python side default values and callables in the
same way as that of an "executemany" style of invocation; the
callable is invoked for each row. See :ref:`bug_3288`
for other details.
The :class:`_expression.Update`
construct supports a special form which is a
list of 2-tuples, which when provided must be passed in conjunction
with the
:paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
parameter.
This form causes the UPDATE statement to render the SET clauses
using the order of parameters given to
:meth:`_expression.Update.values`, rather
than the ordering of columns given in the :class:`_schema.Table`.
.. versionadded:: 1.0.10 - added support for parameter-ordered
UPDATE statements via the
:paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
flag.
.. seealso::
:ref:`updates_order_parameters` - full example of the
:paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
flag
.. seealso::
:ref:`inserts_and_updates` - SQL Expression
Language Tutorial
:func:`_expression.insert` - produce an ``INSERT`` statement
:func:`_expression.update` - produce an ``UPDATE`` statement
"""
if self.select is not None:
raise exc.InvalidRequestError(
"This construct already inserts from a SELECT"
)
if self._has_multi_parameters and kwargs:
raise exc.InvalidRequestError(
"This construct already has multiple parameter sets."
)
if args:
if len(args) > 1:
raise exc.ArgumentError(
"Only a single dictionary/tuple or list of "
"dictionaries/tuples is accepted positionally."
)
v = args[0]
else:
v = {}
if self.parameters is None:
(
self.parameters,
self._has_multi_parameters,
) = self._process_colparams(v)
else:
if self._has_multi_parameters:
self.parameters = list(self.parameters)
p, self._has_multi_parameters = self._process_colparams(v)
if not self._has_multi_parameters:
raise exc.ArgumentError(
"Can't mix single-values and multiple values "
"formats in one statement"
)
self.parameters.extend(p)
else:
self.parameters = self.parameters.copy()
p, self._has_multi_parameters = self._process_colparams(v)
if self._has_multi_parameters:
raise exc.ArgumentError(
"Can't mix single-values and multiple values "
"formats in one statement"
)
self.parameters.update(p)
if kwargs:
if self._has_multi_parameters:
raise exc.ArgumentError(
"Can't pass kwargs and multiple parameter sets "
"simultaneously"
)
else:
self.parameters.update(kwargs)
@_generative
def return_defaults(self, *cols):
"""Make use of a :term:`RETURNING` clause for the purpose
of fetching server-side expressions and defaults.
E.g.::
stmt = table.insert().values(data='newdata').return_defaults()
result = connection.execute(stmt)
server_created_at = result.returned_defaults['created_at']
When used against a backend that supports RETURNING, all column
values generated by SQL expression or server-side-default will be
added to any existing RETURNING clause, provided that
:meth:`.UpdateBase.returning` is not used simultaneously. The column
values will then be available on the result using the
:attr:`_engine.ResultProxy.returned_defaults` accessor as a dictionary,
referring to values keyed to the :class:`_schema.Column` object as well
as its ``.key``.
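E.g., continuing the sketch above, both of these lookups refer to
the same server-generated value::
    by_column = result.returned_defaults[table.c.created_at]
    by_key = result.returned_defaults['created_at']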
This method differs from :meth:`.UpdateBase.returning` in these ways:
1. :meth:`.ValuesBase.return_defaults` is only intended for use with
an INSERT or an UPDATE statement that matches exactly one row.
While the RETURNING construct in the general sense supports
multiple rows for a multi-row UPDATE or DELETE statement, or for
special cases of INSERT that return multiple rows (e.g. INSERT from
SELECT, multi-valued VALUES clause),
:meth:`.ValuesBase.return_defaults` is intended only for an
"ORM-style" single-row INSERT/UPDATE statement. The row returned
by the statement is also consumed implicitly when
:meth:`.ValuesBase.return_defaults` is used. By contrast,
:meth:`.UpdateBase.returning` leaves the RETURNING result-set
intact with a collection of any number of rows.
2. It is compatible with the existing logic to fetch auto-generated
primary key values, also known as "implicit returning". Backends
that support RETURNING will automatically make use of RETURNING in
order to fetch the value of newly generated primary keys; while the
:meth:`.UpdateBase.returning` method circumvents this behavior,
:meth:`.ValuesBase.return_defaults` leaves it intact.
3. It can be called against any backend. Backends that don't support
RETURNING will skip the usage of the feature, rather than raising
an exception. The return value of
:attr:`_engine.ResultProxy.returned_defaults` will be ``None``.
:meth:`.ValuesBase.return_defaults` is used by the ORM to provide
an efficient implementation for the ``eager_defaults`` feature of
:func:`.mapper`.
:param cols: optional list of column key names or
:class:`_schema.Column`
objects. If omitted, all column expressions evaluated on the server
are added to the returning list.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.UpdateBase.returning`
:attr:`_engine.ResultProxy.returned_defaults`
"""
self._return_defaults = cols or True
class Insert(ValuesBase):
"""Represent an INSERT construct.
The :class:`_expression.Insert` object is created using the
:func:`_expression.insert()` function.
.. seealso::
:ref:`coretutorial_insert_expressions`
"""
__visit_name__ = "insert"
_supports_multi_parameters = True
def __init__(
self,
table,
values=None,
inline=False,
bind=None,
prefixes=None,
returning=None,
return_defaults=False,
**dialect_kw
):
"""Construct an :class:`_expression.Insert` object.
Similar functionality is available via the
:meth:`_expression.TableClause.insert` method on
:class:`_schema.Table`.
:param table: :class:`_expression.TableClause`
which is the subject of the
insert.
:param values: collection of values to be inserted; see
:meth:`_expression.Insert.values`
for a description of allowed formats here.
Can be omitted entirely; a :class:`_expression.Insert`
construct will also
dynamically render the VALUES clause at execution time based on
the parameters passed to :meth:`_engine.Connection.execute`.
:param inline: if True, no attempt will be made to retrieve the
SQL-generated default values to be provided within the statement;
in particular,
this allows SQL expressions to be rendered 'inline' within the
statement without the need to pre-execute them beforehand; for
backends that support "returning", this turns off the "implicit
returning" feature for the statement.
If both ``values`` and compile-time bind parameters are present, the
compile-time bind parameters override the information specified
within ``values`` on a per-key basis.
The keys within ``values`` can be either
:class:`~sqlalchemy.schema.Column` objects or their string
identifiers. Each key may reference one of:
* a literal data value (i.e. string, number, etc.);
* a Column object;
* a SELECT statement.
If a ``SELECT`` statement is specified which references this
``INSERT`` statement's table, the statement will be correlated
against the ``INSERT`` statement.
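E.g., a sketch using a scalar SELECT as a value (the ``addresses``
table is an assumption, mirroring examples later in this module)::
    users.insert().values(
        name=select([addresses.c.email_address])
        .where(addresses.c.user_id == 5)
        .as_scalar()
    )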
.. seealso::
:ref:`coretutorial_insert_expressions` - SQL Expression Tutorial
:ref:`inserts_and_updates` - SQL Expression Tutorial
"""
ValuesBase.__init__(self, table, values, prefixes)
self._bind = bind
self.select = self.select_names = None
self.include_insert_from_select_defaults = False
self.inline = inline
self._returning = returning
self._validate_dialect_kwargs(dialect_kw)
self._return_defaults = return_defaults
def get_children(self, **kwargs):
if self.select is not None:
return (self.select,)
else:
return ()
@_generative
def from_select(self, names, select, include_defaults=True):
"""Return a new :class:`_expression.Insert` construct which represents
an ``INSERT...FROM SELECT`` statement.
e.g.::
sel = select([table1.c.a, table1.c.b]).where(table1.c.c > 5)
ins = table2.insert().from_select(['a', 'b'], sel)
:param names: a sequence of string column names or
:class:`_schema.Column`
objects representing the target columns.
:param select: a :func:`_expression.select` construct,
:class:`_expression.FromClause`
or other construct which resolves into a
:class:`_expression.FromClause`,
such as an ORM :class:`_query.Query` object, etc. The order of
columns returned from this FROM clause should correspond to the
order of columns sent as the ``names`` parameter; while this
is not checked before passing along to the database, the database
would normally raise an exception if these column lists don't
correspond.
:param include_defaults: if True, non-server default values and
SQL expressions as specified on :class:`_schema.Column` objects
(as documented in :ref:`metadata_defaults_toplevel`) not
otherwise specified in the list of names will be rendered
into the INSERT and SELECT statements, so that these values are also
included in the data to be inserted.
.. note:: A Python-side default that uses a Python callable function
will only be invoked **once** for the whole statement, and **not
per row**.
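For example, in a sketch where the target table carries a
Python-side callable default (hypothetical schema, not part of the
surrounding example)::
    import datetime
    table2 = Table(
        'target', metadata,
        Column('a', Integer),
        Column('b', Integer),
        Column('updated', DateTime, default=datetime.datetime.utcnow),
    )
    # 'updated' is absent from the names list; its callable default is
    # invoked once and rendered into the SELECT for every selected row
    ins = table2.insert().from_select(['a', 'b'], sel)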
.. versionadded:: 1.0.0 - :meth:`_expression.Insert.from_select`
now renders
Python-side and SQL expression column defaults into the
SELECT statement for columns otherwise not included in the
list of column names.
.. versionchanged:: 1.0.0 an INSERT that uses FROM SELECT
implies that the :paramref:`_expression.insert.inline`
flag is set to
True, indicating that the statement will not attempt to fetch
the "last inserted primary key" or other defaults. The statement
deals with an arbitrary number of rows, so the
:attr:`_engine.ResultProxy.inserted_primary_key`
accessor does not apply.
"""
if self.parameters:
raise exc.InvalidRequestError(
"This construct already inserts value expressions"
)
self.parameters, self._has_multi_parameters = self._process_colparams(
{_column_as_key(n): Null() for n in names}
)
self.select_names = names
self.inline = True
self.include_insert_from_select_defaults = include_defaults
self.select = _interpret_as_select(select)
def _copy_internals(self, clone=_clone, **kw):
# TODO: coverage
self.parameters = self.parameters.copy()
if self.select is not None:
self.select = clone(self.select, **kw)
class Update(ValuesBase):
"""Represent an Update construct.
The :class:`_expression.Update`
object is created using the :func:`update()`
function.
"""
__visit_name__ = "update"
def __init__(
self,
table,
whereclause=None,
values=None,
inline=False,
bind=None,
prefixes=None,
returning=None,
return_defaults=False,
preserve_parameter_order=False,
**dialect_kw
):
r"""Construct an :class:`_expression.Update` object.
E.g.::
from sqlalchemy import update
stmt = update(users).where(users.c.id==5).\
values(name='user #5')
Similar functionality is available via the
:meth:`_expression.TableClause.update` method on
:class:`_schema.Table`::
stmt = users.update().\
where(users.c.id==5).\
values(name='user #5')
:param table: A :class:`_schema.Table`
object representing the database
table to be updated.
:param whereclause: Optional SQL expression describing the ``WHERE``
condition of the ``UPDATE`` statement. Modern applications
may prefer to use the generative :meth:`~Update.where()`
method to specify the ``WHERE`` clause.
The WHERE clause can refer to multiple tables.
For databases which support this, an ``UPDATE FROM`` clause will
be generated, or on MySQL, a multi-table update. The statement
will fail on databases that don't have support for multi-table
update statements. A SQL-standard method of referring to
additional tables in the WHERE clause is to use a correlated
subquery::
users.update().values(name='ed').where(
users.c.name==select([addresses.c.email_address]).\
where(addresses.c.user_id==users.c.id).\
as_scalar()
)
:param values:
Optional dictionary which specifies the ``SET`` conditions of the
``UPDATE``. If left as ``None``, the ``SET``
conditions are determined from those parameters passed to the
statement during the execution and/or compilation of the
statement. When compiled standalone without any parameters,
the ``SET`` clause is generated for all columns.
Modern applications may prefer to use the generative
:meth:`_expression.Update.values` method to set the values of the
UPDATE statement.
:param inline:
if True, SQL defaults present on :class:`_schema.Column` objects via
the ``default`` keyword will be compiled 'inline' into the statement
and not pre-executed. This means that their values will not
be available in the dictionary returned from
:meth:`_engine.ResultProxy.last_updated_params`.
:param preserve_parameter_order: if True, the update statement is
expected to receive parameters **only** via the
:meth:`_expression.Update.values` method,
and they must be passed as a Python
``list`` of 2-tuples. The rendered UPDATE statement will emit the SET
clause for each referenced column maintaining this order.
.. versionadded:: 1.0.10
.. seealso::
:ref:`updates_order_parameters` - full example of the
:paramref:`_expression.update.preserve_parameter_order` flag
If both ``values`` and compile-time bind parameters are present, the
compile-time bind parameters override the information specified
within ``values`` on a per-key basis.
The keys within ``values`` can be either :class:`_schema.Column`
objects or their string identifiers (specifically the "key" of the
:class:`_schema.Column`, normally but not necessarily equivalent to
its "name"). Normally, the
:class:`_schema.Column` objects used here are expected to be
part of the target :class:`_schema.Table` that is the table
to be updated. However when using MySQL, a multiple-table
UPDATE statement can refer to columns from any of
the tables referred to in the WHERE clause.
The values referred to in ``values`` are typically:
* a literal data value (i.e. string, number, etc.)
* a SQL expression, such as a related :class:`_schema.Column`,
a scalar-returning :func:`_expression.select` construct,
etc.
When combining :func:`_expression.select` constructs within the values
clause of an :func:`_expression.update` construct,
the subquery represented by the :func:`_expression.select` should be
*correlated* to the parent table, that is, providing criterion
which links the table inside the subquery to the outer table
being updated::
users.update().values(
name=select([addresses.c.email_address]).\
where(addresses.c.user_id==users.c.id).\
as_scalar()
)
.. seealso::
:ref:`inserts_and_updates` - SQL Expression
Language Tutorial
"""
self._preserve_parameter_order = preserve_parameter_order
ValuesBase.__init__(self, table, values, prefixes)
self._bind = bind
self._returning = returning
if whereclause is not None:
self._whereclause = _literal_as_text(whereclause)
else:
self._whereclause = None
self.inline = inline
self._validate_dialect_kwargs(dialect_kw)
self._return_defaults = return_defaults
def get_children(self, **kwargs):
if self._whereclause is not None:
return (self._whereclause,)
else:
return ()
def _copy_internals(self, clone=_clone, **kw):
# TODO: coverage
self._whereclause = clone(self._whereclause, **kw)
self.parameters = self.parameters.copy()
@_generative
def where(self, whereclause):
"""return a new update() construct with the given expression added to
its WHERE clause, joined to the existing clause via AND, if any.
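E.g., successive calls are joined via AND (a sketch; the ``users``
table is assumed from the earlier examples)::
    stmt = users.update().values(name='ed')
    stmt = stmt.where(users.c.id > 5)
    stmt = stmt.where(users.c.id < 10)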
"""
if self._whereclause is not None:
self._whereclause = and_(
self._whereclause, _literal_as_text(whereclause)
)
else:
self._whereclause = _literal_as_text(whereclause)
@property
def _extra_froms(self):
froms = []
seen = {self.table}
if self._whereclause is not None:
for item in _from_objects(self._whereclause):
if not seen.intersection(item._cloned_set):
froms.append(item)
seen.update(item._cloned_set)
return froms
class Delete(UpdateBase):
"""Represent a DELETE construct.
The :class:`_expression.Delete`
object is created using the :func:`delete()`
function.
"""
__visit_name__ = "delete"
def __init__(
self,
table,
whereclause=None,
bind=None,
returning=None,
prefixes=None,
**dialect_kw
):
r"""Construct :class:`_expression.Delete` object.
Similar functionality is available via the
:meth:`_expression.TableClause.delete` method on
:class:`_schema.Table`.
:param table: The table to delete rows from.
:param whereclause: A :class:`_expression.ClauseElement`
describing the ``WHERE``
condition of the ``DELETE`` statement. Note that the
:meth:`~Delete.where()` generative method may be used instead.
The WHERE clause can refer to multiple tables.
For databases which support this, a ``DELETE..USING`` or similar
clause will be generated. The statement
will fail on databases that don't have support for multi-table
delete statements. A SQL-standard method of referring to
additional tables in the WHERE clause is to use a correlated
subquery::
users.delete().where(
users.c.name==select([addresses.c.email_address]).\
where(addresses.c.user_id==users.c.id).\
as_scalar()
)
.. versionchanged:: 1.2.0
The WHERE clause of DELETE can refer to multiple tables.
.. seealso::
:ref:`deletes` - SQL Expression Tutorial
"""
self._bind = bind
self.table = _interpret_as_from(table)
self._returning = returning
if prefixes:
self._setup_prefixes(prefixes)
if whereclause is not None:
self._whereclause = _literal_as_text(whereclause)
else:
self._whereclause = None
self._validate_dialect_kwargs(dialect_kw)
def get_children(self, **kwargs):
if self._whereclause is not None:
return (self._whereclause,)
else:
return ()
@_generative
def where(self, whereclause):
"""Add the given WHERE clause to a newly returned delete construct."""
if self._whereclause is not None:
self._whereclause = and_(
self._whereclause, _literal_as_text(whereclause)
)
else:
self._whereclause = _literal_as_text(whereclause)
@property
def _extra_froms(self):
froms = []
seen = {self.table}
if self._whereclause is not None:
for item in _from_objects(self._whereclause):
if not seen.intersection(item._cloned_set):
froms.append(item)
seen.update(item._cloned_set)
return froms
def _copy_internals(self, clone=_clone, **kw):
# TODO: coverage
self._whereclause = clone(self._whereclause, **kw)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/sql/expression.py
|
# sql/expression.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines the public namespace for SQL expression constructs.
Prior to version 0.9, this module contained all of "elements", "dml",
"default_comparator" and "selectable". The module was broken up
and most "factory" functions were moved to be grouped with their associated
class.
"""
__all__ = [
"Alias",
"any_",
"all_",
"ClauseElement",
"ColumnCollection",
"ColumnElement",
"CompoundSelect",
"Delete",
"FromClause",
"Insert",
"Join",
"Lateral",
"Select",
"Selectable",
"TableClause",
"Update",
"alias",
"and_",
"asc",
"between",
"bindparam",
"case",
"cast",
"column",
"cte",
"delete",
"desc",
"distinct",
"except_",
"except_all",
"exists",
"extract",
"func",
"modifier",
"collate",
"insert",
"intersect",
"intersect_all",
"join",
"label",
"lateral",
"literal",
"literal_column",
"not_",
"null",
"nullsfirst",
"nullslast",
"or_",
"outparam",
"outerjoin",
"over",
"select",
"subquery",
"table",
"text",
"tuple_",
"type_coerce",
"quoted_name",
"union",
"union_all",
"update",
"within_group",
"TableSample",
"tablesample",
]
from .base import _from_objects # noqa
from .base import ColumnCollection # noqa
from .base import Executable # noqa
from .base import Generative # noqa
from .base import PARSE_AUTOCOMMIT # noqa
from .dml import Delete # noqa
from .dml import Insert # noqa
from .dml import Update # noqa
from .dml import UpdateBase # noqa
from .dml import ValuesBase # noqa
from .elements import _clause_element_as_expr # noqa
from .elements import _clone # noqa
from .elements import _cloned_difference # noqa
from .elements import _cloned_intersection # noqa
from .elements import _column_as_key # noqa
from .elements import _corresponding_column_or_error # noqa
from .elements import _expression_literal_as_text # noqa
from .elements import _is_column # noqa
from .elements import _labeled # noqa
from .elements import _literal_as_binds # noqa
from .elements import _literal_as_column # noqa
from .elements import _literal_as_label_reference # noqa
from .elements import _literal_as_text # noqa
from .elements import _only_column_elements # noqa
from .elements import _select_iterables # noqa
from .elements import _string_or_unprintable # noqa
from .elements import _truncated_label # noqa
from .elements import between # noqa
from .elements import BinaryExpression # noqa
from .elements import BindParameter # noqa
from .elements import BooleanClauseList # noqa
from .elements import Case # noqa
from .elements import Cast # noqa
from .elements import ClauseElement # noqa
from .elements import ClauseList # noqa
from .elements import collate # noqa
from .elements import CollectionAggregate # noqa
from .elements import ColumnClause # noqa
from .elements import ColumnElement # noqa
from .elements import Extract # noqa
from .elements import False_ # noqa
from .elements import FunctionFilter # noqa
from .elements import Grouping # noqa
from .elements import Label # noqa
from .elements import literal # noqa
from .elements import literal_column # noqa
from .elements import not_ # noqa
from .elements import Null # noqa
from .elements import outparam # noqa
from .elements import Over # noqa
from .elements import quoted_name # noqa
from .elements import ReleaseSavepointClause # noqa
from .elements import RollbackToSavepointClause # noqa
from .elements import SavepointClause # noqa
from .elements import TextClause # noqa
from .elements import True_ # noqa
from .elements import Tuple # noqa
from .elements import TypeClause # noqa
from .elements import TypeCoerce # noqa
from .elements import UnaryExpression # noqa
from .elements import WithinGroup # noqa
from .functions import func # noqa
from .functions import Function # noqa
from .functions import FunctionElement # noqa
from .functions import modifier # noqa
from .selectable import _interpret_as_from # noqa
from .selectable import Alias # noqa
from .selectable import CompoundSelect # noqa
from .selectable import CTE # noqa
from .selectable import Exists # noqa
from .selectable import FromClause # noqa
from .selectable import FromGrouping # noqa
from .selectable import GenerativeSelect # noqa
from .selectable import HasCTE # noqa
from .selectable import HasPrefixes # noqa
from .selectable import HasSuffixes # noqa
from .selectable import Join # noqa
from .selectable import Lateral # noqa
from .selectable import ScalarSelect # noqa
from .selectable import Select # noqa
from .selectable import Selectable # noqa
from .selectable import SelectBase # noqa
from .selectable import subquery # noqa
from .selectable import TableClause # noqa
from .selectable import TableSample # noqa
from .selectable import TextAsFrom # noqa
from .visitors import Visitable # noqa
from ..util.langhelpers import public_factory # noqa
# factory functions - these pull class-bound constructors and classmethods
# from SQL elements and selectables into public functions. This allows
# the functions to be available in the sqlalchemy.sql.* namespace and
# to be auto-cross-documenting from the function to the class itself.
all_ = public_factory(CollectionAggregate._create_all, ".sql.expression.all_")
any_ = public_factory(CollectionAggregate._create_any, ".sql.expression.any_")
and_ = public_factory(BooleanClauseList.and_, ".sql.expression.and_")
alias = public_factory(Alias._factory, ".sql.expression.alias")
tablesample = public_factory(
TableSample._factory, ".sql.expression.tablesample"
)
lateral = public_factory(Lateral._factory, ".sql.expression.lateral")
or_ = public_factory(BooleanClauseList.or_, ".sql.expression.or_")
bindparam = public_factory(BindParameter, ".sql.expression.bindparam")
select = public_factory(Select, ".sql.expression.select")
text = public_factory(TextClause._create_text, ".sql.expression.text")
table = public_factory(TableClause, ".sql.expression.table")
column = public_factory(ColumnClause, ".sql.expression.column")
over = public_factory(Over, ".sql.expression.over")
within_group = public_factory(WithinGroup, ".sql.expression.within_group")
label = public_factory(Label, ".sql.expression.label")
case = public_factory(Case, ".sql.expression.case")
cast = public_factory(Cast, ".sql.expression.cast")
cte = public_factory(CTE._factory, ".sql.expression.cte")
extract = public_factory(Extract, ".sql.expression.extract")
tuple_ = public_factory(Tuple, ".sql.expression.tuple_")
except_ = public_factory(
CompoundSelect._create_except, ".sql.expression.except_"
)
except_all = public_factory(
CompoundSelect._create_except_all, ".sql.expression.except_all"
)
intersect = public_factory(
CompoundSelect._create_intersect, ".sql.expression.intersect"
)
intersect_all = public_factory(
CompoundSelect._create_intersect_all, ".sql.expression.intersect_all"
)
union = public_factory(CompoundSelect._create_union, ".sql.expression.union")
union_all = public_factory(
CompoundSelect._create_union_all, ".sql.expression.union_all"
)
exists = public_factory(Exists, ".sql.expression.exists")
nullsfirst = public_factory(
UnaryExpression._create_nullsfirst, ".sql.expression.nullsfirst"
)
nullslast = public_factory(
UnaryExpression._create_nullslast, ".sql.expression.nullslast"
)
asc = public_factory(UnaryExpression._create_asc, ".sql.expression.asc")
desc = public_factory(UnaryExpression._create_desc, ".sql.expression.desc")
distinct = public_factory(
UnaryExpression._create_distinct, ".sql.expression.distinct"
)
type_coerce = public_factory(TypeCoerce, ".sql.expression.type_coerce")
true = public_factory(True_._instance, ".sql.expression.true")
false = public_factory(False_._instance, ".sql.expression.false")
null = public_factory(Null._instance, ".sql.expression.null")
join = public_factory(Join._create_join, ".sql.expression.join")
outerjoin = public_factory(Join._create_outerjoin, ".sql.expression.outerjoin")
insert = public_factory(Insert, ".sql.expression.insert")
update = public_factory(Update, ".sql.expression.update")
delete = public_factory(Delete, ".sql.expression.delete")
funcfilter = public_factory(FunctionFilter, ".sql.expression.funcfilter")
# internal functions still being called from tests and the ORM,
# these might be better off in some other namespace
# old names for compatibility
_Executable = Executable
_BindParamClause = BindParameter
_Label = Label
_SelectBase = SelectBase
_BinaryExpression = BinaryExpression
_Cast = Cast
_Null = Null
_False = False_
_True = True_
_TextClause = TextClause
_UnaryExpression = UnaryExpression
_Case = Case
_Tuple = Tuple
_Over = Over
_Generative = Generative
_TypeClause = TypeClause
_Extract = Extract
_Exists = Exists
_Grouping = Grouping
_FromGrouping = FromGrouping
_ScalarSelect = ScalarSelect
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/sql/base.py
|
# sql/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Foundational utilities common to many sql modules.
"""
import itertools
import re
from .visitors import ClauseVisitor
from .. import exc
from .. import util
PARSE_AUTOCOMMIT = util.symbol("PARSE_AUTOCOMMIT")
NO_ARG = util.symbol("NO_ARG")
class Immutable(object):
"""mark a ClauseElement as 'immutable' when expressions are cloned."""
def unique_params(self, *optionaldict, **kwargs):
raise NotImplementedError("Immutable objects do not support copying")
def params(self, *optionaldict, **kwargs):
raise NotImplementedError("Immutable objects do not support copying")
def _clone(self):
return self
def _from_objects(*elements):
return itertools.chain(*[element._from_objects for element in elements])
@util.decorator
def _generative(fn, *args, **kw):
"""Mark a method as generative."""
self = args[0]._generate()
fn(self, *args[1:], **kw)
return self
class _DialectArgView(util.collections_abc.MutableMapping):
"""A dictionary view of dialect-level arguments in the form
<dialectname>_<argument_name>.
"""
def __init__(self, obj):
self.obj = obj
def _key(self, key):
try:
dialect, value_key = key.split("_", 1)
except ValueError as err:
util.raise_(KeyError(key), replace_context=err)
else:
return dialect, value_key
def __getitem__(self, key):
dialect, value_key = self._key(key)
try:
opt = self.obj.dialect_options[dialect]
except exc.NoSuchModuleError as err:
util.raise_(KeyError(key), replace_context=err)
else:
return opt[value_key]
def __setitem__(self, key, value):
try:
dialect, value_key = self._key(key)
except KeyError as err:
util.raise_(
exc.ArgumentError(
"Keys must be of the form <dialectname>_<argname>"
),
replace_context=err,
)
else:
self.obj.dialect_options[dialect][value_key] = value
def __delitem__(self, key):
dialect, value_key = self._key(key)
del self.obj.dialect_options[dialect][value_key]
def __len__(self):
return sum(
len(args._non_defaults)
for args in self.obj.dialect_options.values()
)
def __iter__(self):
return (
util.safe_kwarg("%s_%s" % (dialect_name, value_name))
for dialect_name in self.obj.dialect_options
for value_name in self.obj.dialect_options[
dialect_name
]._non_defaults
)
class _DialectArgDict(util.collections_abc.MutableMapping):
"""A dictionary view of dialect-level arguments for a specific
dialect.
Maintains a separate collection of user-specified arguments
and dialect-specified default arguments.
"""
def __init__(self):
self._non_defaults = {}
self._defaults = {}
def __len__(self):
return len(set(self._non_defaults).union(self._defaults))
def __iter__(self):
return iter(set(self._non_defaults).union(self._defaults))
def __getitem__(self, key):
if key in self._non_defaults:
return self._non_defaults[key]
else:
return self._defaults[key]
def __setitem__(self, key, value):
self._non_defaults[key] = value
def __delitem__(self, key):
del self._non_defaults[key]
class DialectKWArgs(object):
"""Establish the ability for a class to have dialect-specific arguments
with defaults and constructor validation.
The :class:`.DialectKWArgs` interacts with the
:attr:`.DefaultDialect.construct_arguments` present on a dialect.
.. seealso::
:attr:`.DefaultDialect.construct_arguments`
"""
@classmethod
def argument_for(cls, dialect_name, argument_name, default):
"""Add a new kind of dialect-specific keyword argument for this class.
E.g.::
Index.argument_for("mydialect", "length", None)
some_index = Index('a', 'b', mydialect_length=5)
The :meth:`.DialectKWArgs.argument_for` method is a per-argument
way of adding extra arguments to the
:attr:`.DefaultDialect.construct_arguments` dictionary. This
dictionary provides a list of argument names accepted by various
schema-level constructs on behalf of a dialect.
New dialects should typically specify this dictionary all at once as a
data member of the dialect class. The use case for ad-hoc addition of
argument names is typically for end-user code that is also using
a custom compilation scheme which consumes the additional arguments.
:param dialect_name: name of a dialect. The dialect must be
locatable, else a :class:`.NoSuchModuleError` is raised. The
dialect must also include an existing
:attr:`.DefaultDialect.construct_arguments` collection, indicating
that it participates in the keyword-argument validation and default
system, else :class:`.ArgumentError` is raised. If the dialect does
not include this collection, then any keyword argument can already be
specified freely on behalf of this dialect. All dialects packaged
within SQLAlchemy include this collection, however for third party
dialects, support may vary.
:param argument_name: name of the parameter.
:param default: default value of the parameter.
.. versionadded:: 0.9.4
"""
construct_arg_dictionary = DialectKWArgs._kw_registry[dialect_name]
if construct_arg_dictionary is None:
raise exc.ArgumentError(
"Dialect '%s' does have keyword-argument "
"validation and defaults enabled configured" % dialect_name
)
if cls not in construct_arg_dictionary:
construct_arg_dictionary[cls] = {}
construct_arg_dictionary[cls][argument_name] = default
@util.memoized_property
def dialect_kwargs(self):
"""A collection of keyword arguments specified as dialect-specific
options to this construct.
The arguments are present here in their original ``<dialect>_<kwarg>``
format. Only arguments that were actually passed are included;
unlike the :attr:`.DialectKWArgs.dialect_options` collection, which
contains all options known by this dialect including defaults.
The collection is also writable; keys are accepted of the
form ``<dialect>_<kwarg>`` where the value will be assembled
into the list of options.
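E.g., a sketch using the hypothetical ``mydialect_length`` argument
from the :meth:`.DialectKWArgs.argument_for` example above::
    some_index = Index('a', 'b', mydialect_length=5)
    some_index.dialect_kwargs['mydialect_length']   # 5
    some_index.dialect_kwargs['mydialect_length'] = 10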
.. versionadded:: 0.9.2
.. versionchanged:: 0.9.4 The :attr:`.DialectKWArgs.dialect_kwargs`
collection is now writable.
.. seealso::
:attr:`.DialectKWArgs.dialect_options` - nested dictionary form
"""
return _DialectArgView(self)
@property
def kwargs(self):
"""A synonym for :attr:`.DialectKWArgs.dialect_kwargs`."""
return self.dialect_kwargs
@util.dependencies("sqlalchemy.dialects")
def _kw_reg_for_dialect(dialects, dialect_name):
dialect_cls = dialects.registry.load(dialect_name)
if dialect_cls.construct_arguments is None:
return None
return dict(dialect_cls.construct_arguments)
_kw_registry = util.PopulateDict(_kw_reg_for_dialect)
def _kw_reg_for_dialect_cls(self, dialect_name):
construct_arg_dictionary = DialectKWArgs._kw_registry[dialect_name]
d = _DialectArgDict()
if construct_arg_dictionary is None:
d._defaults.update({"*": None})
else:
for cls in reversed(self.__class__.__mro__):
if cls in construct_arg_dictionary:
d._defaults.update(construct_arg_dictionary[cls])
return d
@util.memoized_property
def dialect_options(self):
"""A collection of keyword arguments specified as dialect-specific
options to this construct.
This is a two-level nested registry, keyed to ``<dialect_name>``
and ``<argument_name>``. For example, the ``postgresql_where``
argument would be locatable as::
arg = my_object.dialect_options['postgresql']['where']
.. versionadded:: 0.9.2
.. seealso::
:attr:`.DialectKWArgs.dialect_kwargs` - flat dictionary form
"""
return util.PopulateDict(
util.portable_instancemethod(self._kw_reg_for_dialect_cls)
)
def _validate_dialect_kwargs(self, kwargs):
# validate remaining kwargs that they all specify DB prefixes
if not kwargs:
return
for k in kwargs:
m = re.match("^(.+?)_(.+)$", k)
if not m:
raise TypeError(
"Additional arguments should be "
"named <dialectname>_<argument>, got '%s'" % k
)
dialect_name, arg_name = m.group(1, 2)
try:
construct_arg_dictionary = self.dialect_options[dialect_name]
except exc.NoSuchModuleError:
util.warn(
"Can't validate argument %r; can't "
"locate any SQLAlchemy dialect named %r"
% (k, dialect_name)
)
self.dialect_options[dialect_name] = d = _DialectArgDict()
d._defaults.update({"*": None})
d._non_defaults[arg_name] = kwargs[k]
else:
if (
"*" not in construct_arg_dictionary
and arg_name not in construct_arg_dictionary
):
raise exc.ArgumentError(
"Argument %r is not accepted by "
"dialect %r on behalf of %r"
% (k, dialect_name, self.__class__)
)
else:
construct_arg_dictionary[arg_name] = kwargs[k]
class Generative(object):
"""Allow a ClauseElement to generate itself via the
@_generative decorator.
"""
def _generate(self):
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
return s
class Executable(Generative):
"""Mark a ClauseElement as supporting execution.
:class:`.Executable` is a superclass for all "statement" types
of objects, including :func:`select`, :func:`delete`, :func:`update`,
:func:`insert`, :func:`text`.
"""
supports_execution = True
_execution_options = util.immutabledict()
_bind = None
@_generative
def execution_options(self, **kw):
""" Set non-SQL options for the statement which take effect during
execution.
Execution options can be set on a per-statement or
per :class:`_engine.Connection` basis. Additionally, the
:class:`_engine.Engine` and ORM :class:`~.orm.query.Query`
objects provide
access to execution options which they in turn configure upon
connections.
The :meth:`execution_options` method is generative. A new
instance of this statement is returned that contains the options::
statement = select([table.c.x, table.c.y])
statement = statement.execution_options(autocommit=True)
Note that only a subset of possible execution options can be applied
to a statement - these include "autocommit" and "stream_results",
but not "isolation_level" or "compiled_cache".
See :meth:`_engine.Connection.execution_options` for a full list of
possible options.
.. seealso::
:meth:`_engine.Connection.execution_options`
:meth:`_query.Query.execution_options`
:meth:`.Executable.get_execution_options`
"""
if "isolation_level" in kw:
raise exc.ArgumentError(
"'isolation_level' execution option may only be specified "
"on Connection.execution_options(), or "
"per-engine using the isolation_level "
"argument to create_engine()."
)
if "compiled_cache" in kw:
raise exc.ArgumentError(
"'compiled_cache' execution option may only be specified "
"on Connection.execution_options(), not per statement."
)
self._execution_options = self._execution_options.union(kw)
def get_execution_options(self):
""" Get the non-SQL options which will take effect during execution.
.. versionadded:: 1.3
.. seealso::
:meth:`.Executable.execution_options`
"""
return self._execution_options
def execute(self, *multiparams, **params):
"""Compile and execute this :class:`.Executable`.
"""
e = self.bind
if e is None:
label = getattr(self, "description", self.__class__.__name__)
msg = (
"This %s is not directly bound to a Connection or Engine. "
"Use the .execute() method of a Connection or Engine "
"to execute this construct." % label
)
raise exc.UnboundExecutionError(msg)
return e._execute_clauseelement(self, multiparams, params)
def scalar(self, *multiparams, **params):
"""Compile and execute this :class:`.Executable`, returning the
result's scalar representation.
"""
return self.execute(*multiparams, **params).scalar()
@property
def bind(self):
"""Returns the :class:`_engine.Engine` or :class:`_engine.Connection`
to
which this :class:`.Executable` is bound, or None if none found.
This is a traversal which checks locally, then
checks among the "from" clauses of associated objects
until a bound engine or connection is found.
"""
if self._bind is not None:
return self._bind
for f in _from_objects(self):
if f is self:
continue
engine = f.bind
if engine is not None:
return engine
else:
return None
class SchemaEventTarget(object):
"""Base class for elements that are the targets of :class:`.DDLEvents`
events.
This includes :class:`.SchemaItem` as well as :class:`.SchemaType`.
"""
def _set_parent(self, parent):
"""Associate with this SchemaEvent's parent object."""
def _set_parent_with_dispatch(self, parent):
self.dispatch.before_parent_attach(self, parent)
self._set_parent(parent)
self.dispatch.after_parent_attach(self, parent)
class SchemaVisitor(ClauseVisitor):
"""Define the visiting for ``SchemaItem`` objects."""
__traverse_options__ = {"schema_visitor": True}
class ColumnCollection(util.OrderedProperties):
"""An ordered dictionary that stores a list of ColumnElement
instances.
Overrides the ``__eq__()`` method to produce SQL clauses between
sets of correlated columns.
"""
__slots__ = "_all_columns"
def __init__(self, *columns):
super(ColumnCollection, self).__init__()
object.__setattr__(self, "_all_columns", [])
for c in columns:
self.add(c)
def __str__(self):
return repr([str(c) for c in self])
def replace(self, column):
"""add the given column to this collection, removing unaliased
versions of this column as well as existing columns with the
same key.
e.g.::
t = Table('sometable', metadata, Column('col1', Integer))
t.columns.replace(Column('col1', Integer, key='columnone'))
will remove the original 'col1' from the collection, and add
the new column under the name 'columnone'.
Used by schema.Column to override columns during table reflection.
"""
remove_col = None
if column.name in self and column.key != column.name:
other = self[column.name]
if other.name == other.key:
remove_col = other
del self._data[other.key]
if column.key in self._data:
remove_col = self._data[column.key]
self._data[column.key] = column
if remove_col is not None:
self._all_columns[:] = [
column if c is remove_col else c for c in self._all_columns
]
else:
self._all_columns.append(column)
def add(self, column):
"""Add a column to this collection.
The key attribute of the column will be used as the hash key
for this dictionary.
"""
if not column.key:
raise exc.ArgumentError(
"Can't add unnamed column to column collection"
)
self[column.key] = column
def __delitem__(self, key):
raise NotImplementedError()
def __setattr__(self, key, obj):
raise NotImplementedError()
def __setitem__(self, key, value):
if key in self:
# this warning is primarily to catch select() statements
# which have conflicting column names in their exported
# columns collection
existing = self[key]
if existing is value:
return
if not existing.shares_lineage(value):
util.warn(
"Column %r on table %r being replaced by "
"%r, which has the same key. Consider "
"use_labels for select() statements."
% (key, getattr(existing, "table", None), value)
)
# pop out memoized proxy_set as this
# operation may very well be occurring
# in a _make_proxy operation
util.memoized_property.reset(value, "proxy_set")
self._all_columns.append(value)
self._data[key] = value
def clear(self):
raise NotImplementedError()
def remove(self, column):
del self._data[column.key]
self._all_columns[:] = [
c for c in self._all_columns if c is not column
]
def update(self, iter_):
cols = list(iter_)
all_col_set = set(self._all_columns)
self._all_columns.extend(
c for label, c in cols if c not in all_col_set
)
self._data.update((label, c) for label, c in cols)
def extend(self, iter_):
cols = list(iter_)
all_col_set = set(self._all_columns)
self._all_columns.extend(c for c in cols if c not in all_col_set)
self._data.update((c.key, c) for c in cols)
__hash__ = None
@util.dependencies("sqlalchemy.sql.elements")
def __eq__(self, elements, other):
l = []
for c in getattr(other, "_all_columns", other):
for local in self._all_columns:
if c.shares_lineage(local):
l.append(c == local)
return elements.and_(*l)
def __contains__(self, other):
if not isinstance(other, util.string_types):
raise exc.ArgumentError("__contains__ requires a string argument")
return util.OrderedProperties.__contains__(self, other)
def __getstate__(self):
return {"_data": self._data, "_all_columns": self._all_columns}
def __setstate__(self, state):
object.__setattr__(self, "_data", state["_data"])
object.__setattr__(self, "_all_columns", state["_all_columns"])
def contains_column(self, col):
return col in set(self._all_columns)
def as_immutable(self):
return ImmutableColumnCollection(self._data, self._all_columns)
class ImmutableColumnCollection(util.ImmutableProperties, ColumnCollection):
def __init__(self, data, all_columns):
util.ImmutableProperties.__init__(self, data)
object.__setattr__(self, "_all_columns", all_columns)
extend = remove = util.ImmutableProperties._immutable
class ColumnSet(util.ordered_column_set):
def contains_column(self, col):
return col in self
def extend(self, cols):
for col in cols:
self.add(col)
def __add__(self, other):
return list(self) + list(other)
@util.dependencies("sqlalchemy.sql.elements")
def __eq__(self, elements, other):
l = []
for c in other:
for local in self:
if c.shares_lineage(local):
l.append(c == local)
return elements.and_(*l)
def __hash__(self):
return hash(tuple(x for x in self))
def _bind_or_error(schemaitem, msg=None):
bind = schemaitem.bind
if not bind:
name = schemaitem.__class__.__name__
label = getattr(
schemaitem, "fullname", getattr(schemaitem, "name", None)
)
if label:
item = "%s object %r" % (name, label)
else:
item = "%s object" % name
if msg is None:
msg = (
"%s is not bound to an Engine or Connection. "
"Execution can not proceed without a database to execute "
"against." % item
)
raise exc.UnboundExecutionError(msg)
return bind
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/sql/type_api.py
|
# sql/type_api.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base types API.
"""
from . import operators
from .base import SchemaEventTarget
from .visitors import Visitable
from .visitors import VisitableType
from .. import exc
from .. import util
# these are back-assigned by sqltypes.
BOOLEANTYPE = None
INTEGERTYPE = None
NULLTYPE = None
STRINGTYPE = None
MATCHTYPE = None
INDEXABLE = None
_resolve_value_to_type = None
class TypeEngine(Visitable):
"""The ultimate base class for all SQL datatypes.
Common subclasses of :class:`.TypeEngine` include
:class:`.String`, :class:`.Integer`, and :class:`.Boolean`.
For an overview of the SQLAlchemy typing system, see
:ref:`types_toplevel`.
.. seealso::
:ref:`types_toplevel`
"""
_sqla_type = True
_isnull = False
class Comparator(operators.ColumnOperators):
"""Base class for custom comparison operations defined at the
type level. See :attr:`.TypeEngine.comparator_factory`.
"""
__slots__ = "expr", "type"
default_comparator = None
def __init__(self, expr):
self.expr = expr
self.type = expr.type
@util.dependencies("sqlalchemy.sql.default_comparator")
def operate(self, default_comparator, op, *other, **kwargs):
o = default_comparator.operator_lookup[op.__name__]
return o[0](self.expr, op, *(other + o[1:]), **kwargs)
@util.dependencies("sqlalchemy.sql.default_comparator")
def reverse_operate(self, default_comparator, op, other, **kwargs):
o = default_comparator.operator_lookup[op.__name__]
return o[0](self.expr, op, other, reverse=True, *o[1:], **kwargs)
def _adapt_expression(self, op, other_comparator):
"""evaluate the return type of <self> <op> <othertype>,
and apply any adaptations to the given operator.
This method determines the type of a resulting binary expression
given two source types and an operator. For example, two
:class:`_schema.Column` objects, both of the type
:class:`.Integer`, will
produce a :class:`.BinaryExpression` that also has the type
:class:`.Integer` when compared via the addition (``+``) operator.
However, using the addition operator with an :class:`.Integer`
and a :class:`.Date` object will produce a :class:`.Date`, assuming
"days delta" behavior by the database (in reality, most databases
other than PostgreSQL don't accept this particular operation).
The method returns a tuple of the form <operator>, <type>.
The resulting operator and type will be those applied to the
resulting :class:`.BinaryExpression` as the final operator and the
right-hand side of the expression.
Note that only a subset of operators make use of
:meth:`._adapt_expression`,
including math operators and user-defined operators, but not
boolean comparison or special SQL keywords like MATCH or BETWEEN.
"""
return op, self.type
def __reduce__(self):
return _reconstitute_comparator, (self.expr,)
hashable = True
"""Flag, if False, means values from this type aren't hashable.
Used by the ORM when uniquing result lists.
"""
comparator_factory = Comparator
"""A :class:`.TypeEngine.Comparator` class which will apply
to operations performed by owning :class:`_expression.ColumnElement`
objects.
The :attr:`.comparator_factory` attribute is a hook consulted by
the core expression system when column and SQL expression operations
are performed. When a :class:`.TypeEngine.Comparator` class is
associated with this attribute, it allows custom re-definition of
all existing operators, as well as definition of new operators.
Existing operators include those provided by Python operator overloading
such as :meth:`.operators.ColumnOperators.__add__` and
:meth:`.operators.ColumnOperators.__eq__`,
those provided as standard
attributes of :class:`.operators.ColumnOperators` such as
:meth:`.operators.ColumnOperators.like`
and :meth:`.operators.ColumnOperators.in_`.
Rudimentary usage of this hook is allowed through simple subclassing
of existing types, or alternatively by using :class:`.TypeDecorator`.
See the documentation section :ref:`types_operators` for examples.
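E.g., a minimal sketch that redefines the addition operator for a
custom ``Integer`` subclass (the ``"goofy"`` operator string is
illustrative only)::
    from sqlalchemy import Integer
    class MyInt(Integer):
        class comparator_factory(Integer.Comparator):
            def __add__(self, other):
                # render "a goofy b" instead of "a + b"
                return self.op("goofy")(other)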
"""
sort_key_function = None
"""A sorting function that can be passed as the key to sorted.
The default value of ``None`` indicates that the values stored by
this type are self-sorting.
.. versionadded:: 1.3.8
"""
should_evaluate_none = False
"""If True, the Python constant ``None`` is considered to be handled
explicitly by this type.
The ORM uses this flag to indicate that a positive value of ``None``
is passed to the column in an INSERT statement, rather than omitting
the column from the INSERT statement which has the effect of firing
off column-level defaults. It also allows types which have special
behavior for Python None, such as a JSON type, to indicate that
they'd like to handle the None value explicitly.
To set this flag on an existing type, use the
:meth:`.TypeEngine.evaluates_none` method.
.. seealso::
:meth:`.TypeEngine.evaluates_none`
.. versionadded:: 1.1
"""
def evaluates_none(self):
"""Return a copy of this type which has the :attr:`.should_evaluate_none`
flag set to True.
E.g.::
Table(
'some_table', metadata,
Column(
    'value',  # a column name is required within a Table
    String(50).evaluates_none(),
    nullable=True,
    server_default='no value')
)
The ORM uses this flag to indicate that a positive value of ``None``
is passed to the column in an INSERT statement, rather than omitting
the column from the INSERT statement which has the effect of firing
off column-level defaults. It also allows for types which have
special behavior associated with the Python None value to indicate
that the value doesn't necessarily translate into SQL NULL; a
prime example of this is a JSON type which may wish to persist the
JSON value ``'null'``.
In all cases, the actual NULL SQL value can always be
persisted in any column by using
the :obj:`_expression.null` SQL construct in an INSERT statement
or associated with an ORM-mapped attribute.
.. note::
The "evaluates none" flag does **not** apply to a value
of ``None`` passed to :paramref:`_schema.Column.default` or
:paramref:`_schema.Column.server_default`; in these cases,
``None``
still means "no default".
.. versionadded:: 1.1
.. seealso::
:ref:`session_forcing_null` - in the ORM documentation
:paramref:`.postgresql.JSON.none_as_null` - PostgreSQL JSON
interaction with this flag.
:attr:`.TypeEngine.should_evaluate_none` - class-level flag
"""
typ = self.copy()
typ.should_evaluate_none = True
return typ
def copy(self, **kw):
return self.adapt(self.__class__)
def compare_against_backend(self, dialect, conn_type):
"""Compare this type against the given backend type.
This function is currently not implemented for SQLAlchemy
types, and for all built in types will return ``None``. However,
it can be implemented by a user-defined type
where it can be consumed by schema comparison tools such as
Alembic autogenerate.
A future release of SQLAlchemy will potentially implement this method
for builtin types as well.
The function should return True if this type is equivalent to the
given type; the type is typically reflected from the database
so should be database specific. The dialect in use is also
passed. It can also return False to assert that the type is
not equivalent.
:param dialect: a :class:`.Dialect` that is involved in the comparison.
:param conn_type: the type object reflected from the backend.
.. versionadded:: 1.0.3
"""
return None
def copy_value(self, value):
return value
def literal_processor(self, dialect):
"""Return a conversion function for processing literal values that are
to be rendered directly without using binds.
This function is used when the compiler makes use of the
"literal_binds" flag, typically used in DDL generation as well
as in certain scenarios where backends don't accept bound parameters.
.. versionadded:: 0.9.0
"""
return None
def bind_processor(self, dialect):
"""Return a conversion function for processing bind values.
Returns a callable which will receive a bind parameter value
as the sole positional argument and will return a value to
send to the DB-API.
If processing is not necessary, the method should return ``None``.
:param dialect: Dialect instance in use.
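E.g., a minimal sketch of a user-defined type supplying a bind
processor (``MyEpochType`` is hypothetical; it sends datetimes to
the DBAPI as integer epoch seconds)::
    import time
    from sqlalchemy.types import UserDefinedType
    class MyEpochType(UserDefinedType):
        def get_col_spec(self):
            return "INTEGER"
        def bind_processor(self, dialect):
            def process(value):
                if value is None:
                    return None
                # convert the datetime to epoch seconds for the DBAPI
                return int(time.mktime(value.timetuple()))
            return process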
"""
return None
def result_processor(self, dialect, coltype):
"""Return a conversion function for processing result row values.
Returns a callable which will receive a result row column
value as the sole positional argument and will return a value
to return to the user.
If processing is not necessary, the method should return ``None``.
:param dialect: Dialect instance in use.
:param coltype: DBAPI coltype argument received in cursor.description.
"""
return None
def column_expression(self, colexpr):
"""Given a SELECT column expression, return a wrapping SQL expression.
This is typically a SQL function that wraps a column expression
as rendered in the columns clause of a SELECT statement.
It is used for special data types that require
columns to be wrapped in some special database function in order
to coerce the value before being sent back to the application.
It is the SQL analogue of the :meth:`.TypeEngine.result_processor`
method.
The method is evaluated at statement compile time, as opposed
to statement construction time.
.. seealso::
:ref:`types_sql_value_processing`
"""
return None
@util.memoized_property
def _has_column_expression(self):
"""memoized boolean, check if column_expression is implemented.
Allows the method to be skipped for the vast majority of expression
types that don't use this feature.
"""
return (
self.__class__.column_expression.__code__
is not TypeEngine.column_expression.__code__
)
def bind_expression(self, bindvalue):
""""Given a bind value (i.e. a :class:`.BindParameter` instance),
return a SQL expression in its place.
This is typically a SQL function that wraps the existing bound
parameter within the statement. It is used for special data types
that require literals being wrapped in some special database function
in order to coerce an application-level value into a database-specific
format. It is the SQL analogue of the
:meth:`.TypeEngine.bind_processor` method.
The method is evaluated at statement compile time, as opposed
to statement construction time.
Note that this method, when implemented, should always return
the exact same structure, without any conditional logic, as it
may be used in an executemany() call against an arbitrary number
of bound parameter sets.
.. seealso::
:ref:`types_sql_value_processing`
"""
return None
@util.memoized_property
def _has_bind_expression(self):
"""memoized boolean, check if bind_expression is implemented.
Allows the method to be skipped for the vast majority of expression
types that don't use this feature.
"""
return (
self.__class__.bind_expression.__code__
is not TypeEngine.bind_expression.__code__
)
@staticmethod
def _to_instance(cls_or_self):
return to_instance(cls_or_self)
def compare_values(self, x, y):
"""Compare two values for equality."""
return x == y
def get_dbapi_type(self, dbapi):
"""Return the corresponding type object from the underlying DB-API, if
any.
This can be useful for calling ``setinputsizes()``, for example.
"""
return None
@property
def python_type(self):
"""Return the Python type object expected to be returned
by instances of this type, if known.
Basically, for those types which enforce a return type,
or are known across the board to do such for all common
DBAPIs (like ``int`` for example), this will return that type.
If a return type is not defined, raises
``NotImplementedError``.
Note that any type also accommodates NULL in SQL which
means you can also get back ``None`` from any type
in practice.
"""
raise NotImplementedError()
def with_variant(self, type_, dialect_name):
r"""Produce a new type object that will utilize the given
type when applied to the dialect of the given name.
e.g.::
from sqlalchemy.types import String
from sqlalchemy.dialects import mysql
s = String()
s = s.with_variant(mysql.VARCHAR(collation='foo'), 'mysql')
The construction of :meth:`.TypeEngine.with_variant` is always
from the "fallback" type to that which is dialect specific.
The returned type is an instance of :class:`.Variant`, which
itself provides a :meth:`.Variant.with_variant`
that can be called repeatedly.
:param type\_: a :class:`.TypeEngine` that will be selected
as a variant from the originating type, when a dialect
of the given name is in use.
:param dialect_name: base name of the dialect which uses
this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.)
"""
return Variant(self, {dialect_name: to_instance(type_)})
@util.memoized_property
def _type_affinity(self):
"""Return a rudimental 'affinity' value expressing the general class
of type."""
typ = None
for t in self.__class__.__mro__:
if t in (TypeEngine, UserDefinedType):
return typ
elif issubclass(t, (TypeEngine, UserDefinedType)):
typ = t
else:
return self.__class__
def dialect_impl(self, dialect):
"""Return a dialect-specific implementation for this
:class:`.TypeEngine`.
"""
try:
return dialect._type_memos[self]["impl"]
except KeyError:
return self._dialect_info(dialect)["impl"]
def _unwrapped_dialect_impl(self, dialect):
"""Return the 'unwrapped' dialect impl for this type.
For a type that applies wrapping logic (e.g. TypeDecorator), give
us the real, actual dialect-level type that is used.
This is used by TypeDecorator itself as well at least one case where
dialects need to check that a particular specific dialect-level
type is in use, within the :meth:`.DefaultDialect.set_input_sizes`
method.
"""
return self.dialect_impl(dialect)
def _cached_literal_processor(self, dialect):
"""Return a dialect-specific literal processor for this type."""
try:
return dialect._type_memos[self]["literal"]
except KeyError:
pass
# avoid KeyError context coming into literal_processor() function
# raises
d = self._dialect_info(dialect)
d["literal"] = lp = d["impl"].literal_processor(dialect)
return lp
def _cached_bind_processor(self, dialect):
"""Return a dialect-specific bind processor for this type."""
try:
return dialect._type_memos[self]["bind"]
except KeyError:
pass
# avoid KeyError context coming into bind_processor() function
# raises
d = self._dialect_info(dialect)
d["bind"] = bp = d["impl"].bind_processor(dialect)
return bp
def _cached_result_processor(self, dialect, coltype):
"""Return a dialect-specific result processor for this type."""
try:
return dialect._type_memos[self][coltype]
except KeyError:
pass
# avoid KeyError context coming into result_processor() function
# raises
d = self._dialect_info(dialect)
# key assumption: DBAPI type codes are
# constants. Else this dictionary would
# grow unbounded.
d[coltype] = rp = d["impl"].result_processor(dialect, coltype)
return rp
def _cached_custom_processor(self, dialect, key, fn):
try:
return dialect._type_memos[self][key]
except KeyError:
pass
# avoid KeyError context coming into fn() function
# raises
d = self._dialect_info(dialect)
impl = d["impl"]
d[key] = result = fn(impl)
return result
def _dialect_info(self, dialect):
"""Return a dialect-specific registry which
caches a dialect-specific implementation, bind processing
function, and one or more result processing functions."""
if self in dialect._type_memos:
return dialect._type_memos[self]
else:
impl = self._gen_dialect_impl(dialect)
if impl is self:
impl = self.adapt(type(self))
# this can't be self, else we create a cycle
assert impl is not self
dialect._type_memos[self] = d = {"impl": impl}
return d
def _gen_dialect_impl(self, dialect):
return dialect.type_descriptor(self)
def adapt(self, cls, **kw):
"""Produce an "adapted" form of this type, given an "impl" class
to work with.
This method is used internally to associate generic
types with "implementation" types that are specific to a particular
dialect.
"""
return util.constructor_copy(self, cls, **kw)
def coerce_compared_value(self, op, value):
"""Suggest a type for a 'coerced' Python value in an expression.
Given an operator and value, gives the type a chance
to return a type which the value should be coerced into.
The default behavior here is conservative; if the right-hand
side is already coerced into a SQL type based on its
Python type, it is usually left alone.
End-user functionality extension here should generally be via
:class:`.TypeDecorator`, which provides more liberal behavior in that
it defaults to coercing the other side of the expression into this
type, thus applying special Python conversions above and beyond those
needed by the DBAPI to both sides. It also provides the public method
:meth:`.TypeDecorator.coerce_compared_value` which is intended for
end-user customization of this behavior.
"""
_coerced_type = _resolve_value_to_type(value)
if (
_coerced_type is NULLTYPE
or _coerced_type._type_affinity is self._type_affinity
):
return self
else:
return _coerced_type
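    # Hedged illustration (an inference from the logic above):
    # Integer().coerce_compared_value(operators.add, 5) returns the
    # Integer instance itself (matching affinity), whereas passing "5"
    # returns the String type produced by _resolve_value_to_type.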
def _compare_type_affinity(self, other):
return self._type_affinity is other._type_affinity
def compile(self, dialect=None):
"""Produce a string-compiled form of this :class:`.TypeEngine`.
When called with no arguments, uses a "default" dialect
to produce a string result.
:param dialect: a :class:`.Dialect` instance.
"""
# arg, return value is inconsistent with
# ClauseElement.compile()....this is a mistake.
if not dialect:
dialect = self._default_dialect()
return dialect.type_compiler.process(self)
@util.dependencies("sqlalchemy.engine.default")
def _default_dialect(self, default):
if self.__class__.__module__.startswith("sqlalchemy.dialects"):
tokens = self.__class__.__module__.split(".")[0:3]
mod = ".".join(tokens)
return getattr(__import__(mod).dialects, tokens[-1]).dialect()
else:
return default.DefaultDialect()
def __str__(self):
if util.py2k:
return unicode(self.compile()).encode( # noqa
"ascii", "backslashreplace"
) # noqa
else:
return str(self.compile())
def __repr__(self):
return util.generic_repr(self)
class VisitableCheckKWArg(util.EnsureKWArgType, VisitableType):
pass
class UserDefinedType(util.with_metaclass(VisitableCheckKWArg, TypeEngine)):
"""Base for user defined types.
This should be the base of new types. Note that
for most cases, :class:`.TypeDecorator` is probably
more appropriate::
import sqlalchemy.types as types
class MyType(types.UserDefinedType):
def __init__(self, precision = 8):
self.precision = precision
def get_col_spec(self, **kw):
return "MYTYPE(%s)" % self.precision
def bind_processor(self, dialect):
def process(value):
return value
return process
def result_processor(self, dialect, coltype):
def process(value):
return value
return process
Once the type is made, it's immediately usable::
table = Table('foo', meta,
Column('id', Integer, primary_key=True),
Column('data', MyType(16))
)
The ``get_col_spec()`` method will in most cases receive a keyword
argument ``type_expression`` which refers to the owning expression
of the type as being compiled, such as a :class:`_schema.Column` or
:func:`.cast` construct. This keyword is only sent if the method
accepts keyword arguments (e.g. ``**kw``) in its argument signature;
introspection is used to check for this in order to support legacy
forms of this function.
.. versionadded:: 1.0.0 the owning expression is passed to
the ``get_col_spec()`` method via the keyword argument
``type_expression``, if it receives ``**kw`` in its signature.
"""
__visit_name__ = "user_defined"
ensure_kwarg = "get_col_spec"
class Comparator(TypeEngine.Comparator):
__slots__ = ()
def _adapt_expression(self, op, other_comparator):
if hasattr(self.type, "adapt_operator"):
util.warn_deprecated(
"UserDefinedType.adapt_operator is deprecated. Create "
"a UserDefinedType.Comparator subclass instead which "
"generates the desired expression constructs, given a "
"particular operator."
)
return self.type.adapt_operator(op), self.type
else:
return super(
UserDefinedType.Comparator, self
)._adapt_expression(op, other_comparator)
comparator_factory = Comparator
def coerce_compared_value(self, op, value):
"""Suggest a type for a 'coerced' Python value in an expression.
Default behavior for :class:`.UserDefinedType` is the
same as that of :class:`.TypeDecorator`; by default it returns
``self``, assuming the compared value should be coerced into
the same type as this one. See
:meth:`.TypeDecorator.coerce_compared_value` for more detail.
"""
return self
class Emulated(object):
"""Mixin for base types that emulate the behavior of a DB-native type.
An :class:`.Emulated` type will use an available database type
in conjunction with Python-side routines and/or database constraints
in order to approximate the behavior of a database type that is provided
natively by some backends. When a native-providing backend is in
use, the native version of the type is used. This native version
should include the :class:`.NativeForEmulated` mixin to allow it to be
distinguished from :class:`.Emulated`.
Current examples of :class:`.Emulated` are: :class:`.Interval`,
:class:`.Enum`, :class:`.Boolean`.
.. versionadded:: 1.2.0b3
"""
def adapt_to_emulated(self, impltype, **kw):
"""Given an impl class, adapt this type to the impl assuming "emulated".
The impl should also be an "emulated" version of this type,
most likely the same class as this type itself.
e.g.: sqltypes.Enum adapts to the Enum class.
"""
return super(Emulated, self).adapt(impltype, **kw)
def adapt(self, impltype, **kw):
if hasattr(impltype, "adapt_emulated_to_native"):
if self.native:
# native support requested, dialect gave us a native
# implementor, pass control over to it
return impltype.adapt_emulated_to_native(self, **kw)
else:
# impltype adapts to native, and we are not native,
# so reject the impltype in favor of "us"
impltype = self.__class__
if issubclass(impltype, self.__class__):
return self.adapt_to_emulated(impltype, **kw)
else:
return super(Emulated, self).adapt(impltype, **kw)
class NativeForEmulated(object):
"""Indicates DB-native types supported by an :class:`.Emulated` type.
.. versionadded:: 1.2.0b3
"""
@classmethod
def adapt_emulated_to_native(cls, impl, **kw):
"""Given an impl, adapt this type's class to the impl assuming "native".
The impl will be an :class:`.Emulated` class but not a
:class:`.NativeForEmulated`.
e.g.: postgresql.ENUM produces a type given an Enum instance.
"""
return cls(**kw)
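# Hedged sketch of the Emulated/NativeForEmulated handshake (comments
# only; an interpretation of the two mixins above): adapting a generic
# Enum for a backend whose ENUM class defines adapt_emulated_to_native()
# hands control to that native class when Enum.native is set; otherwise
# the generic type re-adapts to itself via adapt_to_emulated().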
class TypeDecorator(SchemaEventTarget, TypeEngine):
"""Allows the creation of types which add additional functionality
to an existing type.
This method is preferred to direct subclassing of SQLAlchemy's
built-in types as it ensures that all required functionality of
the underlying type is kept in place.
Typical usage::
import sqlalchemy.types as types
class MyType(types.TypeDecorator):
'''Prefixes Unicode values with "PREFIX:" on the way in and
strips it off on the way out.
'''
impl = types.Unicode
def process_bind_param(self, value, dialect):
return "PREFIX:" + value
def process_result_value(self, value, dialect):
return value[7:]
def copy(self, **kw):
return MyType(self.impl.length)
The class-level "impl" attribute is required, and can reference any
TypeEngine class. Alternatively, the load_dialect_impl() method
can be used to provide different type classes based on the dialect
given; in this case, the "impl" variable can reference
``TypeEngine`` as a placeholder.
Types that receive a Python type that isn't similar to the ultimate type
used may want to define the :meth:`TypeDecorator.coerce_compared_value`
method. This is used to give the expression system a hint when coercing
Python objects into bind parameters within expressions. Consider this
expression::
mytable.c.somecol + datetime.date(2009, 5, 15)
Above, if "somecol" is an ``Integer`` variant, it makes sense that
we're doing date arithmetic, where above is usually interpreted
by databases as adding a number of days to the given date.
The expression system does the right thing by not attempting to
coerce the "date()" value into an integer-oriented bind parameter.
However, in the case of ``TypeDecorator``, we are usually changing an
incoming Python type to something new - ``TypeDecorator`` by default will
"coerce" the non-typed side to be the same type as itself. Such as below,
we define an "epoch" type that stores a date value as an integer::
class MyEpochType(types.TypeDecorator):
impl = types.Integer
epoch = datetime.date(1970, 1, 1)
def process_bind_param(self, value, dialect):
return (value - self.epoch).days
def process_result_value(self, value, dialect):
return self.epoch + timedelta(days=value)
Our expression of ``somecol + date`` with the above type will coerce the
"date" on the right side to also be treated as ``MyEpochType``.
This behavior can be overridden via the
:meth:`~TypeDecorator.coerce_compared_value` method, which returns a type
that should be used for the value of the expression. Below we set it such
that an integer value will be treated as an ``Integer``, and any other
value is assumed to be a date and will be treated as a ``MyEpochType``::
def coerce_compared_value(self, op, value):
if isinstance(value, int):
return Integer()
else:
return self
.. warning::
Note that the **behavior of coerce_compared_value is not inherited
by default from that of the base type**.
If the :class:`.TypeDecorator` is augmenting a
type that requires special logic for certain types of operators,
this method **must** be overridden. A key example is when decorating
the :class:`_postgresql.JSON` and :class:`_postgresql.JSONB` types;
the default rules of :meth:`.TypeEngine.coerce_compared_value` should
be used in order to deal with operators like index operations::
class MyJsonType(TypeDecorator):
impl = postgresql.JSON
def coerce_compared_value(self, op, value):
return self.impl.coerce_compared_value(op, value)
Without the above step, index operations such as ``mycol['foo']``
will cause the index value ``'foo'`` to be JSON encoded.
"""
__visit_name__ = "type_decorator"
def __init__(self, *args, **kwargs):
"""Construct a :class:`.TypeDecorator`.
Arguments sent here are passed to the constructor
of the class assigned to the ``impl`` class level attribute,
assuming the ``impl`` is a callable, and the resulting
object is assigned to the ``self.impl`` instance attribute
(thus overriding the class attribute of the same name).
If the class level ``impl`` is not a callable (the unusual case),
it will be assigned to the same instance attribute 'as-is',
ignoring those arguments passed to the constructor.
Subclasses can override this to customize the generation
of ``self.impl`` entirely.
"""
if not hasattr(self.__class__, "impl"):
raise AssertionError(
"TypeDecorator implementations "
"require a class-level variable "
"'impl' which refers to the class of "
"type being decorated"
)
self.impl = to_instance(self.__class__.impl, *args, **kwargs)
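    # Hedged illustration (hypothetical subclass used only in this
    # comment): given ``class TenChar(TypeDecorator): impl = String``,
    # constructing ``TenChar(10)`` forwards ``10`` to ``String``, so
    # ``TenChar(10).impl.length == 10``.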
coerce_to_is_types = (util.NoneType,)
"""Specify those Python types which should be coerced at the expression
level to "IS <constant>" when compared using ``==`` (and same for
``IS NOT`` in conjunction with ``!=``.
For most SQLAlchemy types, this includes ``NoneType``, as well as
``bool``.
:class:`.TypeDecorator` modifies this list to only include ``NoneType``,
as :class:`.TypeDecorator` implementations that deal with boolean types are common.
Custom :class:`.TypeDecorator` classes can override this attribute to
return an empty tuple, in which case no values will be coerced to
constants.
"""
class Comparator(TypeEngine.Comparator):
"""A :class:`.TypeEngine.Comparator` that is specific to
:class:`.TypeDecorator`.
User-defined :class:`.TypeDecorator` classes should not typically
need to modify this.
"""
__slots__ = ()
def operate(self, op, *other, **kwargs):
kwargs["_python_is_types"] = self.expr.type.coerce_to_is_types
return super(TypeDecorator.Comparator, self).operate(
op, *other, **kwargs
)
def reverse_operate(self, op, other, **kwargs):
kwargs["_python_is_types"] = self.expr.type.coerce_to_is_types
return super(TypeDecorator.Comparator, self).reverse_operate(
op, other, **kwargs
)
@property
def comparator_factory(self):
if TypeDecorator.Comparator in self.impl.comparator_factory.__mro__:
return self.impl.comparator_factory
else:
return type(
"TDComparator",
(TypeDecorator.Comparator, self.impl.comparator_factory),
{},
)
def _gen_dialect_impl(self, dialect):
"""
#todo
"""
adapted = dialect.type_descriptor(self)
if adapted is not self:
return adapted
# otherwise adapt the impl type, link
# to a copy of this TypeDecorator and return
# that.
typedesc = self._unwrapped_dialect_impl(dialect)
tt = self.copy()
if not isinstance(tt, self.__class__):
raise AssertionError(
"Type object %s does not properly "
"implement the copy() method, it must "
"return an object of type %s" % (self, self.__class__)
)
tt.impl = typedesc
return tt
@property
def _type_affinity(self):
"""
#todo
"""
return self.impl._type_affinity
def _set_parent(self, column):
"""Support SchemaEventTarget"""
super(TypeDecorator, self)._set_parent(column)
if isinstance(self.impl, SchemaEventTarget):
self.impl._set_parent(column)
def _set_parent_with_dispatch(self, parent):
"""Support SchemaEventTarget"""
super(TypeDecorator, self)._set_parent_with_dispatch(parent)
if isinstance(self.impl, SchemaEventTarget):
self.impl._set_parent_with_dispatch(parent)
def type_engine(self, dialect):
"""Return a dialect-specific :class:`.TypeEngine` instance
for this :class:`.TypeDecorator`.
In most cases this returns a dialect-adapted form of
the :class:`.TypeEngine` type represented by ``self.impl``.
Makes usage of :meth:`dialect_impl` but also traverses
into wrapped :class:`.TypeDecorator` instances.
Behavior can be customized here by overriding
:meth:`load_dialect_impl`.
"""
adapted = dialect.type_descriptor(self)
if not isinstance(adapted, type(self)):
return adapted
elif isinstance(self.impl, TypeDecorator):
return self.impl.type_engine(dialect)
else:
return self.load_dialect_impl(dialect)
def load_dialect_impl(self, dialect):
"""Return a :class:`.TypeEngine` object corresponding to a dialect.
This is an end-user override hook that can be used to provide
differing types depending on the given dialect. It is used
by the :class:`.TypeDecorator` implementation of :meth:`type_engine`
to help determine what type should ultimately be returned
for a given :class:`.TypeDecorator`.
By default returns ``self.impl``.
"""
return self.impl
def _unwrapped_dialect_impl(self, dialect):
"""Return the 'unwrapped' dialect impl for this type.
For a type that applies wrapping logic (e.g. TypeDecorator), give
us the real, actual dialect-level type that is used.
This is used by TypeDecorator itself as well as in at least one case where
dialects need to check that a particular specific dialect-level
type is in use, within the :meth:`.DefaultDialect.set_input_sizes`
method.
"""
return self.load_dialect_impl(dialect).dialect_impl(dialect)
def __getattr__(self, key):
"""Proxy all other undefined accessors to the underlying
implementation."""
return getattr(self.impl, key)
def process_literal_param(self, value, dialect):
"""Receive a literal parameter value to be rendered inline within
a statement.
This method is used when the compiler renders a
literal value without using binds, typically within DDL
such as in the "server default" of a column or an expression
within a CHECK constraint.
The returned string will be rendered into the output string.
.. versionadded:: 0.9.0
"""
raise NotImplementedError()
def process_bind_param(self, value, dialect):
"""Receive a bound parameter value to be converted.
Subclasses override this method to return the
value that should be passed along to the underlying
:class:`.TypeEngine` object, and from there to the
DBAPI ``execute()`` method.
The operation could be anything desired to perform custom
behavior, such as transforming or serializing data.
This could also be used as a hook for validating logic.
This operation should be designed with the reverse operation
in mind, which would be the process_result_value method of
this class.
:param value: Data to operate upon, of any type expected by
this method in the subclass. Can be ``None``.
:param dialect: the :class:`.Dialect` in use.
"""
raise NotImplementedError()
def process_result_value(self, value, dialect):
"""Receive a result-row column value to be converted.
Subclasses should implement this method to operate on data
fetched from the database.
Subclasses override this method to return the
value that should be passed back to the application,
given a value that is already processed by
the underlying :class:`.TypeEngine` object, originally
from the DBAPI cursor method ``fetchone()`` or similar.
The operation could be anything desired to perform custom
behavior, such as transforming or serializing data.
This could also be used as a hook for validating logic.
:param value: Data to operate upon, of any type expected by
this method in the subclass. Can be ``None``.
:param dialect: the :class:`.Dialect` in use.
This operation should be designed to be reversible by
the "process_bind_param" method of this class.
"""
raise NotImplementedError()
@util.memoized_property
def _has_bind_processor(self):
"""memoized boolean, check if process_bind_param is implemented.
Allows the base process_bind_param to raise
NotImplementedError without needing to test an expensive
exception throw.
"""
return (
self.__class__.process_bind_param.__code__
is not TypeDecorator.process_bind_param.__code__
)
@util.memoized_property
def _has_literal_processor(self):
"""memoized boolean, check if process_literal_param is implemented.
"""
return (
self.__class__.process_literal_param.__code__
is not TypeDecorator.process_literal_param.__code__
)
def literal_processor(self, dialect):
"""Provide a literal processing function for the given
:class:`.Dialect`.
Subclasses here will typically override
:meth:`.TypeDecorator.process_literal_param` instead of this method
directly.
By default, this method makes use of
:meth:`.TypeDecorator.process_bind_param` if that method is
implemented, where :meth:`.TypeDecorator.process_literal_param` is
not. The rationale here is that :class:`.TypeDecorator` typically
deals with Python conversions of data that are above the layer of
database presentation. With the value converted by
:meth:`.TypeDecorator.process_bind_param`, the underlying type will
then handle whether it needs to be presented to the DBAPI as a bound
parameter or to the database as an inline SQL value.
.. versionadded:: 0.9.0
"""
if self._has_literal_processor:
process_param = self.process_literal_param
elif self._has_bind_processor:
# the bind processor should normally be OK
# for TypeDecorator since it isn't doing DB-level
# handling, the handling here won't be different for bound vs.
# literals.
process_param = self.process_bind_param
else:
process_param = None
if process_param:
impl_processor = self.impl.literal_processor(dialect)
if impl_processor:
def process(value):
return impl_processor(process_param(value, dialect))
else:
def process(value):
return process_param(value, dialect)
return process
else:
return self.impl.literal_processor(dialect)
def bind_processor(self, dialect):
"""Provide a bound value processing function for the
given :class:`.Dialect`.
This is the method that fulfills the :class:`.TypeEngine`
contract for bound value conversion. :class:`.TypeDecorator`
will wrap a user-defined implementation of
:meth:`process_bind_param` here.
User-defined code can override this method directly,
though it's likely best to use :meth:`process_bind_param` so that
the processing provided by ``self.impl`` is maintained.
:param dialect: Dialect instance in use.
This method is the reverse counterpart to the
:meth:`result_processor` method of this class.
"""
if self._has_bind_processor:
process_param = self.process_bind_param
impl_processor = self.impl.bind_processor(dialect)
if impl_processor:
def process(value):
return impl_processor(process_param(value, dialect))
else:
def process(value):
return process_param(value, dialect)
return process
else:
return self.impl.bind_processor(dialect)
@util.memoized_property
def _has_result_processor(self):
"""memoized boolean, check if process_result_value is implemented.
Allows the base process_result_value to raise
NotImplementedError without needing to test an expensive
exception throw.
"""
return (
self.__class__.process_result_value.__code__
is not TypeDecorator.process_result_value.__code__
)
def result_processor(self, dialect, coltype):
"""Provide a result value processing function for the given
:class:`.Dialect`.
This is the method that fulfills the :class:`.TypeEngine`
contract for result value conversion. :class:`.TypeDecorator`
will wrap a user-defined implementation of
:meth:`process_result_value` here.
User-defined code can override this method directly,
though it's likely best to use :meth:`process_result_value` so that
the processing provided by ``self.impl`` is maintained.
:param dialect: Dialect instance in use.
:param coltype: A SQLAlchemy data type
This method is the reverse counterpart to the
:meth:`bind_processor` method of this class.
"""
if self._has_result_processor:
process_value = self.process_result_value
impl_processor = self.impl.result_processor(dialect, coltype)
if impl_processor:
def process(value):
return process_value(impl_processor(value), dialect)
else:
def process(value):
return process_value(value, dialect)
return process
else:
return self.impl.result_processor(dialect, coltype)
@util.memoized_property
def _has_bind_expression(self):
return (
self.__class__.bind_expression.__code__
is not TypeDecorator.bind_expression.__code__
) or self.impl._has_bind_expression
def bind_expression(self, bindparam):
return self.impl.bind_expression(bindparam)
@util.memoized_property
def _has_column_expression(self):
"""memoized boolean, check if column_expression is implemented.
Allows the method to be skipped for the vast majority of expression
types that don't use this feature.
"""
return (
self.__class__.column_expression.__code__
is not TypeDecorator.column_expression.__code__
) or self.impl._has_column_expression
def column_expression(self, column):
return self.impl.column_expression(column)
def coerce_compared_value(self, op, value):
"""Suggest a type for a 'coerced' Python value in an expression.
By default, returns self. This method is called by
the expression system when an object using this type is
on the left or right side of an expression against a plain Python
object which does not yet have a SQLAlchemy type assigned::
expr = table.c.somecolumn + 35
Where above, if ``somecolumn`` uses this type, this method will
be called with the value ``operator.add``
and ``35``. The return value is whatever SQLAlchemy type should
be used for ``35`` for this particular operation.
"""
return self
def copy(self, **kw):
"""Produce a copy of this :class:`.TypeDecorator` instance.
This is a shallow copy and is provided to fulfill part of
the :class:`.TypeEngine` contract. It usually does not
need to be overridden unless the user-defined :class:`.TypeDecorator`
has local state that should be deep-copied.
"""
instance = self.__class__.__new__(self.__class__)
instance.__dict__.update(self.__dict__)
return instance
def get_dbapi_type(self, dbapi):
"""Return the DBAPI type object represented by this
:class:`.TypeDecorator`.
By default this calls upon :meth:`.TypeEngine.get_dbapi_type` of the
underlying "impl".
"""
return self.impl.get_dbapi_type(dbapi)
def compare_values(self, x, y):
"""Given two values, compare them for equality.
By default this calls upon :meth:`.TypeEngine.compare_values`
of the underlying "impl", which in turn usually
uses the Python equals operator ``==``.
This function is used by the ORM to compare
an original-loaded value with an intercepted
"changed" value, to determine if a net change
has occurred.
"""
return self.impl.compare_values(x, y)
@property
def sort_key_function(self):
return self.impl.sort_key_function
def __repr__(self):
return util.generic_repr(self, to_inspect=self.impl)
class Variant(TypeDecorator):
"""A wrapping type that selects among a variety of
implementations based on dialect in use.
The :class:`.Variant` type is typically constructed
using the :meth:`.TypeEngine.with_variant` method.
.. seealso:: :meth:`.TypeEngine.with_variant` for an example of use.
"""
def __init__(self, base, mapping):
"""Construct a new :class:`.Variant`.
:param base: the base 'fallback' type
:param mapping: dictionary of string dialect names to
:class:`.TypeEngine` instances.
"""
self.impl = base
self.mapping = mapping
def coerce_compared_value(self, operator, value):
result = self.impl.coerce_compared_value(operator, value)
if result is self.impl:
return self
else:
return result
def load_dialect_impl(self, dialect):
if dialect.name in self.mapping:
return self.mapping[dialect.name]
else:
return self.impl
def _set_parent(self, column):
"""Support SchemaEventTarget"""
if isinstance(self.impl, SchemaEventTarget):
self.impl._set_parent(column)
for impl in self.mapping.values():
if isinstance(impl, SchemaEventTarget):
impl._set_parent(column)
def _set_parent_with_dispatch(self, parent):
"""Support SchemaEventTarget"""
if isinstance(self.impl, SchemaEventTarget):
self.impl._set_parent_with_dispatch(parent)
for impl in self.mapping.values():
if isinstance(impl, SchemaEventTarget):
impl._set_parent_with_dispatch(parent)
def with_variant(self, type_, dialect_name):
r"""Return a new :class:`.Variant` which adds the given
type + dialect name to the mapping, in addition to the
mapping present in this :class:`.Variant`.
:param type\_: a :class:`.TypeEngine` that will be selected
as a variant from the originating type, when a dialect
of the given name is in use.
:param dialect_name: base name of the dialect which uses
this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.)
"""
if dialect_name in self.mapping:
raise exc.ArgumentError(
"Dialect '%s' is already present in "
"the mapping for this Variant" % dialect_name
)
mapping = self.mapping.copy()
mapping[dialect_name] = type_
return Variant(self.impl, mapping)
@property
def comparator_factory(self):
"""express comparison behavior in terms of the base type"""
return self.impl.comparator_factory
def _reconstitute_comparator(expression):
return expression.comparator
def to_instance(typeobj, *arg, **kw):
if typeobj is None:
return NULLTYPE
if util.callable(typeobj):
return typeobj(*arg, **kw)
else:
return typeobj
def adapt_type(typeobj, colspecs):
if isinstance(typeobj, type):
typeobj = typeobj()
for t in typeobj.__class__.__mro__[0:-1]:
try:
impltype = colspecs[t]
break
except KeyError:
pass
else:
# couldn't adapt - so just return the type itself
# (it may be a user-defined type)
return typeobj
# if we adapted the given generic type to a database-specific type,
# but it turns out the originally given "generic" type
# is actually a subclass of our resulting type, then we were already
# given a more specific type than that required; so use that.
if issubclass(typeobj.__class__, impltype):
return typeobj
return typeobj.adapt(impltype)
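# ---------------------------------------------------------------------
# Hedged usage sketch (not part of the library; guarded so it only runs
# when this module is executed directly). It demonstrates TypeDecorator
# and TypeEngine.with_variant(); ``PrefixedString`` is a hypothetical
# name used only for this example.
if __name__ == "__main__":
    from sqlalchemy import types
    from sqlalchemy.dialects import mysql

    class PrefixedString(types.TypeDecorator):
        """Prepend 'PREFIX:' on the way in; strip it on the way out."""

        impl = types.String

        def process_bind_param(self, value, dialect):
            return "PREFIX:" + value

        def process_result_value(self, value, dialect):
            return value[len("PREFIX:"):]

    # a Variant: generic String everywhere, MySQL-specific VARCHAR when
    # the mysql dialect is in use
    s = types.String(50).with_variant(mysql.VARCHAR(50), "mysql")
    print(type(s).__name__)  # Variant
    print(s.compile())       # rendered with the default dialect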
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/sql/crud.py
|
# sql/crud.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Functions used by compiler.py to determine the parameters rendered
within INSERT and UPDATE statements.
"""
import operator
from . import dml
from . import elements
from .. import exc
from .. import util
REQUIRED = util.symbol(
"REQUIRED",
"""
Placeholder for the value within a :class:`.BindParameter`
which is required to be present when the statement is passed
to :meth:`_engine.Connection.execute`.
This symbol is typically used when a :func:`_expression.insert`
or :func:`_expression.update` statement is compiled without parameter
values present.
""",
)
ISINSERT = util.symbol("ISINSERT")
ISUPDATE = util.symbol("ISUPDATE")
ISDELETE = util.symbol("ISDELETE")
def _setup_crud_params(compiler, stmt, local_stmt_type, **kw):
restore_isinsert = compiler.isinsert
restore_isupdate = compiler.isupdate
restore_isdelete = compiler.isdelete
should_restore = (
(restore_isinsert or restore_isupdate or restore_isdelete)
or len(compiler.stack) > 1
or "visiting_cte" in kw
)
if local_stmt_type is ISINSERT:
compiler.isupdate = False
compiler.isinsert = True
elif local_stmt_type is ISUPDATE:
compiler.isupdate = True
compiler.isinsert = False
elif local_stmt_type is ISDELETE:
if not should_restore:
compiler.isdelete = True
else:
assert False, "ISINSERT, ISUPDATE, or ISDELETE expected"
try:
if local_stmt_type in (ISINSERT, ISUPDATE):
return _get_crud_params(compiler, stmt, **kw)
finally:
if should_restore:
compiler.isinsert = restore_isinsert
compiler.isupdate = restore_isupdate
compiler.isdelete = restore_isdelete
def _get_crud_params(compiler, stmt, **kw):
"""create a set of tuples representing column/string pairs for use
in an INSERT or UPDATE statement.
Also generates the Compiled object's postfetch, prefetch, and
returning column collections, used for default handling and ultimately
populating the ResultProxy's prefetch_cols() and postfetch_cols()
collections.
"""
compiler.postfetch = []
compiler.insert_prefetch = []
compiler.update_prefetch = []
compiler.returning = []
# no parameters in the statement, no parameters in the
# compiled params - return binds for all columns
if compiler.column_keys is None and stmt.parameters is None:
return [
(c, _create_bind_param(compiler, c, None, required=True))
for c in stmt.table.columns
]
if stmt._has_multi_parameters:
stmt_parameters = stmt.parameters[0]
else:
stmt_parameters = stmt.parameters
# getters - these are normally just column.key,
# but in the case of mysql multi-table update, the rules for
# .key must conditionally take tablename into account
(
_column_as_key,
_getattr_col_key,
_col_bind_name,
) = _key_getters_for_crud_column(compiler, stmt)
# if we have statement parameters - set defaults in the
# compiled params
if compiler.column_keys is None:
parameters = {}
else:
parameters = dict(
(_column_as_key(key), REQUIRED)
for key in compiler.column_keys
if not stmt_parameters or key not in stmt_parameters
)
# create a list of column assignment clauses as tuples
values = []
if stmt_parameters is not None:
_get_stmt_parameters_params(
compiler, parameters, stmt_parameters, _column_as_key, values, kw
)
check_columns = {}
# special logic that only occurs for multi-table UPDATE
# statements
if compiler.isupdate and stmt._extra_froms and stmt_parameters:
_get_multitable_params(
compiler,
stmt,
stmt_parameters,
check_columns,
_col_bind_name,
_getattr_col_key,
values,
kw,
)
if compiler.isinsert and stmt.select_names:
_scan_insert_from_select_cols(
compiler,
stmt,
parameters,
_getattr_col_key,
_column_as_key,
_col_bind_name,
check_columns,
values,
kw,
)
else:
_scan_cols(
compiler,
stmt,
parameters,
_getattr_col_key,
_column_as_key,
_col_bind_name,
check_columns,
values,
kw,
)
if parameters and stmt_parameters:
check = (
set(parameters)
.intersection(_column_as_key(k) for k in stmt_parameters)
.difference(check_columns)
)
if check:
raise exc.CompileError(
"Unconsumed column names: %s"
% (", ".join("%s" % c for c in check))
)
if stmt._has_multi_parameters:
values = _extend_values_for_multiparams(compiler, stmt, values, kw)
return values
def _create_bind_param(
compiler, col, value, process=True, required=False, name=None, **kw
):
if name is None:
name = col.key
bindparam = elements.BindParameter(
name, value, type_=col.type, required=required
)
bindparam._is_crud = True
if process:
bindparam = bindparam._compiler_dispatch(compiler, **kw)
return bindparam
def _key_getters_for_crud_column(compiler, stmt):
if compiler.isupdate and stmt._extra_froms:
# when extra tables are present, refer to the columns
# in those extra tables as table-qualified, including in
# dictionaries and when rendering bind param names.
# the "main" table of the statement remains unqualified,
# allowing the most compatibility with a non-multi-table
# statement.
_et = set(stmt._extra_froms)
def _column_as_key(key):
str_key = elements._column_as_key(key)
if hasattr(key, "table") and key.table in _et:
return (key.table.name, str_key)
else:
return str_key
def _getattr_col_key(col):
if col.table in _et:
return (col.table.name, col.key)
else:
return col.key
def _col_bind_name(col):
if col.table in _et:
return "%s_%s" % (col.table.name, col.key)
else:
return col.key
else:
_column_as_key = elements._column_as_key
_getattr_col_key = _col_bind_name = operator.attrgetter("key")
return _column_as_key, _getattr_col_key, _col_bind_name
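# Hedged illustration (an inference from the branch above): in a
# multi-table UPDATE such as table_a.update().values({table_b.c.x: 5}),
# the extra-table column is keyed as ("table_b", "x") and its bind
# parameter is named "table_b_x", while columns of the statement's main
# table keep their plain .key.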
def _scan_insert_from_select_cols(
compiler,
stmt,
parameters,
_getattr_col_key,
_column_as_key,
_col_bind_name,
check_columns,
values,
kw,
):
(
need_pks,
implicit_returning,
implicit_return_defaults,
postfetch_lastrowid,
) = _get_returning_modifiers(compiler, stmt)
cols = [stmt.table.c[_column_as_key(name)] for name in stmt.select_names]
compiler._insert_from_select = stmt.select
add_select_cols = []
if stmt.include_insert_from_select_defaults:
col_set = set(cols)
for col in stmt.table.columns:
if col not in col_set and col.default:
cols.append(col)
for c in cols:
col_key = _getattr_col_key(c)
if col_key in parameters and col_key not in check_columns:
parameters.pop(col_key)
values.append((c, None))
else:
_append_param_insert_select_hasdefault(
compiler, stmt, c, add_select_cols, kw
)
if add_select_cols:
values.extend(add_select_cols)
compiler._insert_from_select = compiler._insert_from_select._generate()
compiler._insert_from_select._raw_columns = tuple(
compiler._insert_from_select._raw_columns
) + tuple(expr for col, expr in add_select_cols)
def _scan_cols(
compiler,
stmt,
parameters,
_getattr_col_key,
_column_as_key,
_col_bind_name,
check_columns,
values,
kw,
):
(
need_pks,
implicit_returning,
implicit_return_defaults,
postfetch_lastrowid,
) = _get_returning_modifiers(compiler, stmt)
if stmt._parameter_ordering:
parameter_ordering = [
_column_as_key(key) for key in stmt._parameter_ordering
]
ordered_keys = set(parameter_ordering)
cols = [stmt.table.c[key] for key in parameter_ordering] + [
c for c in stmt.table.c if c.key not in ordered_keys
]
else:
cols = stmt.table.columns
for c in cols:
col_key = _getattr_col_key(c)
if col_key in parameters and col_key not in check_columns:
_append_param_parameter(
compiler,
stmt,
c,
col_key,
parameters,
_col_bind_name,
implicit_returning,
implicit_return_defaults,
values,
kw,
)
elif compiler.isinsert:
if (
c.primary_key
and need_pks
and (
implicit_returning
or not postfetch_lastrowid
or c is not stmt.table._autoincrement_column
)
):
if implicit_returning:
_append_param_insert_pk_returning(
compiler, stmt, c, values, kw
)
else:
_append_param_insert_pk(compiler, stmt, c, values, kw)
elif c.default is not None:
_append_param_insert_hasdefault(
compiler, stmt, c, implicit_return_defaults, values, kw
)
elif c.server_default is not None:
if implicit_return_defaults and c in implicit_return_defaults:
compiler.returning.append(c)
elif not c.primary_key:
compiler.postfetch.append(c)
elif implicit_return_defaults and c in implicit_return_defaults:
compiler.returning.append(c)
elif (
c.primary_key
and c is not stmt.table._autoincrement_column
and not c.nullable
):
_warn_pk_with_no_anticipated_value(c)
elif compiler.isupdate:
_append_param_update(
compiler, stmt, c, implicit_return_defaults, values, kw
)
def _append_param_parameter(
compiler,
stmt,
c,
col_key,
parameters,
_col_bind_name,
implicit_returning,
implicit_return_defaults,
values,
kw,
):
value = parameters.pop(col_key)
if elements._is_literal(value):
value = _create_bind_param(
compiler,
c,
value,
required=value is REQUIRED,
name=_col_bind_name(c)
if not stmt._has_multi_parameters
else "%s_m0" % _col_bind_name(c),
**kw
)
else:
if isinstance(value, elements.BindParameter) and value.type._isnull:
value = value._clone()
value.type = c.type
if c.primary_key and implicit_returning:
compiler.returning.append(c)
value = compiler.process(value.self_group(), **kw)
elif implicit_return_defaults and c in implicit_return_defaults:
compiler.returning.append(c)
value = compiler.process(value.self_group(), **kw)
else:
# postfetch specifically means, "we can SELECT the row we just
# inserted by primary key to get back the server generated
# defaults". so by definition this can't be used to get the primary
# key value back, because we need to have it ahead of time.
if not c.primary_key:
compiler.postfetch.append(c)
value = compiler.process(value.self_group(), **kw)
values.append((c, value))
def _append_param_insert_pk_returning(compiler, stmt, c, values, kw):
"""Create a primary key expression in the INSERT statement and
possibly a RETURNING clause for it.
If the column has a Python-side default, we will create a bound
parameter for it and "pre-execute" the Python function. If
the column has a SQL expression default, or is a sequence,
we will add it directly into the INSERT statement and add a
RETURNING element to get the new value. If the column has a
server side default or is marked as the "autoincrement" column,
we will add a RETURNING element to get at the value.
If all the above tests fail, that indicates a primary key column with no
noted default generation capabilities that has no parameter passed;
a warning is emitted (see _warn_pk_with_no_anticipated_value).
"""
if c.default is not None:
if c.default.is_sequence:
if compiler.dialect.supports_sequences and (
not c.default.optional
or not compiler.dialect.sequences_optional
):
proc = compiler.process(c.default, **kw)
values.append((c, proc))
compiler.returning.append(c)
elif c.default.is_clause_element:
values.append(
(c, compiler.process(c.default.arg.self_group(), **kw))
)
compiler.returning.append(c)
else:
values.append((c, _create_insert_prefetch_bind_param(compiler, c)))
elif c is stmt.table._autoincrement_column or c.server_default is not None:
compiler.returning.append(c)
elif not c.nullable:
# no .default, no .server_default, not autoincrement, we have
# no indication this primary key column will have any value
_warn_pk_with_no_anticipated_value(c)
def _create_insert_prefetch_bind_param(compiler, c, process=True, name=None):
param = _create_bind_param(compiler, c, None, process=process, name=name)
compiler.insert_prefetch.append(c)
return param
def _create_update_prefetch_bind_param(compiler, c, process=True, name=None):
param = _create_bind_param(compiler, c, None, process=process, name=name)
compiler.update_prefetch.append(c)
return param
class _multiparam_column(elements.ColumnElement):
_is_multiparam_column = True
def __init__(self, original, index):
self.index = index
self.key = "%s_m%d" % (original.key, index + 1)
self.original = original
self.default = original.default
self.type = original.type
def __eq__(self, other):
return (
isinstance(other, _multiparam_column)
and other.key == self.key
and other.original == self.original
)
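# Hedged illustration: for an INSERT with several VALUES tuples, the
# default-bearing column "data" of the second and third tuples is
# represented by _multiparam_column copies keyed "data_m1" and
# "data_m2" (index + 1, per __init__ above).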
def _process_multiparam_default_bind(compiler, stmt, c, index, kw):
if not c.default:
raise exc.CompileError(
"INSERT value for column %s is explicitly rendered as a bound"
"parameter in the VALUES clause; "
"a Python-side value or SQL expression is required" % c
)
elif c.default.is_clause_element:
return compiler.process(c.default.arg.self_group(), **kw)
else:
col = _multiparam_column(c, index)
if isinstance(stmt, dml.Insert):
return _create_insert_prefetch_bind_param(compiler, col)
else:
return _create_update_prefetch_bind_param(compiler, col)
def _append_param_insert_pk(compiler, stmt, c, values, kw):
"""Create a bound parameter in the INSERT statement to receive a
'prefetched' default value.
The 'prefetched' value indicates that we are to invoke a Python-side
default function or explicit SQL expression before the INSERT statement
proceeds, so that we have a primary key value available.
If the column has no noted default generation capabilities and has
no value passed in either, a warning is emitted.
"""
if (
# column has a Python-side default
c.default is not None
and (
# and it won't be a Sequence
not c.default.is_sequence
or compiler.dialect.supports_sequences
)
) or (
# column is the "autoincrement column"
c is stmt.table._autoincrement_column
and (
# and it's either a "sequence" or a
# pre-executable "autoincrement" sequence
compiler.dialect.supports_sequences
or compiler.dialect.preexecute_autoincrement_sequences
)
):
values.append((c, _create_insert_prefetch_bind_param(compiler, c)))
elif c.default is None and c.server_default is None and not c.nullable:
# no .default, no .server_default, not autoincrement, we have
# no indication this primary key column will have any value
_warn_pk_with_no_anticipated_value(c)
def _append_param_insert_hasdefault(
compiler, stmt, c, implicit_return_defaults, values, kw
):
if c.default.is_sequence:
if compiler.dialect.supports_sequences and (
not c.default.optional or not compiler.dialect.sequences_optional
):
proc = compiler.process(c.default, **kw)
values.append((c, proc))
if implicit_return_defaults and c in implicit_return_defaults:
compiler.returning.append(c)
elif not c.primary_key:
compiler.postfetch.append(c)
elif c.default.is_clause_element:
proc = compiler.process(c.default.arg.self_group(), **kw)
values.append((c, proc))
if implicit_return_defaults and c in implicit_return_defaults:
compiler.returning.append(c)
elif not c.primary_key:
# don't add primary key column to postfetch
compiler.postfetch.append(c)
else:
values.append((c, _create_insert_prefetch_bind_param(compiler, c)))
def _append_param_insert_select_hasdefault(compiler, stmt, c, values, kw):
if c.default.is_sequence:
if compiler.dialect.supports_sequences and (
not c.default.optional or not compiler.dialect.sequences_optional
):
proc = c.default
values.append((c, proc.next_value()))
elif c.default.is_clause_element:
proc = c.default.arg.self_group()
values.append((c, proc))
else:
values.append(
(c, _create_insert_prefetch_bind_param(compiler, c, process=False))
)
def _append_param_update(
compiler, stmt, c, implicit_return_defaults, values, kw
):
if c.onupdate is not None and not c.onupdate.is_sequence:
if c.onupdate.is_clause_element:
values.append(
(c, compiler.process(c.onupdate.arg.self_group(), **kw))
)
if implicit_return_defaults and c in implicit_return_defaults:
compiler.returning.append(c)
else:
compiler.postfetch.append(c)
else:
values.append((c, _create_update_prefetch_bind_param(compiler, c)))
elif c.server_onupdate is not None:
if implicit_return_defaults and c in implicit_return_defaults:
compiler.returning.append(c)
else:
compiler.postfetch.append(c)
elif (
implicit_return_defaults
and stmt._return_defaults is not True
and c in implicit_return_defaults
):
compiler.returning.append(c)
def _get_multitable_params(
compiler,
stmt,
stmt_parameters,
check_columns,
_col_bind_name,
_getattr_col_key,
values,
kw,
):
normalized_params = dict(
(elements._clause_element_as_expr(c), param)
for c, param in stmt_parameters.items()
)
affected_tables = set()
for t in stmt._extra_froms:
for c in t.c:
if c in normalized_params:
affected_tables.add(t)
check_columns[_getattr_col_key(c)] = c
value = normalized_params[c]
if elements._is_literal(value):
value = _create_bind_param(
compiler,
c,
value,
required=value is REQUIRED,
name=_col_bind_name(c),
)
else:
compiler.postfetch.append(c)
value = compiler.process(value.self_group(), **kw)
values.append((c, value))
# determine tables which are actually to be updated - process onupdate
# and server_onupdate for these
for t in affected_tables:
for c in t.c:
if c in normalized_params:
continue
elif c.onupdate is not None and not c.onupdate.is_sequence:
if c.onupdate.is_clause_element:
values.append(
(
c,
compiler.process(
c.onupdate.arg.self_group(), **kw
),
)
)
compiler.postfetch.append(c)
else:
values.append(
(
c,
_create_update_prefetch_bind_param(
compiler, c, name=_col_bind_name(c)
),
)
)
elif c.server_onupdate is not None:
compiler.postfetch.append(c)
def _extend_values_for_multiparams(compiler, stmt, values, kw):
values_0 = values
values = [values]
for i, row in enumerate(stmt.parameters[1:]):
extension = []
for (col, param) in values_0:
if col in row or col.key in row:
key = col if col in row else col.key
if elements._is_literal(row[key]):
new_param = _create_bind_param(
compiler,
col,
row[key],
name="%s_m%d" % (col.key, i + 1),
**kw
)
else:
new_param = compiler.process(row[key].self_group(), **kw)
else:
new_param = _process_multiparam_default_bind(
compiler, stmt, col, i, kw
)
extension.append((col, new_param))
values.append(extension)
return values
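# Hedged illustration (hypothetical table named "t"): compiling
# t.insert().values([{"data": "a"}, {"data": "b"}]) produces two VALUES
# tuples whose literal binds are named data_m0 and data_m1 -- the first
# row's binds come from _append_param_parameter (suffix "_m0"), the
# remainder from the loop above.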
def _get_stmt_parameters_params(
compiler, parameters, stmt_parameters, _column_as_key, values, kw
):
for k, v in stmt_parameters.items():
colkey = _column_as_key(k)
if colkey is not None:
parameters.setdefault(colkey, v)
else:
# a non-Column expression on the left side;
# add it to values() in an "as-is" state,
# coercing right side to bound param
if elements._is_literal(v):
v = compiler.process(
elements.BindParameter(None, v, type_=k.type), **kw
)
else:
v = compiler.process(v.self_group(), **kw)
values.append((k, v))
def _get_returning_modifiers(compiler, stmt):
need_pks = (
compiler.isinsert
and not compiler.inline
and not stmt._returning
and not stmt._has_multi_parameters
)
implicit_returning = (
need_pks
and compiler.dialect.implicit_returning
and stmt.table.implicit_returning
)
if compiler.isinsert:
implicit_return_defaults = implicit_returning and stmt._return_defaults
elif compiler.isupdate:
implicit_return_defaults = (
compiler.dialect.implicit_returning
and stmt.table.implicit_returning
and stmt._return_defaults
)
else:
# this line is unused, currently we are always
# isinsert or isupdate
implicit_return_defaults = False # pragma: no cover
if implicit_return_defaults:
if stmt._return_defaults is True:
implicit_return_defaults = set(stmt.table.c)
else:
implicit_return_defaults = set(stmt._return_defaults)
postfetch_lastrowid = need_pks and compiler.dialect.postfetch_lastrowid
return (
need_pks,
implicit_returning,
implicit_return_defaults,
postfetch_lastrowid,
)
def _warn_pk_with_no_anticipated_value(c):
msg = (
"Column '%s.%s' is marked as a member of the "
"primary key for table '%s', "
"but has no Python-side or server-side default generator indicated, "
"nor does it indicate 'autoincrement=True' or 'nullable=True', "
"and no explicit value is passed. "
"Primary key columns typically may not store NULL."
% (c.table.fullname, c.name, c.table.fullname)
)
if len(c.table.primary_key) > 1:
msg += (
" Note that as of SQLAlchemy 1.1, 'autoincrement=True' must be "
"indicated explicitly for composite (e.g. multicolumn) primary "
"keys if AUTO_INCREMENT/SERIAL/IDENTITY "
"behavior is expected for one of the columns in the primary key. "
"CREATE TABLE statements are impacted by this change as well on "
"most backends."
)
util.warn(msg)
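# ---------------------------------------------------------------------
# Hedged usage sketch (not part of the library; guarded so it only runs
# when this module is executed directly). Compiling an INSERT without
# values exercises the REQUIRED branch at the top of _get_crud_params:
# every column becomes a required bind parameter. The table and column
# names below are hypothetical.
if __name__ == "__main__":
    from sqlalchemy import Column, Integer, MetaData, String, Table

    meta = MetaData()
    demo = Table(
        "demo",
        meta,
        Column("id", Integer, primary_key=True),
        Column("data", String(50)),
    )
    compiled = demo.insert().compile()
    print(str(compiled))  # INSERT INTO demo (id, data) VALUES (:id, :data)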
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/sql/schema.py
|
# sql/schema.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The schema module provides the building blocks for database metadata.
Each element within this module describes a database entity which can be
created and dropped, or is otherwise part of such an entity. Examples include
tables, columns, sequences, and indexes.
All entities are subclasses of :class:`~sqlalchemy.schema.SchemaItem`, and as
defined in this module they are intended to be agnostic of any vendor-specific
constructs.
A collection of entities are grouped into a unit called
:class:`~sqlalchemy.schema.MetaData`. MetaData serves as a logical grouping of
schema elements, and can also be associated with an actual database connection
such that operations involving the contained elements can contact the database
as needed.
Two of the elements here also build upon their "syntactic" counterparts, which
are defined in :mod:`~sqlalchemy.sql.expression`, specifically
:class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.Column`.
Since these objects are part of the SQL expression language, they are usable
as components in SQL expressions.
"""
from __future__ import absolute_import
import collections
import operator
import sqlalchemy
from . import ddl
from . import type_api
from . import visitors
from .base import _bind_or_error
from .base import ColumnCollection
from .base import DialectKWArgs
from .base import SchemaEventTarget
from .elements import _as_truncated
from .elements import _document_text_coercion
from .elements import _literal_as_text
from .elements import ClauseElement
from .elements import ColumnClause
from .elements import ColumnElement
from .elements import quoted_name
from .elements import TextClause
from .selectable import TableClause
from .. import event
from .. import exc
from .. import inspection
from .. import util
RETAIN_SCHEMA = util.symbol("retain_schema")
BLANK_SCHEMA = util.symbol(
"blank_schema",
"""Symbol indicating that a :class:`_schema.Table` or :class:`.Sequence`
should have 'None' for its schema, even if the parent
:class:`_schema.MetaData` has specified a schema.
.. versionadded:: 1.0.14
""",
)
def _get_table_key(name, schema):
if schema is None:
return name
else:
return schema + "." + name
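# Hedged illustration: _get_table_key("user", None) returns "user",
# while _get_table_key("user", "acct") returns "acct.user"; this
# composite key is what identifies a Table within its MetaData.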
# this should really be in sql/util.py but we'd have to
# break an import cycle
def _copy_expression(expression, source_table, target_table):
def replace(col):
if (
isinstance(col, Column)
and col.table is source_table
and col.key in source_table.c
):
return target_table.c[col.key]
else:
return None
return visitors.replacement_traverse(expression, {}, replace)
@inspection._self_inspects
class SchemaItem(SchemaEventTarget, visitors.Visitable):
"""Base class for items that define a database schema."""
__visit_name__ = "schema_item"
def _init_items(self, *args):
"""Initialize the list of child items for this SchemaItem."""
for item in args:
if item is not None:
try:
spwd = item._set_parent_with_dispatch
except AttributeError as err:
util.raise_(
exc.ArgumentError(
"'SchemaItem' object, such as a 'Column' or a "
"'Constraint' expected, got %r" % item
),
replace_context=err,
)
else:
spwd(self)
def get_children(self, **kwargs):
"""used to allow SchemaVisitor access"""
return []
def __repr__(self):
return util.generic_repr(self, omit_kwarg=["info"])
@property
@util.deprecated(
"0.9",
"The :attr:`.SchemaItem.quote` attribute is deprecated and will be "
"removed in a future release. Use the :attr:`.quoted_name.quote` "
"attribute on the ``name`` field of the target schema item to retrieve"
"quoted status.",
)
def quote(self):
"""Return the value of the ``quote`` flag passed
to this schema object, for those schema items which
have a ``name`` field.
"""
return self.name.quote
@util.memoized_property
def info(self):
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.SchemaItem`.
The dictionary is automatically generated when first accessed.
It can also be specified in the constructor of some objects,
such as :class:`_schema.Table` and :class:`_schema.Column`.
"""
return {}
def _schema_item_copy(self, schema_item):
if "info" in self.__dict__:
schema_item.info = self.info.copy()
schema_item.dispatch._update(self.dispatch)
return schema_item
def _translate_schema(self, effective_schema, map_):
return map_.get(effective_schema, effective_schema)
class Table(DialectKWArgs, SchemaItem, TableClause):
r"""Represent a table in a database.
e.g.::
mytable = Table("mytable", metadata,
Column('mytable_id', Integer, primary_key=True),
Column('value', String(50))
)
The :class:`_schema.Table`
object constructs a unique instance of itself based
on its name and optional schema name within the given
:class:`_schema.MetaData` object. Calling the :class:`_schema.Table`
constructor with the same name and same :class:`_schema.MetaData` argument
a second time will return the *same* :class:`_schema.Table`
object - in this way
the :class:`_schema.Table` constructor acts as a registry function.
.. seealso::
:ref:`metadata_describing` - Introduction to database metadata
Constructor arguments are as follows:
:param name: The name of this table as represented in the database.
The table name, along with the value of the ``schema`` parameter,
forms a key which uniquely identifies this :class:`_schema.Table`
within
the owning :class:`_schema.MetaData` collection.
Additional calls to :class:`_schema.Table` with the same name,
metadata,
and schema name will return the same :class:`_schema.Table` object.
Names which contain no upper case characters
will be treated as case insensitive names, and will not be quoted
unless they are a reserved word or contain special characters.
A name with any number of upper case characters is considered
to be case sensitive, and will be sent as quoted.
To enable unconditional quoting for the table name, specify the flag
``quote=True`` to the constructor, or use the :class:`.quoted_name`
construct to specify the name.
:param metadata: a :class:`_schema.MetaData`
object which will contain this
table. The metadata is used as a point of association of this table
with other tables which are referenced via foreign key. It also
may be used to associate this table with a particular
:class:`.Connectable`.
:param \*args: Additional positional arguments are used primarily
to add the list of :class:`_schema.Column`
objects contained within this
table. Similar to the style of a CREATE TABLE statement, other
:class:`.SchemaItem` constructs may be added here, including
:class:`.PrimaryKeyConstraint`, and
:class:`_schema.ForeignKeyConstraint`.
:param autoload: Defaults to False, unless
:paramref:`_schema.Table.autoload_with`
is set in which case it defaults to True; :class:`_schema.Column`
objects
for this table should be reflected from the database, possibly
augmenting or replacing existing :class:`_schema.Column`
objects that were
explicitly specified.
.. versionchanged:: 1.0.0 setting the
:paramref:`_schema.Table.autoload_with`
parameter implies that :paramref:`_schema.Table.autoload`
will default
to True.
.. seealso::
:ref:`metadata_reflection_toplevel`
:param autoload_replace: Defaults to ``True``; when using
:paramref:`_schema.Table.autoload`
in conjunction with :paramref:`_schema.Table.extend_existing`,
indicates
that :class:`_schema.Column` objects present in the already-existing
:class:`_schema.Table`
object should be replaced with columns of the same
name retrieved from the autoload process. When ``False``, columns
already present under existing names will be omitted from the
reflection process.
Note that this setting does not impact :class:`_schema.Column` objects
specified programmatically within the call to :class:`_schema.Table`
that
also is autoloading; those :class:`_schema.Column` objects will always
replace existing columns of the same name when
:paramref:`_schema.Table.extend_existing` is ``True``.
.. seealso::
:paramref:`_schema.Table.autoload`
:paramref:`_schema.Table.extend_existing`
:param autoload_with: An :class:`_engine.Engine` or
:class:`_engine.Connection` object
with which this :class:`_schema.Table` object will be reflected; when
set to a non-None value, it implies that
:paramref:`_schema.Table.autoload`
is ``True``. If left unset, but :paramref:`_schema.Table.autoload`
is
explicitly set to ``True``, an autoload operation will attempt to
proceed by locating an :class:`_engine.Engine` or
:class:`_engine.Connection` bound
to the underlying :class:`_schema.MetaData` object.
.. seealso::
:paramref:`_schema.Table.autoload`
:param extend_existing: When ``True``, indicates that if this
:class:`_schema.Table` is already present in the given
:class:`_schema.MetaData`,
apply further arguments within the constructor to the existing
:class:`_schema.Table`.
If :paramref:`_schema.Table.extend_existing` or
:paramref:`_schema.Table.keep_existing` are not set,
and the given name
of the new :class:`_schema.Table` refers to a :class:`_schema.Table`
that is
already present in the target :class:`_schema.MetaData` collection,
and
this :class:`_schema.Table`
specifies additional columns or other constructs
or flags that modify the table's state, an
error is raised. The purpose of these two mutually-exclusive flags
is to specify what action should be taken when a
:class:`_schema.Table`
is specified that matches an existing :class:`_schema.Table`,
yet specifies
additional constructs.
:paramref:`_schema.Table.extend_existing`
will also work in conjunction
with :paramref:`_schema.Table.autoload` to run a new reflection
operation against the database, even if a :class:`_schema.Table`
of the same name is already present in the target
:class:`_schema.MetaData`; newly reflected :class:`_schema.Column`
objects
and other options will be added into the state of the
:class:`_schema.Table`, potentially overwriting existing columns
and options of the same name.
As is always the case with :paramref:`_schema.Table.autoload`,
:class:`_schema.Column` objects can be specified in the same
:class:`_schema.Table`
constructor, which will take precedence. Below, the existing
table ``mytable`` will be augmented with :class:`_schema.Column`
objects
both reflected from the database, as well as the given
:class:`_schema.Column`
named "y"::
Table("mytable", metadata,
Column('y', Integer),
extend_existing=True,
autoload=True,
autoload_with=engine
)
.. seealso::
:paramref:`_schema.Table.autoload`
:paramref:`_schema.Table.autoload_replace`
:paramref:`_schema.Table.keep_existing`
:param implicit_returning: True by default - indicates that
RETURNING can be used by default to fetch newly inserted primary key
values, for backends which support this. Note that
create_engine() also provides an implicit_returning flag.
:param include_columns: A list of strings indicating a subset of
columns to be loaded via the ``autoload`` operation; table columns that
aren't present in this list will not be represented on the resulting
``Table`` object. Defaults to ``None`` which indicates all columns
should be reflected.
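E.g., a sketch reflecting only two columns of a pre-existing database
table (``engine`` is assumed to be an :class:`_engine.Engine` connected
to a database in which ``shopping_carts`` already exists)::

    carts = Table('shopping_carts', metadata,
        include_columns=['id', 'description'],
        autoload=True,
        autoload_with=engine
    )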
:param resolve_fks: Whether or not to reflect :class:`_schema.Table`
objects
related to this one via :class:`_schema.ForeignKey` objects, when
:paramref:`_schema.Table.autoload` or
:paramref:`_schema.Table.autoload_with` is
specified. Defaults to True. Set to False to disable reflection of
related tables as :class:`_schema.ForeignKey`
objects are encountered; may be
used either to save on SQL calls or to avoid issues with related tables
that can't be accessed. Note that if a related table is already present
in the :class:`_schema.MetaData` collection, or becomes present later,
a
:class:`_schema.ForeignKey` object associated with this
:class:`_schema.Table` will
resolve to that table normally.
.. versionadded:: 1.3
.. seealso::
:paramref:`.MetaData.reflect.resolve_fks`
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
:param keep_existing: When ``True``, indicates that if this Table
is already present in the given :class:`_schema.MetaData`, ignore
further arguments within the constructor to the existing
:class:`_schema.Table`, and return the :class:`_schema.Table`
object as
originally created. This is to allow a function that wishes
to define a new :class:`_schema.Table` on first call, but on
subsequent calls will return the same :class:`_schema.Table`,
without any of the declarations (particularly constraints)
being applied a second time.
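E.g., a minimal sketch of a table-definition helper that is safe to
call more than once (table and column names here are hypothetical;
``Integer`` and ``Text`` are assumed imported)::

    def get_log_table(metadata):
        # on the first call this defines 'log'; on subsequent calls
        # the already-registered Table is returned unchanged
        return Table('log', metadata,
            Column('id', Integer, primary_key=True),
            Column('message', Text),
            keep_existing=True
        )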
If :paramref:`_schema.Table.extend_existing` or
:paramref:`_schema.Table.keep_existing` are not set,
and the given name
of the new :class:`_schema.Table` refers to a :class:`_schema.Table`
that is
already present in the target :class:`_schema.MetaData` collection,
and
this :class:`_schema.Table`
specifies additional columns or other constructs
or flags that modify the table's state, an
error is raised. The purpose of these two mutually-exclusive flags
is to specify what action should be taken when a
:class:`_schema.Table`
is specified that matches an existing :class:`_schema.Table`,
yet specifies
additional constructs.
.. seealso::
:paramref:`_schema.Table.extend_existing`
:param listeners: A list of tuples of the form ``(<eventname>, <fn>)``
which will be passed to :func:`.event.listen` upon construction.
This alternate hook to :func:`.event.listen` allows the establishment
of a listener function specific to this :class:`_schema.Table` before
the "autoload" process begins. Particularly useful for
the :meth:`.DDLEvents.column_reflect` event::
def listen_for_reflect(table, column_info):
"handle the column reflection event"
# ...
t = Table(
'sometable',
metadata,
autoload=True,
listeners=[
('column_reflect', listen_for_reflect)
])
:param mustexist: When ``True``, indicates that this Table must already
be present in the given :class:`_schema.MetaData` collection, else
an exception is raised.
:param prefixes:
A list of strings to insert after CREATE in the CREATE TABLE
statement. They will be separated by spaces.
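E.g., a sketch rendering ``CREATE TEMPORARY TABLE``::

    queue = Table('queue_entries', metadata,
        Column('id', Integer, primary_key=True),
        prefixes=['TEMPORARY']
    )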
:param quote: Force quoting of this table's name on or off, corresponding
to ``True`` or ``False``. When left at its default of ``None``,
the table identifier will be quoted according to whether the name is
case sensitive (identifiers with at least one upper case character are
treated as case sensitive), or if it's a reserved word. This flag
is only needed to force quoting of a reserved word which is not known
by the SQLAlchemy dialect.
:param quote_schema: same as 'quote' but applies to the schema identifier.
:param schema: The schema name for this table, which is required if
the table resides in a schema other than the default selected schema
for the engine's database connection. Defaults to ``None``.
If the owning :class:`_schema.MetaData` of this :class:`_schema.Table`
specifies its
own :paramref:`_schema.MetaData.schema` parameter,
then that schema name will
be applied to this :class:`_schema.Table`
if the schema parameter here is set
to ``None``. To set a blank schema name on a :class:`_schema.Table`
that
would otherwise use the schema set on the owning
:class:`_schema.MetaData`,
specify the special symbol :attr:`.BLANK_SCHEMA`.
.. versionadded:: 1.0.14 Added the :attr:`.BLANK_SCHEMA` symbol to
allow a :class:`_schema.Table`
to have a blank schema name even when the
parent :class:`_schema.MetaData` specifies
:paramref:`_schema.MetaData.schema`.
The quoting rules for the schema name are the same as those for the
``name`` parameter, in that quoting is applied for reserved words or
case-sensitive names; to enable unconditional quoting for the schema
name, specify the flag ``quote_schema=True`` to the constructor, or use
the :class:`.quoted_name` construct to specify the name.
:param useexisting: the same as :paramref:`_schema.Table.extend_existing`.
:param comment: Optional string that will render an SQL comment on table
creation.
.. versionadded:: 1.2 Added the :paramref:`_schema.Table.comment`
parameter
to :class:`_schema.Table`.
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form ``<dialectname>_<argname>``.
See the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
"""
__visit_name__ = "table"
@util.deprecated_params(
useexisting=(
"0.7",
"The :paramref:`_schema.Table.useexisting` "
"parameter is deprecated and "
"will be removed in a future release. Please use "
":paramref:`_schema.Table.extend_existing`.",
)
)
def __new__(cls, *args, **kw):
if not args:
# python3k pickle seems to call this
return object.__new__(cls)
try:
name, metadata, args = args[0], args[1], args[2:]
except IndexError:
raise TypeError("Table() takes at least two arguments")
schema = kw.get("schema", None)
if schema is None:
schema = metadata.schema
elif schema is BLANK_SCHEMA:
schema = None
keep_existing = kw.pop("keep_existing", False)
extend_existing = kw.pop("extend_existing", False)
if "useexisting" in kw:
if extend_existing:
msg = "useexisting is synonymous with extend_existing."
raise exc.ArgumentError(msg)
extend_existing = kw.pop("useexisting", False)
if keep_existing and extend_existing:
msg = "keep_existing and extend_existing are mutually exclusive."
raise exc.ArgumentError(msg)
mustexist = kw.pop("mustexist", False)
key = _get_table_key(name, schema)
if key in metadata.tables:
if not keep_existing and not extend_existing and bool(args):
raise exc.InvalidRequestError(
"Table '%s' is already defined for this MetaData "
"instance. Specify 'extend_existing=True' "
"to redefine "
"options and columns on an "
"existing Table object." % key
)
table = metadata.tables[key]
if extend_existing:
table._init_existing(*args, **kw)
return table
else:
if mustexist:
raise exc.InvalidRequestError("Table '%s' not defined" % key)
table = object.__new__(cls)
table.dispatch.before_parent_attach(table, metadata)
metadata._add_table(name, schema, table)
try:
table._init(name, metadata, *args, **kw)
table.dispatch.after_parent_attach(table, metadata)
return table
except:
with util.safe_reraise():
metadata._remove_table(name, schema)
@property
@util.deprecated(
"0.9",
"The :meth:`.SchemaItem.quote` method is deprecated and will be "
"removed in a future release. Use the :attr:`.quoted_name.quote` "
"attribute on the ``schema`` field of the target schema item to "
"retrieve quoted status.",
)
def quote_schema(self):
"""Return the value of the ``quote_schema`` flag passed
to this :class:`_schema.Table`.
"""
return self.schema.quote
def __init__(self, *args, **kw):
"""Constructor for :class:`_schema.Table`.
This method is a no-op. See the top-level
documentation for :class:`_schema.Table`
for constructor arguments.
"""
# __init__ is overridden to prevent __new__ from
# calling the superclass constructor.
def _init(self, name, metadata, *args, **kwargs):
super(Table, self).__init__(
quoted_name(name, kwargs.pop("quote", None))
)
self.metadata = metadata
self.schema = kwargs.pop("schema", None)
if self.schema is None:
self.schema = metadata.schema
elif self.schema is BLANK_SCHEMA:
self.schema = None
else:
quote_schema = kwargs.pop("quote_schema", None)
self.schema = quoted_name(self.schema, quote_schema)
self.indexes = set()
self.constraints = set()
self._columns = ColumnCollection()
PrimaryKeyConstraint(
_implicit_generated=True
)._set_parent_with_dispatch(self)
self.foreign_keys = set()
self._extra_dependencies = set()
if self.schema is not None:
self.fullname = "%s.%s" % (self.schema, self.name)
else:
self.fullname = self.name
autoload_with = kwargs.pop("autoload_with", None)
autoload = kwargs.pop("autoload", autoload_with is not None)
# this argument is only used with _init_existing()
kwargs.pop("autoload_replace", True)
_extend_on = kwargs.pop("_extend_on", None)
resolve_fks = kwargs.pop("resolve_fks", True)
include_columns = kwargs.pop("include_columns", None)
self.implicit_returning = kwargs.pop("implicit_returning", True)
self.comment = kwargs.pop("comment", None)
if "info" in kwargs:
self.info = kwargs.pop("info")
if "listeners" in kwargs:
listeners = kwargs.pop("listeners")
for evt, fn in listeners:
event.listen(self, evt, fn)
self._prefixes = kwargs.pop("prefixes", [])
self._extra_kwargs(**kwargs)
# load column definitions from the database if 'autoload' is defined
# we do it after the table is in the singleton dictionary to support
# circular foreign keys
if autoload:
self._autoload(
metadata,
autoload_with,
include_columns,
_extend_on=_extend_on,
resolve_fks=resolve_fks,
)
# initialize all the column, etc. objects. done after reflection to
# allow user-overrides
self._init_items(*args)
def _autoload(
self,
metadata,
autoload_with,
include_columns,
exclude_columns=(),
resolve_fks=True,
_extend_on=None,
):
if autoload_with:
autoload_with.run_callable(
autoload_with.dialect.reflecttable,
self,
include_columns,
exclude_columns,
resolve_fks,
_extend_on=_extend_on,
)
else:
bind = _bind_or_error(
metadata,
msg="No engine is bound to this Table's MetaData. "
"Pass an engine to the Table via "
"autoload_with=<someengine>, "
"or associate the MetaData with an engine via "
"metadata.bind=<someengine>",
)
bind.run_callable(
bind.dialect.reflecttable,
self,
include_columns,
exclude_columns,
resolve_fks,
_extend_on=_extend_on,
)
@property
def _sorted_constraints(self):
"""Return the set of constraints as a list, sorted by creation
order.
"""
return sorted(self.constraints, key=lambda c: c._creation_order)
@property
def foreign_key_constraints(self):
""":class:`_schema.ForeignKeyConstraint` objects referred to by this
:class:`_schema.Table`.
This list is produced from the collection of
:class:`_schema.ForeignKey`
objects currently associated.
.. versionadded:: 1.0.0
"""
return set(fkc.constraint for fkc in self.foreign_keys)
def _init_existing(self, *args, **kwargs):
autoload_with = kwargs.pop("autoload_with", None)
autoload = kwargs.pop("autoload", autoload_with is not None)
autoload_replace = kwargs.pop("autoload_replace", True)
schema = kwargs.pop("schema", None)
_extend_on = kwargs.pop("_extend_on", None)
if schema and schema != self.schema:
raise exc.ArgumentError(
"Can't change schema of existing table "
"from '%s' to '%s'" % (self.schema, schema)
)
include_columns = kwargs.pop("include_columns", None)
resolve_fks = kwargs.pop("resolve_fks", True)
if include_columns is not None:
for c in self.c:
if c.name not in include_columns:
self._columns.remove(c)
for key in ("quote", "quote_schema"):
if key in kwargs:
raise exc.ArgumentError(
"Can't redefine 'quote' or 'quote_schema' arguments"
)
if "comment" in kwargs:
self.comment = kwargs.pop("comment", None)
if "info" in kwargs:
self.info = kwargs.pop("info")
if autoload:
if not autoload_replace:
# don't replace columns already present.
# we'd like to do this for constraints also however we don't
# have simple de-duping for unnamed constraints.
exclude_columns = [c.name for c in self.c]
else:
exclude_columns = ()
self._autoload(
self.metadata,
autoload_with,
include_columns,
exclude_columns,
resolve_fks,
_extend_on=_extend_on,
)
self._extra_kwargs(**kwargs)
self._init_items(*args)
def _extra_kwargs(self, **kwargs):
self._validate_dialect_kwargs(kwargs)
def _init_collections(self):
pass
def _reset_exported(self):
pass
@property
def _autoincrement_column(self):
return self.primary_key._autoincrement_column
@property
def key(self):
"""Return the 'key' for this :class:`_schema.Table`.
This value is used as the dictionary key within the
:attr:`_schema.MetaData.tables` collection. It is typically the same
as that of :attr:`_schema.Table.name` for a table with no
:attr:`_schema.Table.schema`
set; otherwise it is typically of the form
``schemaname.tablename``.
"""
return _get_table_key(self.name, self.schema)
def __repr__(self):
return "Table(%s)" % ", ".join(
[repr(self.name)]
+ [repr(self.metadata)]
+ [repr(x) for x in self.columns]
+ ["%s=%s" % (k, repr(getattr(self, k))) for k in ["schema"]]
)
def __str__(self):
return _get_table_key(self.description, self.schema)
@property
def bind(self):
"""Return the connectable associated with this Table."""
return self.metadata and self.metadata.bind or None
def add_is_dependent_on(self, table):
"""Add a 'dependency' for this Table.
This is another Table object which must be created
before this one can be, or dropped after this one is.
Usually, dependencies between tables are determined via
ForeignKey objects. However, for other situations that
create dependencies outside of foreign keys (rules, inheriting),
this method can manually establish such a link.
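E.g., a sketch where a hypothetical ``widget_audit`` table is populated
by a database-level trigger on ``widgets`` that SQLAlchemy cannot see::

    widgets = Table('widgets', metadata,
        Column('id', Integer, primary_key=True))
    widget_audit = Table('widget_audit', metadata,
        Column('id', Integer, primary_key=True))
    widget_audit.add_is_dependent_on(widgets)

    # CREATE for 'widgets' is now emitted before 'widget_audit'
    metadata.create_all(engine)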
"""
self._extra_dependencies.add(table)
def append_column(self, column):
"""Append a :class:`_schema.Column` to this :class:`_schema.Table`.
The "key" of the newly added :class:`_schema.Column`, i.e. the
value of its ``.key`` attribute, will then be available
in the ``.c`` collection of this :class:`_schema.Table`, and the
column definition will be included in any CREATE TABLE, SELECT,
UPDATE, etc. statements generated from this :class:`_schema.Table`
construct.
Note that this does **not** change the definition of the table
as it exists within any underlying database, assuming that
table has already been created in the database. Relational
databases support the addition of columns to existing tables
using the SQL ALTER command, which would need to be
emitted for an already-existing table that doesn't contain
the newly added column.
"""
column._set_parent_with_dispatch(self)
def append_constraint(self, constraint):
"""Append a :class:`_schema.Constraint` to this
:class:`_schema.Table`.
This has the effect of the constraint being included in any
future CREATE TABLE statement, assuming specific DDL creation
events have not been associated with the given
:class:`_schema.Constraint` object.
Note that this does **not** produce the constraint within the
relational database automatically, for a table that already exists
in the database. To add a constraint to an
existing relational database table, the SQL ALTER command must
be used. SQLAlchemy also provides the
:class:`.AddConstraint` construct which can produce this SQL when
invoked as an executable clause.
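E.g., a sketch that appends a CHECK constraint and emits the
corresponding ALTER, assuming ``table`` has a ``price`` column and
``engine`` is a bound :class:`_engine.Engine`::

    from sqlalchemy import CheckConstraint
    from sqlalchemy.schema import AddConstraint

    constraint = CheckConstraint('price > 0', name='ck_positive_price')
    table.append_constraint(constraint)
    engine.execute(AddConstraint(constraint))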
"""
constraint._set_parent_with_dispatch(self)
@util.deprecated(
"0.7",
"the :meth:`_schema.Table.append_ddl_listener` "
"method is deprecated and "
"will be removed in a future release. Please refer to "
":class:`.DDLEvents`.",
)
def append_ddl_listener(self, event_name, listener):
"""Append a DDL event listener to this ``Table``.
"""
def adapt_listener(target, connection, **kw):
listener(event_name, target, connection)
event.listen(self, event_name.replace("-", "_"), adapt_listener)
def _set_parent(self, metadata):
metadata._add_table(self.name, self.schema, self)
self.metadata = metadata
def get_children(
self, column_collections=True, schema_visitor=False, **kw
):
if not schema_visitor:
return TableClause.get_children(
self, column_collections=column_collections, **kw
)
else:
if column_collections:
return list(self.columns)
else:
return []
def exists(self, bind=None):
"""Return True if this table exists."""
if bind is None:
bind = _bind_or_error(self)
return bind.run_callable(
bind.dialect.has_table, self.name, schema=self.schema
)
def create(self, bind=None, checkfirst=False):
"""Issue a ``CREATE`` statement for this
:class:`_schema.Table`, using the given :class:`.Connectable`
for connectivity.
.. seealso::
:meth:`_schema.MetaData.create_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaGenerator, self, checkfirst=checkfirst)
def drop(self, bind=None, checkfirst=False):
"""Issue a ``DROP`` statement for this
:class:`_schema.Table`, using the given :class:`.Connectable`
for connectivity.
.. seealso::
:meth:`_schema.MetaData.drop_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaDropper, self, checkfirst=checkfirst)
def tometadata(
self,
metadata,
schema=RETAIN_SCHEMA,
referred_schema_fn=None,
name=None,
):
"""Return a copy of this :class:`_schema.Table`
associated with a different
:class:`_schema.MetaData`.
E.g.::
m1 = MetaData()
user = Table('user', m1, Column('id', Integer, primary_key=True))
m2 = MetaData()
user_copy = user.tometadata(m2)
:param metadata: Target :class:`_schema.MetaData` object,
into which the
new :class:`_schema.Table` object will be created.
:param schema: optional string name indicating the target schema.
Defaults to the special symbol :attr:`.RETAIN_SCHEMA` which indicates
that no change to the schema name should be made in the new
:class:`_schema.Table`. If set to a string name, the new
:class:`_schema.Table`
will have this new name as the ``.schema``. If set to ``None``, the
schema will be set to that of the schema set on the target
:class:`_schema.MetaData`, which is typically ``None`` as well,
unless
set explicitly::
m2 = MetaData(schema='newschema')
# user_copy_one will have "newschema" as the schema name
user_copy_one = user.tometadata(m2, schema=None)
m3 = MetaData() # schema defaults to None
# user_copy_two will have None as the schema name
user_copy_two = user.tometadata(m3, schema=None)
:param referred_schema_fn: optional callable which can be supplied
in order to provide for the schema name that should be assigned
to the referenced table of a :class:`_schema.ForeignKeyConstraint`.
The callable accepts this parent :class:`_schema.Table`, the
target schema that we are changing to, the
:class:`_schema.ForeignKeyConstraint` object, and the existing
"target schema" of that constraint. The function should return the
string schema name that should be applied.
E.g.::
def referred_schema_fn(table, to_schema,
constraint, referred_schema):
if referred_schema == 'base_tables':
return referred_schema
else:
return to_schema
new_table = table.tometadata(m2, schema="alt_schema",
referred_schema_fn=referred_schema_fn)
.. versionadded:: 0.9.2
:param name: optional string name indicating the target table name.
If not specified or None, the table name is retained. This allows
a :class:`_schema.Table` to be copied to the same
:class:`_schema.MetaData` target
with a new name.
.. versionadded:: 1.0.0
"""
if name is None:
name = self.name
if schema is RETAIN_SCHEMA:
schema = self.schema
elif schema is None:
schema = metadata.schema
key = _get_table_key(name, schema)
if key in metadata.tables:
util.warn(
"Table '%s' already exists within the given "
"MetaData - not copying." % self.description
)
return metadata.tables[key]
args = []
for c in self.columns:
args.append(c.copy(schema=schema))
table = Table(
name,
metadata,
schema=schema,
comment=self.comment,
*args,
**self.kwargs
)
for c in self.constraints:
if isinstance(c, ForeignKeyConstraint):
referred_schema = c._referred_schema
if referred_schema_fn:
fk_constraint_schema = referred_schema_fn(
self, schema, c, referred_schema
)
else:
fk_constraint_schema = (
schema if referred_schema == self.schema else None
)
table.append_constraint(
c.copy(schema=fk_constraint_schema, target_table=table)
)
elif not c._type_bound:
# skip unique constraints that would be generated
# by the 'unique' flag on Column
if c._column_flag:
continue
table.append_constraint(
c.copy(schema=schema, target_table=table)
)
for index in self.indexes:
# skip indexes that would be generated
# by the 'index' flag on Column
if index._column_flag:
continue
Index(
index.name,
unique=index.unique,
*[
_copy_expression(expr, self, table)
for expr in index.expressions
],
_table=table,
**index.kwargs
)
return self._schema_item_copy(table)
class Column(DialectKWArgs, SchemaItem, ColumnClause):
"""Represents a column in a database table."""
__visit_name__ = "column"
def __init__(self, *args, **kwargs):
r"""
Construct a new ``Column`` object.
:param name: The name of this column as represented in the database.
This argument may be the first positional argument, or specified
via keyword.
Names which contain no upper case characters
will be treated as case insensitive names, and will not be quoted
unless they are a reserved word. Names with any number of upper
case characters will be quoted and sent exactly. Note that this
behavior applies even for databases which standardize upper
case names as case insensitive such as Oracle.
The name field may be omitted at construction time and applied
later, at any time before the Column is associated with a
:class:`_schema.Table`. This is to support convenient
usage within the :mod:`~sqlalchemy.ext.declarative` extension.
:param type\_: The column's type, indicated using an instance which
subclasses :class:`~sqlalchemy.types.TypeEngine`. If no arguments
are required for the type, the class of the type can be sent
as well, e.g.::
# use a type with arguments
Column('data', String(50))
# use no arguments
Column('level', Integer)
The ``type`` argument may be the second positional argument
or specified by keyword.
If the ``type`` is ``None`` or is omitted, it will first default to
the special type :class:`.NullType`. If and when this
:class:`_schema.Column` is made to refer to another column using
:class:`_schema.ForeignKey` and/or
:class:`_schema.ForeignKeyConstraint`, the type
of the remote-referenced column will be copied to this column as
well, at the moment that the foreign key is resolved against that
remote :class:`_schema.Column` object.
.. versionchanged:: 0.9.0
Support for propagation of type to a :class:`_schema.Column`
from its
:class:`_schema.ForeignKey` object has been improved and should be
more reliable and timely.
:param \*args: Additional positional arguments include various
:class:`.SchemaItem` derived constructs which will be applied
as options to the column. These include instances of
:class:`.Constraint`, :class:`_schema.ForeignKey`,
:class:`.ColumnDefault`,
:class:`.Sequence`, :class:`.Computed`. In some cases an
equivalent keyword argument is available such as ``server_default``,
``default`` and ``unique``.
:param autoincrement: Set up "auto increment" semantics for an integer
primary key column. The default value is the string ``"auto"``
which indicates that a single-column primary key that is of
an INTEGER type with no stated client-side or python-side defaults
should receive auto increment semantics automatically;
all other varieties of primary key columns will not. This
includes that :term:`DDL` such as PostgreSQL SERIAL or MySQL
AUTO_INCREMENT will be emitted for this column during a table
create, as well as that the column is assumed to generate new
integer primary key values when an INSERT statement is invoked, which
will be retrieved by the dialect.
The flag may be set to ``True`` to indicate that a column which
is part of a composite (e.g. multi-column) primary key should
have autoincrement semantics, though note that only one column
within a primary key may have this setting. It can also
be set to ``True`` to indicate autoincrement semantics on a
column that has a client-side or server-side default configured,
however note that not all dialects can accommodate all styles
of default as an "autoincrement". It can also be
set to ``False`` on a single-column primary key that has a
datatype of INTEGER in order to disable auto increment semantics
for that column.
.. versionchanged:: 1.1 The autoincrement flag now defaults to
``"auto"`` which indicates autoincrement semantics by default
for single-column integer primary keys only; for composite
(multi-column) primary keys, autoincrement is never implicitly
enabled; as always, ``autoincrement=True`` will allow for
at most one of those columns to be an "autoincrement" column.
``autoincrement=True`` may also be set on a
:class:`_schema.Column`
that has an explicit client-side or server-side default,
subject to limitations of the backend database and dialect.
The setting *only* has an effect for columns which are:
* Integer derived (i.e. INT, SMALLINT, BIGINT).
* Part of the primary key
* Not referring to another column via :class:`_schema.ForeignKey`,
unless
the value is specified as ``'ignore_fk'``::
# turn on autoincrement for this column despite
# the ForeignKey()
Column('id', ForeignKey('other.id'),
primary_key=True, autoincrement='ignore_fk')
It is typically not desirable to have "autoincrement" enabled on a
column that refers to another via foreign key, as such a column is
required to refer to a value that originates from elsewhere.
The setting has these two effects on columns that meet the
above criteria:
* DDL issued for the column will include database-specific
keywords intended to signify this column as an
"autoincrement" column, such as AUTO INCREMENT on MySQL,
SERIAL on PostgreSQL, and IDENTITY on MS-SQL. It does
*not* issue AUTOINCREMENT for SQLite since this is a
special SQLite flag that is not required for autoincrementing
behavior.
.. seealso::
:ref:`sqlite_autoincrement`
* The column will be considered to be available using an
"autoincrement" method specific to the backend database, such
as calling upon ``cursor.lastrowid``, using RETURNING in an
INSERT statement to get at a sequence-generated value, or using
special functions such as "SELECT scope_identity()".
These methods are highly specific to the DBAPIs and databases in
use and vary greatly, so care should be taken when associating
``autoincrement=True`` with a custom default generation function.
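E.g., a sketch of a composite primary key where the ``id`` column is
explicitly designated as the autoincrementing one (``String`` assumed
imported)::

    t = Table('items', metadata,
        Column('category', String(50), primary_key=True),
        Column('id', Integer, primary_key=True, autoincrement=True)
    )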
:param default: A scalar, Python callable, or
:class:`_expression.ColumnElement` expression representing the
*default value* for this column, which will be invoked upon insert
if this column is otherwise not specified in the VALUES clause of
the insert. This is a shortcut to using :class:`.ColumnDefault` as
a positional argument; see that class for full detail on the
structure of the argument.
Contrast this argument to
:paramref:`_schema.Column.server_default`
which creates a default generator on the database side.
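E.g., a sketch of a constant default and a Python-callable default
(assuming the standard library ``datetime`` module is imported)::

    Column('status', String(20), default='pending')

    # the callable itself is passed, and is invoked for each INSERT
    Column('created_at', DateTime, default=datetime.datetime.utcnow)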
.. seealso::
:ref:`metadata_defaults_toplevel`
:param doc: optional String that can be used by the ORM or similar
to document attributes on the Python side. This attribute does
**not** render SQL comments; use the
:paramref:`_schema.Column.comment`
parameter for this purpose.
:param key: An optional string identifier which will identify this
``Column`` object on the :class:`_schema.Table`.
When a key is provided,
this is the only identifier referencing the ``Column`` within the
application, including ORM attribute mapping; the ``name`` field
is used only when rendering SQL.
:param index: When ``True``, indicates that the column is indexed.
This is a shortcut for using a :class:`.Index` construct on the
table. To specify indexes with explicit names or indexes that
contain multiple columns, use the :class:`.Index` construct
instead.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
:param nullable: When set to ``False``, will cause the "NOT NULL"
phrase to be added when generating DDL for the column. When
``True``, will normally generate nothing (in SQL this defaults to
"NULL"), except in some very specific backend-specific edge cases
where "NULL" may render explicitly. Defaults to ``True`` unless
:paramref:`_schema.Column.primary_key` is also ``True``,
in which case it
defaults to ``False``. This parameter is only used when issuing
CREATE TABLE statements.
:param onupdate: A scalar, Python callable, or
:class:`~sqlalchemy.sql.expression.ClauseElement` representing a
default value to be applied to the column within UPDATE
statements, which will be invoked upon update if this column is not
present in the SET clause of the update. This is a shortcut to
using :class:`.ColumnDefault` as a positional argument with
``for_update=True``.
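E.g., a sketch of a timestamp refreshed by every UPDATE statement
(assuming the standard library ``datetime`` module is imported)::

    Column('updated_at', DateTime,
           default=datetime.datetime.utcnow,
           onupdate=datetime.datetime.utcnow)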
.. seealso::
:ref:`metadata_defaults` - complete discussion of onupdate
:param primary_key: If ``True``, marks this column as a primary key
column. Multiple columns can have this flag set to specify
composite primary keys. As an alternative, the primary key of a
:class:`_schema.Table` can be specified via an explicit
:class:`.PrimaryKeyConstraint` object.
:param server_default: A :class:`.FetchedValue` instance, str, Unicode
or :func:`~sqlalchemy.sql.expression.text` construct representing
the DDL DEFAULT value for the column.
String types will be emitted as-is, surrounded by single quotes::
Column('x', Text, server_default="val")
x TEXT DEFAULT 'val'
A :func:`~sqlalchemy.sql.expression.text` expression will be
rendered as-is, without quotes::
Column('y', DateTime, server_default=text('NOW()'))
y DATETIME DEFAULT NOW()
Strings and text() will be converted into a
:class:`.DefaultClause` object upon initialization.
Use :class:`.FetchedValue` to indicate that an already-existing
column will generate a default value on the database side which
will be available to SQLAlchemy for post-fetch after inserts. This
construct does not specify any DDL and the implementation is left
to the database, such as via a trigger.
.. seealso::
:ref:`server_defaults` - complete discussion of server side
defaults
:param server_onupdate: A :class:`.FetchedValue` instance
representing a database-side default generation function,
such as a trigger. This
indicates to SQLAlchemy that a newly generated value will be
available after updates. This construct does not actually
implement any kind of generation function within the database,
which instead must be specified separately.
.. seealso::
:ref:`triggered_columns`
:param quote: Force quoting of this column's name on or off,
corresponding to ``True`` or ``False``. When left at its default
of ``None``, the column identifier will be quoted according to
whether the name is case sensitive (identifiers with at least one
upper case character are treated as case sensitive), or if it's a
reserved word. This flag is only needed to force quoting of a
reserved word which is not known by the SQLAlchemy dialect.
:param unique: When ``True``, indicates that this column contains a
unique constraint, or if ``index`` is ``True`` as well, indicates
that the :class:`.Index` should be created with the unique flag.
To specify multiple columns in the constraint/index or to specify
an explicit name, use the :class:`.UniqueConstraint` or
:class:`.Index` constructs explicitly.
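E.g., the single-column shorthand next to its explicit, named
equivalent (a sketch; ``UniqueConstraint`` is assumed imported)::

    # shorthand - generates an anonymous unique constraint
    Column('email', String(100), unique=True)

    # explicit form, allowing a constraint name
    Table('user', metadata,
        Column('email', String(100)),
        UniqueConstraint('email', name='uq_user_email')
    )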
:param system: When ``True``, indicates this is a "system" column,
that is a column which is automatically made available by the
database, and should not be included in the columns list for a
``CREATE TABLE`` statement.
For more elaborate scenarios where columns should be
conditionally rendered differently on different backends,
consider custom compilation rules for :class:`.CreateColumn`.
:param comment: Optional string that will render an SQL comment on
table creation.
.. versionadded:: 1.2 Added the
:paramref:`_schema.Column.comment`
parameter to :class:`_schema.Column`.
"""
name = kwargs.pop("name", None)
type_ = kwargs.pop("type_", None)
args = list(args)
if args:
if isinstance(args[0], util.string_types):
if name is not None:
raise exc.ArgumentError(
"May not pass name positionally and as a keyword."
)
name = args.pop(0)
if args:
coltype = args[0]
if hasattr(coltype, "_sqla_type"):
if type_ is not None:
raise exc.ArgumentError(
"May not pass type_ positionally and as a keyword."
)
type_ = args.pop(0)
if name is not None:
name = quoted_name(name, kwargs.pop("quote", None))
elif "quote" in kwargs:
raise exc.ArgumentError(
"Explicit 'name' is required when " "sending 'quote' argument"
)
super(Column, self).__init__(name, type_)
self.key = kwargs.pop("key", name)
self.primary_key = kwargs.pop("primary_key", False)
self.nullable = kwargs.pop("nullable", not self.primary_key)
self.default = kwargs.pop("default", None)
self.server_default = kwargs.pop("server_default", None)
self.server_onupdate = kwargs.pop("server_onupdate", None)
# these default to None because .index and .unique is *not*
# an informational flag about Column - there can still be an
# Index or UniqueConstraint referring to this Column.
self.index = kwargs.pop("index", None)
self.unique = kwargs.pop("unique", None)
self.system = kwargs.pop("system", False)
self.doc = kwargs.pop("doc", None)
self.onupdate = kwargs.pop("onupdate", None)
self.autoincrement = kwargs.pop("autoincrement", "auto")
self.constraints = set()
self.foreign_keys = set()
self.comment = kwargs.pop("comment", None)
self.computed = None
# check if this Column is proxying another column
if "_proxies" in kwargs:
self._proxies = kwargs.pop("_proxies")
# otherwise, add DDL-related events
elif isinstance(self.type, SchemaEventTarget):
self.type._set_parent_with_dispatch(self)
if self.default is not None:
if isinstance(self.default, (ColumnDefault, Sequence)):
args.append(self.default)
else:
if getattr(self.type, "_warn_on_bytestring", False):
if isinstance(self.default, util.binary_type):
util.warn(
"Unicode column '%s' has non-unicode "
"default value %r specified."
% (self.key, self.default)
)
args.append(ColumnDefault(self.default))
if self.server_default is not None:
if isinstance(self.server_default, FetchedValue):
args.append(self.server_default._as_for_update(False))
else:
args.append(DefaultClause(self.server_default))
if self.onupdate is not None:
if isinstance(self.onupdate, (ColumnDefault, Sequence)):
args.append(self.onupdate)
else:
args.append(ColumnDefault(self.onupdate, for_update=True))
if self.server_onupdate is not None:
if isinstance(self.server_onupdate, FetchedValue):
args.append(self.server_onupdate._as_for_update(True))
else:
args.append(
DefaultClause(self.server_onupdate, for_update=True)
)
self._init_items(*args)
util.set_creation_order(self)
if "info" in kwargs:
self.info = kwargs.pop("info")
self._extra_kwargs(**kwargs)
def _extra_kwargs(self, **kwargs):
self._validate_dialect_kwargs(kwargs)
# @property
# def quote(self):
# return getattr(self.name, "quote", None)
def __str__(self):
if self.name is None:
return "(no name)"
elif self.table is not None:
if self.table.named_with_column:
return self.table.description + "." + self.description
else:
return self.description
else:
return self.description
def references(self, column):
"""Return True if this Column references the given column via foreign
key."""
for fk in self.foreign_keys:
if fk.column.proxy_set.intersection(column.proxy_set):
return True
else:
return False
def append_foreign_key(self, fk):
fk._set_parent_with_dispatch(self)
def __repr__(self):
kwarg = []
if self.key != self.name:
kwarg.append("key")
if self.primary_key:
kwarg.append("primary_key")
if not self.nullable:
kwarg.append("nullable")
if self.onupdate:
kwarg.append("onupdate")
if self.default:
kwarg.append("default")
if self.server_default:
kwarg.append("server_default")
if self.comment:
kwarg.append("comment")
return "Column(%s)" % ", ".join(
[repr(self.name)]
+ [repr(self.type)]
+ [repr(x) for x in self.foreign_keys if x is not None]
+ [repr(x) for x in self.constraints]
+ [
(
self.table is not None
and "table=<%s>" % self.table.description
or "table=None"
)
]
+ ["%s=%s" % (k, repr(getattr(self, k))) for k in kwarg]
)
def _set_parent(self, table):
if not self.name:
raise exc.ArgumentError(
"Column must be constructed with a non-blank name or "
"assign a non-blank .name before adding to a Table."
)
if self.key is None:
self.key = self.name
existing = getattr(self, "table", None)
if existing is not None and existing is not table:
raise exc.ArgumentError(
"Column object '%s' already assigned to Table '%s'"
% (self.key, existing.description)
)
if self.key in table._columns:
col = table._columns.get(self.key)
if col is not self:
for fk in col.foreign_keys:
table.foreign_keys.remove(fk)
if fk.constraint in table.constraints:
# this might have been removed
# already, if it's a composite constraint
# and more than one col being replaced
table.constraints.remove(fk.constraint)
table._columns.replace(self)
if self.primary_key:
table.primary_key._replace(self)
elif self.key in table.primary_key:
raise exc.ArgumentError(
"Trying to redefine primary-key column '%s' as a "
"non-primary-key column on table '%s'"
% (self.key, table.fullname)
)
self.table = table
if self.index:
if isinstance(self.index, util.string_types):
raise exc.ArgumentError(
"The 'index' keyword argument on Column is boolean only. "
"To create indexes with a specific name, create an "
"explicit Index object external to the Table."
)
table.append_constraint(
Index(
None, self.key, unique=bool(self.unique), _column_flag=True
)
)
elif self.unique:
if isinstance(self.unique, util.string_types):
raise exc.ArgumentError(
"The 'unique' keyword argument on Column is boolean "
"only. To create unique constraints or indexes with a "
"specific name, append an explicit UniqueConstraint to "
"the Table's list of elements, or create an explicit "
"Index object external to the Table."
)
table.append_constraint(
UniqueConstraint(self.key, _column_flag=True)
)
self._setup_on_memoized_fks(lambda fk: fk._set_remote_table(table))
def _setup_on_memoized_fks(self, fn):
fk_keys = [
((self.table.key, self.key), False),
((self.table.key, self.name), True),
]
for fk_key, link_to_name in fk_keys:
if fk_key in self.table.metadata._fk_memos:
for fk in self.table.metadata._fk_memos[fk_key]:
if fk.link_to_name is link_to_name:
fn(fk)
def _on_table_attach(self, fn):
if self.table is not None:
fn(self, self.table)
else:
event.listen(self, "after_parent_attach", fn)
def copy(self, **kw):
"""Create a copy of this ``Column``, uninitialized.
This is used in ``Table.tometadata``.
"""
# Constraint objects plus non-constraint-bound ForeignKey objects
args = [
c.copy(**kw) for c in self.constraints if not c._type_bound
] + [c.copy(**kw) for c in self.foreign_keys if not c.constraint]
# ticket #5276
column_kwargs = {}
for dialect_name in self.dialect_options:
dialect_options = self.dialect_options[dialect_name]._non_defaults
for (
dialect_option_key,
dialect_option_value,
) in dialect_options.items():
column_kwargs[
dialect_name + "_" + dialect_option_key
] = dialect_option_value
server_default = self.server_default
server_onupdate = self.server_onupdate
if isinstance(server_default, Computed):
# a Computed construct is carried along as a schema item in args;
# don't also pass it through server_default/server_onupdate
server_default = server_onupdate = None
args.append(self.server_default.copy(**kw))
type_ = self.type
if isinstance(type_, SchemaEventTarget):
type_ = type_.copy(**kw)
c = self._constructor(
name=self.name,
type_=type_,
key=self.key,
primary_key=self.primary_key,
nullable=self.nullable,
unique=self.unique,
system=self.system,
# quote=self.quote, # disabled 2013-08-27 (commit 031ef080)
index=self.index,
autoincrement=self.autoincrement,
default=self.default,
server_default=server_default,
onupdate=self.onupdate,
server_onupdate=server_onupdate,
doc=self.doc,
comment=self.comment,
*args,
**column_kwargs
)
return self._schema_item_copy(c)
def _make_proxy(
self, selectable, name=None, key=None, name_is_truncatable=False, **kw
):
"""Create a *proxy* for this column.
This is a copy of this ``Column`` referenced by a different parent
(such as an alias or select statement). The column should
be used only in select scenarios, as its full DDL/default
information is not transferred.
"""
fk = [
ForeignKey(f.column, _constraint=f.constraint)
for f in self.foreign_keys
]
if name is None and self.name is None:
raise exc.InvalidRequestError(
"Cannot initialize a sub-selectable"
" with this Column object until its 'name' has "
"been assigned."
)
try:
c = self._constructor(
_as_truncated(name or self.name)
if name_is_truncatable
else (name or self.name),
self.type,
key=key if key else name if name else self.key,
primary_key=self.primary_key,
nullable=self.nullable,
_proxies=[self],
*fk
)
except TypeError as err:
util.raise_(
TypeError(
"Could not create a copy of this %r object. "
"Ensure the class includes a _constructor() "
"attribute or method which accepts the "
"standard Column constructor arguments, or "
"references the Column class itself." % self.__class__
),
from_=err,
)
c.table = selectable
selectable._columns.add(c)
if selectable._is_clone_of is not None:
c._is_clone_of = selectable._is_clone_of.columns.get(c.key)
if self.primary_key:
selectable.primary_key.add(c)
c.dispatch.after_parent_attach(c, selectable)
return c
def get_children(self, schema_visitor=False, **kwargs):
if schema_visitor:
return (
[x for x in (self.default, self.onupdate) if x is not None]
+ list(self.foreign_keys)
+ list(self.constraints)
)
else:
return ColumnClause.get_children(self, **kwargs)
class ForeignKey(DialectKWArgs, SchemaItem):
"""Defines a dependency between two columns.
``ForeignKey`` is specified as an argument to a :class:`_schema.Column`
object,
e.g.::
t = Table("remote_table", metadata,
Column("remote_id", ForeignKey("main_table.id"))
)
Note that ``ForeignKey`` is only a marker object that defines
a dependency between two columns. The actual constraint
is in all cases represented by the :class:`_schema.ForeignKeyConstraint`
object. This object will be generated automatically when
a ``ForeignKey`` is associated with a :class:`_schema.Column` which
in turn is associated with a :class:`_schema.Table`. Conversely,
when :class:`_schema.ForeignKeyConstraint` is applied to a
:class:`_schema.Table`,
``ForeignKey`` markers are automatically generated to be
present on each associated :class:`_schema.Column`, which are also
associated with the constraint object.
Note that you cannot define a "composite" foreign key constraint,
that is a constraint between a grouping of multiple parent/child
columns, using ``ForeignKey`` objects. To define this grouping,
the :class:`_schema.ForeignKeyConstraint` object must be used, and applied
to the :class:`_schema.Table`. The associated ``ForeignKey`` objects
are created automatically.
The ``ForeignKey`` objects associated with an individual
:class:`_schema.Column`
object are available in the `foreign_keys` collection
of that column.
Further examples of foreign key configuration are in
:ref:`metadata_foreignkeys`.
"""
__visit_name__ = "foreign_key"
def __init__(
self,
column,
_constraint=None,
use_alter=False,
name=None,
onupdate=None,
ondelete=None,
deferrable=None,
initially=None,
link_to_name=False,
match=None,
info=None,
**dialect_kw
):
r"""
Construct a column-level FOREIGN KEY.
The :class:`_schema.ForeignKey` object when constructed generates a
:class:`_schema.ForeignKeyConstraint`
which is associated with the parent
:class:`_schema.Table` object's collection of constraints.
:param column: A single target column for the key relationship. A
:class:`_schema.Column` object or a column name as a string:
``tablename.columnkey`` or ``schema.tablename.columnkey``.
``columnkey`` is the ``key`` which has been assigned to the column
(defaults to the column name itself), unless ``link_to_name`` is
``True`` in which case the rendered name of the column is used.
:param name: Optional string. An in-database name for the key if
`constraint` is not provided.
:param onupdate: Optional string. If set, emit ON UPDATE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
SET NULL and RESTRICT.
:param ondelete: Optional string. If set, emit ON DELETE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
SET NULL and RESTRICT.
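E.g., a sketch where deleting a parent row cascades to its children::

    Column('user_id', Integer,
           ForeignKey('user.id', ondelete='CASCADE'))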
:param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param initially: Optional string. If set, emit INITIALLY <value> when
issuing DDL for this constraint.
:param link_to_name: if True, the string name given in ``column`` is
the rendered name of the referenced column, not its locally
assigned ``key``.
:param use_alter: passed to the underlying
:class:`_schema.ForeignKeyConstraint`
to indicate the constraint should
be generated/dropped externally from the CREATE TABLE/ DROP TABLE
statement. See :paramref:`_schema.ForeignKeyConstraint.use_alter`
for further description.
.. seealso::
:paramref:`_schema.ForeignKeyConstraint.use_alter`
:ref:`use_alter`
:param match: Optional string. If set, emit MATCH <value> when issuing
DDL for this constraint. Typical values include SIMPLE, PARTIAL
and FULL.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param \**dialect_kw: Additional keyword arguments are dialect
specific, and passed in the form ``<dialectname>_<argname>``. The
arguments are ultimately handled by a corresponding
:class:`_schema.ForeignKeyConstraint`.
See the documentation regarding
an individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
.. versionadded:: 0.9.2
"""
self._colspec = column
if isinstance(self._colspec, util.string_types):
self._table_column = None
else:
if hasattr(self._colspec, "__clause_element__"):
self._table_column = self._colspec.__clause_element__()
else:
self._table_column = self._colspec
if not isinstance(self._table_column, ColumnClause):
raise exc.ArgumentError(
"String, Column, or Column-bound argument "
"expected, got %r" % self._table_column
)
elif not isinstance(
self._table_column.table, (util.NoneType, TableClause)
):
raise exc.ArgumentError(
"ForeignKey received Column not bound "
"to a Table, got: %r" % self._table_column.table
)
# the linked ForeignKeyConstraint.
# ForeignKey will create this when parent Column
# is attached to a Table, *or* ForeignKeyConstraint
# object passes itself in when creating ForeignKey
# markers.
self.constraint = _constraint
self.parent = None
self.use_alter = use_alter
self.name = name
self.onupdate = onupdate
self.ondelete = ondelete
self.deferrable = deferrable
self.initially = initially
self.link_to_name = link_to_name
self.match = match
if info:
self.info = info
self._unvalidated_dialect_kw = dialect_kw
def __repr__(self):
return "ForeignKey(%r)" % self._get_colspec()
def copy(self, schema=None):
"""Produce a copy of this :class:`_schema.ForeignKey` object.
The new :class:`_schema.ForeignKey` will not be bound
to any :class:`_schema.Column`.
This method is usually used by the internal
copy procedures of :class:`_schema.Column`, :class:`_schema.Table`,
and :class:`_schema.MetaData`.
:param schema: The returned :class:`_schema.ForeignKey` will
reference the original table and column name, qualified
by the given string schema name.
"""
fk = ForeignKey(
self._get_colspec(schema=schema),
use_alter=self.use_alter,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
deferrable=self.deferrable,
initially=self.initially,
link_to_name=self.link_to_name,
match=self.match,
**self._unvalidated_dialect_kw
)
return self._schema_item_copy(fk)
def _get_colspec(self, schema=None, table_name=None):
"""Return a string based 'column specification' for this
:class:`_schema.ForeignKey`.
This is usually the equivalent of the string-based "tablename.colname"
argument first passed to the object's constructor.
"""
if schema:
_schema, tname, colname = self._column_tokens
if table_name is not None:
tname = table_name
return "%s.%s.%s" % (schema, tname, colname)
elif table_name:
schema, tname, colname = self._column_tokens
if schema:
return "%s.%s.%s" % (schema, table_name, colname)
else:
return "%s.%s" % (table_name, colname)
elif self._table_column is not None:
return "%s.%s" % (
self._table_column.table.fullname,
self._table_column.key,
)
else:
return self._colspec
@property
def _referred_schema(self):
return self._column_tokens[0]
def _table_key(self):
if self._table_column is not None:
if self._table_column.table is None:
return None
else:
return self._table_column.table.key
else:
schema, tname, colname = self._column_tokens
return _get_table_key(tname, schema)
target_fullname = property(_get_colspec)
def references(self, table):
"""Return True if the given :class:`_schema.Table`
is referenced by this
:class:`_schema.ForeignKey`."""
return table.corresponding_column(self.column) is not None
def get_referent(self, table):
"""Return the :class:`_schema.Column` in the given
:class:`_schema.Table`
referenced by this :class:`_schema.ForeignKey`.
Returns None if this :class:`_schema.ForeignKey`
does not reference the given
:class:`_schema.Table`.
"""
return table.corresponding_column(self.column)
@util.memoized_property
def _column_tokens(self):
"""parse a string-based _colspec into its component parts."""
m = self._get_colspec().split(".")
if m is None:
raise exc.ArgumentError(
"Invalid foreign key column specification: %s" % self._colspec
)
if len(m) == 1:
tname = m.pop()
colname = None
else:
colname = m.pop()
tname = m.pop()
# A FK between column 'bar' and table 'foo' can be
# specified as 'foo', 'foo.bar', 'dbo.foo.bar',
# 'otherdb.dbo.foo.bar'. Once we have the column name and
# the table name, treat everything else as the schema
# name. Some databases (e.g. Sybase) support
# inter-database foreign keys. See ticket #1341 and --
# indirectly related -- Ticket #594. This assumes that '.'
# will never appear *within* any component of the FK.
if len(m) > 0:
schema = ".".join(m)
else:
schema = None
return schema, tname, colname
def _resolve_col_tokens(self):
if self.parent is None:
raise exc.InvalidRequestError(
"this ForeignKey object does not yet have a "
"parent Column associated with it."
)
elif self.parent.table is None:
raise exc.InvalidRequestError(
"this ForeignKey's parent column is not yet associated "
"with a Table."
)
parenttable = self.parent.table
# assertion
# basically Column._make_proxy() sends the actual
# target Column to the ForeignKey object, so the
# string resolution here is never called.
for c in self.parent.base_columns:
if isinstance(c, Column):
assert c.table is parenttable
break
else:
assert False
######################
schema, tname, colname = self._column_tokens
if schema is None and parenttable.metadata.schema is not None:
schema = parenttable.metadata.schema
tablekey = _get_table_key(tname, schema)
return parenttable, tablekey, colname
def _link_to_col_by_colstring(self, parenttable, table, colname):
if not hasattr(self.constraint, "_referred_table"):
self.constraint._referred_table = table
else:
assert self.constraint._referred_table is table
_column = None
if colname is None:
# colname is None in the case that ForeignKey argument
# was specified as table name only, in which case we
# match the column name to the same column on the
# parent.
key = self.parent
_column = table.c.get(self.parent.key, None)
elif self.link_to_name:
key = colname
for c in table.c:
if c.name == colname:
_column = c
else:
key = colname
_column = table.c.get(colname, None)
if _column is None:
raise exc.NoReferencedColumnError(
"Could not initialize target column "
"for ForeignKey '%s' on table '%s': "
"table '%s' has no column named '%s'"
% (self._colspec, parenttable.name, table.name, key),
table.name,
key,
)
self._set_target_column(_column)
def _set_target_column(self, column):
# propagate TypeEngine to parent if it didn't have one
if self.parent.type._isnull:
self.parent.type = column.type
# super-edgy case, if other FKs point to our column,
# they'd get the type propagated out also.
if isinstance(self.parent.table, Table):
def set_type(fk):
if fk.parent.type._isnull:
fk.parent.type = column.type
self.parent._setup_on_memoized_fks(set_type)
self.column = column
@util.memoized_property
def column(self):
"""Return the target :class:`_schema.Column` referenced by this
:class:`_schema.ForeignKey`.
If no target column has been established, an exception
is raised.
.. versionchanged:: 0.9.0
Foreign key target column resolution now occurs as soon as both
the ForeignKey object and the remote Column to which it refers
are both associated with the same MetaData object.
"""
if isinstance(self._colspec, util.string_types):
parenttable, tablekey, colname = self._resolve_col_tokens()
if tablekey not in parenttable.metadata:
raise exc.NoReferencedTableError(
"Foreign key associated with column '%s' could not find "
"table '%s' with which to generate a "
"foreign key to target column '%s'"
% (self.parent, tablekey, colname),
tablekey,
)
elif parenttable.key not in parenttable.metadata:
raise exc.InvalidRequestError(
"Table %s is no longer associated with its "
"parent MetaData" % parenttable
)
else:
raise exc.NoReferencedColumnError(
"Could not initialize target column for "
"ForeignKey '%s' on table '%s': "
"table '%s' has no column named '%s'"
% (self._colspec, parenttable.name, tablekey, colname),
tablekey,
colname,
)
elif hasattr(self._colspec, "__clause_element__"):
_column = self._colspec.__clause_element__()
return _column
else:
_column = self._colspec
return _column
def _set_parent(self, column):
if self.parent is not None and self.parent is not column:
raise exc.InvalidRequestError(
"This ForeignKey already has a parent !"
)
self.parent = column
self.parent.foreign_keys.add(self)
self.parent._on_table_attach(self._set_table)
def _set_remote_table(self, table):
parenttable, tablekey, colname = self._resolve_col_tokens()
self._link_to_col_by_colstring(parenttable, table, colname)
self.constraint._validate_dest_table(table)
def _remove_from_metadata(self, metadata):
parenttable, table_key, colname = self._resolve_col_tokens()
fk_key = (table_key, colname)
if self in metadata._fk_memos[fk_key]:
# TODO: no test coverage for self not in memos
metadata._fk_memos[fk_key].remove(self)
def _set_table(self, column, table):
# standalone ForeignKey - create ForeignKeyConstraint
# on the hosting Table when attached to the Table.
if self.constraint is None and isinstance(table, Table):
self.constraint = ForeignKeyConstraint(
[],
[],
use_alter=self.use_alter,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
deferrable=self.deferrable,
initially=self.initially,
match=self.match,
**self._unvalidated_dialect_kw
)
self.constraint._append_element(column, self)
self.constraint._set_parent_with_dispatch(table)
table.foreign_keys.add(self)
# set up remote ".column" attribute, or a note to pick it
# up when the other Table/Column shows up
if isinstance(self._colspec, util.string_types):
parenttable, table_key, colname = self._resolve_col_tokens()
fk_key = (table_key, colname)
if table_key in parenttable.metadata.tables:
table = parenttable.metadata.tables[table_key]
try:
self._link_to_col_by_colstring(parenttable, table, colname)
except exc.NoReferencedColumnError:
# this is OK, we'll try later
pass
parenttable.metadata._fk_memos[fk_key].append(self)
elif hasattr(self._colspec, "__clause_element__"):
_column = self._colspec.__clause_element__()
self._set_target_column(_column)
else:
_column = self._colspec
self._set_target_column(_column)
class _NotAColumnExpr(object):
def _not_a_column_expr(self):
raise exc.InvalidRequestError(
"This %s cannot be used directly "
"as a column expression." % self.__class__.__name__
)
__clause_element__ = self_group = lambda self: self._not_a_column_expr()
_from_objects = property(lambda self: self._not_a_column_expr())
class DefaultGenerator(_NotAColumnExpr, SchemaItem):
"""Base class for column *default* values."""
__visit_name__ = "default_generator"
is_sequence = False
is_server_default = False
column = None
def __init__(self, for_update=False):
self.for_update = for_update
def _set_parent(self, column):
self.column = column
if self.for_update:
self.column.onupdate = self
else:
self.column.default = self
def execute(self, bind=None, **kwargs):
if bind is None:
bind = _bind_or_error(self)
return bind.execute(self, **kwargs)
def _execute_on_connection(self, connection, multiparams, params):
return connection._execute_default(self, multiparams, params)
@property
def bind(self):
"""Return the connectable associated with this default."""
if getattr(self, "column", None) is not None:
return self.column.table.bind
else:
return None
class ColumnDefault(DefaultGenerator):
"""A plain default value on a column.
This could correspond to a constant, a callable function,
or a SQL clause.
:class:`.ColumnDefault` is generated automatically
whenever the ``default``, ``onupdate`` arguments of
:class:`_schema.Column` are used. A :class:`.ColumnDefault`
can be passed positionally as well.
For example, the following::
Column('foo', Integer, default=50)
Is equivalent to::
Column('foo', Integer, ColumnDefault(50))
"""
def __init__(self, arg, **kwargs):
""""Construct a new :class:`.ColumnDefault`.
:param arg: argument representing the default value.
May be one of the following:
* a plain non-callable Python value, such as a
string, integer, boolean, or other simple type.
The default value will be used as is each time.
* a SQL expression, that is one which derives from
:class:`_expression.ColumnElement`. The SQL expression will
be rendered into the INSERT or UPDATE statement,
or in the case of a primary key column when
RETURNING is not used may be
pre-executed before an INSERT within a SELECT.
* A Python callable. The function will be invoked for each
new row subject to an INSERT or UPDATE.
The callable must accept exactly
zero or one positional arguments. The one-argument form
will receive an instance of the :class:`.ExecutionContext`,
which provides contextual information as to the current
:class:`_engine.Connection` in use as well as the current
statement and parameters.
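
         For illustration, a minimal sketch of the one-argument form,
         using the execution context's ``get_current_parameters()``
         accessor (the names ``mydefault`` and ``counter`` are
         hypothetical)::

            def mydefault(ctx):
                # derive the value from another parameter in the
                # same INSERT / UPDATE statement
                return ctx.get_current_parameters()["counter"] + 12

            Column("counter_plus_twelve", Integer, default=mydefault)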
"""
super(ColumnDefault, self).__init__(**kwargs)
if isinstance(arg, FetchedValue):
raise exc.ArgumentError(
"ColumnDefault may not be a server-side default type."
)
if util.callable(arg):
arg = self._maybe_wrap_callable(arg)
self.arg = arg
@util.memoized_property
def is_callable(self):
return util.callable(self.arg)
@util.memoized_property
def is_clause_element(self):
return isinstance(self.arg, ClauseElement)
@util.memoized_property
def is_scalar(self):
return (
not self.is_callable
and not self.is_clause_element
and not self.is_sequence
)
@util.memoized_property
@util.dependencies("sqlalchemy.sql.sqltypes")
def _arg_is_typed(self, sqltypes):
if self.is_clause_element:
return not isinstance(self.arg.type, sqltypes.NullType)
else:
return False
def _maybe_wrap_callable(self, fn):
"""Wrap callables that don't accept a context.
This is to allow easy compatibility with default callables
that aren't specific to accepting of a context.
"""
try:
argspec = util.get_callable_argspec(fn, no_self=True)
except TypeError:
return util.wrap_callable(lambda ctx: fn(), fn)
defaulted = argspec[3] is not None and len(argspec[3]) or 0
positionals = len(argspec[0]) - defaulted
if positionals == 0:
return util.wrap_callable(lambda ctx: fn(), fn)
elif positionals == 1:
return fn
else:
raise exc.ArgumentError(
"ColumnDefault Python function takes zero or one "
"positional arguments"
)
def _visit_name(self):
if self.for_update:
return "column_onupdate"
else:
return "column_default"
__visit_name__ = property(_visit_name)
def __repr__(self):
return "ColumnDefault(%r)" % (self.arg,)
class IdentityOptions(object):
"""Defines options for a named database sequence or an identity column.
.. versionadded:: 1.3.18
.. seealso::
:class:`.Sequence`
"""
def __init__(
self,
start=None,
increment=None,
minvalue=None,
maxvalue=None,
nominvalue=None,
nomaxvalue=None,
cycle=None,
cache=None,
order=None,
):
"""Construct a :class:`.IdentityOptions` object.
See the :class:`.Sequence` documentation for a complete description
        of the parameters.
:param start: the starting index of the sequence.
:param increment: the increment value of the sequence.
:param minvalue: the minimum value of the sequence.
:param maxvalue: the maximum value of the sequence.
:param nominvalue: no minimum value of the sequence.
:param nomaxvalue: no maximum value of the sequence.
:param cycle: allows the sequence to wrap around when the maxvalue
or minvalue has been reached.
:param cache: optional integer value; number of future values in the
sequence which are calculated in advance.
:param order: optional boolean value; if true, renders the
ORDER keyword.
"""
self.start = start
self.increment = increment
self.minvalue = minvalue
self.maxvalue = maxvalue
self.nominvalue = nominvalue
self.nomaxvalue = nomaxvalue
self.cycle = cycle
self.cache = cache
self.order = order
class Sequence(IdentityOptions, DefaultGenerator):
"""Represents a named database sequence.
The :class:`.Sequence` object represents the name and configurational
parameters of a database sequence. It also represents
a construct that can be "executed" by a SQLAlchemy :class:`_engine.Engine`
or :class:`_engine.Connection`,
rendering the appropriate "next value" function
for the target database and returning a result.
The :class:`.Sequence` is typically associated with a primary key column::
some_table = Table(
'some_table', metadata,
Column('id', Integer, Sequence('some_table_seq'),
primary_key=True)
)
When CREATE TABLE is emitted for the above :class:`_schema.Table`, if the
target platform supports sequences, a CREATE SEQUENCE statement will
be emitted as well. For platforms that don't support sequences,
the :class:`.Sequence` construct is ignored.
.. seealso::
:class:`.CreateSequence`
:class:`.DropSequence`
"""
__visit_name__ = "sequence"
is_sequence = True
def __init__(
self,
name,
start=None,
increment=None,
minvalue=None,
maxvalue=None,
nominvalue=None,
nomaxvalue=None,
cycle=None,
schema=None,
cache=None,
order=None,
optional=False,
quote=None,
metadata=None,
quote_schema=None,
for_update=False,
):
"""Construct a :class:`.Sequence` object.
:param name: the name of the sequence.
:param start: the starting index of the sequence. This value is
used when the CREATE SEQUENCE command is emitted to the database
as the value of the "START WITH" clause. If ``None``, the
clause is omitted, which on most platforms indicates a starting
value of 1.
:param increment: the increment value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "INCREMENT BY" clause. If ``None``,
the clause is omitted, which on most platforms indicates an
increment of 1.
:param minvalue: the minimum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "MINVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
minvalue of 1 and -2^63-1 for ascending and descending sequences,
respectively.
.. versionadded:: 1.0.7
:param maxvalue: the maximum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "MAXVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
maxvalue of 2^63-1 and -1 for ascending and descending sequences,
respectively.
.. versionadded:: 1.0.7
:param nominvalue: no minimum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "NO MINVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
minvalue of 1 and -2^63-1 for ascending and descending sequences,
respectively.
.. versionadded:: 1.0.7
:param nomaxvalue: no maximum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "NO MAXVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
maxvalue of 2^63-1 and -1 for ascending and descending sequences,
respectively.
.. versionadded:: 1.0.7
:param cycle: allows the sequence to wrap around when the maxvalue
or minvalue has been reached by an ascending or descending sequence
respectively. This value is used when the CREATE SEQUENCE command
is emitted to the database as the "CYCLE" clause. If the limit is
reached, the next number generated will be the minvalue or maxvalue,
respectively. If cycle=False (the default) any calls to nextval
after the sequence has reached its maximum value will return an
error.
.. versionadded:: 1.0.7
:param schema: optional schema name for the sequence, if located
in a schema other than the default. The rules for selecting the
schema name when a :class:`_schema.MetaData`
is also present are the same
as that of :paramref:`_schema.Table.schema`.
:param cache: optional integer value; number of future values in the
sequence which are calculated in advance. Renders the CACHE keyword
understood by Oracle and PostgreSQL.
.. versionadded:: 1.1.12
:param order: optional boolean value; if true, renders the
ORDER keyword, understood by Oracle, indicating the sequence is
definitively ordered. May be necessary to provide deterministic
ordering using Oracle RAC.
.. versionadded:: 1.1.12
:param optional: boolean value, when ``True``, indicates that this
:class:`.Sequence` object only needs to be explicitly generated
on backends that don't provide another way to generate primary
key identifiers. Currently, it essentially means, "don't create
this sequence on the PostgreSQL backend, where the SERIAL keyword
creates a sequence for us automatically".
:param quote: boolean value, when ``True`` or ``False``, explicitly
forces quoting of the :paramref:`_schema.Sequence.name` on or off.
When left at its default of ``None``, normal quoting rules based
on casing and reserved words take place.
:param quote_schema: Set the quoting preferences for the ``schema``
name.
:param metadata: optional :class:`_schema.MetaData` object which this
:class:`.Sequence` will be associated with. A :class:`.Sequence`
that is associated with a :class:`_schema.MetaData`
gains the following
capabilities:
* The :class:`.Sequence` will inherit the
:paramref:`_schema.MetaData.schema`
parameter specified to the target :class:`_schema.MetaData`, which
affects the production of CREATE / DROP DDL, if any.
* The :meth:`.Sequence.create` and :meth:`.Sequence.drop` methods
automatically use the engine bound to the :class:`_schema.MetaData`
object, if any.
* The :meth:`_schema.MetaData.create_all` and
:meth:`_schema.MetaData.drop_all`
methods will emit CREATE / DROP for this :class:`.Sequence`,
even if the :class:`.Sequence` is not associated with any
:class:`_schema.Table` / :class:`_schema.Column`
that's a member of this
:class:`_schema.MetaData`.
The above behaviors can only occur if the :class:`.Sequence` is
explicitly associated with the :class:`_schema.MetaData`
via this parameter.
.. seealso::
:ref:`sequence_metadata` - full discussion of the
:paramref:`.Sequence.metadata` parameter.
:param for_update: Indicates this :class:`.Sequence`, when associated
with a :class:`_schema.Column`,
should be invoked for UPDATE statements
on that column's table, rather than for INSERT statements, when
no value is otherwise present for that column in the statement.
"""
DefaultGenerator.__init__(self, for_update=for_update)
IdentityOptions.__init__(
self,
start=start,
increment=increment,
minvalue=minvalue,
maxvalue=maxvalue,
nominvalue=nominvalue,
nomaxvalue=nomaxvalue,
cycle=cycle,
cache=cache,
order=order,
)
self.name = quoted_name(name, quote)
self.optional = optional
if schema is BLANK_SCHEMA:
self.schema = schema = None
elif metadata is not None and schema is None and metadata.schema:
self.schema = schema = metadata.schema
else:
self.schema = quoted_name(schema, quote_schema)
self.metadata = metadata
self._key = _get_table_key(name, schema)
if metadata:
self._set_metadata(metadata)
@util.memoized_property
def is_callable(self):
return False
@util.memoized_property
def is_clause_element(self):
return False
@util.dependencies("sqlalchemy.sql.functions.func")
def next_value(self, func):
"""Return a :class:`.next_value` function element
which will render the appropriate increment function
for this :class:`.Sequence` within any SQL expression.
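
        E.g., a minimal sketch (the sequence name is illustrative)::

            seq = Sequence("some_sequence")
            stmt = select([seq.next_value()])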
"""
return func.next_value(self, bind=self.bind)
def _set_parent(self, column):
super(Sequence, self)._set_parent(column)
column._on_table_attach(self._set_table)
def _set_table(self, column, table):
self._set_metadata(table.metadata)
def _set_metadata(self, metadata):
self.metadata = metadata
self.metadata._sequences[self._key] = self
@property
def bind(self):
if self.metadata:
return self.metadata.bind
else:
return None
def create(self, bind=None, checkfirst=True):
"""Creates this sequence in the database."""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaGenerator, self, checkfirst=checkfirst)
def drop(self, bind=None, checkfirst=True):
"""Drops this sequence from the database."""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaDropper, self, checkfirst=checkfirst)
def _not_a_column_expr(self):
raise exc.InvalidRequestError(
"This %s cannot be used directly "
"as a column expression. Use func.next_value(sequence) "
"to produce a 'next value' function that's usable "
"as a column element." % self.__class__.__name__
)
@inspection._self_inspects
class FetchedValue(_NotAColumnExpr, SchemaEventTarget):
"""A marker for a transparent database-side default.
Use :class:`.FetchedValue` when the database is configured
to provide some automatic default for a column.
E.g.::
Column('foo', Integer, FetchedValue())
Would indicate that some trigger or default generator
will create a new value for the ``foo`` column during an
INSERT.
.. seealso::
:ref:`triggered_columns`
"""
is_server_default = True
reflected = False
has_argument = False
def __init__(self, for_update=False):
self.for_update = for_update
def _as_for_update(self, for_update):
if for_update == self.for_update:
return self
else:
return self._clone(for_update)
def _clone(self, for_update):
n = self.__class__.__new__(self.__class__)
n.__dict__.update(self.__dict__)
n.__dict__.pop("column", None)
n.for_update = for_update
return n
def _set_parent(self, column):
self.column = column
if self.for_update:
self.column.server_onupdate = self
else:
self.column.server_default = self
def __repr__(self):
return util.generic_repr(self)
class DefaultClause(FetchedValue):
"""A DDL-specified DEFAULT column value.
:class:`.DefaultClause` is a :class:`.FetchedValue`
that also generates a "DEFAULT" clause when
"CREATE TABLE" is emitted.
:class:`.DefaultClause` is generated automatically
whenever the ``server_default``, ``server_onupdate`` arguments of
:class:`_schema.Column` are used. A :class:`.DefaultClause`
can be passed positionally as well.
For example, the following::
Column('foo', Integer, server_default="50")
Is equivalent to::
Column('foo', Integer, DefaultClause("50"))
"""
has_argument = True
def __init__(self, arg, for_update=False, _reflected=False):
util.assert_arg_type(
arg, (util.string_types[0], ClauseElement, TextClause), "arg"
)
super(DefaultClause, self).__init__(for_update)
self.arg = arg
self.reflected = _reflected
def __repr__(self):
return "DefaultClause(%r, for_update=%r)" % (self.arg, self.for_update)
@util.deprecated_cls(
"0.6",
":class:`.PassiveDefault` is deprecated and will be removed in a "
"future release. Please refer to :class:`.DefaultClause`.",
)
class PassiveDefault(DefaultClause):
"""A DDL-specified DEFAULT column value.
"""
def __init__(self, *arg, **kw):
DefaultClause.__init__(self, *arg, **kw)
class Constraint(DialectKWArgs, SchemaItem):
"""A table-level SQL constraint."""
__visit_name__ = "constraint"
def __init__(
self,
name=None,
deferrable=None,
initially=None,
_create_rule=None,
info=None,
_type_bound=False,
**dialect_kw
):
r"""Create a SQL constraint.
:param name:
Optional, the in-database name of this ``Constraint``.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param _create_rule:
a callable which is passed the DDLCompiler object during
compilation. Returns True or False to signal inline generation of
this Constraint.
The AddConstraint and DropConstraint DDL constructs provide
DDLElement's more comprehensive "conditional DDL" approach that is
passed a database connection when DDL is being issued. _create_rule
is instead called during any CREATE TABLE compilation, where there
may not be any transaction/connection in progress. However, it
allows conditional compilation of the constraint even for backends
which do not support addition of constraints through ALTER TABLE,
which currently includes SQLite.
_create_rule is used by some types to create constraints.
Currently, its call signature is subject to change at any time.
:param \**dialect_kw: Additional keyword arguments are dialect
specific, and passed in the form ``<dialectname>_<argname>``. See
the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
"""
self.name = name
self.deferrable = deferrable
self.initially = initially
if info:
self.info = info
self._create_rule = _create_rule
self._type_bound = _type_bound
util.set_creation_order(self)
self._validate_dialect_kwargs(dialect_kw)
@property
def table(self):
try:
if isinstance(self.parent, Table):
return self.parent
except AttributeError:
pass
raise exc.InvalidRequestError(
"This constraint is not bound to a table. Did you "
"mean to call table.append_constraint(constraint) ?"
)
def _set_parent(self, parent):
self.parent = parent
parent.constraints.add(self)
def copy(self, **kw):
raise NotImplementedError()
def _to_schema_column(element):
if hasattr(element, "__clause_element__"):
element = element.__clause_element__()
if not isinstance(element, Column):
raise exc.ArgumentError("schema.Column object expected")
return element
def _to_schema_column_or_string(element):
if element is None:
return element
elif hasattr(element, "__clause_element__"):
element = element.__clause_element__()
if not isinstance(element, util.string_types + (ColumnElement,)):
msg = "Element %r is not a string name or column element"
raise exc.ArgumentError(msg % element)
return element
class ColumnCollectionMixin(object):
columns = None
"""A :class:`_expression.ColumnCollection` of :class:`_schema.Column`
objects.
This collection represents the columns which are referred to by
this object.
"""
_allow_multiple_tables = False
def __init__(self, *columns, **kw):
_autoattach = kw.pop("_autoattach", True)
self._column_flag = kw.pop("_column_flag", False)
self.columns = ColumnCollection()
self._pending_colargs = [
_to_schema_column_or_string(c) for c in columns
]
if _autoattach and self._pending_colargs:
self._check_attach()
@classmethod
def _extract_col_expression_collection(cls, expressions):
for expr in expressions:
strname = None
column = None
if hasattr(expr, "__clause_element__"):
expr = expr.__clause_element__()
if not isinstance(expr, (ColumnElement, TextClause)):
# this assumes a string
strname = expr
else:
cols = []
visitors.traverse(expr, {}, {"column": cols.append})
if cols:
column = cols[0]
add_element = column if column is not None else strname
yield expr, column, strname, add_element
def _check_attach(self, evt=False):
col_objs = [c for c in self._pending_colargs if isinstance(c, Column)]
cols_w_table = [c for c in col_objs if isinstance(c.table, Table)]
cols_wo_table = set(col_objs).difference(cols_w_table)
if cols_wo_table:
# feature #3341 - place event listeners for Column objects
# such that when all those cols are attached, we autoattach.
assert not evt, "Should not reach here on event call"
# issue #3411 - don't do the per-column auto-attach if some of the
# columns are specified as strings.
has_string_cols = set(
c for c in self._pending_colargs if c is not None
).difference(col_objs)
if not has_string_cols:
def _col_attached(column, table):
# this isinstance() corresponds with the
# isinstance() above; only want to count Table-bound
# columns
if isinstance(table, Table):
cols_wo_table.discard(column)
if not cols_wo_table:
self._check_attach(evt=True)
self._cols_wo_table = cols_wo_table
for col in cols_wo_table:
col._on_table_attach(_col_attached)
return
columns = cols_w_table
tables = {c.table for c in columns}
if len(tables) == 1:
self._set_parent_with_dispatch(tables.pop())
elif len(tables) > 1 and not self._allow_multiple_tables:
table = columns[0].table
others = [c for c in columns[1:] if c.table is not table]
if others:
raise exc.ArgumentError(
"Column(s) %s are not part of table '%s'."
% (
", ".join("'%s'" % c for c in others),
table.description,
)
)
def _col_expressions(self, table):
return [
table.c[col] if isinstance(col, util.string_types) else col
for col in self._pending_colargs
]
def _set_parent(self, table):
for col in self._col_expressions(table):
if col is not None:
self.columns.add(col)
class ColumnCollectionConstraint(ColumnCollectionMixin, Constraint):
"""A constraint that proxies a ColumnCollection."""
def __init__(self, *columns, **kw):
r"""
:param \*columns:
A sequence of column names or Column objects.
:param name:
Optional, the in-database name of this constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param \**kw: other keyword arguments including dialect-specific
arguments are propagated to the :class:`.Constraint` superclass.
"""
_autoattach = kw.pop("_autoattach", True)
_column_flag = kw.pop("_column_flag", False)
Constraint.__init__(self, **kw)
ColumnCollectionMixin.__init__(
self, *columns, _autoattach=_autoattach, _column_flag=_column_flag
)
columns = None
"""A :class:`_expression.ColumnCollection` representing the set of columns
for this constraint.
"""
def _set_parent(self, table):
Constraint._set_parent(self, table)
ColumnCollectionMixin._set_parent(self, table)
def __contains__(self, x):
return x in self.columns
def copy(self, **kw):
# ticket #5276
constraint_kwargs = {}
for dialect_name in self.dialect_options:
dialect_options = self.dialect_options[dialect_name]._non_defaults
for (
dialect_option_key,
dialect_option_value,
) in dialect_options.items():
constraint_kwargs[
dialect_name + "_" + dialect_option_key
] = dialect_option_value
c = self.__class__(
name=self.name,
deferrable=self.deferrable,
initially=self.initially,
*self.columns.keys(),
**constraint_kwargs
)
return self._schema_item_copy(c)
def contains_column(self, col):
"""Return True if this constraint contains the given column.
Note that this object also contains an attribute ``.columns``
which is a :class:`_expression.ColumnCollection` of
:class:`_schema.Column` objects.
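
        E.g., a sketch, assuming a table-bound column (``mytable`` is
        hypothetical)::

            uq = UniqueConstraint(mytable.c.col1)
            assert uq.contains_column(mytable.c.col1)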
"""
return self.columns.contains_column(col)
def __iter__(self):
# inlining of
# return iter(self.columns)
# ColumnCollection->OrderedProperties->OrderedDict
ordered_dict = self.columns._data
return (ordered_dict[key] for key in ordered_dict._list)
def __len__(self):
return len(self.columns._data)
class CheckConstraint(ColumnCollectionConstraint):
"""A table- or column-level CHECK constraint.
Can be included in the definition of a Table or Column.
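
    For illustration, a sketch of both forms (table and column names
    are hypothetical)::

        Table("mytable", metadata,
            Column("value", Integer),
            CheckConstraint("value > 5", name="value_positive"),
        )

        Column("value", Integer, CheckConstraint("value > 5"))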
"""
_allow_multiple_tables = True
@_document_text_coercion(
"sqltext",
":class:`.CheckConstraint`",
":paramref:`.CheckConstraint.sqltext`",
)
def __init__(
self,
sqltext,
name=None,
deferrable=None,
initially=None,
table=None,
info=None,
_create_rule=None,
_autoattach=True,
_type_bound=False,
**kw
):
r"""Construct a CHECK constraint.
:param sqltext:
A string containing the constraint definition, which will be used
verbatim, or a SQL expression construct. If given as a string,
the object is converted to a :func:`_expression.text` object.
If the textual
string includes a colon character, escape this using a backslash::
CheckConstraint(r"foo ~ E'a(?\:b|c)d")
:param name:
Optional, the in-database name of the constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
"""
self.sqltext = _literal_as_text(sqltext, allow_coercion_to_text=True)
columns = []
visitors.traverse(self.sqltext, {}, {"column": columns.append})
super(CheckConstraint, self).__init__(
name=name,
deferrable=deferrable,
initially=initially,
_create_rule=_create_rule,
info=info,
_type_bound=_type_bound,
_autoattach=_autoattach,
*columns,
**kw
)
if table is not None:
self._set_parent_with_dispatch(table)
def __visit_name__(self):
if isinstance(self.parent, Table):
return "check_constraint"
else:
return "column_check_constraint"
__visit_name__ = property(__visit_name__)
def copy(self, target_table=None, **kw):
if target_table is not None:
sqltext = _copy_expression(self.sqltext, self.table, target_table)
else:
sqltext = self.sqltext
c = CheckConstraint(
sqltext,
name=self.name,
initially=self.initially,
deferrable=self.deferrable,
_create_rule=self._create_rule,
table=target_table,
_autoattach=False,
_type_bound=self._type_bound,
)
return self._schema_item_copy(c)
class ForeignKeyConstraint(ColumnCollectionConstraint):
"""A table-level FOREIGN KEY constraint.
Defines a single column or composite FOREIGN KEY ... REFERENCES
constraint. For a no-frills, single column foreign key, adding a
:class:`_schema.ForeignKey` to the definition of a :class:`_schema.Column`
is a
shorthand equivalent for an unnamed, single column
:class:`_schema.ForeignKeyConstraint`.
Examples of foreign key configuration are in :ref:`metadata_foreignkeys`.
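
    For illustration, a composite constraint sketch (table and column
    names are hypothetical)::

        Table("invoice_item", metadata,
            Column("invoice_id", Integer),
            Column("ref_num", Integer),
            ForeignKeyConstraint(
                ["invoice_id", "ref_num"],
                ["invoice.id", "invoice.ref_num"],
            ),
        )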
"""
__visit_name__ = "foreign_key_constraint"
def __init__(
self,
columns,
refcolumns,
name=None,
onupdate=None,
ondelete=None,
deferrable=None,
initially=None,
use_alter=False,
link_to_name=False,
match=None,
table=None,
info=None,
**dialect_kw
):
r"""Construct a composite-capable FOREIGN KEY.
:param columns: A sequence of local column names. The named columns
must be defined and present in the parent Table. The names should
match the ``key`` given to each column (defaults to the name) unless
``link_to_name`` is True.
:param refcolumns: A sequence of foreign column names or Column
objects. The columns must all be located within the same Table.
:param name: Optional, the in-database name of the key.
        :param onupdate: Optional string. If set, emit ON UPDATE <value> when
          issuing DDL for this constraint. Typical values include CASCADE,
          SET NULL and RESTRICT.
        :param ondelete: Optional string. If set, emit ON DELETE <value> when
          issuing DDL for this constraint. Typical values include CASCADE,
          SET NULL and RESTRICT.
:param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param initially: Optional string. If set, emit INITIALLY <value> when
issuing DDL for this constraint.
:param link_to_name: if True, the string name given in ``column`` is
the rendered name of the referenced column, not its locally assigned
``key``.
:param use_alter: If True, do not emit the DDL for this constraint as
part of the CREATE TABLE definition. Instead, generate it via an
ALTER TABLE statement issued after the full collection of tables
have been created, and drop it via an ALTER TABLE statement before
the full collection of tables are dropped.
The use of :paramref:`_schema.ForeignKeyConstraint.use_alter` is
particularly geared towards the case where two or more tables
are established within a mutually-dependent foreign key constraint
relationship; however, the :meth:`_schema.MetaData.create_all` and
:meth:`_schema.MetaData.drop_all`
methods will perform this resolution
automatically, so the flag is normally not needed.
.. versionchanged:: 1.0.0 Automatic resolution of foreign key
cycles has been added, removing the need to use the
:paramref:`_schema.ForeignKeyConstraint.use_alter` in typical use
cases.
.. seealso::
:ref:`use_alter`
:param match: Optional string. If set, emit MATCH <value> when issuing
DDL for this constraint. Typical values include SIMPLE, PARTIAL
and FULL.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param \**dialect_kw: Additional keyword arguments are dialect
specific, and passed in the form ``<dialectname>_<argname>``. See
the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
.. versionadded:: 0.9.2
"""
Constraint.__init__(
self,
name=name,
deferrable=deferrable,
initially=initially,
info=info,
**dialect_kw
)
self.onupdate = onupdate
self.ondelete = ondelete
self.link_to_name = link_to_name
self.use_alter = use_alter
self.match = match
if len(set(columns)) != len(refcolumns):
if len(set(columns)) != len(columns):
# e.g. FOREIGN KEY (a, a) REFERENCES r (b, c)
raise exc.ArgumentError(
"ForeignKeyConstraint with duplicate source column "
"references are not supported."
)
else:
# e.g. FOREIGN KEY (a) REFERENCES r (b, c)
# paraphrasing https://www.postgresql.org/docs/9.2/static/\
# ddl-constraints.html
raise exc.ArgumentError(
"ForeignKeyConstraint number "
"of constrained columns must match the number of "
"referenced columns."
)
# standalone ForeignKeyConstraint - create
# associated ForeignKey objects which will be applied to hosted
# Column objects (in col.foreign_keys), either now or when attached
# to the Table for string-specified names
self.elements = [
ForeignKey(
refcol,
_constraint=self,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
use_alter=self.use_alter,
link_to_name=self.link_to_name,
match=self.match,
deferrable=self.deferrable,
initially=self.initially,
**self.dialect_kwargs
)
for refcol in refcolumns
]
ColumnCollectionMixin.__init__(self, *columns)
if table is not None:
if hasattr(self, "parent"):
assert table is self.parent
self._set_parent_with_dispatch(table)
def _append_element(self, column, fk):
self.columns.add(column)
self.elements.append(fk)
columns = None
"""A :class:`_expression.ColumnCollection` representing the set of columns
for this constraint.
"""
elements = None
"""A sequence of :class:`_schema.ForeignKey` objects.
Each :class:`_schema.ForeignKey`
represents a single referring column/referred
column pair.
This collection is intended to be read-only.
"""
@property
def _elements(self):
# legacy - provide a dictionary view of (column_key, fk)
return util.OrderedDict(zip(self.column_keys, self.elements))
@property
def _referred_schema(self):
for elem in self.elements:
return elem._referred_schema
else:
return None
@property
def referred_table(self):
"""The :class:`_schema.Table` object to which this
:class:`_schema.ForeignKeyConstraint` references.
This is a dynamically calculated attribute which may not be available
if the constraint and/or parent table is not yet associated with
a metadata collection that contains the referred table.
.. versionadded:: 1.0.0
"""
return self.elements[0].column.table
def _validate_dest_table(self, table):
table_keys = set([elem._table_key() for elem in self.elements])
if None not in table_keys and len(table_keys) > 1:
elem0, elem1 = sorted(table_keys)[0:2]
raise exc.ArgumentError(
"ForeignKeyConstraint on %s(%s) refers to "
"multiple remote tables: %s and %s"
% (table.fullname, self._col_description, elem0, elem1)
)
@property
def column_keys(self):
"""Return a list of string keys representing the local
columns in this :class:`_schema.ForeignKeyConstraint`.
This list is either the original string arguments sent
to the constructor of the :class:`_schema.ForeignKeyConstraint`,
or if the constraint has been initialized with :class:`_schema.Column`
objects, is the string .key of each element.
.. versionadded:: 1.0.0
"""
if hasattr(self, "parent"):
return self.columns.keys()
else:
return [
col.key if isinstance(col, ColumnElement) else str(col)
for col in self._pending_colargs
]
@property
def _col_description(self):
return ", ".join(self.column_keys)
def _set_parent(self, table):
Constraint._set_parent(self, table)
try:
ColumnCollectionConstraint._set_parent(self, table)
except KeyError as ke:
util.raise_(
exc.ArgumentError(
"Can't create ForeignKeyConstraint "
"on table '%s': no column "
"named '%s' is present." % (table.description, ke.args[0])
),
from_=ke,
)
for col, fk in zip(self.columns, self.elements):
if not hasattr(fk, "parent") or fk.parent is not col:
fk._set_parent_with_dispatch(col)
self._validate_dest_table(table)
def copy(self, schema=None, target_table=None, **kw):
fkc = ForeignKeyConstraint(
[x.parent.key for x in self.elements],
[
x._get_colspec(
schema=schema,
table_name=target_table.name
if target_table is not None
and x._table_key() == x.parent.table.key
else None,
)
for x in self.elements
],
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
use_alter=self.use_alter,
deferrable=self.deferrable,
initially=self.initially,
link_to_name=self.link_to_name,
match=self.match,
)
for self_fk, other_fk in zip(self.elements, fkc.elements):
self_fk._schema_item_copy(other_fk)
return self._schema_item_copy(fkc)
class PrimaryKeyConstraint(ColumnCollectionConstraint):
"""A table-level PRIMARY KEY constraint.
The :class:`.PrimaryKeyConstraint` object is present automatically
on any :class:`_schema.Table` object; it is assigned a set of
:class:`_schema.Column` objects corresponding to those marked with
the :paramref:`_schema.Column.primary_key` flag::
>>> my_table = Table('mytable', metadata,
... Column('id', Integer, primary_key=True),
... Column('version_id', Integer, primary_key=True),
... Column('data', String(50))
... )
>>> my_table.primary_key
PrimaryKeyConstraint(
Column('id', Integer(), table=<mytable>,
primary_key=True, nullable=False),
Column('version_id', Integer(), table=<mytable>,
primary_key=True, nullable=False)
)
The primary key of a :class:`_schema.Table` can also be specified by using
a :class:`.PrimaryKeyConstraint` object explicitly; in this mode of usage,
the "name" of the constraint can also be specified, as well as other
options which may be recognized by dialects::
my_table = Table('mytable', metadata,
Column('id', Integer),
Column('version_id', Integer),
Column('data', String(50)),
PrimaryKeyConstraint('id', 'version_id',
name='mytable_pk')
)
The two styles of column-specification should generally not be mixed.
    A warning is emitted if the columns present in the
:class:`.PrimaryKeyConstraint`
don't match the columns that were marked as ``primary_key=True``, if both
are present; in this case, the columns are taken strictly from the
:class:`.PrimaryKeyConstraint` declaration, and those columns otherwise
marked as ``primary_key=True`` are ignored. This behavior is intended to
be backwards compatible with previous behavior.
.. versionchanged:: 0.9.2 Using a mixture of columns within a
:class:`.PrimaryKeyConstraint` in addition to columns marked as
``primary_key=True`` now emits a warning if the lists don't match.
The ultimate behavior of ignoring those columns marked with the flag
only is currently maintained for backwards compatibility; this warning
may raise an exception in a future release.
For the use case where specific options are to be specified on the
:class:`.PrimaryKeyConstraint`, but the usual style of using
``primary_key=True`` flags is still desirable, an empty
:class:`.PrimaryKeyConstraint` may be specified, which will take on the
primary key column collection from the :class:`_schema.Table` based on the
flags::
my_table = Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Column('version_id', Integer, primary_key=True),
Column('data', String(50)),
PrimaryKeyConstraint(name='mytable_pk',
mssql_clustered=True)
)
.. versionadded:: 0.9.2 an empty :class:`.PrimaryKeyConstraint` may now
be specified for the purposes of establishing keyword arguments with
the constraint, independently of the specification of "primary key"
columns within the :class:`_schema.Table` itself; columns marked as
``primary_key=True`` will be gathered into the empty constraint's
column collection.
"""
__visit_name__ = "primary_key_constraint"
def __init__(self, *columns, **kw):
self._implicit_generated = kw.pop("_implicit_generated", False)
super(PrimaryKeyConstraint, self).__init__(*columns, **kw)
def _set_parent(self, table):
super(PrimaryKeyConstraint, self)._set_parent(table)
if table.primary_key is not self:
table.constraints.discard(table.primary_key)
table.primary_key = self
table.constraints.add(self)
table_pks = [c for c in table.c if c.primary_key]
if (
self.columns
and table_pks
and set(table_pks) != set(self.columns.values())
):
util.warn(
"Table '%s' specifies columns %s as primary_key=True, "
"not matching locally specified columns %s; setting the "
"current primary key columns to %s. This warning "
"may become an exception in a future release"
% (
table.name,
", ".join("'%s'" % c.name for c in table_pks),
", ".join("'%s'" % c.name for c in self.columns),
", ".join("'%s'" % c.name for c in self.columns),
)
)
table_pks[:] = []
for c in self.columns:
c.primary_key = True
c.nullable = False
self.columns.extend(table_pks)
def _reload(self, columns):
"""repopulate this :class:`.PrimaryKeyConstraint` given
a set of columns.
Existing columns in the table that are marked as primary_key=True
are maintained.
Also fires a new event.
This is basically like putting a whole new
:class:`.PrimaryKeyConstraint` object on the parent
:class:`_schema.Table` object without actually replacing the object.
The ordering of the given list of columns is also maintained; these
columns will be appended to the list of columns after any which
are already present.
"""
# set the primary key flag on new columns.
# note any existing PK cols on the table also have their
# flag still set.
for col in columns:
col.primary_key = True
self.columns.extend(columns)
PrimaryKeyConstraint._autoincrement_column._reset(self)
self._set_parent_with_dispatch(self.table)
def _replace(self, col):
PrimaryKeyConstraint._autoincrement_column._reset(self)
self.columns.replace(col)
@property
def columns_autoinc_first(self):
autoinc = self._autoincrement_column
if autoinc is not None:
return [autoinc] + [c for c in self.columns if c is not autoinc]
else:
return list(self.columns)
@util.memoized_property
def _autoincrement_column(self):
def _validate_autoinc(col, autoinc_true):
if col.type._type_affinity is None or not issubclass(
col.type._type_affinity, type_api.INTEGERTYPE._type_affinity
):
if autoinc_true:
raise exc.ArgumentError(
"Column type %s on column '%s' is not "
"compatible with autoincrement=True" % (col.type, col)
)
else:
return False
elif (
not isinstance(col.default, (type(None), Sequence))
and not autoinc_true
):
return False
elif col.server_default is not None and not autoinc_true:
return False
elif col.foreign_keys and col.autoincrement not in (
True,
"ignore_fk",
):
return False
return True
if len(self.columns) == 1:
col = list(self.columns)[0]
if col.autoincrement is True:
_validate_autoinc(col, True)
return col
elif col.autoincrement in (
"auto",
"ignore_fk",
) and _validate_autoinc(col, False):
return col
else:
autoinc = None
for col in self.columns:
if col.autoincrement is True:
_validate_autoinc(col, True)
if autoinc is not None:
raise exc.ArgumentError(
"Only one Column may be marked "
"autoincrement=True, found both %s and %s."
% (col.name, autoinc.name)
)
else:
autoinc = col
return autoinc
class UniqueConstraint(ColumnCollectionConstraint):
"""A table-level UNIQUE constraint.
Defines a single column or composite UNIQUE constraint. For a no-frills,
single column constraint, adding ``unique=True`` to the ``Column``
definition is a shorthand equivalent for an unnamed, single column
UniqueConstraint.
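
    E.g., a sketch of the composite form (names are illustrative)::

        Table("mytable", metadata,
            Column("col1", Integer),
            Column("col2", Integer),
            UniqueConstraint("col1", "col2", name="uq_col1_col2"),
        )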
"""
__visit_name__ = "unique_constraint"
class Index(DialectKWArgs, ColumnCollectionMixin, SchemaItem):
"""A table-level INDEX.
Defines a composite (one or more column) INDEX.
E.g.::
sometable = Table("sometable", metadata,
Column("name", String(50)),
Column("address", String(100))
)
Index("some_index", sometable.c.name)
For a no-frills, single column index, adding
:class:`_schema.Column` also supports ``index=True``::
sometable = Table("sometable", metadata,
Column("name", String(50), index=True)
)
For a composite index, multiple columns can be specified::
Index("some_index", sometable.c.name, sometable.c.address)
Functional indexes are supported as well, typically by using the
:data:`.func` construct in conjunction with table-bound
:class:`_schema.Column` objects::
Index("some_index", func.lower(sometable.c.name))
An :class:`.Index` can also be manually associated with a
:class:`_schema.Table`,
either through inline declaration or using
:meth:`_schema.Table.append_constraint`. When this approach is used,
the names
of the indexed columns can be specified as strings::
Table("sometable", metadata,
Column("name", String(50)),
Column("address", String(100)),
Index("some_index", "name", "address")
)
To support functional or expression-based indexes in this form, the
:func:`_expression.text` construct may be used::
from sqlalchemy import text
Table("sometable", metadata,
Column("name", String(50)),
Column("address", String(100)),
Index("some_index", text("lower(name)"))
)
.. versionadded:: 0.9.5 the :func:`_expression.text`
construct may be used to
specify :class:`.Index` expressions, provided the :class:`.Index`
is explicitly associated with the :class:`_schema.Table`.
.. seealso::
:ref:`schema_indexes` - General information on :class:`.Index`.
:ref:`postgresql_indexes` - PostgreSQL-specific options available for
the :class:`.Index` construct.
:ref:`mysql_indexes` - MySQL-specific options available for the
:class:`.Index` construct.
:ref:`mssql_indexes` - MSSQL-specific options available for the
:class:`.Index` construct.
"""
__visit_name__ = "index"
def __init__(self, name, *expressions, **kw):
r"""Construct an index object.
:param name:
The name of the index
:param \*expressions:
Column expressions to include in the index. The expressions
are normally instances of :class:`_schema.Column`, but may also
be arbitrary SQL expressions which ultimately refer to a
:class:`_schema.Column`.
:param unique=False:
Keyword only argument; if True, create a unique index.
:param quote=None:
Keyword only argument; whether to apply quoting to the name of
the index. Works in the same manner as that of
:paramref:`_schema.Column.quote`.
:param info=None: Optional data dictionary which will be populated
into the :attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form
``<dialectname>_<argname>``. See the documentation regarding an
individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
"""
self.table = table = None
columns = []
processed_expressions = []
for (
expr,
column,
strname,
add_element,
) in self._extract_col_expression_collection(expressions):
columns.append(add_element)
processed_expressions.append(expr)
self.expressions = processed_expressions
self.name = quoted_name(name, kw.pop("quote", None))
self.unique = kw.pop("unique", False)
_column_flag = kw.pop("_column_flag", False)
if "info" in kw:
self.info = kw.pop("info")
# TODO: consider "table" argument being public, but for
# the purpose of the fix here, it starts as private.
if "_table" in kw:
table = kw.pop("_table")
self._validate_dialect_kwargs(kw)
# will call _set_parent() if table-bound column
# objects are present
ColumnCollectionMixin.__init__(
self, *columns, _column_flag=_column_flag
)
if table is not None:
self._set_parent(table)
def _set_parent(self, table):
ColumnCollectionMixin._set_parent(self, table)
if self.table is not None and table is not self.table:
raise exc.ArgumentError(
"Index '%s' is against table '%s', and "
"cannot be associated with table '%s'."
% (self.name, self.table.description, table.description)
)
self.table = table
table.indexes.add(self)
expressions = self.expressions
col_expressions = self._col_expressions(table)
assert len(expressions) == len(col_expressions)
self.expressions = [
expr if isinstance(expr, ClauseElement) else colexpr
for expr, colexpr in zip(expressions, col_expressions)
]
@property
def bind(self):
"""Return the connectable associated with this Index."""
return self.table.bind
def create(self, bind=None):
"""Issue a ``CREATE`` statement for this
:class:`.Index`, using the given :class:`.Connectable`
for connectivity.
.. seealso::
:meth:`_schema.MetaData.create_all`.
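
        E.g., a sketch, assuming an existing :class:`_engine.Engine`
        named ``engine``::

            Index("ix_name", sometable.c.name).create(bind=engine)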
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaGenerator, self)
return self
def drop(self, bind=None):
"""Issue a ``DROP`` statement for this
:class:`.Index`, using the given :class:`.Connectable`
for connectivity.
.. seealso::
:meth:`_schema.MetaData.drop_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaDropper, self)
def __repr__(self):
return "Index(%s)" % (
", ".join(
[repr(self.name)]
+ [repr(e) for e in self.expressions]
+ (self.unique and ["unique=True"] or [])
)
)
DEFAULT_NAMING_CONVENTION = util.immutabledict({"ix": "ix_%(column_0_label)s"})
class MetaData(SchemaItem):
"""A collection of :class:`_schema.Table`
objects and their associated schema
constructs.
Holds a collection of :class:`_schema.Table` objects as well as
an optional binding to an :class:`_engine.Engine` or
:class:`_engine.Connection`. If bound, the :class:`_schema.Table` objects
in the collection and their columns may participate in implicit SQL
execution.
The :class:`_schema.Table` objects themselves are stored in the
:attr:`_schema.MetaData.tables` dictionary.
:class:`_schema.MetaData` is a thread-safe object for read operations.
Construction of new tables within a single :class:`_schema.MetaData`
object,
either explicitly or via reflection, may not be completely thread-safe.
.. seealso::
:ref:`metadata_describing` - Introduction to database metadata
"""
__visit_name__ = "metadata"
@util.deprecated_params(
reflect=(
"0.8",
"The :paramref:`_schema.MetaData.reflect` "
"flag is deprecated and will "
"be removed in a future release. Please use the "
":meth:`_schema.MetaData.reflect` method.",
)
)
def __init__(
self,
bind=None,
reflect=False,
schema=None,
quote_schema=None,
naming_convention=None,
info=None,
):
"""Create a new MetaData object.
:param bind:
          An Engine or Connection to bind to. May also be a string or URL
          instance; these are passed to create_engine() and this MetaData will
be bound to the resulting engine.
:param reflect:
Optional, automatically load all tables from the bound database.
Defaults to False. ``bind`` is required when this option is set.
:param schema:
The default schema to use for the :class:`_schema.Table`,
:class:`.Sequence`, and potentially other objects associated with
this :class:`_schema.MetaData`. Defaults to ``None``.
When this value is set, any :class:`_schema.Table` or
:class:`.Sequence`
which specifies ``None`` for the schema parameter will instead
have this schema name defined. To build a :class:`_schema.Table`
or :class:`.Sequence` that still has ``None`` for the schema
even when this parameter is present, use the :attr:`.BLANK_SCHEMA`
symbol.
.. note::
            As noted above, the :paramref:`_schema.MetaData.schema`
parameter
only refers to the **default value** that will be applied to
the :paramref:`_schema.Table.schema` parameter of an incoming
:class:`_schema.Table` object. It does not refer to how the
:class:`_schema.Table` is catalogued within the
:class:`_schema.MetaData`,
which remains consistent vs. a :class:`_schema.MetaData`
collection
that does not define this parameter. The
:class:`_schema.Table`
within the :class:`_schema.MetaData`
will still be keyed based on its
schema-qualified name, e.g.
``my_metadata.tables["some_schema.my_table"]``.
The current behavior of the :class:`_schema.ForeignKey`
object is to
circumvent this restriction, where it can locate a table given
the table name alone, where the schema will be assumed to be
present from this value as specified on the owning
:class:`_schema.MetaData` collection. However,
this implies that a
table qualified with BLANK_SCHEMA cannot currently be referred
to by string name from :class:`_schema.ForeignKey`.
Other parts of
SQLAlchemy such as Declarative may not have similar behaviors
built in, however may do so in a future release, along with a
consistent method of referring to a table in BLANK_SCHEMA.
.. seealso::
:paramref:`_schema.Table.schema`
:paramref:`.Sequence.schema`
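
        For illustration, a sketch of a default schema in use (the
        schema name is hypothetical)::

            metadata = MetaData(schema="remote_banks")
            financial_info = Table(
                "financial_info", metadata,
                Column("id", Integer, primary_key=True),
            )
            # keyed as "remote_banks.financial_info" in metadata.tables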
:param quote_schema:
Sets the ``quote_schema`` flag for those :class:`_schema.Table`,
:class:`.Sequence`, and other objects which make usage of the
local ``schema`` name.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param naming_convention: a dictionary referring to values which
will establish default naming conventions for :class:`.Constraint`
and :class:`.Index` objects, for those objects which are not given
a name explicitly.
The keys of this dictionary may be:
* a constraint or Index class, e.g. the :class:`.UniqueConstraint`,
:class:`_schema.ForeignKeyConstraint` class, the :class:`.Index`
class
* a string mnemonic for one of the known constraint classes;
``"fk"``, ``"pk"``, ``"ix"``, ``"ck"``, ``"uq"`` for foreign key,
primary key, index, check, and unique constraint, respectively.
* the string name of a user-defined "token" that can be used
to define new naming tokens.
The values associated with each "constraint class" or "constraint
mnemonic" key are string naming templates, such as
``"uq_%(table_name)s_%(column_0_name)s"``,
which describe how the name should be composed. The values
associated with user-defined "token" keys should be callables of the
form ``fn(constraint, table)``, which accepts the constraint/index
object and :class:`_schema.Table` as arguments, returning a string
result.
The built-in names are as follows, some of which may only be
available for certain types of constraint:
* ``%(table_name)s`` - the name of the :class:`_schema.Table`
object
associated with the constraint.
* ``%(referred_table_name)s`` - the name of the
:class:`_schema.Table`
object associated with the referencing target of a
:class:`_schema.ForeignKeyConstraint`.
* ``%(column_0_name)s`` - the name of the :class:`_schema.Column`
at
index position "0" within the constraint.
* ``%(column_0N_name)s`` - the name of all :class:`_schema.Column`
objects in order within the constraint, joined without a
separator.
* ``%(column_0_N_name)s`` - the name of all
:class:`_schema.Column`
objects in order within the constraint, joined with an
underscore as a separator.
* ``%(column_0_label)s``, ``%(column_0N_label)s``,
``%(column_0_N_label)s`` - the label of either the zeroth
:class:`_schema.Column` or all :class:`.Columns`, separated with
or without an underscore
* ``%(column_0_key)s``, ``%(column_0N_key)s``,
``%(column_0_N_key)s`` - the key of either the zeroth
:class:`_schema.Column` or all :class:`.Columns`, separated with
or without an underscore
* ``%(referred_column_0_name)s``, ``%(referred_column_0N_name)s``
``%(referred_column_0_N_name)s``, ``%(referred_column_0_key)s``,
``%(referred_column_0N_key)s``, ... column tokens which
render the names/keys/labels of columns that are referenced
by a :class:`_schema.ForeignKeyConstraint`.
* ``%(constraint_name)s`` - a special key that refers to the
existing name given to the constraint. When this key is
present, the :class:`.Constraint` object's existing name will be
replaced with one that is composed from template string that
uses this token. When this token is present, it is required that
the :class:`.Constraint` is given an explicit name ahead of time.
* user-defined: any additional token may be implemented by passing
it along with a ``fn(constraint, table)`` callable to the
naming_convention dictionary.
.. versionadded:: 1.3.0 - added new ``%(column_0N_name)s``,
``%(column_0_N_name)s``, and related tokens that produce
concatenations of names, keys, or labels for all columns referred
to by a given constraint.
.. seealso::
:ref:`constraint_naming_conventions` - for detailed usage
examples.
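
        For illustration, a minimal sketch of a convention dictionary
        (the ``uq`` and ``fk`` templates shown are hypothetical)::

            metadata = MetaData(naming_convention={
                "ix": "ix_%(column_0_label)s",
                "uq": "uq_%(table_name)s_%(column_0_name)s",
                "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
            })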
"""
self.tables = util.immutabledict()
self.schema = quoted_name(schema, quote_schema)
self.naming_convention = (
naming_convention
if naming_convention
else DEFAULT_NAMING_CONVENTION
)
if info:
self.info = info
self._schemas = set()
self._sequences = {}
self._fk_memos = collections.defaultdict(list)
self.bind = bind
if reflect:
if not bind:
raise exc.ArgumentError(
"A bind must be supplied in conjunction "
"with reflect=True"
)
self.reflect()
tables = None
"""A dictionary of :class:`_schema.Table`
objects keyed to their name or "table key".
The exact key is that determined by the :attr:`_schema.Table.key`
attribute;
for a table with no :attr:`_schema.Table.schema` attribute,
this is the same
as :attr:`_schema.Table.name`. For a table with a schema,
it is typically of the
form ``schemaname.tablename``.
.. seealso::
:attr:`_schema.MetaData.sorted_tables`
"""
def __repr__(self):
return "MetaData(bind=%r)" % self.bind
def __contains__(self, table_or_key):
if not isinstance(table_or_key, util.string_types):
table_or_key = table_or_key.key
return table_or_key in self.tables
def _add_table(self, name, schema, table):
key = _get_table_key(name, schema)
dict.__setitem__(self.tables, key, table)
if schema:
self._schemas.add(schema)
def _remove_table(self, name, schema):
key = _get_table_key(name, schema)
removed = dict.pop(self.tables, key, None)
if removed is not None:
for fk in removed.foreign_keys:
fk._remove_from_metadata(self)
if self._schemas:
self._schemas = set(
[
t.schema
for t in self.tables.values()
if t.schema is not None
]
)
def __getstate__(self):
return {
"tables": self.tables,
"schema": self.schema,
"schemas": self._schemas,
"sequences": self._sequences,
"fk_memos": self._fk_memos,
"naming_convention": self.naming_convention,
}
def __setstate__(self, state):
self.tables = state["tables"]
self.schema = state["schema"]
self.naming_convention = state["naming_convention"]
self._bind = None
self._sequences = state["sequences"]
self._schemas = state["schemas"]
self._fk_memos = state["fk_memos"]
def is_bound(self):
"""True if this MetaData is bound to an Engine or Connection."""
return self._bind is not None
def bind(self):
"""An :class:`_engine.Engine` or :class:`_engine.Connection`
to which this
:class:`_schema.MetaData` is bound.
Typically, a :class:`_engine.Engine` is assigned to this attribute
so that "implicit execution" may be used, or alternatively
as a means of providing engine binding information to an
ORM :class:`.Session` object::
engine = create_engine("someurl://")
metadata.bind = engine
.. seealso::
:ref:`dbengine_implicit` - background on "bound metadata"
"""
return self._bind
@util.dependencies("sqlalchemy.engine.url")
def _bind_to(self, url, bind):
"""Bind this MetaData to an Engine, Connection, string or URL."""
if isinstance(bind, util.string_types + (url.URL,)):
self._bind = sqlalchemy.create_engine(bind)
else:
self._bind = bind
bind = property(bind, _bind_to)
def clear(self):
"""Clear all Table objects from this MetaData."""
dict.clear(self.tables)
self._schemas.clear()
self._fk_memos.clear()
def remove(self, table):
"""Remove the given Table object from this MetaData."""
self._remove_table(table.name, table.schema)
@property
def sorted_tables(self):
"""Returns a list of :class:`_schema.Table` objects sorted in order of
foreign key dependency.
The sorting will place :class:`_schema.Table`
objects that have dependencies
first, before the dependencies themselves, representing the
order in which they can be created. To get the order in which
the tables would be dropped, use the ``reversed()`` Python built-in.
.. warning::
The :attr:`.MetaData.sorted_tables` attribute cannot by itself
accommodate automatic resolution of dependency cycles between
tables, which are usually caused by mutually dependent foreign key
constraints. When these cycles are detected, the foreign keys
of these tables are omitted from consideration in the sort.
A warning is emitted when this condition occurs; this will
become an exception in a future release. Tables which are not part
of the cycle will still be returned in dependency order.
To resolve these cycles, the
:paramref:`_schema.ForeignKeyConstraint.use_alter` parameter may be
applied to those constraints which create a cycle. Alternatively,
the :func:`_schema.sort_tables_and_constraints` function will
automatically return foreign key constraints in a separate
collection when cycles are detected so that they may be applied
to a schema separately.
.. versionchanged:: 1.3.17 - a warning is emitted when
:attr:`.MetaData.sorted_tables` cannot perform a proper sort
due to cyclical dependencies. This will be an exception in a
future release. Additionally, the sort will continue to return
other tables not involved in the cycle in dependency order which
was not the case previously.
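For illustration, a minimal sketch of dropping tables in reverse
dependency order (``metadata`` and ``some_engine`` are assumed to
already exist)::
for table in reversed(metadata.sorted_tables):
table.drop(some_engine, checkfirst=True)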
.. seealso::
:func:`_schema.sort_tables`
:func:`_schema.sort_tables_and_constraints`
:attr:`_schema.MetaData.tables`
:meth:`_reflection.Inspector.get_table_names`
:meth:`_reflection.Inspector.get_sorted_table_and_fkc_names`
"""
return ddl.sort_tables(
sorted(self.tables.values(), key=lambda t: t.key)
)
def reflect(
self,
bind=None,
schema=None,
views=False,
only=None,
extend_existing=False,
autoload_replace=True,
resolve_fks=True,
**dialect_kwargs
):
r"""Load all available table definitions from the database.
Automatically creates ``Table`` entries in this ``MetaData`` for any
table available in the database but not yet present in the
``MetaData``. May be called multiple times to pick up tables recently
added to the database, however no special action is taken if a table
in this ``MetaData`` no longer exists in the database.
:param bind:
A :class:`.Connectable` used to access the database; if None, uses
the existing bind on this ``MetaData``, if any.
:param schema:
Optional, query and reflect tables from an alternate schema.
If None, the schema associated with this :class:`_schema.MetaData`
is used, if any.
:param views:
If True, also reflect views.
:param only:
Optional. Load only a sub-set of available named tables. May be
specified as a sequence of names or a callable.
If a sequence of names is provided, only those tables will be
reflected. An error is raised if a table is requested but not
available. Named tables already present in this ``MetaData`` are
ignored.
If a callable is provided, it will be used as a boolean predicate to
filter the list of potential table names. The callable is called
with a table name and this ``MetaData`` instance as positional
arguments and should return a true value for any table to reflect.
:param extend_existing: Passed along to each :class:`_schema.Table` as
:paramref:`_schema.Table.extend_existing`.
.. versionadded:: 0.9.1
:param autoload_replace: Passed along to each :class:`_schema.Table`
as
:paramref:`_schema.Table.autoload_replace`.
.. versionadded:: 0.9.1
:param resolve_fks: if True, reflect :class:`_schema.Table`
objects linked
to :class:`_schema.ForeignKey` objects located in each
:class:`_schema.Table`.
For :meth:`_schema.MetaData.reflect`,
this has the effect of reflecting
related tables that might otherwise not be in the list of tables
being reflected, for example if the referenced table is in a
different schema or is omitted via the
:paramref:`.MetaData.reflect.only` parameter. When False,
:class:`_schema.ForeignKey` objects are not followed to the
:class:`_schema.Table`
in which they link, however if the related table is also part of the
list of tables that would be reflected in any case, the
:class:`_schema.ForeignKey` object will still resolve to its related
:class:`_schema.Table` after the :meth:`_schema.MetaData.reflect`
operation is
complete. Defaults to True.
.. versionadded:: 1.3.0
.. seealso::
:paramref:`_schema.Table.resolve_fks`
:param \**dialect_kwargs: Additional keyword arguments not mentioned
above are dialect specific, and passed in the form
``<dialectname>_<argname>``. See the documentation regarding an
individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
.. versionadded:: 0.9.2 - Added
:paramref:`.MetaData.reflect.**dialect_kwargs` to support
dialect-level reflection options for all :class:`_schema.Table`
objects reflected.
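For illustration only, a minimal sketch (the engine and the name
filter are hypothetical)::
meta = MetaData()
meta.reflect(bind=some_engine, views=True,
only=lambda name, m: name.startswith("user_"))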
"""
if bind is None:
bind = _bind_or_error(self)
with bind.connect() as conn:
reflect_opts = {
"autoload": True,
"autoload_with": conn,
"extend_existing": extend_existing,
"autoload_replace": autoload_replace,
"resolve_fks": resolve_fks,
"_extend_on": set(),
}
reflect_opts.update(dialect_kwargs)
if schema is None:
schema = self.schema
if schema is not None:
reflect_opts["schema"] = schema
available = util.OrderedSet(
bind.engine.table_names(schema, connection=conn)
)
if views:
available.update(bind.dialect.get_view_names(conn, schema))
if schema is not None:
available_w_schema = util.OrderedSet(
["%s.%s" % (schema, name) for name in available]
)
else:
available_w_schema = available
current = set(self.tables)
if only is None:
load = [
name
for name, schname in zip(available, available_w_schema)
if extend_existing or schname not in current
]
elif util.callable(only):
load = [
name
for name, schname in zip(available, available_w_schema)
if (extend_existing or schname not in current)
and only(name, self)
]
else:
missing = [name for name in only if name not in available]
if missing:
s = schema and (" schema '%s'" % schema) or ""
raise exc.InvalidRequestError(
"Could not reflect: requested table(s) not available "
"in %r%s: (%s)" % (bind.engine, s, ", ".join(missing))
)
load = [
name
for name in only
if extend_existing or name not in current
]
for name in load:
try:
Table(name, self, **reflect_opts)
except exc.UnreflectableTableError as uerr:
util.warn("Skipping table %s: %s" % (name, uerr))
@util.deprecated(
"0.7",
"the :meth:`_schema.MetaData.append_ddl_listener` "
"method is deprecated and "
"will be removed in a future release. Please refer to "
":class:`.DDLEvents`.",
)
def append_ddl_listener(self, event_name, listener):
"""Append a DDL event listener to this ``MetaData``.
"""
def adapt_listener(target, connection, **kw):
tables = kw["tables"]
listener(event, target, connection, tables=tables)
event.listen(self, "" + event_name.replace("-", "_"), adapt_listener)
def create_all(self, bind=None, tables=None, checkfirst=True):
"""Create all tables stored in this metadata.
Conditional by default, will not attempt to recreate tables already
present in the target database.
:param bind:
A :class:`.Connectable` used to access the
database; if None, uses the existing bind on this ``MetaData``, if
any.
:param tables:
Optional list of ``Table`` objects, which is a subset of the total
tables in the ``MetaData`` (others are ignored).
:param checkfirst:
Defaults to True, don't issue CREATEs for tables already present
in the target database.
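For illustration, a minimal sketch (the engine URL is hypothetical)::
engine = create_engine("sqlite://")
metadata.create_all(bind=engine, checkfirst=True)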
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(
ddl.SchemaGenerator, self, checkfirst=checkfirst, tables=tables
)
def drop_all(self, bind=None, tables=None, checkfirst=True):
"""Drop all tables stored in this metadata.
Conditional by default, will not attempt to drop tables not present in
the target database.
:param bind:
A :class:`.Connectable` used to access the
database; if None, uses the existing bind on this ``MetaData``, if
any.
:param tables:
Optional list of ``Table`` objects, which is a subset of the
total tables in the ``MetaData`` (others are ignored).
:param checkfirst:
Defaults to True, only issue DROPs for tables confirmed to be
present in the target database.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(
ddl.SchemaDropper, self, checkfirst=checkfirst, tables=tables
)
class ThreadLocalMetaData(MetaData):
"""A MetaData variant that presents a different ``bind`` in every thread.
Makes the ``bind`` property of the MetaData a thread-local value, allowing
this collection of tables to be bound to different ``Engine``
implementations or connections in each thread.
The ThreadLocalMetaData starts off bound to None in each thread. Binds
must be made explicitly by assigning to the ``bind`` property or using
``connect()``. You can also re-bind dynamically multiple times per
thread, just like a regular ``MetaData``.
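For illustration, a minimal sketch (the engine URL is hypothetical;
assigning a string creates an :class:`_engine.Engine` for the
current thread)::
meta = ThreadLocalMetaData()
meta.bind = "sqlite://"  # each thread may assign its own bind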
"""
__visit_name__ = "metadata"
def __init__(self):
"""Construct a ThreadLocalMetaData."""
self.context = util.threading.local()
self.__engines = {}
super(ThreadLocalMetaData, self).__init__()
def bind(self):
"""The bound Engine or Connection for this thread.
This property may be assigned an Engine or Connection, or assigned a
string or URL to automatically create a basic Engine for this bind
with ``create_engine()``."""
return getattr(self.context, "_engine", None)
@util.dependencies("sqlalchemy.engine.url")
def _bind_to(self, url, bind):
"""Bind to a Connectable in the caller's thread."""
if isinstance(bind, util.string_types + (url.URL,)):
try:
self.context._engine = self.__engines[bind]
except KeyError:
e = sqlalchemy.create_engine(bind)
self.__engines[bind] = e
self.context._engine = e
else:
# TODO: this is squirrely. we shouldn't have to hold onto engines
# in a case like this
if bind not in self.__engines:
self.__engines[bind] = bind
self.context._engine = bind
bind = property(bind, _bind_to)
def is_bound(self):
"""True if there is a bind for this thread."""
return (
hasattr(self.context, "_engine")
and self.context._engine is not None
)
def dispose(self):
"""Dispose all bound engines, in all thread contexts."""
for e in self.__engines.values():
if hasattr(e, "dispose"):
e.dispose()
class _SchemaTranslateMap(object):
"""Provide translation of schema names based on a mapping.
Also provides helpers for producing cache keys and optimized
access when no mapping is present.
Used by the :paramref:`.Connection.execution_options.schema_translate_map`
feature.
.. versionadded:: 1.1
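For illustration, a sketch of the public-facing option this class
supports (the schema name is hypothetical)::
conn = engine.connect().execution_options(
schema_translate_map={None: "user_schema_one"})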
"""
__slots__ = "map_", "__call__", "hash_key", "is_default"
_default_schema_getter = operator.attrgetter("schema")
def __init__(self, map_):
self.map_ = map_
if map_ is not None:
def schema_for_object(obj):
effective_schema = self._default_schema_getter(obj)
effective_schema = obj._translate_schema(
effective_schema, map_
)
return effective_schema
self.__call__ = schema_for_object
self.hash_key = ";".join(
"%s=%s" % (k, map_[k]) for k in sorted(map_, key=str)
)
self.is_default = False
else:
self.hash_key = 0
self.__call__ = self._default_schema_getter
self.is_default = True
@classmethod
def _schema_getter(cls, map_):
if map_ is None:
return _default_schema_map
elif isinstance(map_, _SchemaTranslateMap):
return map_
else:
return _SchemaTranslateMap(map_)
_default_schema_map = _SchemaTranslateMap(None)
_schema_getter = _SchemaTranslateMap._schema_getter
class Computed(FetchedValue, SchemaItem):
"""Defines a generated column, i.e. "GENERATED ALWAYS AS" syntax.
The :class:`.Computed` construct is an inline construct added to the
argument list of a :class:`_schema.Column` object::
from sqlalchemy import Computed
Table('square', meta,
Column('side', Float, nullable=False),
Column('area', Float, Computed('side * side'))
)
See the linked documentation below for complete details.
.. versionadded:: 1.3.11
.. seealso::
:ref:`computed_ddl`
"""
__visit_name__ = "computed_column"
@_document_text_coercion(
"sqltext", ":class:`.Computed`", ":paramref:`.Computed.sqltext`"
)
def __init__(self, sqltext, persisted=None):
"""Construct a GENERATED ALWAYS AS DDL construct to accompany a
:class:`_schema.Column`.
:param sqltext:
A string containing the column generation expression, which will be
used verbatim, or a SQL expression construct, such as a
:func:`_expression.text`
object. If given as a string, the object is converted to a
:func:`_expression.text` object.
:param persisted:
Optional, controls how this column should be persisted by the
database. Possible values are:
* ``None``, the default; the database's default persistence behavior
will be used.
* ``True``, will render ``GENERATED ALWAYS AS ... STORED``, or the
equivalent for the target database if supported.
* ``False``, will render ``GENERATED ALWAYS AS ... VIRTUAL``, or the
equivalent for the target database if supported.
Specifying ``True`` or ``False`` may raise an error when the DDL
is emitted to the target database if the database does not support
that persistence option. Leaving this parameter at its default
of ``None`` is guaranteed to succeed for all databases that support
``GENERATED ALWAYS AS``.
"""
self.sqltext = _literal_as_text(sqltext, allow_coercion_to_text=True)
self.persisted = persisted
self.column = None
def _set_parent(self, parent):
if not isinstance(
parent.server_default, (type(None), Computed)
) or not isinstance(parent.server_onupdate, (type(None), Computed)):
raise exc.ArgumentError(
"A generated column cannot specify a server_default or a "
"server_onupdate argument"
)
self.column = parent
parent.computed = self
self.column.server_onupdate = self
self.column.server_default = self
def _as_for_update(self, for_update):
return self
def copy(self, target_table=None, **kw):
if target_table is not None:
sqltext = _copy_expression(self.sqltext, self.table, target_table)
else:
sqltext = self.sqltext
g = Computed(sqltext, persisted=self.persisted)
return self._schema_item_copy(g)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/flask_restful/fields.py
|
from datetime import datetime
from calendar import timegm
import pytz
from decimal import Decimal as MyDecimal, ROUND_HALF_EVEN
from email.utils import formatdate
import six
try:
from urlparse import urlparse, urlunparse
except ImportError:
# python3
from urllib.parse import urlparse, urlunparse
from flask_restful import inputs, marshal
from flask import url_for, request
__all__ = ["String", "FormattedString", "Url", "DateTime", "Float",
"Integer", "Arbitrary", "Nested", "List", "Raw", "Boolean",
"Fixed", "Price"]
class MarshallingException(Exception):
"""
This is an encapsulating Exception in case of marshalling error.
"""
def __init__(self, underlying_exception):
# just put the contextual representation of the error to hint on what
# went wrong without exposing internals
super(MarshallingException, self).__init__(six.text_type(underlying_exception))
def is_indexable_but_not_string(obj):
return not hasattr(obj, "strip") and hasattr(obj, "__iter__")
def get_value(key, obj, default=None):
"""Helper for pulling a keyed value off various types of objects"""
if isinstance(key, int):
return _get_value_for_key(key, obj, default)
elif callable(key):
return key(obj)
else:
return _get_value_for_keys(key.split('.'), obj, default)
def _get_value_for_keys(keys, obj, default):
if len(keys) == 1:
return _get_value_for_key(keys[0], obj, default)
else:
return _get_value_for_keys(
keys[1:], _get_value_for_key(keys[0], obj, default), default)
def _get_value_for_key(key, obj, default):
if is_indexable_but_not_string(obj):
try:
return obj[key]
except (IndexError, TypeError, KeyError):
pass
return getattr(obj, key, default)
def to_marshallable_type(obj):
"""Helper for converting an object to a dictionary only if it is not
dictionary already or an indexable object nor a simple type"""
if obj is None:
return None # make it idempotent for None
if hasattr(obj, '__marshallable__'):
return obj.__marshallable__()
if hasattr(obj, '__getitem__'):
return obj  # it is indexable; leave it as-is
return dict(obj.__dict__)
class Raw(object):
"""Raw provides a base field class from which others should extend. It
applies no formatting by default, and should only be used in cases where
data does not need to be formatted before being serialized. Fields should
throw a :class:`MarshallingException` in case of a parsing problem.
:param default: The default value for the field, if no value is
specified.
:param attribute: If the public facing value differs from the internal
value, use this to retrieve a different attribute from the response
than the publicly named value.
"""
def __init__(self, default=None, attribute=None):
self.attribute = attribute
self.default = default
def format(self, value):
"""Formats a field's value. No-op by default - field classes that
modify how the values of existing object keys are presented should
override this and apply the appropriate formatting.
:param value: The value to format
:exception MarshallingException: In case of formatting problem
Ex::
class TitleCase(Raw):
def format(self, value):
return unicode(value).title()
"""
return value
def output(self, key, obj):
"""Pulls the value for the given key from the object, applies the
field's formatting and returns the result. If the key is not found
in the object, returns the default value. Field classes that create
values which do not require the existence of the key in the object
should override this and return the desired value.
:exception MarshallingException: In case of formatting problem
"""
value = get_value(key if self.attribute is None else self.attribute, obj)
if value is None:
return self.default
return self.format(value)
class Nested(Raw):
"""Allows you to nest one set of fields inside another.
See :ref:`nested-field` for more information
:param dict nested: The dictionary to nest
:param bool allow_null: Whether to return None instead of a dictionary
whose values are all null when the nested source value is missing
:param kwargs: If the ``default`` keyword argument is present, its
value will be returned when the nested source value is missing
(e.g. lets you return an empty JSON object instead of
null)
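Ex (a minimal sketch; the field layout is illustrative only)::
address_fields = {'line_1': fields.String, 'city': fields.String}
user_fields = {
'name': fields.String,
'address': fields.Nested(address_fields, allow_null=True)
}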
"""
def __init__(self, nested, allow_null=False, **kwargs):
self.nested = nested
self.allow_null = allow_null
super(Nested, self).__init__(**kwargs)
def output(self, key, obj):
value = get_value(key if self.attribute is None else self.attribute, obj)
if value is None:
if self.allow_null:
return None
elif self.default is not None:
return self.default
return marshal(value, self.nested)
class List(Raw):
"""
Field for marshalling lists of other fields.
See :ref:`list-field` for more information.
:param cls_or_instance: The field type the list will contain.
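Ex (a minimal sketch; the field name is illustrative only)::
fields = {'tags': fields.List(fields.String)}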
"""
def __init__(self, cls_or_instance, **kwargs):
super(List, self).__init__(**kwargs)
error_msg = ("The type of the list elements must be a subclass of "
"flask_restful.fields.Raw")
if isinstance(cls_or_instance, type):
if not issubclass(cls_or_instance, Raw):
raise MarshallingException(error_msg)
self.container = cls_or_instance()
else:
if not isinstance(cls_or_instance, Raw):
raise MarshallingException(error_msg)
self.container = cls_or_instance
def format(self, value):
# Convert all instances in typed list to container type
if isinstance(value, set):
value = list(value)
return [
self.container.output(idx,
val if (isinstance(val, dict)
or (self.container.attribute
and hasattr(val, self.container.attribute)))
and not isinstance(self.container, Nested)
and not type(self.container) is Raw
else value)
for idx, val in enumerate(value)
]
def output(self, key, data):
value = get_value(key if self.attribute is None else self.attribute, data)
# we cannot really test for external dict behavior
if is_indexable_but_not_string(value) and not isinstance(value, dict):
return self.format(value)
if value is None:
return self.default
return [marshal(value, self.container.nested)]
class String(Raw):
"""
Marshal a value as a string. Uses ``six.text_type`` so values will
be converted to :class:`unicode` in python2 and :class:`str` in
python3.
"""
def format(self, value):
try:
return six.text_type(value)
except ValueError as ve:
raise MarshallingException(ve)
class Integer(Raw):
""" Field for outputting an integer value.
:param int default: The default value for the field, if no value is
specified.
"""
def __init__(self, default=0, **kwargs):
super(Integer, self).__init__(default=default, **kwargs)
def format(self, value):
try:
if value is None:
return self.default
return int(value)
except ValueError as ve:
raise MarshallingException(ve)
class Boolean(Raw):
"""
Field for outputting a boolean value.
Empty collections such as ``""``, ``{}``, ``[]``, etc. will be converted to
``False``.
"""
def format(self, value):
return bool(value)
class FormattedString(Raw):
"""
FormattedString is used to interpolate other values from
the response into this field. The syntax for the source string is
the same as the string :meth:`~str.format` method from the python
stdlib.
Ex::
fields = {
'name': fields.String,
'greeting': fields.FormattedString("Hello {name}")
}
data = {
'name': 'Doug',
}
marshal(data, fields)
"""
def __init__(self, src_str):
"""
:param string src_str: the string to format with the other
values from the response.
"""
super(FormattedString, self).__init__()
self.src_str = six.text_type(src_str)
def output(self, key, obj):
try:
data = to_marshallable_type(obj)
return self.src_str.format(**data)
except (TypeError, IndexError) as error:
raise MarshallingException(error)
class Url(Raw):
"""
A string representation of a Url
:param endpoint: Endpoint name. If endpoint is ``None``,
``request.endpoint`` is used instead
:type endpoint: str
:param absolute: If ``True``, ensures that the generated urls will have the
hostname included
:type absolute: bool
:param scheme: URL scheme specifier (e.g. ``http``, ``https``)
:type scheme: str
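Ex (a minimal sketch; the endpoint name is hypothetical)::
fields = {'uri': fields.Url('user_endpoint', absolute=True)}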
"""
def __init__(self, endpoint=None, absolute=False, scheme=None, **kwargs):
super(Url, self).__init__(**kwargs)
self.endpoint = endpoint
self.absolute = absolute
self.scheme = scheme
def output(self, key, obj):
try:
data = to_marshallable_type(obj)
endpoint = self.endpoint if self.endpoint is not None else request.endpoint
o = urlparse(url_for(endpoint, _external=self.absolute, **data))
if self.absolute:
scheme = self.scheme if self.scheme is not None else o.scheme
return urlunparse((scheme, o.netloc, o.path, "", "", ""))
return urlunparse(("", "", o.path, "", "", ""))
except TypeError as te:
raise MarshallingException(te)
class Float(Raw):
"""
A double as IEEE-754 double precision.
ex: 3.141592653589793, 3.1415926535897933e-06, 3.141592653589793e+24,
nan, inf, -inf
"""
def format(self, value):
try:
return float(value)
except ValueError as ve:
raise MarshallingException(ve)
class Arbitrary(Raw):
"""
A floating point number with an arbitrary precision
ex: 634271127864378216478362784632784678324.23432
"""
def format(self, value):
return six.text_type(MyDecimal(value))
class DateTime(Raw):
"""
Return a formatted datetime string in UTC. Supported formats are RFC 822
and ISO 8601.
See :func:`email.utils.formatdate` for more info on the RFC 822 format.
See :meth:`datetime.datetime.isoformat` for more info on the ISO 8601
format.
:param dt_format: ``'rfc822'`` or ``'iso8601'``
:type dt_format: str
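Ex (a minimal sketch; the field name is illustrative only)::
fields = {'created': fields.DateTime(dt_format='iso8601')}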
"""
def __init__(self, dt_format='rfc822', **kwargs):
super(DateTime, self).__init__(**kwargs)
self.dt_format = dt_format
def format(self, value):
try:
if self.dt_format == 'rfc822':
return _rfc822(value)
elif self.dt_format == 'iso8601':
return _iso8601(value)
else:
raise MarshallingException(
'Unsupported date format %s' % self.dt_format
)
except AttributeError as ae:
raise MarshallingException(ae)
ZERO = MyDecimal()
class Fixed(Raw):
"""
A decimal number with a fixed precision.
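Ex (a minimal sketch; two decimal places, field name illustrative only)::
fields = {'price': fields.Fixed(decimals=2)}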
"""
def __init__(self, decimals=5, **kwargs):
super(Fixed, self).__init__(**kwargs)
self.precision = MyDecimal('0.' + '0' * (decimals - 1) + '1')
def format(self, value):
dvalue = MyDecimal(value)
if not dvalue.is_normal() and dvalue != ZERO:
raise MarshallingException('Invalid Fixed precision number.')
return six.text_type(dvalue.quantize(self.precision, rounding=ROUND_HALF_EVEN))
"""Alias for :class:`~fields.Fixed`"""
Price = Fixed
def _rfc822(dt):
"""Turn a datetime object into a formatted date.
Example::
fields._rfc822(datetime(2011, 1, 1)) => "Sat, 01 Jan 2011 00:00:00 -0000"
:param dt: The datetime to transform
:type dt: datetime
:return: An RFC 822 formatted date string
"""
return formatdate(timegm(dt.utctimetuple()))
def _iso8601(dt):
"""Turn a datetime object into an ISO8601 formatted date.
Example::
fields._iso8601(datetime(2012, 1, 1, 0, 0)) => "2012-01-01T00:00:00"
:param dt: The datetime to transform
:type dt: datetime
:return: A ISO 8601 formatted date string
"""
return dt.isoformat()
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/flask_restful/reqparse.py
|
from copy import deepcopy
try:
from collections.abc import MutableSequence
except ImportError:
from collections import MutableSequence
from flask import current_app, request
from werkzeug.datastructures import MultiDict, FileStorage
from werkzeug import exceptions
import flask_restful
import decimal
import six
class Namespace(dict):
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
self[name] = value
_friendly_location = {
u'json': u'the JSON body',
u'form': u'the post body',
u'args': u'the query string',
u'values': u'the post body or the query string',
u'headers': u'the HTTP headers',
u'cookies': u'the request\'s cookies',
u'files': u'an uploaded file',
}
text_type = lambda x: six.text_type(x)
class Argument(object):
"""
:param name: Either a name or a list of option strings, e.g. foo or
-f, --foo.
:param default: The value produced if the argument is absent from the
request.
:param dest: The name of the attribute to be added to the object
returned by :meth:`~reqparse.RequestParser.parse_args()`.
:param bool required: Whether or not the argument may be omitted (optionals
only).
:param action: The basic type of action to be taken when this argument
is encountered in the request. Valid options are "store" and "append".
:param ignore: Whether to ignore cases where the argument fails type
conversion
:param type: The type to which the request argument should be
converted. If a type raises an exception, the message in the
error will be returned in the response. Defaults to :class:`unicode`
in python2 and :class:`str` in python3.
:param location: The attributes of the :class:`flask.Request` object
to source the arguments from (ex: headers, args, etc.), can be an
iterator. The last item listed takes precedence in the result set.
:param choices: A container of the allowable values for the argument.
:param help: A brief description of the argument, returned in the
response when the argument is invalid. May optionally contain
an "{error_msg}" interpolation token, which will be replaced with
the text of the error raised by the type converter.
:param bool case_sensitive: Whether argument values in the request are
case sensitive or not (if False, all values are converted to lowercase)
:param bool store_missing: Whether the arguments default value should
be stored if the argument is missing from the request.
:param bool trim: If enabled, trims whitespace around the argument.
:param bool nullable: If enabled, allows null value in argument.
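Ex (a minimal sketch; the argument name and ``parser`` are
illustrative only)::
parser.add_argument('rate', type=int, location='form', required=True,
help='The rate cannot be converted: {error_msg}')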
"""
def __init__(self, name, default=None, dest=None, required=False,
ignore=False, type=text_type, location=('json', 'values',),
choices=(), action='store', help=None, operators=('=',),
case_sensitive=True, store_missing=True, trim=False,
nullable=True):
self.name = name
self.default = default
self.dest = dest
self.required = required
self.ignore = ignore
self.location = location
self.type = type
self.choices = choices
self.action = action
self.help = help
self.case_sensitive = case_sensitive
self.operators = operators
self.store_missing = store_missing
self.trim = trim
self.nullable = nullable
def __str__(self):
if len(self.choices) > 5:
# self.choices may be a tuple (the default is ``()``), so build a
# list before appending; appending to a tuple raises AttributeError
choices = list(self.choices[0:3])
choices.append('...')
choices.append(self.choices[-1])
else:
choices = self.choices
return 'Name: {0}, type: {1}, choices: {2}'.format(self.name, self.type, choices)
def __repr__(self):
return "{0}('{1}', default={2}, dest={3}, required={4}, ignore={5}, location={6}, " \
"type=\"{7}\", choices={8}, action='{9}', help={10}, case_sensitive={11}, " \
"operators={12}, store_missing={13}, trim={14}, nullable={15})".format(
self.__class__.__name__, self.name, self.default, self.dest, self.required, self.ignore, self.location,
self.type, self.choices, self.action, self.help, self.case_sensitive,
self.operators, self.store_missing, self.trim, self.nullable)
def source(self, request):
"""Pulls values off the request in the provided location
:param request: The flask request object to parse arguments from
"""
if isinstance(self.location, six.string_types):
value = getattr(request, self.location, MultiDict())
if callable(value):
value = value()
if value is not None:
return value
else:
values = MultiDict()
for loc in self.location:
value = getattr(request, loc, None)
if callable(value):
value = value()
if value is not None:
values.update(value)
return values
return MultiDict()
def convert(self, value, op):
# Don't cast None
if value is None:
if self.nullable:
return None
else:
raise ValueError('Must not be null!')
# and check if we're expecting a filestorage and haven't overridden `type`
# (required because the below instantiation isn't valid for FileStorage)
elif isinstance(value, FileStorage) and self.type == FileStorage:
return value
try:
return self.type(value, self.name, op)
except TypeError:
try:
if self.type is decimal.Decimal:
return self.type(str(value))
else:
return self.type(value, self.name)
except TypeError:
return self.type(value)
def handle_validation_error(self, error, bundle_errors):
"""Called when an error is raised while parsing. Aborts the request
with a 400 status and an error message
:param error: the error that was raised
:param bundle_errors: do not abort when first error occurs, return a
dict with the name of the argument and the error message to be
bundled
"""
error_str = six.text_type(error)
error_msg = self.help.format(error_msg=error_str) if self.help else error_str
msg = {self.name: error_msg}
if current_app.config.get("BUNDLE_ERRORS", False) or bundle_errors:
return error, msg
flask_restful.abort(400, message=msg)
def parse(self, request, bundle_errors=False):
"""Parses argument value(s) from the request, converting according to
the argument's type.
:param request: The flask request object to parse arguments from
:param bundle_errors: Do not abort when first error occurs, return a
dict with the name of the argument and the error message to be
bundled
"""
source = self.source(request)
results = []
# Sentinels
_not_found = False
_found = True
for operator in self.operators:
name = self.name + operator.replace("=", "", 1)
if name in source:
# Account for MultiDict and regular dict
if hasattr(source, "getlist"):
values = source.getlist(name)
else:
values = source.get(name)
if not (isinstance(values, MutableSequence) and self.action == 'append'):
values = [values]
for value in values:
if hasattr(value, "strip") and self.trim:
value = value.strip()
if hasattr(value, "lower") and not self.case_sensitive:
value = value.lower()
if hasattr(self.choices, "__iter__"):
self.choices = [choice.lower()
for choice in self.choices]
try:
value = self.convert(value, operator)
except Exception as error:
if self.ignore:
continue
return self.handle_validation_error(error, bundle_errors)
if self.choices and value not in self.choices:
if current_app.config.get("BUNDLE_ERRORS", False) or bundle_errors:
return self.handle_validation_error(
ValueError(u"{0} is not a valid choice".format(
value)), bundle_errors)
self.handle_validation_error(
ValueError(u"{0} is not a valid choice".format(
value)), bundle_errors)
if name in request.unparsed_arguments:
request.unparsed_arguments.pop(name)
results.append(value)
if not results and self.required:
if isinstance(self.location, six.string_types):
error_msg = u"Missing required parameter in {0}".format(
_friendly_location.get(self.location, self.location)
)
else:
friendly_locations = [_friendly_location.get(loc, loc)
for loc in self.location]
error_msg = u"Missing required parameter in {0}".format(
' or '.join(friendly_locations)
)
if current_app.config.get("BUNDLE_ERRORS", False) or bundle_errors:
return self.handle_validation_error(ValueError(error_msg), bundle_errors)
self.handle_validation_error(ValueError(error_msg), bundle_errors)
if not results:
if callable(self.default):
return self.default(), _not_found
else:
return self.default, _not_found
if self.action == 'append':
return results, _found
if self.action == 'store' or len(results) == 1:
return results[0], _found
return results, _found
class RequestParser(object):
"""Enables adding and parsing of multiple arguments in the context of a
single request. Ex::
from flask_restful import reqparse
parser = reqparse.RequestParser()
parser.add_argument('foo')
parser.add_argument('int_bar', type=int)
args = parser.parse_args()
:param bool trim: If enabled, trims whitespace on all arguments in this
parser
:param bool bundle_errors: If enabled, do not abort when first error occurs,
return a dict with the name of the argument and the error message to be
bundled and return all validation errors
"""
def __init__(self, argument_class=Argument, namespace_class=Namespace,
trim=False, bundle_errors=False):
self.args = []
self.argument_class = argument_class
self.namespace_class = namespace_class
self.trim = trim
self.bundle_errors = bundle_errors
def add_argument(self, *args, **kwargs):
"""Adds an argument to be parsed.
Accepts either a single instance of Argument or arguments to be passed
into :class:`Argument`'s constructor.
See :class:`Argument`'s constructor for documentation on the
available options.
"""
if len(args) == 1 and isinstance(args[0], self.argument_class):
self.args.append(args[0])
else:
self.args.append(self.argument_class(*args, **kwargs))
# Do not know what other argument classes are out there
if self.trim and self.argument_class is Argument:
# enable trim for appended element
self.args[-1].trim = kwargs.get('trim', self.trim)
return self
def parse_args(self, req=None, strict=False, http_error_code=400):
"""Parse all arguments from the provided request and return the results
as a Namespace
:param req: Can be used to overwrite request from Flask
:param strict: if req includes args not in parser, throw 400 BadRequest exception
:param http_error_code: use custom error code for `flask_restful.abort()`
"""
if req is None:
req = request
namespace = self.namespace_class()
# A record of arguments not yet parsed; as each is found
# among self.args, it will be popped out
req.unparsed_arguments = dict(self.argument_class('').source(req)) if strict else {}
errors = {}
for arg in self.args:
value, found = arg.parse(req, self.bundle_errors)
if isinstance(value, ValueError):
errors.update(found)
found = None
if found or arg.store_missing:
namespace[arg.dest or arg.name] = value
if errors:
flask_restful.abort(http_error_code, message=errors)
if strict and req.unparsed_arguments:
raise exceptions.BadRequest('Unknown arguments: %s'
% ', '.join(req.unparsed_arguments.keys()))
return namespace
def copy(self):
""" Creates a copy of this RequestParser with the same set of arguments """
parser_copy = self.__class__(self.argument_class, self.namespace_class)
parser_copy.args = deepcopy(self.args)
parser_copy.trim = self.trim
parser_copy.bundle_errors = self.bundle_errors
return parser_copy
def replace_argument(self, name, *args, **kwargs):
""" Replace the argument matching the given name with a new version. """
new_arg = self.argument_class(name, *args, **kwargs)
for index, arg in enumerate(self.args[:]):
if new_arg.name == arg.name:
del self.args[index]
self.args.append(new_arg)
break
return self
def remove_argument(self, name):
""" Remove the argument matching the given name. """
for index, arg in enumerate(self.args[:]):
if name == arg.name:
del self.args[index]
break
return self
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/flask_restful/__init__.py
|
from __future__ import absolute_import
from functools import wraps, partial
from flask import request, url_for, current_app
from flask import abort as original_flask_abort
from flask import make_response as original_flask_make_response
from flask.views import MethodView
from flask.signals import got_request_exception
from werkzeug.datastructures import Headers
from werkzeug.exceptions import HTTPException, MethodNotAllowed, NotFound, NotAcceptable, InternalServerError
from werkzeug.wrappers import Response as ResponseBase
from flask_restful.utils import http_status_message, unpack, OrderedDict
from flask_restful.representations.json import output_json
import sys
from flask.helpers import _endpoint_from_view_func
from types import MethodType
import operator
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
__all__ = ('Api', 'Resource', 'marshal', 'marshal_with', 'marshal_with_field', 'abort')
def abort(http_status_code, **kwargs):
"""Raise a HTTPException for the given http_status_code. Attach any keyword
arguments to the exception for later processing.
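Ex (a minimal sketch; the ``message`` keyword is illustrative only)::
abort(404, message="Resource not found")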
"""
#noinspection PyUnresolvedReferences
try:
original_flask_abort(http_status_code)
except HTTPException as e:
if len(kwargs):
e.data = kwargs
raise
DEFAULT_REPRESENTATIONS = [('application/json', output_json)]
class Api(object):
"""
The main entry point for the application.
You need to initialize it with a Flask Application: ::
>>> app = Flask(__name__)
>>> api = restful.Api(app)
Alternatively, you can use :meth:`init_app` to set the Flask application
after it has been constructed.
:param app: the Flask application object
:type app: flask.Flask or flask.Blueprint
:param prefix: Prefix all routes with a value, e.g. v1 or 2010-04-01
:type prefix: str
:param default_mediatype: The default media type to return
:type default_mediatype: str
:param decorators: Decorators to attach to every resource
:type decorators: list
:param catch_all_404s: Use :meth:`handle_error`
to handle 404 errors throughout your app
:type catch_all_404s: bool
:param serve_challenge_on_401: Whether to serve a challenge response to
clients on receiving 401. This usually leads to a username/password
popup in web browsers.
:param url_part_order: A string that controls the order in which the
pieces of the url are concatenated when the full url is constructed.
'b' is the blueprint (or blueprint registration) prefix, 'a' is the
api prefix, and 'e' is the path component the endpoint is added with
:param errors: A dictionary to define a custom response for each
exception or error raised during a request
:type errors: dict
"""
def __init__(self, app=None, prefix='',
default_mediatype='application/json', decorators=None,
catch_all_404s=False, serve_challenge_on_401=False,
url_part_order='bae', errors=None):
self.representations = OrderedDict(DEFAULT_REPRESENTATIONS)
self.urls = {}
self.prefix = prefix
self.default_mediatype = default_mediatype
self.decorators = decorators if decorators else []
self.catch_all_404s = catch_all_404s
self.serve_challenge_on_401 = serve_challenge_on_401
self.url_part_order = url_part_order
self.errors = errors or {}
self.blueprint_setup = None
self.endpoints = set()
self.resources = []
self.app = None
self.blueprint = None
if app is not None:
self.app = app
self.init_app(app)
def init_app(self, app):
"""Initialize this class with the given :class:`flask.Flask`
application or :class:`flask.Blueprint` object.
:param app: the Flask application or blueprint object
:type app: flask.Flask
:type app: flask.Blueprint
Examples::
api = Api()
api.add_resource(...)
api.init_app(app)
"""
# If app is a blueprint, defer the initialization
try:
app.record(self._deferred_blueprint_init)
# flask.Blueprint has a 'record' attribute; flask.Flask does not
except AttributeError:
self._init_app(app)
else:
self.blueprint = app
def _complete_url(self, url_part, registration_prefix):
"""This method is used to defer the construction of the final url in
the case that the Api is created with a Blueprint.
:param url_part: The part of the url the endpoint is registered with
:param registration_prefix: The part of the url contributed by the
blueprint. Generally speaking, BlueprintSetupState.url_prefix
"""
parts = {
'b': registration_prefix,
'a': self.prefix,
'e': url_part
}
return ''.join(parts[key] for key in self.url_part_order if parts[key])
@staticmethod
def _blueprint_setup_add_url_rule_patch(blueprint_setup, rule, endpoint=None, view_func=None, **options):
"""Method used to patch BlueprintSetupState.add_url_rule for setup
state instance corresponding to this Api instance. Exists primarily
to enable _complete_url's function.
:param blueprint_setup: The BlueprintSetupState instance (self)
:param rule: A string or callable that takes a string and returns a
string(_complete_url) that is the url rule for the endpoint
being registered
:param endpoint: See BlueprintSetupState.add_url_rule
:param view_func: See BlueprintSetupState.add_url_rule
:param **options: See BlueprintSetupState.add_url_rule
"""
if callable(rule):
rule = rule(blueprint_setup.url_prefix)
elif blueprint_setup.url_prefix:
rule = blueprint_setup.url_prefix + rule
options.setdefault('subdomain', blueprint_setup.subdomain)
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
defaults = blueprint_setup.url_defaults
if 'defaults' in options:
defaults = dict(defaults, **options.pop('defaults'))
blueprint_setup.app.add_url_rule(rule, '%s.%s' % (blueprint_setup.blueprint.name, endpoint),
view_func, defaults=defaults, **options)
def _deferred_blueprint_init(self, setup_state):
"""Synchronize prefix between blueprint/api and registration options, then
perform initialization with setup_state.app :class:`flask.Flask` object.
When a :class:`flask_restful.Api` object is initialized with a blueprint,
this method is recorded on the blueprint to be run when the blueprint is later
registered to a :class:`flask.Flask` object. This method also monkeypatches
BlueprintSetupState.add_url_rule with _blueprint_setup_add_url_rule_patch.
:param setup_state: The setup state object passed to deferred functions
during blueprint registration
:type setup_state: flask.blueprints.BlueprintSetupState
"""
self.blueprint_setup = setup_state
if setup_state.add_url_rule.__name__ != '_blueprint_setup_add_url_rule_patch':
setup_state._original_add_url_rule = setup_state.add_url_rule
setup_state.add_url_rule = MethodType(Api._blueprint_setup_add_url_rule_patch,
setup_state)
if not setup_state.first_registration:
raise ValueError('flask-restful blueprints can only be registered once.')
self._init_app(setup_state.app)
def _init_app(self, app):
"""Perform initialization actions with the given :class:`flask.Flask`
object.
:param app: The flask application object
:type app: flask.Flask
"""
app.handle_exception = partial(self.error_router, app.handle_exception)
app.handle_user_exception = partial(self.error_router, app.handle_user_exception)
if len(self.resources) > 0:
for resource, urls, kwargs in self.resources:
self._register_view(app, resource, *urls, **kwargs)
def owns_endpoint(self, endpoint):
"""Tests if an endpoint name (not path) belongs to this Api. Takes
into account the Blueprint name part of the endpoint name.
:param endpoint: The name of the endpoint being checked
:return: bool
"""
if self.blueprint:
if endpoint.startswith(self.blueprint.name):
endpoint = endpoint.split(self.blueprint.name + '.', 1)[-1]
else:
return False
return endpoint in self.endpoints
def _should_use_fr_error_handler(self):
""" Determine if error should be handled with FR or default Flask
The goal is to return Flask error handlers for non-FR-related routes,
and FR errors (with the correct media type) for FR endpoints. This
method currently handles 404 and 405 errors.
:return: bool
"""
adapter = current_app.create_url_adapter(request)
try:
adapter.match()
except MethodNotAllowed as e:
# Check if the other HTTP methods at this url would hit the Api
valid_route_method = e.valid_methods[0]
rule, _ = adapter.match(method=valid_route_method, return_rule=True)
return self.owns_endpoint(rule.endpoint)
except NotFound:
return self.catch_all_404s
except:
# Werkzeug throws other kinds of exceptions, such as Redirect
pass
def _has_fr_route(self):
"""Encapsulating the rules for whether the request was to a Flask endpoint"""
# 404's, 405's, which might not have a url_rule
if self._should_use_fr_error_handler():
return True
# for all other errors, just check if FR dispatched the route
if not request.url_rule:
return False
return self.owns_endpoint(request.url_rule.endpoint)
def error_router(self, original_handler, e):
"""This function decides whether the error occured in a flask-restful
endpoint or not. If it happened in a flask-restful endpoint, our
handler will be dispatched. If it happened in an unrelated view, the
app's original error handler will be dispatched.
In the event that the error occurred in a flask-restful endpoint but
the local handler can't resolve the situation, the router will fall
back onto the original_handler as last resort.
:param original_handler: the original Flask error handler for the app
:type original_handler: function
:param e: the exception raised while handling the request
:type e: Exception
"""
if self._has_fr_route():
try:
return self.handle_error(e)
except Exception:
pass # Fall through to original handler
return original_handler(e)
def handle_error(self, e):
"""Error handler for the API transforms a raised exception into a Flask
response, with the appropriate HTTP status code and body.
:param e: the raised Exception object
:type e: Exception
"""
got_request_exception.send(current_app._get_current_object(), exception=e)
if not isinstance(e, HTTPException) and current_app.propagate_exceptions:
exc_type, exc_value, tb = sys.exc_info()
if exc_value is e:
raise
else:
raise e
headers = Headers()
if isinstance(e, HTTPException):
if e.response is not None:
# If HTTPException is initialized with a response, then return e.get_response().
# This prevents specified error response from being overridden.
# eg. HTTPException(response=Response("Hello World"))
resp = e.get_response()
return resp
code = e.code
default_data = {
'message': getattr(e, 'description', http_status_message(code))
}
headers = e.get_response().headers
else:
code = 500
default_data = {
'message': http_status_message(code),
}
# Werkzeug exceptions generate a content-length header which is added
# to the response in addition to the actual content-length header
# https://github.com/flask-restful/flask-restful/issues/534
remove_headers = ('Content-Length',)
for header in remove_headers:
headers.pop(header, None)
data = getattr(e, 'data', default_data)
if code and code >= 500:
exc_info = sys.exc_info()
if exc_info[1] is None:
exc_info = None
current_app.log_exception(exc_info)
error_cls_name = type(e).__name__
if error_cls_name in self.errors:
custom_data = self.errors.get(error_cls_name, {})
code = custom_data.get('status', 500)
data.update(custom_data)
if code == 406 and self.default_mediatype is None:
# if we are handling NotAcceptable (406), make sure that
# make_response uses a representation we support as the
# default mediatype (so that make_response doesn't throw
# another NotAcceptable error).
supported_mediatypes = list(self.representations.keys())
fallback_mediatype = supported_mediatypes[0] if supported_mediatypes else "text/plain"
resp = self.make_response(
data,
code,
headers,
fallback_mediatype = fallback_mediatype
)
else:
resp = self.make_response(data, code, headers)
if code == 401:
resp = self.unauthorized(resp)
return resp
def mediatypes_method(self):
"""Return a method that returns a list of mediatypes
"""
return lambda resource_cls: self.mediatypes() + [self.default_mediatype]
def add_resource(self, resource, *urls, **kwargs):
"""Adds a resource to the api.
:param resource: the class name of your resource
:type resource: :class:`Type[Resource]`
:param urls: one or more url routes to match for the resource, standard
flask routing rules apply. Any url variables will be
passed to the resource method as args.
:type urls: str
:param endpoint: endpoint name (defaults to :meth:`Resource.__name__.lower`).
Can be used to reference this route in :class:`fields.Url` fields
:type endpoint: str
:param resource_class_args: args to be forwarded to the constructor of
the resource.
:type resource_class_args: tuple
:param resource_class_kwargs: kwargs to be forwarded to the constructor
of the resource.
:type resource_class_kwargs: dict
Additional keyword arguments not specified above will be passed as-is
to :meth:`flask.Flask.add_url_rule`.
Examples::
api.add_resource(HelloWorld, '/', '/hello')
api.add_resource(Foo, '/foo', endpoint="foo")
api.add_resource(FooSpecial, '/special/foo', endpoint="foo")
"""
if self.app is not None:
self._register_view(self.app, resource, *urls, **kwargs)
else:
self.resources.append((resource, urls, kwargs))
def resource(self, *urls, **kwargs):
"""Wraps a :class:`~flask_restful.Resource` class, adding it to the
api. Parameters are the same as :meth:`~flask_restful.Api.add_resource`.
Example::
app = Flask(__name__)
api = restful.Api(app)
@api.resource('/foo')
class Foo(Resource):
def get(self):
return 'Hello, World!'
"""
def decorator(cls):
self.add_resource(cls, *urls, **kwargs)
return cls
return decorator
def _register_view(self, app, resource, *urls, **kwargs):
endpoint = kwargs.pop('endpoint', None) or resource.__name__.lower()
self.endpoints.add(endpoint)
resource_class_args = kwargs.pop('resource_class_args', ())
resource_class_kwargs = kwargs.pop('resource_class_kwargs', {})
# NOTE: 'view_functions' is cleaned up from Blueprint class in Flask 1.0
if endpoint in getattr(app, 'view_functions', {}):
previous_view_class = app.view_functions[endpoint].__dict__['view_class']
# if you override the endpoint with a different class, avoid the collision by raising an exception
if previous_view_class != resource:
raise ValueError('This endpoint (%s) is already set to the class %s.' % (endpoint, previous_view_class.__name__))
resource.mediatypes = self.mediatypes_method() # Hacky
resource.endpoint = endpoint
resource_func = self.output(resource.as_view(endpoint, *resource_class_args,
**resource_class_kwargs))
for decorator in self.decorators:
resource_func = decorator(resource_func)
for url in urls:
# If this Api has a blueprint
if self.blueprint:
# And this Api has been setup
if self.blueprint_setup:
# Set the rule to a string directly, as the blueprint is already
# set up.
self.blueprint_setup.add_url_rule(url, view_func=resource_func, **kwargs)
continue
else:
# Set the rule to a function that expects the blueprint prefix
# to construct the final url. Allows deferment of url finalization
# in the case that the associated Blueprint has not yet been
# registered to an application, so we can wait for the registration
# prefix
rule = partial(self._complete_url, url)
else:
# If we've got no Blueprint, just build a url with no prefix
rule = self._complete_url(url, '')
# Add the url to the application or blueprint
app.add_url_rule(rule, view_func=resource_func, **kwargs)
def output(self, resource):
"""Wraps a resource (as a flask view function), for cases where the
resource does not directly return a response object
:param resource: The resource as a flask view function
"""
@wraps(resource)
def wrapper(*args, **kwargs):
resp = resource(*args, **kwargs)
if isinstance(resp, ResponseBase): # There may be a better way to test
return resp
data, code, headers = unpack(resp)
return self.make_response(data, code, headers=headers)
return wrapper
def url_for(self, resource, **values):
"""Generates a URL to the given resource.
Works like :func:`flask.url_for`."""
endpoint = resource.endpoint
if self.blueprint:
endpoint = '{0}.{1}'.format(self.blueprint.name, endpoint)
return url_for(endpoint, **values)
def make_response(self, data, *args, **kwargs):
"""Looks up the representation transformer for the requested media
type, invoking the transformer to create a response object. This
defaults to default_mediatype if no transformer is found for the
requested mediatype. If default_mediatype is None, a 406 Not
Acceptable response will be sent as per RFC 2616 section 14.1
:param data: Python object containing response data to be transformed
"""
default_mediatype = kwargs.pop('fallback_mediatype', None) or self.default_mediatype
mediatype = request.accept_mimetypes.best_match(
self.representations,
default=default_mediatype,
)
if mediatype is None:
raise NotAcceptable()
if mediatype in self.representations:
resp = self.representations[mediatype](data, *args, **kwargs)
resp.headers['Content-Type'] = mediatype
return resp
elif mediatype == 'text/plain':
resp = original_flask_make_response(str(data), *args, **kwargs)
resp.headers['Content-Type'] = 'text/plain'
return resp
else:
raise InternalServerError()
def mediatypes(self):
"""Returns a list of requested mediatypes sent in the Accept header"""
return [h for h, q in sorted(request.accept_mimetypes,
key=operator.itemgetter(1), reverse=True)]
def representation(self, mediatype):
"""Allows additional representation transformers to be declared for the
api. Transformers are functions that must be decorated with this
method, passing the mediatype the transformer represents. Three
arguments are passed to the transformer:
* The data to be represented in the response body
* The http status code
* A dictionary of headers
The transformer should convert the data appropriately for the mediatype
and return a Flask response object.
Ex::
@api.representation('application/xml')
def xml(data, code, headers):
resp = make_response(convert_data_to_xml(data), code)
resp.headers.extend(headers)
return resp
"""
def wrapper(func):
self.representations[mediatype] = func
return func
return wrapper
def unauthorized(self, response):
""" Given a response, change it to ask for credentials """
if self.serve_challenge_on_401:
realm = current_app.config.get("HTTP_BASIC_AUTH_REALM", "flask-restful")
challenge = u"{0} realm=\"{1}\"".format("Basic", realm)
response.headers['WWW-Authenticate'] = challenge
return response
class Resource(MethodView):
"""
Represents an abstract RESTful resource. Concrete resources should
extend from this class and expose methods for each supported HTTP
method. If a resource is invoked with an unsupported HTTP method,
the API will return a response with status 405 Method Not Allowed.
Otherwise the appropriate method is called and passed all arguments
from the url rule used when adding the resource to an Api instance. See
:meth:`~flask_restful.Api.add_resource` for details.
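Ex (a minimal sketch; names are illustrative only)::
class HelloWorld(Resource):
def get(self):
return {'hello': 'world'}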
"""
representations = None
method_decorators = []
def dispatch_request(self, *args, **kwargs):
# Taken from flask
#noinspection PyUnresolvedReferences
meth = getattr(self, request.method.lower(), None)
if meth is None and request.method == 'HEAD':
meth = getattr(self, 'get', None)
assert meth is not None, 'Unimplemented method %r' % request.method
if isinstance(self.method_decorators, Mapping):
decorators = self.method_decorators.get(request.method.lower(), [])
else:
decorators = self.method_decorators
for decorator in decorators:
meth = decorator(meth)
resp = meth(*args, **kwargs)
if isinstance(resp, ResponseBase): # There may be a better way to test
return resp
representations = self.representations or OrderedDict()
#noinspection PyUnresolvedReferences
mediatype = request.accept_mimetypes.best_match(representations, default=None)
if mediatype in representations:
data, code, headers = unpack(resp)
resp = representations[mediatype](data, code, headers)
resp.headers['Content-Type'] = mediatype
return resp
return resp
def marshal(data, fields, envelope=None):
"""Takes raw data (in the form of a dict, list, object) and a dict of
fields to output and filters the data based on those fields.
:param data: the actual object(s) from which the fields are taken
:param fields: a dict whose keys will make up the final serialized
response output
:param envelope: optional key that will be used to envelop the serialized
response
>>> from flask_restful import fields, marshal
>>> data = { 'a': 100, 'b': 'foo' }
>>> mfields = { 'a': fields.Raw }
>>> marshal(data, mfields)
OrderedDict([('a', 100)])
>>> marshal(data, mfields, envelope='data')
OrderedDict([('data', OrderedDict([('a', 100)]))])
"""
def make(cls):
if isinstance(cls, type):
return cls()
return cls
if isinstance(data, (list, tuple)):
return (OrderedDict([(envelope, [marshal(d, fields) for d in data])])
if envelope else [marshal(d, fields) for d in data])
items = ((k, marshal(data, v) if isinstance(v, dict)
else make(v).output(k, data))
for k, v in fields.items())
return OrderedDict([(envelope, OrderedDict(items))]) if envelope else OrderedDict(items)
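# Hedged illustration (hypothetical names): a nested dict inside ``fields``
# recurses with the *same* input object, so flat input can be restructured
# into nested output:
#
#     >>> data = {'a': 100, 'x': 1}
#     >>> mfields = {'a': fields.Raw, 'nested': {'x': fields.Raw}}
#     >>> marshal(data, mfields)
#     OrderedDict([('a', 100), ('nested', OrderedDict([('x', 1)]))])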
class marshal_with(object):
"""A decorator that apply marshalling to the return values of your methods.
>>> from flask_restful import fields, marshal_with
>>> mfields = { 'a': fields.Raw }
>>> @marshal_with(mfields)
... def get():
... return { 'a': 100, 'b': 'foo' }
...
...
>>> get()
OrderedDict([('a', 100)])
>>> @marshal_with(mfields, envelope='data')
... def get():
... return { 'a': 100, 'b': 'foo' }
...
...
>>> get()
OrderedDict([('data', OrderedDict([('a', 100)]))])
see :meth:`flask_restful.marshal`
"""
def __init__(self, fields, envelope=None):
"""
:param fields: a dict whose keys will make up the final
serialized response output
:param envelope: optional key that will be used to envelop the serialized
response
"""
self.fields = fields
self.envelope = envelope
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
resp = f(*args, **kwargs)
if isinstance(resp, tuple):
data, code, headers = unpack(resp)
return marshal(data, self.fields, self.envelope), code, headers
else:
return marshal(resp, self.fields, self.envelope)
return wrapper
class marshal_with_field(object):
"""
A decorator that formats the return values of your methods with a single field.
>>> from flask_restful import marshal_with_field, fields
>>> @marshal_with_field(fields.List(fields.Integer))
... def get():
... return ['1', 2, 3.0]
...
>>> get()
[1, 2, 3]
see :meth:`flask_restful.marshal_with`
"""
def __init__(self, field):
"""
:param field: a single field with which to marshal the output.
"""
if isinstance(field, type):
self.field = field()
else:
self.field = field
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
resp = f(*args, **kwargs)
if isinstance(resp, tuple):
data, code, headers = unpack(resp)
return self.field.format(data), code, headers
return self.field.format(resp)
return wrapper
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/flask_restful/__version__.py
|
#!/usr/bin/env python
__version__ = '0.3.8'
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/flask_restful/inputs.py
|
from calendar import timegm
from datetime import datetime, time, timedelta
from email.utils import parsedate_tz, mktime_tz
import re
import aniso8601
import pytz
# Constants for upgrading date-based intervals to full datetimes.
START_OF_DAY = time(0, 0, 0, tzinfo=pytz.UTC)
END_OF_DAY = time(23, 59, 59, 999999, tzinfo=pytz.UTC)
# https://code.djangoproject.com/browser/django/trunk/django/core/validators.py
# basic auth added by frank
url_regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:[^:@]+?:[^:@]*?@|)' # basic auth
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
def url(value):
"""Validate a URL.
:param string value: The URL to validate
:returns: The URL if valid.
:raises: ValueError
"""
if not url_regex.search(value):
message = u"{0} is not a valid URL".format(value)
if url_regex.search('http://' + value):
message += u". Did you mean: http://{0}".format(value)
raise ValueError(message)
return value
class regex(object):
"""Validate a string based on a regular expression.
Example::
parser = reqparse.RequestParser()
parser.add_argument('example', type=inputs.regex('^[0-9]+$'))
Input to the ``example`` argument will be rejected if it contains anything
but numbers.
:param pattern: The regular expression the input must match
:type pattern: str
:param flags: Flags to change expression behavior
:type flags: int
"""
def __init__(self, pattern, flags=0):
self.pattern = pattern
self.re = re.compile(pattern, flags)
def __call__(self, value):
if not self.re.search(value):
message = 'Value does not match pattern: "{0}"'.format(self.pattern)
raise ValueError(message)
return value
def __deepcopy__(self, memo):
return regex(self.pattern)
def _normalize_interval(start, end, value):
"""Normalize datetime intervals.
Given a pair of datetime.date or datetime.datetime objects,
returns a 2-tuple of tz-aware UTC datetimes spanning the same interval.
For datetime.date objects, the returned interval starts at 00:00:00.0
on the first date and ends at 00:00:00.0 on the second.
Naive datetimes are assumed to be in UTC and made timezone-aware.
Timezone-aware datetimes are converted to UTC.
Params:
- start: A date or datetime
- end: A date or datetime
"""
if not isinstance(start, datetime):
start = datetime.combine(start, START_OF_DAY)
end = datetime.combine(end, START_OF_DAY)
if start.tzinfo is None:
start = pytz.UTC.localize(start)
end = pytz.UTC.localize(end)
else:
start = start.astimezone(pytz.UTC)
end = end.astimezone(pytz.UTC)
return start, end
def _expand_datetime(start, value):
if not isinstance(start, datetime):
# Expand a single date object to be the interval spanning
# that entire day.
end = start + timedelta(days=1)
else:
# Expand a datetime based on the finest resolution provided
# in the original input string.
time = value.split('T')[1]
time_without_offset = re.sub('[+-].+', '', time)
num_separators = time_without_offset.count(':')
if num_separators == 0:
# Hour resolution
end = start + timedelta(hours=1)
elif num_separators == 1:
# Minute resolution:
end = start + timedelta(minutes=1)
else:
# Second resolution
end = start + timedelta(seconds=1)
return end
def _parse_interval(value):
"""Do some nasty try/except voodoo to get some sort of datetime
object(s) out of the string.
"""
try:
return sorted(aniso8601.parse_interval(value))
except ValueError:
try:
return aniso8601.parse_datetime(value), None
except ValueError:
return aniso8601.parse_date(value), None
def iso8601interval(value, argument='argument'):
"""Parses ISO 8601-formatted datetime intervals into tuples of datetimes.
Accepts either a single date(time) or a full interval using start/end
or start/duration notation, with the following behavior:
- Intervals are defined as inclusive start, exclusive end
- A single datetime is expanded into the interval spanning one unit of
the finest resolution present in the input (up to one day for a bare date).
- The smallest accepted resolution is 1 second.
- All timezones are accepted as values; returned datetimes are
localized to UTC. Naive inputs and date inputs are assumed to be UTC.
Examples::
"2013-01-01" -> datetime(2013, 1, 1), datetime(2013, 1, 2)
"2013-01-01T12" -> datetime(2013, 1, 1, 12), datetime(2013, 1, 1, 13)
"2013-01-01/2013-02-28" -> datetime(2013, 1, 1), datetime(2013, 2, 28)
"2013-01-01/P3D" -> datetime(2013, 1, 1), datetime(2013, 1, 4)
"2013-01-01T12:00/PT30M" -> datetime(2013, 1, 1, 12), datetime(2013, 1, 1, 12, 30)
"2013-01-01T06:00/2013-01-01T12:00" -> datetime(2013, 1, 1, 6), datetime(2013, 1, 1, 12)
:param str value: The ISO8601 date time as a string
:return: Two UTC datetimes, the start and the end of the specified interval
:rtype: A tuple (datetime, datetime)
:raises: ValueError, if the interval is invalid.
"""
try:
start, end = _parse_interval(value)
if end is None:
end = _expand_datetime(start, value)
start, end = _normalize_interval(start, end, value)
except ValueError:
raise ValueError(
"Invalid {arg}: {value}. {arg} must be a valid ISO8601 "
"date/time interval.".format(arg=argument, value=value),
)
return start, end
def date(value):
"""Parse a valid looking date in the format YYYY-mm-dd"""
date = datetime.strptime(value, "%Y-%m-%d")
return date
def _get_integer(value):
try:
return int(value)
except (TypeError, ValueError):
raise ValueError('{0} is not a valid integer'.format(value))
def natural(value, argument='argument'):
""" Restrict input type to the natural numbers (0, 1, 2, 3...) """
value = _get_integer(value)
if value < 0:
error = ('Invalid {arg}: {value}. {arg} must be a non-negative '
'integer'.format(arg=argument, value=value))
raise ValueError(error)
return value
def positive(value, argument='argument'):
""" Restrict input type to the positive integers (1, 2, 3...) """
value = _get_integer(value)
if value < 1:
error = ('Invalid {arg}: {value}. {arg} must be a positive '
'integer'.format(arg=argument, value=value))
raise ValueError(error)
return value
class int_range(object):
""" Restrict input to an integer in a range (inclusive) """
def __init__(self, low, high, argument='argument'):
self.low = low
self.high = high
self.argument = argument
def __call__(self, value):
value = _get_integer(value)
if value < self.low or value > self.high:
error = ('Invalid {arg}: {val}. {arg} must be within the range {lo} - {hi}'
.format(arg=self.argument, val=value, lo=self.low, hi=self.high))
raise ValueError(error)
return value
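# Usage sketch mirroring the ``regex`` example above (the argument name is
# a hypothetical example):
#
#     parser = reqparse.RequestParser()
#     parser.add_argument('page', type=inputs.int_range(1, 100))
#
# Values outside 1-100 (inclusive) are rejected with a ValueError.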
def boolean(value):
"""Parse the string ``"true"`` or ``"false"`` as a boolean (case
insensitive). Also accepts ``"1"`` and ``"0"`` as ``True``/``False``
(respectively). If the input is from the request JSON body, the type is
already a native python boolean, and will be passed through without
further parsing.
"""
if isinstance(value, bool):
return value
if not value:
raise ValueError("boolean type must be non-null")
value = value.lower()
if value in ('true', '1',):
return True
if value in ('false', '0',):
return False
raise ValueError("Invalid literal for boolean(): {0}".format(value))
def datetime_from_rfc822(datetime_str):
"""Turns an RFC822 formatted date into a datetime object.
Example::
inputs.datetime_from_rfc822("Wed, 02 Oct 2002 08:00:00 EST")
:param datetime_str: The RFC822-complying string to transform
:type datetime_str: str
:return: A datetime
"""
return datetime.fromtimestamp(mktime_tz(parsedate_tz(datetime_str)), pytz.utc)
def datetime_from_iso8601(datetime_str):
"""Turns an ISO8601 formatted date into a datetime object.
Example::
inputs.datetime_from_iso8601("2012-01-01T23:30:00+02:00")
:param datetime_str: The ISO8601-complying string to transform
:type datetime_str: str
:return: A datetime
"""
return aniso8601.parse_datetime(datetime_str)
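if __name__ == '__main__':
    # Minimal smoke test (a sketch, not part of the library): exercises a
    # few of the validators above with arbitrary example values.
    assert boolean('TRUE') is True
    assert natural('0') == 0
    assert positive('3') == 3
    start, end = iso8601interval('2013-01-01')
    assert (end - start).days == 1  # a bare date expands to one full day
    print('inputs smoke test passed')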
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/flask_restful
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/flask_restful/representations/__init__.py
| 0 |
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/flask_restful
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/flask_restful/representations/json.py
|
from __future__ import absolute_import
from flask import make_response, current_app
from flask_restful.utils import PY3
from json import dumps
def output_json(data, code, headers=None):
"""Makes a Flask response with a JSON encoded body"""
settings = current_app.config.get('RESTFUL_JSON', {})
# If we're in debug mode, and the indent is not set, we set it to a
# reasonable value here. Note that this won't override any existing value
# that was set. We also set the "sort_keys" value.
if current_app.debug:
settings.setdefault('indent', 4)
settings.setdefault('sort_keys', not PY3)
# always end the json dumps with a new line
# see https://github.com/mitsuhiko/flask/pull/1262
dumped = dumps(data, **settings) + "\n"
resp = make_response(dumped, code)
resp.headers.extend(headers or {})
return resp
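# Configuration sketch (``app`` is a hypothetical Flask instance): any
# keyword accepted by ``json.dumps`` can be supplied through RESTFUL_JSON.
#
#     app.config['RESTFUL_JSON'] = {'indent': 2, 'ensure_ascii': False}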
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/flask_restful
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/flask_restful/utils/cors.py
|
from datetime import timedelta
from flask import make_response, request, current_app
from functools import update_wrapper
def crossdomain(origin=None, methods=None, headers=None, expose_headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True, credentials=False):
"""
http://flask.pocoo.org/snippets/56/
"""
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, str):
headers = ', '.join(x.upper() for x in headers)
if expose_headers is not None and not isinstance(expose_headers, str):
expose_headers = ', '.join(x.upper() for x in expose_headers)
if not isinstance(origin, str):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if credentials:
h['Access-Control-Allow-Credentials'] = 'true'
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
if expose_headers is not None:
h['Access-Control-Expose-Headers'] = expose_headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
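# Usage sketch (hypothetical app and view): the decorator attaches CORS
# headers to every response of the wrapped view, including the automatic
# OPTIONS preflight response.
#
#     @app.route('/data')
#     @crossdomain(origin='*', headers=['content-type'])
#     def data():
#         return jsonify(value=42)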
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/flask_restful
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/flask_restful/utils/__init__.py
|
import sys
# OrderedDict lives in ``collections``; it has never been part of
# ``collections.abc``, so it can be imported directly.
from collections import OrderedDict
from werkzeug.http import HTTP_STATUS_CODES
PY3 = sys.version_info > (3,)
def http_status_message(code):
"""Maps an HTTP status code to the textual status"""
return HTTP_STATUS_CODES.get(code, '')
def unpack(value):
"""Return a three tuple of data, code, and headers"""
if not isinstance(value, tuple):
return value, 200, {}
try:
data, code, headers = value
return data, code, headers
except ValueError:
pass
try:
data, code = value
return data, code, {}
except ValueError:
pass
return value, 200, {}
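# Behavior sketch for ``unpack`` (doctest-style; the header name is a
# hypothetical example):
#
#     >>> unpack('body')
#     ('body', 200, {})
#     >>> unpack(('body', 201))
#     ('body', 201, {})
#     >>> unpack(('body', 201, {'X-Example': '1'}))
#     ('body', 201, {'X-Example': '1'})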
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/flask_restful
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/flask_restful/utils/crypto.py
|
import pickle
from Crypto.Cipher import AES
from base64 import b64encode, b64decode
__all__ = "encrypt", "decrypt"
BLOCK_SIZE = 16
INTERRUPT = b'\0'  # sentinel separating the payload from the trailing padding
PADDING = b'\1'
def pad(data):
return data + INTERRUPT + PADDING * (BLOCK_SIZE - (len(data) + 1) % BLOCK_SIZE)
def strip(data):
return data.rstrip(PADDING).rstrip(INTERRUPT)
def create_cipher(key, seed):
if len(seed) != 16:
raise ValueError("Choose a seed of 16 bytes")
if len(key) != 32:
raise ValueError("Choose a key of 32 bytes")
return AES.new(key, AES.MODE_CBC, seed)
def encrypt(plaintext_data, key, seed):
plaintext_data = pickle.dumps(plaintext_data, pickle.HIGHEST_PROTOCOL)  # pickle first so arbitrary Python objects survive the round trip
return b64encode(create_cipher(key, seed).encrypt(pad(plaintext_data)))
def decrypt(encrypted_data, key, seed):
return pickle.loads(strip(create_cipher(key, seed).decrypt(b64decode(encrypted_data))))
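if __name__ == '__main__':
    # Round-trip sketch with arbitrary demo values: the key must be 32 bytes
    # and the seed (IV) 16 bytes, as create_cipher enforces. Two caveats for
    # this module as written: a fixed IV weakens CBC mode, and decrypt()
    # unpickles its input, so it must never see untrusted data.
    key = b'0' * 32
    seed = b'1' * 16
    token = encrypt({'user': 'demo'}, key, seed)
    assert decrypt(token, key, seed) == {'user': 'demo'}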
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/click-7.1.2.dist-info/RECORD
|
click-7.1.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
click-7.1.2.dist-info/LICENSE.rst,sha256=morRBqOU6FO_4h9C9OctWSgZoigF2ZG18ydQKSkrZY0,1475
click-7.1.2.dist-info/METADATA,sha256=LrRgakZKV7Yg3qJqX_plu2WhFW81MzP3EqQmZhHIO8M,2868
click-7.1.2.dist-info/RECORD,,
click-7.1.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
click-7.1.2.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
click-7.1.2.dist-info/top_level.txt,sha256=J1ZQogalYS4pphY_lPECoNMfw0HzTSrZglC4Yfwo4xA,6
click/__init__.py,sha256=FkyGDQ-cbiQxP_lxgUspyFYS48f2S_pTcfKPz-d_RMo,2463
click/__pycache__/__init__.cpython-39.pyc,,
click/__pycache__/_bashcomplete.cpython-39.pyc,,
click/__pycache__/_compat.cpython-39.pyc,,
click/__pycache__/_termui_impl.cpython-39.pyc,,
click/__pycache__/_textwrap.cpython-39.pyc,,
click/__pycache__/_unicodefun.cpython-39.pyc,,
click/__pycache__/_winconsole.cpython-39.pyc,,
click/__pycache__/core.cpython-39.pyc,,
click/__pycache__/decorators.cpython-39.pyc,,
click/__pycache__/exceptions.cpython-39.pyc,,
click/__pycache__/formatting.cpython-39.pyc,,
click/__pycache__/globals.cpython-39.pyc,,
click/__pycache__/parser.cpython-39.pyc,,
click/__pycache__/termui.cpython-39.pyc,,
click/__pycache__/testing.cpython-39.pyc,,
click/__pycache__/types.cpython-39.pyc,,
click/__pycache__/utils.cpython-39.pyc,,
click/_bashcomplete.py,sha256=9J98IHQYmCAr2Jup6TDshUr5FJEen-AoQCZR0K5nKxQ,12309
click/_compat.py,sha256=AoMaYnZ-3pwtNXuHtlb6_UXsayoG0QZiHKIRy2VFezc,24169
click/_termui_impl.py,sha256=yNktUMAdjYOU1HMkq915jR3zgAzUNtGSQqSTSSMn3eQ,20702
click/_textwrap.py,sha256=ajCzkzFly5tjm9foQ5N9_MOeaYJMBjAltuFa69n4iXY,1197
click/_unicodefun.py,sha256=apLSNEBZgUsQNPMUv072zJ1swqnm0dYVT5TqcIWTt6w,4201
click/_winconsole.py,sha256=6YDu6Rq1Wxx4w9uinBMK2LHvP83aerZM9GQurlk3QDo,10010
click/core.py,sha256=V6DJzastGhrC6WTDwV9MSLwcJUdX2Uf1ypmgkjBdn_Y,77650
click/decorators.py,sha256=3TvEO_BkaHl7k6Eh1G5eC7JK4LKPdpFqH9JP0QDyTlM,11215
click/exceptions.py,sha256=3pQAyyMFzx5A3eV0Y27WtDTyGogZRbrC6_o5DjjKBbw,8118
click/formatting.py,sha256=Wb4gqFEpWaKPgAbOvnkCl8p-bEZx5KpM5ZSByhlnJNk,9281
click/globals.py,sha256=ht7u2kUGI08pAarB4e4yC8Lkkxy6gJfRZyzxEj8EbWQ,1501
click/parser.py,sha256=mFK-k58JtPpqO0AC36WAr0t5UfzEw1mvgVSyn7WCe9M,15691
click/termui.py,sha256=G7QBEKIepRIGLvNdGwBTYiEtSImRxvTO_AglVpyHH2s,23998
click/testing.py,sha256=EUEsDUqNXFgCLhZ0ZFOROpaVDA5I_rijwnNPE6qICgA,12854
click/types.py,sha256=wuubik4VqgqAw5dvbYFkDt-zSAx97y9TQXuXcVaRyQA,25045
click/utils.py,sha256=4VEcJ7iEHwjnFuzEuRtkT99o5VG3zqSD7Q2CVzv13WU,15940
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/click-7.1.2.dist-info/WHEEL
|
Wheel-Version: 1.0
Generator: bdist_wheel (0.34.2)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/click-7.1.2.dist-info/LICENSE.rst
|
Copyright 2014 Pallets
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| 0 |