repo_id | file_path | content | __index_level_0__
---|---|---|---|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/hstore.py
|
# postgresql/hstore.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
from .array import ARRAY
from ... import types as sqltypes
from ... import util
from ...sql import functions as sqlfunc
from ...sql import operators
__all__ = ("HSTORE", "hstore")
idx_precedence = operators._PRECEDENCE[operators.json_getitem_op]
GETITEM = operators.custom_op(
"->",
precedence=idx_precedence,
natural_self_precedent=True,
eager_grouping=True,
)
HAS_KEY = operators.custom_op(
"?",
precedence=idx_precedence,
natural_self_precedent=True,
eager_grouping=True,
)
HAS_ALL = operators.custom_op(
"?&",
precedence=idx_precedence,
natural_self_precedent=True,
eager_grouping=True,
)
HAS_ANY = operators.custom_op(
"?|",
precedence=idx_precedence,
natural_self_precedent=True,
eager_grouping=True,
)
CONTAINS = operators.custom_op(
"@>",
precedence=idx_precedence,
natural_self_precedent=True,
eager_grouping=True,
)
CONTAINED_BY = operators.custom_op(
"<@",
precedence=idx_precedence,
natural_self_precedent=True,
eager_grouping=True,
)
class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
"""Represent the PostgreSQL HSTORE type.
The :class:`.HSTORE` type stores dictionaries containing strings, e.g.::
data_table = Table('data_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', HSTORE)
)
with engine.connect() as conn:
conn.execute(
data_table.insert(),
data = {"key1": "value1", "key2": "value2"}
)
:class:`.HSTORE` provides for a wide range of operations, including:
* Index operations::
data_table.c.data['some key'] == 'some value'
* Containment operations::
data_table.c.data.has_key('some key')
data_table.c.data.has_all(['one', 'two', 'three'])
* Concatenation::
data_table.c.data + {"k1": "v1"}
For a full list of special methods see
:class:`.HSTORE.comparator_factory`.
For usage with the SQLAlchemy ORM, it may be desirable to combine
the usage of :class:`.HSTORE` with the :class:`.MutableDict` dictionary,
now part of the :mod:`sqlalchemy.ext.mutable`
extension. This extension will allow "in-place" changes to the
dictionary, e.g. addition of new keys or replacement/removal of existing
keys to/from the current dictionary, to produce events which will be
detected by the unit of work::
from sqlalchemy.ext.mutable import MutableDict
class MyClass(Base):
__tablename__ = 'data_table'
id = Column(Integer, primary_key=True)
data = Column(MutableDict.as_mutable(HSTORE))
my_object = session.query(MyClass).one()
# in-place mutation, requires Mutable extension
# in order for the ORM to detect
my_object.data['some_key'] = 'some value'
session.commit()
When the :mod:`sqlalchemy.ext.mutable` extension is not used, the ORM
will not be alerted to any changes to the contents of an existing
dictionary, unless that dictionary value is re-assigned to the
HSTORE-attribute itself, thus generating a change event.
.. seealso::
:class:`.hstore` - render the PostgreSQL ``hstore()`` function.
"""
__visit_name__ = "HSTORE"
hashable = False
text_type = sqltypes.Text()
def __init__(self, text_type=None):
"""Construct a new :class:`.HSTORE`.
:param text_type: the type that should be used for indexed values.
Defaults to :class:`_types.Text`.
.. versionadded:: 1.1.0
"""
if text_type is not None:
self.text_type = text_type
class Comparator(
sqltypes.Indexable.Comparator, sqltypes.Concatenable.Comparator
):
"""Define comparison operations for :class:`.HSTORE`."""
def has_key(self, other):
"""Boolean expression. Test for presence of a key. Note that the
key may be a SQLA expression.
"""
return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)
def has_all(self, other):
"""Boolean expression. Test for presence of all keys in jsonb
"""
return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)
def has_any(self, other):
"""Boolean expression. Test for presence of any key in jsonb
"""
return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)
def contains(self, other, **kwargs):
"""Boolean expression. Test if keys (or array) are a superset
of/contained the keys of the argument jsonb expression.
"""
return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
def contained_by(self, other):
"""Boolean expression. Test if keys are a proper subset of the
keys of the argument hstore expression.
"""
return self.operate(
CONTAINED_BY, other, result_type=sqltypes.Boolean
)
def _setup_getitem(self, index):
return GETITEM, index, self.type.text_type
def defined(self, key):
"""Boolean expression. Test for presence of a non-NULL value for
the key. Note that the key may be a SQLA expression.
"""
return _HStoreDefinedFunction(self.expr, key)
def delete(self, key):
"""HStore expression. Returns the contents of this hstore with the
given key deleted. Note that the key may be a SQLA expression.
"""
if isinstance(key, dict):
key = _serialize_hstore(key)
return _HStoreDeleteFunction(self.expr, key)
def slice(self, array):
"""HStore expression. Returns a subset of an hstore defined by
array of keys.
"""
return _HStoreSliceFunction(self.expr, array)
def keys(self):
"""Text array expression. Returns array of keys."""
return _HStoreKeysFunction(self.expr)
def vals(self):
"""Text array expression. Returns array of values."""
return _HStoreValsFunction(self.expr)
def array(self):
"""Text array expression. Returns array of alternating keys and
values.
"""
return _HStoreArrayFunction(self.expr)
def matrix(self):
"""Text array expression. Returns array of [key, value] pairs."""
return _HStoreMatrixFunction(self.expr)
comparator_factory = Comparator
def bind_processor(self, dialect):
if util.py2k:
encoding = dialect.encoding
def process(value):
if isinstance(value, dict):
return _serialize_hstore(value).encode(encoding)
else:
return value
else:
def process(value):
if isinstance(value, dict):
return _serialize_hstore(value)
else:
return value
return process
def result_processor(self, dialect, coltype):
if util.py2k:
encoding = dialect.encoding
def process(value):
if value is not None:
return _parse_hstore(value.decode(encoding))
else:
return value
else:
def process(value):
if value is not None:
return _parse_hstore(value)
else:
return value
return process
class hstore(sqlfunc.GenericFunction):
"""Construct an hstore value within a SQL expression using the
PostgreSQL ``hstore()`` function.
The :class:`.hstore` function accepts one or two arguments as described
in the PostgreSQL documentation.
E.g.::
from sqlalchemy.dialects.postgresql import array, hstore
select([hstore('key1', 'value1')])
select([
hstore(
array(['key1', 'key2', 'key3']),
array(['value1', 'value2', 'value3'])
)
])
.. seealso::
:class:`.HSTORE` - the PostgreSQL ``HSTORE`` datatype.
"""
type = HSTORE
name = "hstore"
class _HStoreDefinedFunction(sqlfunc.GenericFunction):
type = sqltypes.Boolean
name = "defined"
class _HStoreDeleteFunction(sqlfunc.GenericFunction):
type = HSTORE
name = "delete"
class _HStoreSliceFunction(sqlfunc.GenericFunction):
type = HSTORE
name = "slice"
class _HStoreKeysFunction(sqlfunc.GenericFunction):
type = ARRAY(sqltypes.Text)
name = "akeys"
class _HStoreValsFunction(sqlfunc.GenericFunction):
type = ARRAY(sqltypes.Text)
name = "avals"
class _HStoreArrayFunction(sqlfunc.GenericFunction):
type = ARRAY(sqltypes.Text)
name = "hstore_to_array"
class _HStoreMatrixFunction(sqlfunc.GenericFunction):
type = ARRAY(sqltypes.Text)
name = "hstore_to_matrix"
#
# parsing. note that none of this is used with the psycopg2 backend,
# which provides its own native extensions.
#
# My best guess at the parsing rules of hstore literals, since no formal
# grammar is given. This is mostly reverse engineered from PG's input parser
# behavior.
HSTORE_PAIR_RE = re.compile(
r"""
(
"(?P<key> (\\ . | [^"])* )" # Quoted key
)
[ ]* => [ ]* # Pair operator, optional adjoining whitespace
(
(?P<value_null> NULL ) # NULL value
| "(?P<value> (\\ . | [^"])* )" # Quoted value
)
""",
re.VERBOSE,
)
HSTORE_DELIMITER_RE = re.compile(
r"""
[ ]* , [ ]*
""",
re.VERBOSE,
)
def _parse_error(hstore_str, pos):
"""format an unmarshalling error."""
ctx = 20
hslen = len(hstore_str)
parsed_tail = hstore_str[max(pos - ctx - 1, 0) : min(pos, hslen)]
residual = hstore_str[min(pos, hslen) : min(pos + ctx + 1, hslen)]
if len(parsed_tail) > ctx:
parsed_tail = "[...]" + parsed_tail[1:]
if len(residual) > ctx:
residual = residual[:-1] + "[...]"
return "After %r, could not parse residual at position %d: %r" % (
parsed_tail,
pos,
residual,
)
def _parse_hstore(hstore_str):
"""Parse an hstore from its literal string representation.
Attempts to approximate PG's hstore input parsing rules as closely as
possible. Although currently this is not strictly necessary, since the
current implementation of hstore's output syntax is stricter than what it
accepts as input, the documentation makes no guarantees that will always
be the case.
"""
result = {}
pos = 0
pair_match = HSTORE_PAIR_RE.match(hstore_str)
while pair_match is not None:
key = pair_match.group("key").replace(r"\"", '"').replace("\\\\", "\\")
if pair_match.group("value_null"):
value = None
else:
value = (
pair_match.group("value")
.replace(r"\"", '"')
.replace("\\\\", "\\")
)
result[key] = value
pos += pair_match.end()
delim_match = HSTORE_DELIMITER_RE.match(hstore_str[pos:])
if delim_match is not None:
pos += delim_match.end()
pair_match = HSTORE_PAIR_RE.match(hstore_str[pos:])
if pos != len(hstore_str):
raise ValueError(_parse_error(hstore_str, pos))
return result
def _serialize_hstore(val):
"""Serialize a dictionary into an hstore literal. Keys and values must
both be strings (except None for values).
"""
def esc(s, position):
if position == "value" and s is None:
return "NULL"
elif isinstance(s, util.string_types):
return '"%s"' % s.replace("\\", "\\\\").replace('"', r"\"")
else:
raise ValueError(
"%r in %s position is not a string." % (s, position)
)
return ", ".join(
"%s=>%s" % (esc(k, "key"), esc(v, "value")) for k, v in val.items()
)
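# Illustrative sketch: a quick round trip through the private helpers above.
# Keys and values are plain strings; a value of None serializes to NULL.
if __name__ == "__main__":
    literal = _serialize_hstore({"key1": "value1", "key2": None})
    # e.g. '"key1"=>"value1", "key2"=>NULL' (pair order follows the dict)
    print(literal)
    # parsing the literal back recovers the original dictionary
    assert _parse_hstore(literal) == {"key1": "value1", "key2": None}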
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/oracle/zxjdbc.py
|
# oracle/zxjdbc.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: oracle+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: oracle+zxjdbc://user:pass@host/dbname
:driverurl: http://www.oracle.com/technetwork/database/features/jdbc/index-091264.html
.. note:: Jython is not supported by current versions of SQLAlchemy. The
zxjdbc dialect should be considered as experimental.
""" # noqa
import collections
import decimal
import re
from .base import OracleCompiler
from .base import OracleDialect
from .base import OracleExecutionContext
from ... import sql
from ... import types as sqltypes
from ... import util
from ...connectors.zxJDBC import ZxJDBCConnector
from ...engine import result as _result
from ...sql import expression
SQLException = zxJDBC = None
class _ZxJDBCDate(sqltypes.Date):
def result_processor(self, dialect, coltype):
def process(value):
if value is None:
return None
else:
return value.date()
return process
class _ZxJDBCNumeric(sqltypes.Numeric):
def result_processor(self, dialect, coltype):
# XXX: does the dialect return Decimal or not???
# if it does (in all cases), we could use a None processor as well as
# the to_float generic processor
if self.asdecimal:
def process(value):
if isinstance(value, decimal.Decimal):
return value
else:
return decimal.Decimal(str(value))
else:
def process(value):
if isinstance(value, decimal.Decimal):
return float(value)
else:
return value
return process
class OracleCompiler_zxjdbc(OracleCompiler):
def returning_clause(self, stmt, returning_cols):
self.returning_cols = list(
expression._select_iterables(returning_cols)
)
# within_columns_clause=False so that labels (foo AS bar) don't render
columns = [
self.process(c, within_columns_clause=False)
for c in self.returning_cols
]
if not hasattr(self, "returning_parameters"):
self.returning_parameters = []
binds = []
for i, col in enumerate(self.returning_cols):
dbtype = col.type.dialect_impl(self.dialect).get_dbapi_type(
self.dialect.dbapi
)
self.returning_parameters.append((i + 1, dbtype))
bindparam = sql.bindparam(
"ret_%d" % i, value=ReturningParam(dbtype)
)
self.binds[bindparam.key] = bindparam
binds.append(
self.bindparam_string(self._truncate_bindparam(bindparam))
)
return "RETURNING " + ", ".join(columns) + " INTO " + ", ".join(binds)
class OracleExecutionContext_zxjdbc(OracleExecutionContext):
def pre_exec(self):
if hasattr(self.compiled, "returning_parameters"):
# prepare a zxJDBC statement so we can grab its underlying
# OraclePreparedStatement's getReturnResultSet later
self.statement = self.cursor.prepare(self.statement)
def get_result_proxy(self):
if hasattr(self.compiled, "returning_parameters"):
rrs = None
try:
try:
rrs = self.statement.__statement__.getReturnResultSet()
next(rrs)
except SQLException as sqle:
msg = "%s [SQLCode: %d]" % (
sqle.getMessage(),
sqle.getErrorCode(),
)
if sqle.getSQLState() is not None:
msg += " [SQLState: %s]" % sqle.getSQLState()
raise zxJDBC.Error(msg)
else:
row = tuple(
self.cursor.datahandler.getPyObject(rrs, index, dbtype)
for index, dbtype in self.compiled.returning_parameters
)
return ReturningResultProxy(self, row)
finally:
if rrs is not None:
try:
rrs.close()
except SQLException:
pass
self.statement.close()
return _result.ResultProxy(self)
def create_cursor(self):
cursor = self._dbapi_connection.cursor()
cursor.datahandler = self.dialect.DataHandler(cursor.datahandler)
return cursor
class ReturningResultProxy(_result.FullyBufferedResultProxy):
"""ResultProxy backed by the RETURNING ResultSet results."""
def __init__(self, context, returning_row):
self._returning_row = returning_row
super(ReturningResultProxy, self).__init__(context)
def _cursor_description(self):
ret = []
for c in self.context.compiled.returning_cols:
if hasattr(c, "name"):
ret.append((c.name, c.type))
else:
ret.append((c.anon_label, c.type))
return ret
def _buffer_rows(self):
return collections.deque([self._returning_row])
class ReturningParam(object):
"""A bindparam value representing a RETURNING parameter.
Specially handled by OracleReturningDataHandler.
"""
def __init__(self, type_):
self.type = type_
def __eq__(self, other):
if isinstance(other, ReturningParam):
return self.type == other.type
return NotImplemented
def __ne__(self, other):
if isinstance(other, ReturningParam):
return self.type != other.type
return NotImplemented
def __repr__(self):
kls = self.__class__
return "<%s.%s object at 0x%x type=%s>" % (
kls.__module__,
kls.__name__,
id(self),
self.type,
)
class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect):
jdbc_db_name = "oracle"
jdbc_driver_name = "oracle.jdbc.OracleDriver"
statement_compiler = OracleCompiler_zxjdbc
execution_ctx_cls = OracleExecutionContext_zxjdbc
colspecs = util.update_copy(
OracleDialect.colspecs,
{sqltypes.Date: _ZxJDBCDate, sqltypes.Numeric: _ZxJDBCNumeric},
)
def __init__(self, *args, **kwargs):
super(OracleDialect_zxjdbc, self).__init__(*args, **kwargs)
global SQLException, zxJDBC
from java.sql import SQLException
from com.ziclix.python.sql import zxJDBC
from com.ziclix.python.sql.handler import OracleDataHandler
class OracleReturningDataHandler(OracleDataHandler):
"""zxJDBC DataHandler that specially handles ReturningParam."""
def setJDBCObject(self, statement, index, object_, dbtype=None):
if type(object_) is ReturningParam:
statement.registerReturnParameter(index, object_.type)
elif dbtype is None:
OracleDataHandler.setJDBCObject(
self, statement, index, object_
)
else:
OracleDataHandler.setJDBCObject(
self, statement, index, object_, dbtype
)
self.DataHandler = OracleReturningDataHandler
def initialize(self, connection):
super(OracleDialect_zxjdbc, self).initialize(connection)
self.implicit_returning = connection.connection.driverversion >= "10.2"
def _create_jdbc_url(self, url):
return "jdbc:oracle:thin:@%s:%s:%s" % (
url.host,
url.port or 1521,
url.database,
)
def _get_server_version_info(self, connection):
version = re.search(
r"Release ([\d\.]+)", connection.connection.dbversion
).group(1)
return tuple(int(x) for x in version.split("."))
dialect = OracleDialect_zxjdbc
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/oracle/__init__.py
|
# oracle/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base # noqa
from . import cx_oracle # noqa
from . import zxjdbc # noqa
from .base import BFILE
from .base import BINARY_DOUBLE
from .base import BINARY_FLOAT
from .base import BLOB
from .base import CHAR
from .base import CLOB
from .base import DATE
from .base import DOUBLE_PRECISION
from .base import FLOAT
from .base import INTERVAL
from .base import LONG
from .base import NCHAR
from .base import NCLOB
from .base import NUMBER
from .base import NVARCHAR
from .base import NVARCHAR2
from .base import RAW
from .base import ROWID
from .base import TIMESTAMP
from .base import VARCHAR
from .base import VARCHAR2
base.dialect = dialect = cx_oracle.dialect
__all__ = (
"VARCHAR",
"NVARCHAR",
"CHAR",
"NCHAR",
"DATE",
"NUMBER",
"BLOB",
"BFILE",
"CLOB",
"NCLOB",
"TIMESTAMP",
"RAW",
"FLOAT",
"DOUBLE_PRECISION",
"BINARY_DOUBLE",
"BINARY_FLOAT",
"LONG",
"dialect",
"INTERVAL",
"VARCHAR2",
"NVARCHAR2",
"ROWID",
)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/oracle/provision.py
|
from ... import create_engine
from ... import exc
from ...engine import url as sa_url
from ...testing.provision import configure_follower
from ...testing.provision import create_db
from ...testing.provision import drop_db
from ...testing.provision import follower_url_from_main
from ...testing.provision import log
from ...testing.provision import run_reap_dbs
from ...testing.provision import temp_table_keyword_args
from ...testing.provision import update_db_opts
@create_db.for_db("oracle")
def _oracle_create_db(cfg, eng, ident):
# NOTE: make sure you've run "ALTER DATABASE default tablespace users" or
# similar, so that the default tablespace is not "system"; reflection will
# fail otherwise
with eng.connect() as conn:
conn.execute("create user %s identified by xe" % ident)
conn.execute("create user %s_ts1 identified by xe" % ident)
conn.execute("create user %s_ts2 identified by xe" % ident)
conn.execute("grant dba to %s" % (ident,))
conn.execute("grant unlimited tablespace to %s" % ident)
conn.execute("grant unlimited tablespace to %s_ts1" % ident)
conn.execute("grant unlimited tablespace to %s_ts2" % ident)
@configure_follower.for_db("oracle")
def _oracle_configure_follower(config, ident):
config.test_schema = "%s_ts1" % ident
config.test_schema_2 = "%s_ts2" % ident
def _ora_drop_ignore(conn, dbname):
try:
conn.execute("drop user %s cascade" % dbname)
log.info("Reaped db: %s", dbname)
return True
except exc.DatabaseError as err:
log.warning("couldn't drop db: %s", err)
return False
@drop_db.for_db("oracle")
def _oracle_drop_db(cfg, eng, ident):
with eng.connect() as conn:
# cx_Oracle seems to occasionally leak open connections when a large
# suite is run, even if we confirm we have zero references to
# connection objects.
# while there is a "kill session" command in Oracle,
# it unfortunately does not release the connection sufficiently.
_ora_drop_ignore(conn, ident)
_ora_drop_ignore(conn, "%s_ts1" % ident)
_ora_drop_ignore(conn, "%s_ts2" % ident)
@update_db_opts.for_db("oracle")
def _oracle_update_db_opts(db_url, db_opts):
pass
@run_reap_dbs.for_db("oracle")
def _reap_oracle_dbs(url, idents):
log.info("db reaper connecting to %r", url)
eng = create_engine(url)
with eng.connect() as conn:
log.info("identifiers in file: %s", ", ".join(idents))
to_reap = conn.execute(
"select u.username from all_users u where username "
"like 'TEST_%' and not exists (select username "
"from v$session where username=u.username)"
)
all_names = {username.lower() for (username,) in to_reap}
to_drop = set()
for name in all_names:
if name.endswith("_ts1") or name.endswith("_ts2"):
continue
elif name in idents:
to_drop.add(name)
if "%s_ts1" % name in all_names:
to_drop.add("%s_ts1" % name)
if "%s_ts2" % name in all_names:
to_drop.add("%s_ts2" % name)
dropped = total = 0
for total, username in enumerate(to_drop, 1):
if _ora_drop_ignore(conn, username):
dropped += 1
log.info(
"Dropped %d out of %d stale databases detected", dropped, total
)
@follower_url_from_main.for_db("oracle")
def _oracle_follower_url_from_main(url, ident):
url = sa_url.make_url(url)
url.username = ident
url.password = "xe"
return url
@temp_table_keyword_args.for_db("oracle")
def _oracle_temp_table_keyword_args(cfg, eng):
return {
"prefixes": ["GLOBAL TEMPORARY"],
"oracle_on_commit": "PRESERVE ROWS",
}
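# Illustrative sketch: the keyword args returned above are unpacked into a
# Table definition by the test provisioning machinery; a standalone
# equivalent (table name and columns are hypothetical) would look like:
if __name__ == "__main__":
    from sqlalchemy import Column, Integer, MetaData, Table

    metadata = MetaData()
    scratch = Table(
        "scratch",
        metadata,
        Column("id", Integer, primary_key=True),
        # renders CREATE GLOBAL TEMPORARY TABLE ... ON COMMIT PRESERVE ROWS
        prefixes=["GLOBAL TEMPORARY"],
        oracle_on_commit="PRESERVE ROWS",
    )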
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/oracle/cx_oracle.py
|
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: oracle+cx_oracle
:name: cx-Oracle
:dbapi: cx_oracle
:connectstring: oracle+cx_oracle://user:pass@host:port/dbname[?key=value&key=value...]
:url: https://oracle.github.io/python-cx_Oracle/
DSN vs. Hostname connections
-----------------------------
The dialect will connect to a DSN if no database name portion is presented,
such as::
engine = create_engine("oracle+cx_oracle://scott:tiger@oracle1120/?encoding=UTF-8&nencoding=UTF-8")
Above, ``oracle1120`` is passed to cx_Oracle as an Oracle datasource name.
Alternatively, if a database name is present, the ``cx_Oracle.makedsn()``
function is used to create an ad-hoc "datasource" name assuming host
and port::
engine = create_engine("oracle+cx_oracle://scott:tiger@hostname:1521/dbname?encoding=UTF-8&nencoding=UTF-8")
Above, the DSN would be created as follows::
>>> import cx_Oracle
>>> cx_Oracle.makedsn("hostname", 1521, sid="dbname")
'(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=hostname)(PORT=1521))(CONNECT_DATA=(SID=dbname)))'
The ``service_name`` parameter, also consumed by ``cx_Oracle.makedsn()``, may
be specified in the URL query string, e.g. ``?service_name=my_service``.
Passing cx_Oracle connect arguments
-----------------------------------
Additional connection arguments can usually be passed via the URL
query string; particular symbols like ``cx_Oracle.SYSDBA`` are intercepted
and converted to the correct symbol::
e = create_engine(
"oracle+cx_oracle://user:pass@dsn?encoding=UTF-8&nencoding=UTF-8&mode=SYSDBA&events=true")
.. versionchanged:: 1.3 the cx_oracle dialect now accepts all argument names
within the URL string itself, to be passed to the cx_Oracle DBAPI. As
was the case earlier but not correctly documented, the
:paramref:`_sa.create_engine.connect_args` parameter also accepts all
cx_Oracle DBAPI connect arguments.
To pass arguments directly to ``.connect()`` without using the query
string, use the :paramref:`_sa.create_engine.connect_args` dictionary.
Any cx_Oracle parameter value and/or constant may be passed, such as::
import cx_Oracle
e = create_engine(
"oracle+cx_oracle://user:pass@dsn",
connect_args={
"encoding": "UTF-8",
"nencoding": "UTF-8",
"mode": cx_Oracle.SYSDBA,
"events": True
}
)
Options consumed by the SQLAlchemy cx_Oracle dialect outside of the driver
--------------------------------------------------------------------------
There are also options that are consumed by the SQLAlchemy cx_oracle dialect
itself. These options are always passed directly to :func:`_sa.create_engine`
, such as::
e = create_engine(
"oracle+cx_oracle://user:pass@dsn", coerce_to_unicode=False)
The parameters accepted by the cx_oracle dialect are as follows:
* ``arraysize`` - set the cx_oracle.arraysize value on cursors, defaulted
to 50. This setting is significant with cx_Oracle as the contents of LOB
objects are only readable within a "live" row (e.g. within a batch of
50 rows).
* ``auto_convert_lobs`` - defaults to True; See :ref:`cx_oracle_lob`.
* ``coerce_to_unicode`` - see :ref:`cx_oracle_unicode` for detail.
* ``coerce_to_decimal`` - see :ref:`cx_oracle_numeric` for detail.
* ``encoding_errors`` - see :ref:`cx_oracle_unicode_encoding_errors` for detail.
.. _cx_oracle_unicode:
Unicode
-------
As is the case for all DBAPIs under Python 3, all strings are inherently
Unicode strings. Under Python 2, cx_Oracle also supports Python Unicode
objects directly. In all cases, however, the driver requires an explicit
encoding configuration.
Ensuring the Correct Client Encoding
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The long accepted standard for establishing client encoding for nearly all
Oracle related software is via the `NLS_LANG <https://www.oracle.com/database/technologies/faq-nls-lang.html>`_
environment variable. cx_Oracle like most other Oracle drivers will use
this environment variable as the source of its encoding configuration. The
format of this variable is idiosyncratic; a typical value would be
``AMERICAN_AMERICA.AL32UTF8``.
The cx_Oracle driver also supports a programmatic alternative which is to
pass the ``encoding`` and ``nencoding`` parameters directly to its
``.connect()`` function. These can be present in the URL as follows::
engine = create_engine("oracle+cx_oracle://scott:tiger@oracle1120/?encoding=UTF-8&nencoding=UTF-8")
For the meaning of the ``encoding`` and ``nencoding`` parameters, please
consult
`Characters Sets and National Language Support (NLS) <https://cx-oracle.readthedocs.io/en/latest/user_guide/globalization.html#globalization>`_.
.. seealso::
`Characters Sets and National Language Support (NLS) <https://cx-oracle.readthedocs.io/en/latest/user_guide/globalization.html#globalization>`_
- in the cx_Oracle documentation.
Unicode-specific Column datatypes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The Core expression language handles unicode data by use of the :class:`.Unicode`
and :class:`.UnicodeText`
datatypes. These types correspond to the VARCHAR2 and CLOB Oracle datatypes by
default. When using these datatypes with Unicode data, it is expected that
the Oracle database is configured with a Unicode-aware character set, as well
as that the ``NLS_LANG`` environment variable is set appropriately, so that
the VARCHAR2 and CLOB datatypes can accommodate the data.
In the case that the Oracle database is not configured with a Unicode character
set, the two options are to use the :class:`_types.NCHAR` and
:class:`_oracle.NCLOB` datatypes explicitly, or to pass the flag
``use_nchar_for_unicode=True`` to :func:`_sa.create_engine`,
which will cause the
SQLAlchemy dialect to use NCHAR/NCLOB for the :class:`.Unicode` /
:class:`.UnicodeText` datatypes instead of VARCHAR/CLOB.
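A minimal sketch of opting in to NCHAR/NCLOB for these types (the connection
string here is illustrative)::
    engine = create_engine(
        "oracle+cx_oracle://scott:tiger@dsn",
        use_nchar_for_unicode=True,
    )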
.. versionchanged:: 1.3 The :class:`.Unicode` and :class:`.UnicodeText`
datatypes now correspond to the ``VARCHAR2`` and ``CLOB`` Oracle datatypes
unless the ``use_nchar_for_unicode=True`` is passed to the dialect
when :func:`_sa.create_engine` is called.
Unicode Coercion of result rows under Python 2
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When result sets are fetched that include strings, under Python 3 the cx_Oracle
DBAPI returns all strings as Python Unicode objects, since Python 3 only has a
Unicode string type. This occurs for data fetched from datatypes such as
VARCHAR2, CHAR, CLOB, NCHAR, NCLOB, etc. In order to provide
cross-compatibility under Python 2, the SQLAlchemy cx_Oracle dialect will add
Unicode-conversion to string data under Python 2 as well. Historically, this
made use of converters that were supplied by cx_Oracle but were found to be
non-performant; SQLAlchemy's own converters are used for the string to Unicode
conversion under Python 2. To disable the Python 2 Unicode conversion for
VARCHAR2, CHAR, and CLOB, the flag ``coerce_to_unicode=False`` can be passed to
:func:`_sa.create_engine`.
.. versionchanged:: 1.3 Unicode conversion is applied to all string values
by default under python 2. The ``coerce_to_unicode`` now defaults to True
and can be set to False to disable the Unicode coercion of strings that are
delivered as VARCHAR2/CHAR/CLOB data.
.. _cx_oracle_unicode_encoding_errors:
Encoding Errors
^^^^^^^^^^^^^^^
For the unusual case that data in the Oracle database is present with a broken
encoding, the dialect accepts a parameter ``encoding_errors`` which will be
passed to Unicode decoding functions in order to affect how decoding errors are
handled. The value is ultimately consumed by the Python `decode
<https://docs.python.org/3/library/stdtypes.html#bytes.decode>`_ function, and
is passed both via cx_Oracle's ``encodingErrors`` parameter consumed by
``Cursor.var()``, as well as SQLAlchemy's own decoding function, as the
cx_Oracle dialect makes use of both under different circumstances.
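A minimal sketch of passing the parameter (the handler name ``replace`` is
illustrative; any error handler accepted by ``bytes.decode`` may be used)::
    engine = create_engine(
        "oracle+cx_oracle://scott:tiger@dsn",
        encoding_errors="replace",
    )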
.. versionadded:: 1.3.11
.. _cx_oracle_setinputsizes:
Fine grained control over cx_Oracle data binding performance with setinputsizes
-------------------------------------------------------------------------------
The cx_Oracle DBAPI has a deep and fundamental reliance upon the usage of the
DBAPI ``setinputsizes()`` call. The purpose of this call is to establish the
datatypes that are bound to a SQL statement for Python values being passed as
parameters. While virtually no other DBAPI assigns any use to the
``setinputsizes()`` call, the cx_Oracle DBAPI relies upon it heavily in its
interactions with the Oracle client interface, and in some scenarios it is not
possible for SQLAlchemy to know exactly how data should be bound, as some
settings can cause profoundly different performance characteristics, while
altering the type coercion behavior at the same time.
Users of the cx_Oracle dialect are **strongly encouraged** to read through
cx_Oracle's list of built-in datatype symbols at
http://cx-oracle.readthedocs.io/en/latest/module.html#types.
Note that in some cases, significant performance degradation can occur when
using these types vs. not, in particular when specifying ``cx_Oracle.CLOB``.
On the SQLAlchemy side, the :meth:`.DialectEvents.do_setinputsizes` event can
be used both for runtime visibility (e.g. logging) of the setinputsizes step as
well as to fully control how ``setinputsizes()`` is used on a per-statement
basis.
.. versionadded:: 1.2.9 Added :meth:`.DialectEvents.do_setinputsizes`
Example 1 - logging all setinputsizes calls
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following example illustrates how to log the intermediary values from a
SQLAlchemy perspective before they are converted to the raw ``setinputsizes()``
parameter dictionary. The keys of the dictionary are :class:`.BindParameter`
objects which have a ``.key`` and a ``.type`` attribute::
from sqlalchemy import create_engine, event
engine = create_engine("oracle+cx_oracle://scott:tiger@host/xe")
@event.listens_for(engine, "do_setinputsizes")
def _log_setinputsizes(inputsizes, cursor, statement, parameters, context):
for bindparam, dbapitype in inputsizes.items():
log.info(
"Bound parameter name: %s SQLAlchemy type: %r "
"DBAPI object: %s",
bindparam.key, bindparam.type, dbapitype)
Example 2 - remove all bindings to CLOB
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``CLOB`` datatype in cx_Oracle incurs a significant performance overhead,
however is set by default for the ``Text`` type within the SQLAlchemy 1.2
series. This setting can be modified as follows::
from sqlalchemy import create_engine, event
from cx_Oracle import CLOB
engine = create_engine("oracle+cx_oracle://scott:tiger@host/xe")
@event.listens_for(engine, "do_setinputsizes")
def _remove_clob(inputsizes, cursor, statement, parameters, context):
for bindparam, dbapitype in list(inputsizes.items()):
if dbapitype is CLOB:
del inputsizes[bindparam]
.. _cx_oracle_returning:
RETURNING Support
-----------------
The cx_Oracle dialect implements RETURNING using OUT parameters.
The dialect supports RETURNING fully; however, cx_Oracle 6 is recommended
for complete support.
.. _cx_oracle_lob:
LOB Objects
-----------
cx_oracle returns oracle LOBs using the cx_oracle.LOB object. SQLAlchemy
converts these to strings so that the interface of the Binary type is
consistent with that of other backends, which takes place within a cx_Oracle
outputtypehandler.
cx_Oracle prior to version 6 would require that LOB objects be read before
a new batch of rows would be read, as determined by the ``cursor.arraysize``.
As of the 6 series, this limitation has been lifted. Nevertheless, because
SQLAlchemy pre-reads these LOBs up front, this issue is avoided in any case.
To disable the auto "read()" feature of the dialect, the flag
``auto_convert_lobs=False`` may be passed to :func:`_sa.create_engine`. Under
the cx_Oracle 5 series, having this flag turned off means there is the chance
of reading from a stale LOB object if not read as it is fetched. With
cx_Oracle 6, this issue is resolved.
.. versionchanged:: 1.2 the LOB handling system has been greatly simplified
internally to make use of outputtypehandlers, and no longer makes use
of alternate "buffered" result set objects.
Two Phase Transactions Not Supported
-------------------------------------
Two phase transactions are **not supported** under cx_Oracle due to poor
driver support. As of cx_Oracle 6.0b1, the interface for
two phase transactions has been changed to be more of a direct pass-through
to the underlying OCI layer with less automation. The additional logic
to support this system is not implemented in SQLAlchemy.
.. _cx_oracle_numeric:
Precision Numerics
------------------
SQLAlchemy's numeric types can handle receiving and returning values as Python
``Decimal`` objects or float objects. When a :class:`.Numeric` object, or a
subclass such as :class:`.Float`, :class:`_oracle.DOUBLE_PRECISION` etc. is in
use, the :paramref:`.Numeric.asdecimal` flag determines if values should be
coerced to ``Decimal`` upon return, or returned as float objects. To make
matters more complicated under Oracle, Oracle's ``NUMBER`` type can also
represent integer values if the "scale" is zero, so the Oracle-specific
:class:`_oracle.NUMBER` type takes this into account as well.
The cx_Oracle dialect makes extensive use of connection- and cursor-level
"outputtypehandler" callables in order to coerce numeric values as requested.
These callables are specific to the particular flavor of :class:`.Numeric` in
use, as well as to the case where no SQLAlchemy typing objects are present. There are
observed scenarios where Oracle may send incomplete or ambiguous information
about the numeric types being returned, such as a query where the numeric types
are buried under multiple levels of subquery. The type handlers do their best
to make the right decision in all cases, deferring to the underlying cx_Oracle
DBAPI for all those cases where the driver can make the best decision.
When no typing objects are present, as when executing plain SQL strings, a
default "outputtypehandler" is present which will generally return numeric
values which specify precision and scale as Python ``Decimal`` objects. To
disable this coercion to decimal for performance reasons, pass the flag
``coerce_to_decimal=False`` to :func:`_sa.create_engine`::
engine = create_engine("oracle+cx_oracle://dsn", coerce_to_decimal=False)
The ``coerce_to_decimal`` flag only impacts the results of plain string
SQL statements that are not otherwise associated with a :class:`.Numeric`
SQLAlchemy type (or a subclass of such).
.. versionchanged:: 1.2 The numeric handling system for cx_Oracle has been
reworked to take advantage of newer cx_Oracle features as well
as better integration of outputtypehandlers.
""" # noqa
from __future__ import absolute_import
import collections
import decimal
import random
import re
from . import base as oracle
from .base import OracleCompiler
from .base import OracleDialect
from .base import OracleExecutionContext
from ... import exc
from ... import processors
from ... import types as sqltypes
from ... import util
from ...engine import result as _result
from ...util import compat
class _OracleInteger(sqltypes.Integer):
def get_dbapi_type(self, dbapi):
# see https://github.com/oracle/python-cx_Oracle/issues/
# 208#issuecomment-409715955
return int
def _cx_oracle_var(self, dialect, cursor):
cx_Oracle = dialect.dbapi
return cursor.var(
cx_Oracle.STRING, 255, arraysize=cursor.arraysize, outconverter=int
)
def _cx_oracle_outputtypehandler(self, dialect):
def handler(cursor, name, default_type, size, precision, scale):
return self._cx_oracle_var(dialect, cursor)
return handler
class _OracleNumeric(sqltypes.Numeric):
is_number = False
def bind_processor(self, dialect):
if self.scale == 0:
return None
elif self.asdecimal:
processor = processors.to_decimal_processor_factory(
decimal.Decimal, self._effective_decimal_return_scale
)
def process(value):
if isinstance(value, (int, float)):
return processor(value)
elif value is not None and value.is_infinite():
return float(value)
else:
return value
return process
else:
return processors.to_float
def result_processor(self, dialect, coltype):
return None
def _cx_oracle_outputtypehandler(self, dialect):
cx_Oracle = dialect.dbapi
is_cx_oracle_6 = dialect._is_cx_oracle_6
def handler(cursor, name, default_type, size, precision, scale):
outconverter = None
if precision:
if self.asdecimal:
if default_type == cx_Oracle.NATIVE_FLOAT:
# receiving float and doing Decimal after the fact
# allows for float("inf") to be handled
type_ = default_type
outconverter = decimal.Decimal
elif is_cx_oracle_6:
type_ = decimal.Decimal
else:
type_ = cx_Oracle.STRING
outconverter = dialect._to_decimal
else:
if self.is_number and scale == 0:
# integer. cx_Oracle is observed to handle the widest
# variety of ints when no directives are passed,
# from 5.2 to 7.0. See [ticket:4457]
return None
else:
type_ = cx_Oracle.NATIVE_FLOAT
else:
if self.asdecimal:
if default_type == cx_Oracle.NATIVE_FLOAT:
type_ = default_type
outconverter = decimal.Decimal
elif is_cx_oracle_6:
type_ = decimal.Decimal
else:
type_ = cx_Oracle.STRING
outconverter = dialect._to_decimal
else:
if self.is_number and scale == 0:
# integer. cx_Oracle is observed to handle the widest
# variety of ints when no directives are passed,
# from 5.2 to 7.0. See [ticket:4457]
return None
else:
type_ = cx_Oracle.NATIVE_FLOAT
return cursor.var(
type_,
255,
arraysize=cursor.arraysize,
outconverter=outconverter,
)
return handler
class _OracleBinaryFloat(_OracleNumeric):
def get_dbapi_type(self, dbapi):
return dbapi.NATIVE_FLOAT
class _OracleBINARY_FLOAT(_OracleBinaryFloat, oracle.BINARY_FLOAT):
pass
class _OracleBINARY_DOUBLE(_OracleBinaryFloat, oracle.BINARY_DOUBLE):
pass
class _OracleNUMBER(_OracleNumeric):
is_number = True
class _OracleDate(sqltypes.Date):
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
return value.date()
else:
return value
return process
# TODO: the names used across CHAR / VARCHAR / NCHAR / NVARCHAR
# here are inconsistent and not very good
class _OracleChar(sqltypes.CHAR):
def get_dbapi_type(self, dbapi):
return dbapi.FIXED_CHAR
class _OracleNChar(sqltypes.NCHAR):
def get_dbapi_type(self, dbapi):
return dbapi.FIXED_NCHAR
class _OracleUnicodeStringNCHAR(oracle.NVARCHAR2):
def get_dbapi_type(self, dbapi):
return dbapi.NCHAR
class _OracleUnicodeStringCHAR(sqltypes.Unicode):
def get_dbapi_type(self, dbapi):
return None
class _OracleUnicodeTextNCLOB(oracle.NCLOB):
def get_dbapi_type(self, dbapi):
return dbapi.NCLOB
class _OracleUnicodeTextCLOB(sqltypes.UnicodeText):
def get_dbapi_type(self, dbapi):
return dbapi.CLOB
class _OracleText(sqltypes.Text):
def get_dbapi_type(self, dbapi):
return dbapi.CLOB
class _OracleLong(oracle.LONG):
def get_dbapi_type(self, dbapi):
return dbapi.LONG_STRING
class _OracleString(sqltypes.String):
pass
class _OracleEnum(sqltypes.Enum):
def bind_processor(self, dialect):
enum_proc = sqltypes.Enum.bind_processor(self, dialect)
def process(value):
raw_str = enum_proc(value)
return raw_str
return process
class _OracleBinary(sqltypes.LargeBinary):
def get_dbapi_type(self, dbapi):
return dbapi.BLOB
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
if not dialect.auto_convert_lobs:
return None
else:
return super(_OracleBinary, self).result_processor(
dialect, coltype
)
class _OracleInterval(oracle.INTERVAL):
def get_dbapi_type(self, dbapi):
return dbapi.INTERVAL
class _OracleRaw(oracle.RAW):
pass
class _OracleRowid(oracle.ROWID):
def get_dbapi_type(self, dbapi):
return dbapi.ROWID
class OracleCompiler_cx_oracle(OracleCompiler):
_oracle_cx_sql_compiler = True
def bindparam_string(self, name, **kw):
quote = getattr(name, "quote", None)
if (
quote is True
or quote is not False
and self.preparer._bindparam_requires_quotes(name)
):
if kw.get("expanding", False):
raise exc.CompileError(
"Can't use expanding feature with parameter name "
"%r on Oracle; it requires quoting which is not supported "
"in this context." % name
)
quoted_name = '"%s"' % name
self._quoted_bind_names[name] = quoted_name
return OracleCompiler.bindparam_string(self, quoted_name, **kw)
else:
return OracleCompiler.bindparam_string(self, name, **kw)
class OracleExecutionContext_cx_oracle(OracleExecutionContext):
out_parameters = None
def _setup_quoted_bind_names(self):
quoted_bind_names = self.compiled._quoted_bind_names
if quoted_bind_names:
for param in self.parameters:
for fromname, toname in quoted_bind_names.items():
param[toname] = param[fromname]
del param[fromname]
def _handle_out_parameters(self):
# if a single execute, check for outparams
if len(self.compiled_parameters) == 1:
quoted_bind_names = self.compiled._quoted_bind_names
for bindparam in self.compiled.binds.values():
if bindparam.isoutparam:
name = self.compiled.bind_names[bindparam]
type_impl = bindparam.type.dialect_impl(self.dialect)
if hasattr(type_impl, "_cx_oracle_var"):
self.out_parameters[name] = type_impl._cx_oracle_var(
self.dialect, self.cursor
)
else:
dbtype = type_impl.get_dbapi_type(self.dialect.dbapi)
if dbtype is None:
raise exc.InvalidRequestError(
"Cannot create out parameter for parameter "
"%r - its type %r is not supported by"
" cx_oracle" % (bindparam.key, bindparam.type)
)
self.out_parameters[name] = self.cursor.var(dbtype)
self.parameters[0][
quoted_bind_names.get(name, name)
] = self.out_parameters[name]
def _generate_cursor_outputtype_handler(self):
output_handlers = {}
for (keyname, name, objects, type_) in self.compiled._result_columns:
handler = type_._cached_custom_processor(
self.dialect,
"cx_oracle_outputtypehandler",
self._get_cx_oracle_type_handler,
)
if handler:
denormalized_name = self.dialect.denormalize_name(keyname)
output_handlers[denormalized_name] = handler
if output_handlers:
default_handler = self._dbapi_connection.outputtypehandler
def output_type_handler(
cursor, name, default_type, size, precision, scale
):
if name in output_handlers:
return output_handlers[name](
cursor, name, default_type, size, precision, scale
)
else:
return default_handler(
cursor, name, default_type, size, precision, scale
)
self.cursor.outputtypehandler = output_type_handler
def _get_cx_oracle_type_handler(self, impl):
if hasattr(impl, "_cx_oracle_outputtypehandler"):
return impl._cx_oracle_outputtypehandler(self.dialect)
else:
return None
def pre_exec(self):
if not getattr(self.compiled, "_oracle_cx_sql_compiler", False):
return
self.out_parameters = {}
if self.compiled._quoted_bind_names:
self._setup_quoted_bind_names()
self.set_input_sizes(
self.compiled._quoted_bind_names,
include_types=self.dialect._include_setinputsizes,
)
self._handle_out_parameters()
self._generate_cursor_outputtype_handler()
def create_cursor(self):
c = self._dbapi_connection.cursor()
if self.dialect.arraysize:
c.arraysize = self.dialect.arraysize
return c
def get_result_proxy(self):
if self.out_parameters and self.compiled.returning:
returning_params = [
self.dialect._returningval(self.out_parameters["ret_%d" % i])
for i in range(len(self.out_parameters))
]
return ReturningResultProxy(self, returning_params)
result = _result.ResultProxy(self)
if self.out_parameters:
if (
self.compiled_parameters is not None
and len(self.compiled_parameters) == 1
):
result.out_parameters = out_parameters = {}
for bind, name in self.compiled.bind_names.items():
if name in self.out_parameters:
type_ = bind.type
impl_type = type_.dialect_impl(self.dialect)
dbapi_type = impl_type.get_dbapi_type(
self.dialect.dbapi
)
result_processor = impl_type.result_processor(
self.dialect, dbapi_type
)
if result_processor is not None:
out_parameters[name] = result_processor(
self.dialect._paramval(
self.out_parameters[name]
)
)
else:
out_parameters[name] = self.dialect._paramval(
self.out_parameters[name]
)
else:
result.out_parameters = dict(
(k, self._dialect._paramval(v))
for k, v in self.out_parameters.items()
)
return result
class ReturningResultProxy(_result.FullyBufferedResultProxy):
"""Result proxy which stuffs the _returning clause + outparams
into the fetch."""
def __init__(self, context, returning_params):
self._returning_params = returning_params
super(ReturningResultProxy, self).__init__(context)
def _cursor_description(self):
returning = self.context.compiled.returning
return [
(getattr(col, "name", col.anon_label), None) for col in returning
]
def _buffer_rows(self):
return collections.deque([tuple(self._returning_params)])
class OracleDialect_cx_oracle(OracleDialect):
execution_ctx_cls = OracleExecutionContext_cx_oracle
statement_compiler = OracleCompiler_cx_oracle
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
supports_unicode_statements = True
supports_unicode_binds = True
driver = "cx_oracle"
colspecs = {
sqltypes.Numeric: _OracleNumeric,
sqltypes.Float: _OracleNumeric,
oracle.BINARY_FLOAT: _OracleBINARY_FLOAT,
oracle.BINARY_DOUBLE: _OracleBINARY_DOUBLE,
sqltypes.Integer: _OracleInteger,
oracle.NUMBER: _OracleNUMBER,
sqltypes.Date: _OracleDate,
sqltypes.LargeBinary: _OracleBinary,
sqltypes.Boolean: oracle._OracleBoolean,
sqltypes.Interval: _OracleInterval,
oracle.INTERVAL: _OracleInterval,
sqltypes.Text: _OracleText,
sqltypes.String: _OracleString,
sqltypes.UnicodeText: _OracleUnicodeTextCLOB,
sqltypes.CHAR: _OracleChar,
sqltypes.NCHAR: _OracleNChar,
sqltypes.Enum: _OracleEnum,
oracle.LONG: _OracleLong,
oracle.RAW: _OracleRaw,
sqltypes.Unicode: _OracleUnicodeStringCHAR,
sqltypes.NVARCHAR: _OracleUnicodeStringNCHAR,
oracle.NCLOB: _OracleUnicodeTextNCLOB,
oracle.ROWID: _OracleRowid,
}
execute_sequence_format = list
_cx_oracle_threaded = None
@util.deprecated_params(
threaded=(
"1.3",
"The 'threaded' parameter to the cx_oracle dialect "
"is deprecated as a dialect-level argument, and will be removed "
"in a future release. As of version 1.3, it defaults to False "
"rather than True. The 'threaded' option can be passed to "
"cx_Oracle directly in the URL query string passed to "
":func:`_sa.create_engine`.",
)
)
def __init__(
self,
auto_convert_lobs=True,
coerce_to_unicode=True,
coerce_to_decimal=True,
arraysize=50,
encoding_errors=None,
threaded=None,
**kwargs
):
OracleDialect.__init__(self, **kwargs)
self.arraysize = arraysize
self.encoding_errors = encoding_errors
if threaded is not None:
self._cx_oracle_threaded = threaded
self.auto_convert_lobs = auto_convert_lobs
self.coerce_to_unicode = coerce_to_unicode
self.coerce_to_decimal = coerce_to_decimal
if self._use_nchar_for_unicode:
self.colspecs = self.colspecs.copy()
self.colspecs[sqltypes.Unicode] = _OracleUnicodeStringNCHAR
self.colspecs[sqltypes.UnicodeText] = _OracleUnicodeTextNCLOB
cx_Oracle = self.dbapi
if cx_Oracle is None:
self._include_setinputsizes = {}
self.cx_oracle_ver = (0, 0, 0)
else:
self.cx_oracle_ver = self._parse_cx_oracle_ver(cx_Oracle.version)
if self.cx_oracle_ver < (5, 2) and self.cx_oracle_ver > (0, 0, 0):
raise exc.InvalidRequestError(
"cx_Oracle version 5.2 and above are supported"
)
self._include_setinputsizes = {
cx_Oracle.DATETIME,
cx_Oracle.NCLOB,
cx_Oracle.CLOB,
cx_Oracle.LOB,
cx_Oracle.NCHAR,
cx_Oracle.FIXED_NCHAR,
cx_Oracle.BLOB,
cx_Oracle.FIXED_CHAR,
cx_Oracle.TIMESTAMP,
_OracleInteger,
_OracleBINARY_FLOAT,
_OracleBINARY_DOUBLE,
}
self._paramval = lambda value: value.getvalue()
# https://github.com/oracle/python-cx_Oracle/issues/176#issuecomment-386821291
# https://github.com/oracle/python-cx_Oracle/issues/224
self._values_are_lists = self.cx_oracle_ver >= (6, 3)
if self._values_are_lists:
cx_Oracle.__future__.dml_ret_array_val = True
def _returningval(value):
try:
return value.values[0][0]
except IndexError:
return None
self._returningval = _returningval
else:
self._returningval = self._paramval
self._is_cx_oracle_6 = self.cx_oracle_ver >= (6,)
@property
def _cursor_var_unicode_kwargs(self):
if self.encoding_errors:
if self.cx_oracle_ver >= (6, 4):
return {"encodingErrors": self.encoding_errors}
else:
util.warn(
"cx_oracle version %r does not support encodingErrors"
% (self.cx_oracle_ver,)
)
return {}
def _parse_cx_oracle_ver(self, version):
m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", version)
if m:
return tuple(int(x) for x in m.group(1, 2, 3) if x is not None)
else:
return (0, 0, 0)
@classmethod
def dbapi(cls):
import cx_Oracle
return cx_Oracle
def initialize(self, connection):
super(OracleDialect_cx_oracle, self).initialize(connection)
if self._is_oracle_8:
self.supports_unicode_binds = False
self._detect_decimal_char(connection)
def _detect_decimal_char(self, connection):
# we have the option to change this setting upon connect,
# or just look at what it is upon connect and convert.
# to minimize the chance of interference with changes to
# NLS_TERRITORY or formatting behavior of the DB, we opt
# to just look at it
self._decimal_char = connection.scalar(
"select value from nls_session_parameters "
"where parameter = 'NLS_NUMERIC_CHARACTERS'"
)[0]
if self._decimal_char != ".":
_detect_decimal = self._detect_decimal
_to_decimal = self._to_decimal
self._detect_decimal = lambda value: _detect_decimal(
value.replace(self._decimal_char, ".")
)
self._to_decimal = lambda value: _to_decimal(
value.replace(self._decimal_char, ".")
)
def _detect_decimal(self, value):
if "." in value:
return self._to_decimal(value)
else:
return int(value)
_to_decimal = decimal.Decimal
def _generate_connection_outputtype_handler(self):
"""establish the default outputtypehandler established at the
connection level.
"""
dialect = self
cx_Oracle = dialect.dbapi
number_handler = _OracleNUMBER(
asdecimal=True
)._cx_oracle_outputtypehandler(dialect)
float_handler = _OracleNUMBER(
asdecimal=False
)._cx_oracle_outputtypehandler(dialect)
def output_type_handler(
cursor, name, default_type, size, precision, scale
):
if (
default_type == cx_Oracle.NUMBER
and default_type is not cx_Oracle.NATIVE_FLOAT
):
if not dialect.coerce_to_decimal:
return None
elif precision == 0 and scale in (0, -127):
# ambiguous type, this occurs when selecting
# numbers from deep subqueries
return cursor.var(
cx_Oracle.STRING,
255,
outconverter=dialect._detect_decimal,
arraysize=cursor.arraysize,
)
elif precision and scale > 0:
return number_handler(
cursor, name, default_type, size, precision, scale
)
else:
return float_handler(
cursor, name, default_type, size, precision, scale
)
# allow all strings to come back natively as Unicode
elif (
dialect.coerce_to_unicode
and default_type in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR,)
and default_type is not cx_Oracle.CLOB
and default_type is not cx_Oracle.NCLOB
):
if compat.py2k:
outconverter = processors.to_unicode_processor_factory(
dialect.encoding, errors=dialect.encoding_errors
)
return cursor.var(
cx_Oracle.STRING,
size,
cursor.arraysize,
outconverter=outconverter,
)
else:
return cursor.var(
util.text_type,
size,
cursor.arraysize,
**dialect._cursor_var_unicode_kwargs
)
elif dialect.auto_convert_lobs and default_type in (
cx_Oracle.CLOB,
cx_Oracle.NCLOB,
):
if compat.py2k:
outconverter = processors.to_unicode_processor_factory(
dialect.encoding, errors=dialect.encoding_errors
)
return cursor.var(
cx_Oracle.LONG_STRING,
size,
cursor.arraysize,
outconverter=outconverter,
)
else:
return cursor.var(
cx_Oracle.LONG_STRING,
size,
cursor.arraysize,
**dialect._cursor_var_unicode_kwargs
)
elif dialect.auto_convert_lobs and default_type in (
cx_Oracle.BLOB,
):
return cursor.var(
cx_Oracle.LONG_BINARY, size, cursor.arraysize,
)
return output_type_handler
def on_connect(self):
output_type_handler = self._generate_connection_outputtype_handler()
def on_connect(conn):
conn.outputtypehandler = output_type_handler
return on_connect
def create_connect_args(self, url):
opts = dict(url.query)
# deprecated in 1.3
for opt in ("use_ansi", "auto_convert_lobs"):
if opt in opts:
util.warn_deprecated(
"cx_oracle dialect option %r should only be passed to "
"create_engine directly, not within the URL string" % opt
)
util.coerce_kw_type(opts, opt, bool)
setattr(self, opt, opts.pop(opt))
database = url.database
service_name = opts.pop("service_name", None)
if database or service_name:
# if we have a database, then we have a remote host
port = url.port
if port:
port = int(port)
else:
port = 1521
if database and service_name:
raise exc.InvalidRequestError(
'"service_name" option shouldn\'t '
'be used with a "database" part of the url'
)
if database:
makedsn_kwargs = {"sid": database}
if service_name:
makedsn_kwargs = {"service_name": service_name}
dsn = self.dbapi.makedsn(url.host, port, **makedsn_kwargs)
else:
# we have a local tnsname
dsn = url.host
if dsn is not None:
opts["dsn"] = dsn
if url.password is not None:
opts["password"] = url.password
if url.username is not None:
opts["user"] = url.username
if self._cx_oracle_threaded is not None:
opts.setdefault("threaded", self._cx_oracle_threaded)
def convert_cx_oracle_constant(value):
if isinstance(value, util.string_types):
try:
int_val = int(value)
except ValueError:
value = value.upper()
return getattr(self.dbapi, value)
else:
return int_val
else:
return value
util.coerce_kw_type(opts, "mode", convert_cx_oracle_constant)
util.coerce_kw_type(opts, "threaded", bool)
util.coerce_kw_type(opts, "events", bool)
util.coerce_kw_type(opts, "purity", convert_cx_oracle_constant)
return ([], opts)
def _get_server_version_info(self, connection):
return tuple(int(x) for x in connection.connection.version.split("."))
def is_disconnect(self, e, connection, cursor):
(error,) = e.args
if isinstance(
e, (self.dbapi.InterfaceError, self.dbapi.DatabaseError)
) and "not connected" in str(e):
return True
if hasattr(error, "code"):
# ORA-00028: your session has been killed
# ORA-03114: not connected to ORACLE
# ORA-03113: end-of-file on communication channel
# ORA-03135: connection lost contact
# ORA-01033: ORACLE initialization or shutdown in progress
# ORA-02396: exceeded maximum idle time, please connect again
# TODO: Others ?
return error.code in (28, 3114, 3113, 3135, 1033, 2396)
else:
return False
@util.deprecated(
"1.2",
"The create_xid() method of the cx_Oracle dialect is deprecated and "
"will be removed in a future release. "
"Two-phase transaction support is no longer functional "
"in SQLAlchemy's cx_Oracle dialect as of cx_Oracle 6.0b1, which no "
"longer supports the API that SQLAlchemy relied upon.",
)
def create_xid(self):
"""create a two-phase transaction ID.
this id will be passed to do_begin_twophase(), do_rollback_twophase(),
do_commit_twophase(). its format is unspecified.
"""
id_ = random.randint(0, 2 ** 128)
return (0x1234, "%032x" % id_, "%032x" % 9)
def do_executemany(self, cursor, statement, parameters, context=None):
if isinstance(parameters, tuple):
parameters = list(parameters)
cursor.executemany(statement, parameters)
def do_begin_twophase(self, connection, xid):
connection.connection.begin(*xid)
def do_prepare_twophase(self, connection, xid):
result = connection.connection.prepare()
connection.info["cx_oracle_prepared"] = result
def do_rollback_twophase(
self, connection, xid, is_prepared=True, recover=False
):
self.do_rollback(connection.connection)
def do_commit_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if not is_prepared:
self.do_commit(connection.connection)
else:
oci_prepared = connection.info["cx_oracle_prepared"]
if oci_prepared:
self.do_commit(connection.connection)
def do_recover_twophase(self, connection):
connection.info.pop("cx_oracle_prepared", None)
def set_isolation_level(self, connection, level):
if hasattr(connection, "connection"):
dbapi_connection = connection.connection
else:
dbapi_connection = connection
if level == "AUTOCOMMIT":
dbapi_connection.autocommit = True
else:
dbapi_connection.autocommit = False
super(OracleDialect_cx_oracle, self).set_isolation_level(
dbapi_connection, level
)
dialect = OracleDialect_cx_oracle
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/oracle/base.py
|
# oracle/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: oracle
:name: Oracle
Oracle versions 8 through current (11g at the time of this writing) are
supported.
Auto Increment Behavior
-----------------------
SQLAlchemy Table objects which include integer primary keys are usually
assumed to have "autoincrementing" behavior, meaning they can generate their
own primary key values upon INSERT. Since Oracle has no "autoincrement"
feature, SQLAlchemy relies upon sequences to produce these values. With the
Oracle dialect, *a sequence must always be explicitly specified to enable
autoincrement*. This is divergent from the majority of documentation
examples which assume the usage of an autoincrement-capable database. To
specify sequences, use the sqlalchemy.schema.Sequence object which is passed
to a Column construct::
t = Table('mytable', metadata,
Column('id', Integer, Sequence('id_seq'), primary_key=True),
Column(...), ...
)
This step is also required when using table reflection, i.e. autoload=True::
t = Table('mytable', metadata,
Column('id', Integer, Sequence('id_seq'), primary_key=True),
autoload=True
)
Transaction Isolation Level / Autocommit
----------------------------------------
The Oracle database supports "READ COMMITTED" and "SERIALIZABLE" modes
of isolation, however the SQLAlchemy Oracle dialect currently only has
explicit support for "READ COMMITTED". It is possible to emit a
"SET TRANSACTION" statement on a connection in order to use SERIALIZABLE
isolation, however the SQLAlchemy dialect will remain unaware of this setting,
such as if the :meth:`_engine.Connection.get_isolation_level` method is used;
this method is hardcoded to return "READ COMMITTED" right now.
The AUTOCOMMIT isolation level is also supported by the cx_Oracle dialect.
To set using per-connection execution options::
connection = engine.connect()
connection = connection.execution_options(
isolation_level="AUTOCOMMIT"
)
Valid values for ``isolation_level`` include:
* ``READ COMMITTED``
* ``AUTOCOMMIT``
.. versionadded:: 1.3.16 added support for AUTOCOMMIT to the cx_oracle dialect
  as well as the notion of a default isolation level, currently hardcoded
to "READ COMMITTED".
Identifier Casing
-----------------
In Oracle, the data dictionary represents all case insensitive identifier
names using UPPERCASE text. SQLAlchemy on the other hand considers an
all-lower case identifier name to be case insensitive. The Oracle dialect
converts all case insensitive identifiers to and from those two formats during
schema level communication, such as reflection of tables and indexes. Using
an UPPERCASE name on the SQLAlchemy side indicates a case sensitive
identifier, and SQLAlchemy will quote the name - this will cause mismatches
against data dictionary data received from Oracle, so unless identifier names
have been truly created as case sensitive (i.e. using quoted names), all
lowercase names should be used on the SQLAlchemy side.
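As a brief illustration of the above convention, using hypothetical table and
column names, an Oracle table created without quoted identifiers is referred
to with an all-lowercase name on the SQLAlchemy side, while an UPPERCASE name
is reserved for identifiers that were truly created as case sensitive::
    from sqlalchemy import Column, Integer, MetaData, Table
    metadata = MetaData()
    # table created in Oracle without quotes (stored as MY_TABLE);
    # the lowercase name is treated as case insensitive
    my_table = Table("my_table", metadata, Column("my_column", Integer))
    # an UPPERCASE name is quoted by SQLAlchemy and should only be used
    # if the table was created with quoted, case sensitive identifiers
    other_table = Table("MY_TABLE", metadata, Column("MY_COLUMN", Integer))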
.. _oracle_max_identifier_lengths:
Max Identifier Lengths
----------------------
Oracle has changed the default max identifier length as of Oracle Server
version 12.2. Prior to this version, the length was 30, and for 12.2 and
greater it is now 128. This change impacts SQLAlchemy in the area of
generated SQL label names as well as the generation of constraint names,
particularly in the case where the constraint naming convention feature
described at :ref:`constraint_naming_conventions` is being used.
To assist with this change and others, Oracle includes the concept of a
"compatibility" version, which is a version number that is independent of the
actual server version in order to assist with migration of Oracle databases,
and may be configured within the Oracle server itself. This compatibility
version is retrieved using the query ``SELECT value FROM v$parameter WHERE
name = 'compatible';``. The SQLAlchemy Oracle dialect, when tasked with
determining the default max identifier length, will attempt to use this query
upon first connect in order to determine the effective compatibility version of
the server, which determines what the maximum allowed identifier length is for
the server. If the view is not available, the server version information is
used instead.
For the duration of the SQLAlchemy 1.3 series, the default max identifier
length will remain at 30, even if compatibility version 12.2 or greater is in
use. When the newer version is detected, a warning will be emitted upon first
connect, which refers the user to make use of the
:paramref:`_sa.create_engine.max_identifier_length`
parameter in order to assure
forwards compatibility with SQLAlchemy 1.4, which will be changing this value
to 128 when compatibility version 12.2 or greater is detected.
Using :paramref:`_sa.create_engine.max_identifier_length`,
the effective identifier
length used by the SQLAlchemy dialect will be used as given, overriding the
current default value of 30, so that when Oracle 12.2 or greater is used, the
newer identifier length may be taken advantage of::
engine = create_engine(
"oracle+cx_oracle://scott:tiger@oracle122",
max_identifier_length=128)
The maximum identifier length comes into play both when generating anonymized
SQL labels in SELECT statements, but more crucially when generating constraint
names from a naming convention. It is this area that has created the need for
SQLAlchemy to change this default conservatively. For example, the following
naming convention produces two very different constraint names based on the
identifier length::
from sqlalchemy import Column
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy.dialects import oracle
from sqlalchemy.schema import CreateIndex
m = MetaData(naming_convention={"ix": "ix_%(column_0N_name)s"})
t = Table(
"t",
m,
Column("some_column_name_1", Integer),
Column("some_column_name_2", Integer),
Column("some_column_name_3", Integer),
)
ix = Index(
None,
t.c.some_column_name_1,
t.c.some_column_name_2,
t.c.some_column_name_3,
)
oracle_dialect = oracle.dialect(max_identifier_length=30)
print(CreateIndex(ix).compile(dialect=oracle_dialect))
With an identifier length of 30, the above CREATE INDEX looks like::
CREATE INDEX ix_some_column_name_1s_70cd ON t
(some_column_name_1, some_column_name_2, some_column_name_3)
However with length=128, it becomes::
CREATE INDEX ix_some_column_name_1some_column_name_2some_column_name_3 ON t
(some_column_name_1, some_column_name_2, some_column_name_3)
The implication here is that by upgrading SQLAlchemy to version 1.4 on
an existing Oracle 12.2 or greater database, the generation of constraint
names will change, which can impact the behavior of database migrations.
A key example is a migration that wishes to "DROP CONSTRAINT" on a name that
was previously generated with the shorter length. This migration will fail
when the identifier length is changed without the name of the index or
constraint first being adjusted.
Therefore, applications are strongly advised to make use of
:paramref:`_sa.create_engine.max_identifier_length`
in order to maintain control
of the generation of truncated names, and to fully review and test all database
migrations in a staging environment when changing this value to ensure that the
impact of this change has been mitigated.
.. versionadded:: 1.3.9 Added the
:paramref:`_sa.create_engine.max_identifier_length` parameter; the Oracle
dialect now detects compatibility version 12.2 or greater and warns
   about upcoming max identifier length changes in SQLAlchemy 1.4.
LIMIT/OFFSET Support
--------------------
Oracle has no support for the LIMIT or OFFSET keywords. SQLAlchemy uses
a wrapped subquery approach in conjunction with ROWNUM. The exact methodology
is taken from
http://www.oracle.com/technetwork/issue-archive/2006/06-sep/o56asktom-086197.html .
There are two options which affect its behavior:
* the "FIRST ROWS()" optimization keyword is not used by default. To enable
the usage of this optimization directive, specify ``optimize_limits=True``
to :func:`_sa.create_engine`.
* the values passed for the limit/offset are sent as bound parameters. Some
users have observed that Oracle produces a poor query plan when the values
are sent as binds and not rendered literally. To render the limit/offset
values literally within the SQL statement, specify
``use_binds_for_limits=False`` to :func:`_sa.create_engine`.
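Both of the above options may be passed together to :func:`_sa.create_engine`;
a brief sketch, using a hypothetical connection URL::
    from sqlalchemy import create_engine
    engine = create_engine(
        "oracle+cx_oracle://scott:tiger@dsn",
        optimize_limits=True,
        use_binds_for_limits=False,
    )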
Some users have reported better performance when the entirely different
approach of a window query is used, i.e. ROW_NUMBER() OVER (ORDER BY), to
provide LIMIT/OFFSET (note that the majority of users don't observe this).
To suit this case the method used for LIMIT/OFFSET can be replaced entirely.
See the recipe at
http://www.sqlalchemy.org/trac/wiki/UsageRecipes/WindowFunctionsByDefault
which installs a select compiler that overrides the generation of limit/offset
with a window function.
.. _oracle_returning:
RETURNING Support
-----------------
The Oracle database supports a limited form of RETURNING, in order to retrieve
result sets of matched rows from INSERT, UPDATE and DELETE statements.
Oracle's RETURNING..INTO syntax only supports one row being returned, as it
relies upon OUT parameters in order to function. In addition, supported
DBAPIs have further limitations (see :ref:`cx_oracle_returning`).
SQLAlchemy's "implicit returning" feature, which employs RETURNING within an
INSERT and sometimes an UPDATE statement in order to fetch newly generated
primary key values and other SQL defaults and expressions, is normally enabled
on the Oracle backend. By default, "implicit returning" typically only
fetches the value of a single ``nextval(some_seq)`` expression embedded into
an INSERT in order to increment a sequence within an INSERT statement and get
the value back at the same time. To disable this feature across the board,
specify ``implicit_returning=False`` to :func:`_sa.create_engine`::
engine = create_engine("oracle://scott:tiger@dsn",
implicit_returning=False)
Implicit returning can also be disabled on a table-by-table basis as a table
option::
# Core Table
my_table = Table("my_table", metadata, ..., implicit_returning=False)
# declarative
class MyClass(Base):
__tablename__ = 'my_table'
__table_args__ = {"implicit_returning": False}
.. seealso::
:ref:`cx_oracle_returning` - additional cx_oracle-specific restrictions on
implicit returning.
ON UPDATE CASCADE
-----------------
Oracle doesn't have native ON UPDATE CASCADE functionality. A trigger based
solution is available at
http://asktom.oracle.com/tkyte/update_cascade/index.html .
When using the SQLAlchemy ORM, the ORM has limited ability to manually issue
cascading updates - specify ForeignKey objects using the
"deferrable=True, initially='deferred'" keyword arguments,
and specify "passive_updates=False" on each relationship().
Oracle 8 Compatibility
----------------------
When Oracle 8 is detected, the dialect internally configures itself to the
following behaviors:
* the use_ansi flag is set to False. This has the effect of converting all
JOIN phrases into the WHERE clause, and in the case of LEFT OUTER JOIN
makes use of Oracle's (+) operator.
* the NVARCHAR2 and NCLOB datatypes are no longer generated as DDL when
the :class:`~sqlalchemy.types.Unicode` is used - VARCHAR2 and CLOB are
  issued instead.   This is because these types don't seem to work correctly on
Oracle 8 even though they are available. The
:class:`~sqlalchemy.types.NVARCHAR` and
:class:`~sqlalchemy.dialects.oracle.NCLOB` types will always generate
NVARCHAR2 and NCLOB.
* the "native unicode" mode is disabled when using cx_oracle, i.e. SQLAlchemy
encodes all Python unicode objects to "string" before passing in as bind
parameters.
Synonym/DBLINK Reflection
-------------------------
When using reflection with Table objects, the dialect can optionally search
for tables indicated by synonyms, either in local or remote schemas or
accessed over DBLINK, by passing the flag ``oracle_resolve_synonyms=True`` as
a keyword argument to the :class:`_schema.Table` construct::
some_table = Table('some_table', autoload=True,
autoload_with=some_engine,
oracle_resolve_synonyms=True)
When this flag is set, the given name (such as ``some_table`` above) will
be searched not just in the ``ALL_TABLES`` view, but also within the
``ALL_SYNONYMS`` view to see if this name is actually a synonym to another
name. If the synonym is located and refers to a DBLINK, the oracle dialect
knows how to locate the table's information using DBLINK syntax (e.g.
``@dblink``).
``oracle_resolve_synonyms`` is accepted wherever reflection arguments are
accepted, including methods such as :meth:`_schema.MetaData.reflect` and
:meth:`_reflection.Inspector.get_columns`.
If synonyms are not in use, this flag should be left disabled.
.. _oracle_constraint_reflection:
Constraint Reflection
---------------------
The Oracle dialect can return information about foreign key, unique, and
CHECK constraints, as well as indexes on tables.
Raw information regarding these constraints can be acquired using
:meth:`_reflection.Inspector.get_foreign_keys`,
:meth:`_reflection.Inspector.get_unique_constraints`,
:meth:`_reflection.Inspector.get_check_constraints`, and
:meth:`_reflection.Inspector.get_indexes`.
.. versionchanged:: 1.2 The Oracle dialect can now reflect UNIQUE and
CHECK constraints.
When using reflection at the :class:`_schema.Table` level, the
:class:`_schema.Table`
will also include these constraints.
Note the following caveats:
* When using the :meth:`_reflection.Inspector.get_check_constraints` method,
Oracle
builds a special "IS NOT NULL" constraint for columns that specify
"NOT NULL". This constraint is **not** returned by default; to include
the "IS NOT NULL" constraints, pass the flag ``include_all=True``::
from sqlalchemy import create_engine, inspect
engine = create_engine("oracle+cx_oracle://s:t@dsn")
inspector = inspect(engine)
all_check_constraints = inspector.get_check_constraints(
"some_table", include_all=True)
* in most cases, when reflecting a :class:`_schema.Table`,
a UNIQUE constraint will
**not** be available as a :class:`.UniqueConstraint` object, as Oracle
mirrors unique constraints with a UNIQUE index in most cases (the exception
seems to be when two or more unique constraints represent the same columns);
the :class:`_schema.Table` will instead represent these using
:class:`.Index`
with the ``unique=True`` flag set.
* Oracle creates an implicit index for the primary key of a table; this index
is **excluded** from all index results.
* the list of columns reflected for an index will not include column names
that start with SYS_NC.
Table names with SYSTEM/SYSAUX tablespaces
-------------------------------------------
The :meth:`_reflection.Inspector.get_table_names` and
:meth:`_reflection.Inspector.get_temp_table_names`
methods each return a list of table names for the current engine. These methods
are also part of the reflection which occurs within an operation such as
:meth:`_schema.MetaData.reflect`. By default,
these operations exclude the ``SYSTEM``
and ``SYSAUX`` tablespaces from the operation. In order to change this, the
default list of tablespaces excluded can be changed at the engine level using
the ``exclude_tablespaces`` parameter::
# exclude SYSAUX and SOME_TABLESPACE, but not SYSTEM
e = create_engine(
"oracle://scott:tiger@xe",
exclude_tablespaces=["SYSAUX", "SOME_TABLESPACE"])
.. versionadded:: 1.1
DateTime Compatibility
----------------------
Oracle has no datatype known as ``DATETIME``; it instead has only ``DATE``,
which can actually store a date and time value. For this reason, the Oracle
dialect provides a type :class:`_oracle.DATE` which is a subclass of
:class:`.DateTime`. This type has no special behavior, and is only
present as a "marker" for this type; additionally, when a database column
is reflected and the type is reported as ``DATE``, the time-supporting
:class:`_oracle.DATE` type is used.
.. versionchanged:: 0.9.4 Added :class:`_oracle.DATE` to subclass
:class:`.DateTime`. This is a change as previous versions
would reflect a ``DATE`` column as :class:`_types.DATE`, which subclasses
:class:`.Date`. The only significance here is for schemes that are
examining the type of column for use in special Python translations or
for migrating schemas to other database backends.
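As a short sketch of the above, using hypothetical table and column names, a
column declared with the generic :class:`.DateTime` type is emitted as Oracle
``DATE`` in DDL, and when reflected back its type is reported as the
time-capable :class:`_oracle.DATE`::
    from sqlalchemy import Column, DateTime, Integer, MetaData, Table
    metadata = MetaData()
    t = Table(
        "some_table", metadata,
        Column("id", Integer, primary_key=True),
        # rendered as "created_at DATE" in CREATE TABLE on Oracle
        Column("created_at", DateTime),
    )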
.. _oracle_table_options:
Oracle Table Options
-------------------------
The CREATE TABLE phrase supports the following options with Oracle
in conjunction with the :class:`_schema.Table` construct:
* ``ON COMMIT``::
Table(
"some_table", metadata, ...,
prefixes=['GLOBAL TEMPORARY'], oracle_on_commit='PRESERVE ROWS')
.. versionadded:: 1.0.0
* ``COMPRESS``::
Table('mytable', metadata, Column('data', String(32)),
oracle_compress=True)
Table('mytable', metadata, Column('data', String(32)),
oracle_compress=6)
The ``oracle_compress`` parameter accepts either an integer compression
level, or ``True`` to use the default compression level.
.. versionadded:: 1.0.0
.. _oracle_index_options:
Oracle Specific Index Options
-----------------------------
Bitmap Indexes
~~~~~~~~~~~~~~
You can specify the ``oracle_bitmap`` parameter to create a bitmap index
instead of a B-tree index::
Index('my_index', my_table.c.data, oracle_bitmap=True)
Bitmap indexes cannot be unique and cannot be compressed. SQLAlchemy will not
check for such limitations; only the database will.
.. versionadded:: 1.0.0
Index compression
~~~~~~~~~~~~~~~~~
Oracle has a more efficient storage mode for indexes containing lots of
repeated values. Use the ``oracle_compress`` parameter to turn on key
compression::
Index('my_index', my_table.c.data, oracle_compress=True)
Index('my_index', my_table.c.data1, my_table.c.data2, unique=True,
oracle_compress=1)
The ``oracle_compress`` parameter accepts either an integer specifying the
number of prefix columns to compress, or ``True`` to use the default (all
columns for non-unique indexes, all but the last column for unique indexes).
.. versionadded:: 1.0.0
""" # noqa
from itertools import groupby
import re
from ... import Computed
from ... import exc
from ... import schema as sa_schema
from ... import sql
from ... import types as sqltypes
from ... import util
from ...engine import default
from ...engine import reflection
from ...sql import compiler
from ...sql import expression
from ...sql import util as sql_util
from ...sql import visitors
from ...types import BLOB
from ...types import CHAR
from ...types import CLOB
from ...types import FLOAT
from ...types import INTEGER
from ...types import NCHAR
from ...types import NVARCHAR
from ...types import TIMESTAMP
from ...types import VARCHAR
RESERVED_WORDS = set(
"SHARE RAW DROP BETWEEN FROM DESC OPTION PRIOR LONG THEN "
"DEFAULT ALTER IS INTO MINUS INTEGER NUMBER GRANT IDENTIFIED "
"ALL TO ORDER ON FLOAT DATE HAVING CLUSTER NOWAIT RESOURCE "
"ANY TABLE INDEX FOR UPDATE WHERE CHECK SMALLINT WITH DELETE "
"BY ASC REVOKE LIKE SIZE RENAME NOCOMPRESS NULL GROUP VALUES "
"AS IN VIEW EXCLUSIVE COMPRESS SYNONYM SELECT INSERT EXISTS "
"NOT TRIGGER ELSE CREATE INTERSECT PCTFREE DISTINCT USER "
"CONNECT SET MODE OF UNIQUE VARCHAR2 VARCHAR LOCK OR CHAR "
"DECIMAL UNION PUBLIC AND START UID COMMENT CURRENT LEVEL".split()
)
NO_ARG_FNS = set(
"UID CURRENT_DATE SYSDATE USER " "CURRENT_TIME CURRENT_TIMESTAMP".split()
)
class RAW(sqltypes._Binary):
__visit_name__ = "RAW"
OracleRaw = RAW
class NCLOB(sqltypes.Text):
__visit_name__ = "NCLOB"
class VARCHAR2(VARCHAR):
__visit_name__ = "VARCHAR2"
NVARCHAR2 = NVARCHAR
class NUMBER(sqltypes.Numeric, sqltypes.Integer):
__visit_name__ = "NUMBER"
def __init__(self, precision=None, scale=None, asdecimal=None):
if asdecimal is None:
asdecimal = bool(scale and scale > 0)
super(NUMBER, self).__init__(
precision=precision, scale=scale, asdecimal=asdecimal
)
def adapt(self, impltype):
ret = super(NUMBER, self).adapt(impltype)
# leave a hint for the DBAPI handler
ret._is_oracle_number = True
return ret
@property
def _type_affinity(self):
if bool(self.scale and self.scale > 0):
return sqltypes.Numeric
else:
return sqltypes.Integer
class DOUBLE_PRECISION(sqltypes.Float):
__visit_name__ = "DOUBLE_PRECISION"
class BINARY_DOUBLE(sqltypes.Float):
__visit_name__ = "BINARY_DOUBLE"
class BINARY_FLOAT(sqltypes.Float):
__visit_name__ = "BINARY_FLOAT"
class BFILE(sqltypes.LargeBinary):
__visit_name__ = "BFILE"
class LONG(sqltypes.Text):
__visit_name__ = "LONG"
class DATE(sqltypes.DateTime):
"""Provide the oracle DATE type.
This type has no special Python behavior, except that it subclasses
:class:`_types.DateTime`; this is to suit the fact that the Oracle
``DATE`` type supports a time value.
.. versionadded:: 0.9.4
"""
__visit_name__ = "DATE"
def _compare_type_affinity(self, other):
return other._type_affinity in (sqltypes.DateTime, sqltypes.Date)
class INTERVAL(sqltypes.TypeEngine):
__visit_name__ = "INTERVAL"
def __init__(self, day_precision=None, second_precision=None):
"""Construct an INTERVAL.
Note that only DAY TO SECOND intervals are currently supported.
This is due to a lack of support for YEAR TO MONTH intervals
within available DBAPIs (cx_oracle and zxjdbc).
:param day_precision: the day precision value. this is the number of
digits to store for the day field. Defaults to "2"
:param second_precision: the second precision value. this is the
number of digits to store for the fractional seconds field.
Defaults to "6".
"""
self.day_precision = day_precision
self.second_precision = second_precision
@classmethod
def _adapt_from_generic_interval(cls, interval):
return INTERVAL(
day_precision=interval.day_precision,
second_precision=interval.second_precision,
)
@property
def _type_affinity(self):
return sqltypes.Interval
class ROWID(sqltypes.TypeEngine):
"""Oracle ROWID type.
When used in a cast() or similar, generates ROWID.
"""
__visit_name__ = "ROWID"
class _OracleBoolean(sqltypes.Boolean):
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
colspecs = {
sqltypes.Boolean: _OracleBoolean,
sqltypes.Interval: INTERVAL,
sqltypes.DateTime: DATE,
}
ischema_names = {
"VARCHAR2": VARCHAR,
"NVARCHAR2": NVARCHAR,
"CHAR": CHAR,
"NCHAR": NCHAR,
"DATE": DATE,
"NUMBER": NUMBER,
"BLOB": BLOB,
"BFILE": BFILE,
"CLOB": CLOB,
"NCLOB": NCLOB,
"TIMESTAMP": TIMESTAMP,
"TIMESTAMP WITH TIME ZONE": TIMESTAMP,
"INTERVAL DAY TO SECOND": INTERVAL,
"RAW": RAW,
"FLOAT": FLOAT,
"DOUBLE PRECISION": DOUBLE_PRECISION,
"LONG": LONG,
"BINARY_DOUBLE": BINARY_DOUBLE,
"BINARY_FLOAT": BINARY_FLOAT,
}
class OracleTypeCompiler(compiler.GenericTypeCompiler):
# Note:
# Oracle DATE == DATETIME
# Oracle does not allow milliseconds in DATE
# Oracle does not support TIME columns
def visit_datetime(self, type_, **kw):
return self.visit_DATE(type_, **kw)
def visit_float(self, type_, **kw):
return self.visit_FLOAT(type_, **kw)
def visit_unicode(self, type_, **kw):
if self.dialect._use_nchar_for_unicode:
return self.visit_NVARCHAR2(type_, **kw)
else:
return self.visit_VARCHAR2(type_, **kw)
def visit_INTERVAL(self, type_, **kw):
return "INTERVAL DAY%s TO SECOND%s" % (
type_.day_precision is not None
and "(%d)" % type_.day_precision
or "",
type_.second_precision is not None
and "(%d)" % type_.second_precision
or "",
)
def visit_LONG(self, type_, **kw):
return "LONG"
def visit_TIMESTAMP(self, type_, **kw):
if type_.timezone:
return "TIMESTAMP WITH TIME ZONE"
else:
return "TIMESTAMP"
def visit_DOUBLE_PRECISION(self, type_, **kw):
return self._generate_numeric(type_, "DOUBLE PRECISION", **kw)
def visit_BINARY_DOUBLE(self, type_, **kw):
return self._generate_numeric(type_, "BINARY_DOUBLE", **kw)
def visit_BINARY_FLOAT(self, type_, **kw):
return self._generate_numeric(type_, "BINARY_FLOAT", **kw)
def visit_FLOAT(self, type_, **kw):
# don't support conversion between decimal/binary
# precision yet
kw["no_precision"] = True
return self._generate_numeric(type_, "FLOAT", **kw)
def visit_NUMBER(self, type_, **kw):
return self._generate_numeric(type_, "NUMBER", **kw)
def _generate_numeric(
self, type_, name, precision=None, scale=None, no_precision=False, **kw
):
if precision is None:
precision = type_.precision
if scale is None:
scale = getattr(type_, "scale", None)
if no_precision or precision is None:
return name
elif scale is None:
n = "%(name)s(%(precision)s)"
return n % {"name": name, "precision": precision}
else:
n = "%(name)s(%(precision)s, %(scale)s)"
return n % {"name": name, "precision": precision, "scale": scale}
def visit_string(self, type_, **kw):
return self.visit_VARCHAR2(type_, **kw)
def visit_VARCHAR2(self, type_, **kw):
return self._visit_varchar(type_, "", "2")
def visit_NVARCHAR2(self, type_, **kw):
return self._visit_varchar(type_, "N", "2")
visit_NVARCHAR = visit_NVARCHAR2
def visit_VARCHAR(self, type_, **kw):
return self._visit_varchar(type_, "", "")
def _visit_varchar(self, type_, n, num):
if not type_.length:
return "%(n)sVARCHAR%(two)s" % {"two": num, "n": n}
elif not n and self.dialect._supports_char_length:
varchar = "VARCHAR%(two)s(%(length)s CHAR)"
return varchar % {"length": type_.length, "two": num}
else:
varchar = "%(n)sVARCHAR%(two)s(%(length)s)"
return varchar % {"length": type_.length, "two": num, "n": n}
def visit_text(self, type_, **kw):
return self.visit_CLOB(type_, **kw)
def visit_unicode_text(self, type_, **kw):
if self.dialect._use_nchar_for_unicode:
return self.visit_NCLOB(type_, **kw)
else:
return self.visit_CLOB(type_, **kw)
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_, **kw)
def visit_big_integer(self, type_, **kw):
return self.visit_NUMBER(type_, precision=19, **kw)
def visit_boolean(self, type_, **kw):
return self.visit_SMALLINT(type_, **kw)
def visit_RAW(self, type_, **kw):
if type_.length:
return "RAW(%(length)s)" % {"length": type_.length}
else:
return "RAW"
def visit_ROWID(self, type_, **kw):
return "ROWID"
class OracleCompiler(compiler.SQLCompiler):
"""Oracle compiler modifies the lexical structure of Select
statements to work under non-ANSI configured Oracle databases, if
the use_ansi flag is False.
"""
compound_keywords = util.update_copy(
compiler.SQLCompiler.compound_keywords,
{expression.CompoundSelect.EXCEPT: "MINUS"},
)
def __init__(self, *args, **kwargs):
self.__wheres = {}
self._quoted_bind_names = {}
super(OracleCompiler, self).__init__(*args, **kwargs)
def visit_mod_binary(self, binary, operator, **kw):
return "mod(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_char_length_func(self, fn, **kw):
return "LENGTH" + self.function_argspec(fn, **kw)
def visit_match_op_binary(self, binary, operator, **kw):
return "CONTAINS (%s, %s)" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_true(self, expr, **kw):
return "1"
def visit_false(self, expr, **kw):
return "0"
def get_cte_preamble(self, recursive):
return "WITH"
def get_select_hint_text(self, byfroms):
return " ".join("/*+ %s */" % text for table, text in byfroms.items())
def function_argspec(self, fn, **kw):
if len(fn.clauses) > 0 or fn.name.upper() not in NO_ARG_FNS:
return compiler.SQLCompiler.function_argspec(self, fn, **kw)
else:
return ""
def default_from(self):
"""Called when a ``SELECT`` statement has no froms,
and no ``FROM`` clause is to be appended.
The Oracle compiler tacks a "FROM DUAL" to the statement.
"""
return " FROM DUAL"
def visit_join(self, join, **kwargs):
if self.dialect.use_ansi:
return compiler.SQLCompiler.visit_join(self, join, **kwargs)
else:
kwargs["asfrom"] = True
if isinstance(join.right, expression.FromGrouping):
right = join.right.element
else:
right = join.right
return (
self.process(join.left, **kwargs)
+ ", "
+ self.process(right, **kwargs)
)
def _get_nonansi_join_whereclause(self, froms):
clauses = []
def visit_join(join):
if join.isouter:
# https://docs.oracle.com/database/121/SQLRF/queries006.htm#SQLRF52354
# "apply the outer join operator (+) to all columns of B in
# the join condition in the WHERE clause" - that is,
# unconditionally regardless of operator or the other side
def visit_binary(binary):
if isinstance(
binary.left, expression.ColumnClause
) and join.right.is_derived_from(binary.left.table):
binary.left = _OuterJoinColumn(binary.left)
elif isinstance(
binary.right, expression.ColumnClause
) and join.right.is_derived_from(binary.right.table):
binary.right = _OuterJoinColumn(binary.right)
clauses.append(
visitors.cloned_traverse(
join.onclause, {}, {"binary": visit_binary}
)
)
else:
clauses.append(join.onclause)
for j in join.left, join.right:
if isinstance(j, expression.Join):
visit_join(j)
elif isinstance(j, expression.FromGrouping):
visit_join(j.element)
for f in froms:
if isinstance(f, expression.Join):
visit_join(f)
if not clauses:
return None
else:
return sql.and_(*clauses)
def visit_outer_join_column(self, vc, **kw):
return self.process(vc.column, **kw) + "(+)"
def visit_sequence(self, seq, **kw):
return (
self.dialect.identifier_preparer.format_sequence(seq) + ".nextval"
)
def get_render_as_alias_suffix(self, alias_name_text):
"""Oracle doesn't like ``FROM table AS alias``"""
return " " + alias_name_text
def returning_clause(self, stmt, returning_cols):
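        # Oracle uses "RETURNING <columns> INTO <binds>"; each returned
        # column is paired with an OUT bind parameter which the DBAPI
        # populates after execution, which is also why RETURNING here is
        # limited to a single row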
columns = []
binds = []
for i, column in enumerate(
expression._select_iterables(returning_cols)
):
if self.isupdate and isinstance(column.server_default, Computed):
util.warn(
"Computed columns don't work with Oracle UPDATE "
"statements that use RETURNING; the value of the column "
"*before* the UPDATE takes place is returned. It is "
"advised to not use RETURNING with an Oracle computed "
"column. Consider setting implicit_returning to False on "
"the Table object in order to avoid implicit RETURNING "
"clauses from being generated for this Table."
)
if column.type._has_column_expression:
col_expr = column.type.column_expression(column)
else:
col_expr = column
outparam = sql.outparam("ret_%d" % i, type_=column.type)
self.binds[outparam.key] = outparam
binds.append(
self.bindparam_string(self._truncate_bindparam(outparam))
)
columns.append(self.process(col_expr, within_columns_clause=False))
self._add_to_result_map(
getattr(col_expr, "name", col_expr.anon_label),
getattr(col_expr, "name", col_expr.anon_label),
(
column,
getattr(column, "name", None),
getattr(column, "key", None),
),
column.type,
)
return "RETURNING " + ", ".join(columns) + " INTO " + ", ".join(binds)
def _TODO_visit_compound_select(self, select):
"""Need to determine how to get ``LIMIT``/``OFFSET`` into a
``UNION`` for Oracle.
"""
pass
def visit_select(self, select, **kwargs):
"""Look for ``LIMIT`` and OFFSET in a select statement, and if
so tries to wrap it in a subquery with ``rownum`` criterion.
"""
if not getattr(select, "_oracle_visit", None):
if not self.dialect.use_ansi:
froms = self._display_froms_for_select(
select, kwargs.get("asfrom", False)
)
whereclause = self._get_nonansi_join_whereclause(froms)
if whereclause is not None:
select = select.where(whereclause)
select._oracle_visit = True
limit_clause = select._limit_clause
offset_clause = select._offset_clause
if limit_clause is not None or offset_clause is not None:
# See http://www.oracle.com/technology/oramag/oracle/06-sep/\
# o56asktom.html
#
# Generalized form of an Oracle pagination query:
# select ... from (
# select /*+ FIRST_ROWS(N) */ ...., rownum as ora_rn from
# ( select distinct ... where ... order by ...
# ) where ROWNUM <= :limit+:offset
# ) where ora_rn > :offset
# Outer select and "ROWNUM as ora_rn" can be dropped if
# limit=0
kwargs["select_wraps_for"] = select
select = select._generate()
select._oracle_visit = True
# Wrap the middle select and add the hint
limitselect = sql.select([c for c in select.c])
if (
limit_clause is not None
and self.dialect.optimize_limits
and select._simple_int_limit
):
limitselect = limitselect.prefix_with(
"/*+ FIRST_ROWS(%d) */" % select._limit
)
limitselect._oracle_visit = True
limitselect._is_wrapper = True
# add expressions to accommodate FOR UPDATE OF
for_update = select._for_update_arg
if for_update is not None and for_update.of:
for_update = for_update._clone()
for_update._copy_internals()
for elem in for_update.of:
select.append_column(elem)
adapter = sql_util.ClauseAdapter(select)
for_update.of = [
adapter.traverse(elem) for elem in for_update.of
]
# If needed, add the limiting clause
if limit_clause is not None:
if not self.dialect.use_binds_for_limits:
# use simple int limits, will raise an exception
# if the limit isn't specified this way
max_row = select._limit
if offset_clause is not None:
max_row += select._offset
max_row = sql.literal_column("%d" % max_row)
else:
max_row = limit_clause
if offset_clause is not None:
max_row = max_row + offset_clause
limitselect.append_whereclause(
sql.literal_column("ROWNUM") <= max_row
)
# If needed, add the ora_rn, and wrap again with offset.
if offset_clause is None:
limitselect._for_update_arg = for_update
select = limitselect
else:
limitselect = limitselect.column(
sql.literal_column("ROWNUM").label("ora_rn")
)
limitselect._oracle_visit = True
limitselect._is_wrapper = True
offsetselect = sql.select(
[c for c in limitselect.c if c.key != "ora_rn"]
)
offsetselect._oracle_visit = True
offsetselect._is_wrapper = True
if for_update is not None and for_update.of:
for elem in for_update.of:
if limitselect.corresponding_column(elem) is None:
limitselect.append_column(elem)
if not self.dialect.use_binds_for_limits:
offset_clause = sql.literal_column(
"%d" % select._offset
)
offsetselect.append_whereclause(
sql.literal_column("ora_rn") > offset_clause
)
offsetselect._for_update_arg = for_update
select = offsetselect
return compiler.SQLCompiler.visit_select(self, select, **kwargs)
def limit_clause(self, select, **kw):
return ""
def visit_empty_set_expr(self, type_):
return "SELECT 1 FROM DUAL WHERE 1!=1"
def for_update_clause(self, select, **kw):
if self.is_subquery():
return ""
tmp = " FOR UPDATE"
if select._for_update_arg.of:
tmp += " OF " + ", ".join(
self.process(elem, **kw) for elem in select._for_update_arg.of
)
if select._for_update_arg.nowait:
tmp += " NOWAIT"
if select._for_update_arg.skip_locked:
tmp += " SKIP LOCKED"
return tmp
def visit_is_distinct_from_binary(self, binary, operator, **kw):
return "DECODE(%s, %s, 0, 1) = 1" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_isnot_distinct_from_binary(self, binary, operator, **kw):
return "DECODE(%s, %s, 0, 1) = 0" % (
self.process(binary.left),
self.process(binary.right),
)
class OracleDDLCompiler(compiler.DDLCompiler):
def define_constraint_cascades(self, constraint):
text = ""
if constraint.ondelete is not None:
text += " ON DELETE %s" % constraint.ondelete
# oracle has no ON UPDATE CASCADE -
# its only available via triggers
# http://asktom.oracle.com/tkyte/update_cascade/index.html
if constraint.onupdate is not None:
util.warn(
"Oracle does not contain native UPDATE CASCADE "
"functionality - onupdates will not be rendered for foreign "
"keys. Consider using deferrable=True, initially='deferred' "
"or triggers."
)
return text
def visit_drop_table_comment(self, drop):
return "COMMENT ON TABLE %s IS ''" % self.preparer.format_table(
drop.element
)
def visit_create_index(self, create):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
if index.dialect_options["oracle"]["bitmap"]:
text += "BITMAP "
text += "INDEX %s ON %s (%s)" % (
self._prepared_index_name(index, include_schema=True),
preparer.format_table(index.table, use_schema=True),
", ".join(
self.sql_compiler.process(
expr, include_table=False, literal_binds=True
)
for expr in index.expressions
),
)
if index.dialect_options["oracle"]["compress"] is not False:
if index.dialect_options["oracle"]["compress"] is True:
text += " COMPRESS"
else:
text += " COMPRESS %d" % (
index.dialect_options["oracle"]["compress"]
)
return text
def post_create_table(self, table):
table_opts = []
opts = table.dialect_options["oracle"]
if opts["on_commit"]:
on_commit_options = opts["on_commit"].replace("_", " ").upper()
table_opts.append("\n ON COMMIT %s" % on_commit_options)
if opts["compress"]:
if opts["compress"] is True:
table_opts.append("\n COMPRESS")
else:
table_opts.append("\n COMPRESS FOR %s" % (opts["compress"]))
return "".join(table_opts)
def visit_computed_column(self, generated):
text = "GENERATED ALWAYS AS (%s)" % self.sql_compiler.process(
generated.sqltext, include_table=False, literal_binds=True
)
if generated.persisted is True:
raise exc.CompileError(
"Oracle computed columns do not support 'stored' persistence; "
"set the 'persisted' flag to None or False for Oracle support."
)
elif generated.persisted is False:
text += " VIRTUAL"
return text
class OracleIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = {x.lower() for x in RESERVED_WORDS}
illegal_initial_characters = {str(dig) for dig in range(0, 10)}.union(
["_", "$"]
)
def _bindparam_requires_quotes(self, value):
"""Return True if the given identifier requires quoting."""
lc_value = value.lower()
return (
lc_value in self.reserved_words
or value[0] in self.illegal_initial_characters
or not self.legal_characters.match(util.text_type(value))
)
def format_savepoint(self, savepoint):
name = savepoint.ident.lstrip("_")
return super(OracleIdentifierPreparer, self).format_savepoint(
savepoint, name
)
class OracleExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq, type_):
return self._execute_scalar(
"SELECT "
+ self.dialect.identifier_preparer.format_sequence(seq)
+ ".nextval FROM DUAL",
type_,
)
class OracleDialect(default.DefaultDialect):
name = "oracle"
supports_alter = True
supports_unicode_statements = False
supports_unicode_binds = False
max_identifier_length = 30
supports_simple_order_by_label = False
cte_follows_insert = True
supports_sequences = True
sequences_optional = False
postfetch_lastrowid = False
default_paramstyle = "named"
colspecs = colspecs
ischema_names = ischema_names
requires_name_normalize = True
supports_comments = True
supports_default_values = False
supports_empty_insert = False
statement_compiler = OracleCompiler
ddl_compiler = OracleDDLCompiler
type_compiler = OracleTypeCompiler
preparer = OracleIdentifierPreparer
execution_ctx_cls = OracleExecutionContext
reflection_options = ("oracle_resolve_synonyms",)
_use_nchar_for_unicode = False
construct_arguments = [
(
sa_schema.Table,
{"resolve_synonyms": False, "on_commit": None, "compress": False},
),
(sa_schema.Index, {"bitmap": False, "compress": False}),
]
def __init__(
self,
use_ansi=True,
optimize_limits=False,
use_binds_for_limits=True,
use_nchar_for_unicode=False,
exclude_tablespaces=("SYSTEM", "SYSAUX"),
**kwargs
):
default.DefaultDialect.__init__(self, **kwargs)
self._use_nchar_for_unicode = use_nchar_for_unicode
self.use_ansi = use_ansi
self.optimize_limits = optimize_limits
self.use_binds_for_limits = use_binds_for_limits
self.exclude_tablespaces = exclude_tablespaces
def initialize(self, connection):
super(OracleDialect, self).initialize(connection)
self.implicit_returning = self.__dict__.get(
"implicit_returning", self.server_version_info > (10,)
)
if self._is_oracle_8:
self.colspecs = self.colspecs.copy()
self.colspecs.pop(sqltypes.Interval)
self.use_ansi = False
def _get_effective_compat_server_version_info(self, connection):
# dialect does not need compat levels below 12.2, so don't query
# in those cases
if self.server_version_info < (12, 2):
return self.server_version_info
try:
compat = connection.execute(
"SELECT value FROM v$parameter WHERE name = 'compatible'"
).scalar()
except exc.DBAPIError:
compat = None
if compat:
try:
return tuple(int(x) for x in compat.split("."))
except:
return self.server_version_info
else:
return self.server_version_info
@property
def _is_oracle_8(self):
return self.server_version_info and self.server_version_info < (9,)
@property
def _supports_table_compression(self):
return self.server_version_info and self.server_version_info >= (10, 1)
@property
def _supports_table_compress_for(self):
return self.server_version_info and self.server_version_info >= (11,)
@property
def _supports_char_length(self):
return not self._is_oracle_8
def do_release_savepoint(self, connection, name):
# Oracle does not support RELEASE SAVEPOINT
pass
def _check_max_identifier_length(self, connection):
if self._get_effective_compat_server_version_info(connection) >= (
12,
2,
):
util.warn(
"Oracle version %r is known to have a maximum "
"identifier length of 128, rather than the historical default "
"of 30. SQLAlchemy 1.4 will use 128 for this "
"database; please set max_identifier_length=128 "
"in create_engine() in order to "
"test the application with this new length, or set to 30 in "
"order to assure that 30 continues to be used. "
"In particular, pay close attention to the behavior of "
"database migrations as dynamically generated names may "
"change. See the section 'Max Identifier Lengths' in the "
"SQLAlchemy Oracle dialect documentation for background."
% ((self.server_version_info,))
)
# use the default
return None
def _check_unicode_returns(self, connection):
additional_tests = [
expression.cast(
expression.literal_column("'test nvarchar2 returns'"),
sqltypes.NVARCHAR(60),
)
]
return super(OracleDialect, self)._check_unicode_returns(
connection, additional_tests
)
_isolation_lookup = ["READ COMMITTED"]
def get_isolation_level(self, connection):
return "READ COMMITTED"
def set_isolation_level(self, connection, level):
# prior to adding AUTOCOMMIT support for cx_Oracle, the Oracle dialect
# had no notion of setting the isolation level. As Oracle
# does not have a straightforward way of getting the isolation level
# if a server-side transaction is not yet in progress, we currently
# hardcode to only support "READ COMMITTED" and "AUTOCOMMIT" at the
# cx_oracle level. See #5200.
pass
def has_table(self, connection, table_name, schema=None):
if not schema:
schema = self.default_schema_name
cursor = connection.execute(
sql.text(
"SELECT table_name FROM all_tables "
"WHERE table_name = :name AND owner = :schema_name"
),
name=self.denormalize_name(table_name),
schema_name=self.denormalize_name(schema),
)
return cursor.first() is not None
def has_sequence(self, connection, sequence_name, schema=None):
if not schema:
schema = self.default_schema_name
cursor = connection.execute(
sql.text(
"SELECT sequence_name FROM all_sequences "
"WHERE sequence_name = :name AND "
"sequence_owner = :schema_name"
),
name=self.denormalize_name(sequence_name),
schema_name=self.denormalize_name(schema),
)
return cursor.first() is not None
def _get_default_schema_name(self, connection):
return self.normalize_name(
connection.execute("SELECT USER FROM DUAL").scalar()
)
def _resolve_synonym(
self,
connection,
desired_owner=None,
desired_synonym=None,
desired_table=None,
):
"""search for a local synonym matching the given desired owner/name.
if desired_owner is None, attempts to locate a distinct owner.
returns the actual name, owner, dblink name, and synonym name if
found.
"""
q = (
"SELECT owner, table_owner, table_name, db_link, "
"synonym_name FROM all_synonyms WHERE "
)
clauses = []
params = {}
if desired_synonym:
clauses.append("synonym_name = :synonym_name")
params["synonym_name"] = desired_synonym
if desired_owner:
clauses.append("owner = :desired_owner")
params["desired_owner"] = desired_owner
if desired_table:
clauses.append("table_name = :tname")
params["tname"] = desired_table
q += " AND ".join(clauses)
result = connection.execute(sql.text(q), **params)
if desired_owner:
row = result.first()
if row:
return (
row["table_name"],
row["table_owner"],
row["db_link"],
row["synonym_name"],
)
else:
return None, None, None, None
else:
rows = result.fetchall()
if len(rows) > 1:
raise AssertionError(
"There are multiple tables visible to the schema, you "
"must specify owner"
)
elif len(rows) == 1:
row = rows[0]
return (
row["table_name"],
row["table_owner"],
row["db_link"],
row["synonym_name"],
)
else:
return None, None, None, None
@reflection.cache
def _prepare_reflection_args(
self,
connection,
table_name,
schema=None,
resolve_synonyms=False,
dblink="",
**kw
):
if resolve_synonyms:
actual_name, owner, dblink, synonym = self._resolve_synonym(
connection,
desired_owner=self.denormalize_name(schema),
desired_synonym=self.denormalize_name(table_name),
)
else:
actual_name, owner, dblink, synonym = None, None, None, None
if not actual_name:
actual_name = self.denormalize_name(table_name)
if dblink:
# using user_db_links here since all_db_links appears
# to have more restricted permissions.
# http://docs.oracle.com/cd/B28359_01/server.111/b28310/ds_admin005.htm
# will need to hear from more users if we are doing
# the right thing here. See [ticket:2619]
owner = connection.scalar(
sql.text(
"SELECT username FROM user_db_links " "WHERE db_link=:link"
),
link=dblink,
)
dblink = "@" + dblink
elif not owner:
owner = self.denormalize_name(schema or self.default_schema_name)
return (actual_name, owner, dblink or "", synonym)
@reflection.cache
def get_schema_names(self, connection, **kw):
s = "SELECT username FROM all_users ORDER BY username"
cursor = connection.execute(s)
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
schema = self.denormalize_name(schema or self.default_schema_name)
# note that table_names() isn't loading DBLINKed or synonym'ed tables
if schema is None:
schema = self.default_schema_name
sql_str = "SELECT table_name FROM all_tables WHERE "
if self.exclude_tablespaces:
sql_str += (
"nvl(tablespace_name, 'no tablespace') "
"NOT IN (%s) AND "
% (", ".join(["'%s'" % ts for ts in self.exclude_tablespaces]))
)
sql_str += (
"OWNER = :owner " "AND IOT_NAME IS NULL " "AND DURATION IS NULL"
)
cursor = connection.execute(sql.text(sql_str), owner=schema)
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_temp_table_names(self, connection, **kw):
schema = self.denormalize_name(self.default_schema_name)
sql_str = "SELECT table_name FROM all_tables WHERE "
if self.exclude_tablespaces:
sql_str += (
"nvl(tablespace_name, 'no tablespace') "
"NOT IN (%s) AND "
% (", ".join(["'%s'" % ts for ts in self.exclude_tablespaces]))
)
sql_str += (
"OWNER = :owner "
"AND IOT_NAME IS NULL "
"AND DURATION IS NOT NULL"
)
cursor = connection.execute(sql.text(sql_str), owner=schema)
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
schema = self.denormalize_name(schema or self.default_schema_name)
s = sql.text("SELECT view_name FROM all_views WHERE owner = :owner")
cursor = connection.execute(s, owner=self.denormalize_name(schema))
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_table_options(self, connection, table_name, schema=None, **kw):
options = {}
resolve_synonyms = kw.get("oracle_resolve_synonyms", False)
dblink = kw.get("dblink", "")
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
params = {"table_name": table_name}
columns = ["table_name"]
if self._supports_table_compression:
columns.append("compression")
if self._supports_table_compress_for:
columns.append("compress_for")
text = (
"SELECT %(columns)s "
"FROM ALL_TABLES%(dblink)s "
"WHERE table_name = :table_name"
)
if schema is not None:
params["owner"] = schema
text += " AND owner = :owner "
text = text % {"dblink": dblink, "columns": ", ".join(columns)}
result = connection.execute(sql.text(text), **params)
enabled = dict(DISABLED=False, ENABLED=True)
row = result.first()
if row:
if "compression" in row and enabled.get(row.compression, False):
if "compress_for" in row:
options["oracle_compress"] = row.compress_for
else:
options["oracle_compress"] = True
return options
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
"""
kw arguments can be:
oracle_resolve_synonyms
dblink
"""
resolve_synonyms = kw.get("oracle_resolve_synonyms", False)
dblink = kw.get("dblink", "")
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
columns = []
if self._supports_char_length:
char_length_col = "char_length"
else:
char_length_col = "data_length"
params = {"table_name": table_name}
text = """
SELECT col.column_name, col.data_type, col.%(char_length_col)s,
col.data_precision, col.data_scale, col.nullable,
col.data_default, com.comments, col.virtual_column\
FROM all_tab_cols%(dblink)s col
LEFT JOIN all_col_comments%(dblink)s com
ON col.table_name = com.table_name
AND col.column_name = com.column_name
AND col.owner = com.owner
WHERE col.table_name = :table_name
AND col.hidden_column = 'NO'
"""
if schema is not None:
params["owner"] = schema
text += " AND col.owner = :owner "
text += " ORDER BY col.column_id"
text = text % {"dblink": dblink, "char_length_col": char_length_col}
c = connection.execute(sql.text(text), **params)
for row in c:
colname = self.normalize_name(row[0])
orig_colname = row[0]
coltype = row[1]
length = row[2]
precision = row[3]
scale = row[4]
nullable = row[5] == "Y"
default = row[6]
comment = row[7]
generated = row[8]
if coltype == "NUMBER":
if precision is None and scale == 0:
coltype = INTEGER()
else:
coltype = NUMBER(precision, scale)
elif coltype == "FLOAT":
# TODO: support "precision" here as "binary_precision"
coltype = FLOAT()
elif coltype in ("VARCHAR2", "NVARCHAR2", "CHAR", "NCHAR"):
coltype = self.ischema_names.get(coltype)(length)
elif "WITH TIME ZONE" in coltype:
coltype = TIMESTAMP(timezone=True)
else:
coltype = re.sub(r"\(\d+\)", "", coltype)
try:
coltype = self.ischema_names[coltype]
except KeyError:
util.warn(
"Did not recognize type '%s' of column '%s'"
% (coltype, colname)
)
coltype = sqltypes.NULLTYPE
if generated == "YES":
computed = dict(sqltext=default)
default = None
else:
computed = None
cdict = {
"name": colname,
"type": coltype,
"nullable": nullable,
"default": default,
"autoincrement": "auto",
"comment": comment,
}
if orig_colname.lower() == orig_colname:
cdict["quote"] = True
if computed is not None:
cdict["computed"] = computed
columns.append(cdict)
return columns
@reflection.cache
def get_table_comment(
self,
connection,
table_name,
schema=None,
resolve_synonyms=False,
dblink="",
**kw
):
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
if not schema:
schema = self.default_schema_name
COMMENT_SQL = """
SELECT comments
FROM all_tab_comments
WHERE table_name = :table_name AND owner = :schema_name
"""
c = connection.execute(
sql.text(COMMENT_SQL), table_name=table_name, schema_name=schema
)
return {"text": c.scalar()}
@reflection.cache
def get_indexes(
self,
connection,
table_name,
schema=None,
resolve_synonyms=False,
dblink="",
**kw
):
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
indexes = []
params = {"table_name": table_name}
text = (
"SELECT a.index_name, a.column_name, "
"\nb.index_type, b.uniqueness, b.compression, b.prefix_length "
"\nFROM ALL_IND_COLUMNS%(dblink)s a, "
"\nALL_INDEXES%(dblink)s b "
"\nWHERE "
"\na.index_name = b.index_name "
"\nAND a.table_owner = b.table_owner "
"\nAND a.table_name = b.table_name "
"\nAND a.table_name = :table_name "
)
if schema is not None:
params["schema"] = schema
text += "AND a.table_owner = :schema "
text += "ORDER BY a.index_name, a.column_position"
text = text % {"dblink": dblink}
q = sql.text(text)
rp = connection.execute(q, **params)
indexes = []
last_index_name = None
pk_constraint = self.get_pk_constraint(
connection,
table_name,
schema,
resolve_synonyms=resolve_synonyms,
dblink=dblink,
info_cache=kw.get("info_cache"),
)
uniqueness = dict(NONUNIQUE=False, UNIQUE=True)
enabled = dict(DISABLED=False, ENABLED=True)
oracle_sys_col = re.compile(r"SYS_NC\d+\$", re.IGNORECASE)
index = None
for rset in rp:
index_name_normalized = self.normalize_name(rset.index_name)
# skip primary key index. This is refined as of
            # [ticket:5421].  Note that ALL_INDEXES.GENERATED will be "Y"
# if the name of this index was generated by Oracle, however
# if a named primary key constraint was created then this flag
# is false.
if (
pk_constraint
and index_name_normalized == pk_constraint["name"]
):
continue
if rset.index_name != last_index_name:
index = dict(
name=index_name_normalized,
column_names=[],
dialect_options={},
)
indexes.append(index)
index["unique"] = uniqueness.get(rset.uniqueness, False)
if rset.index_type in ("BITMAP", "FUNCTION-BASED BITMAP"):
index["dialect_options"]["oracle_bitmap"] = True
if enabled.get(rset.compression, False):
index["dialect_options"][
"oracle_compress"
] = rset.prefix_length
# filter out Oracle SYS_NC names. could also do an outer join
# to the all_tab_columns table and check for real col names there.
if not oracle_sys_col.match(rset.column_name):
index["column_names"].append(
self.normalize_name(rset.column_name)
)
last_index_name = rset.index_name
return indexes
@reflection.cache
def _get_constraint_data(
self, connection, table_name, schema=None, dblink="", **kw
):
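        # fetch raw constraint rows from ALL_CONSTRAINTS outer joined to
        # ALL_CONS_COLUMNS for both the local and the referenced side;
        # shared by the primary key, foreign key, unique and check
        # constraint reflection methods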
params = {"table_name": table_name}
text = (
"SELECT"
"\nac.constraint_name," # 0
"\nac.constraint_type," # 1
"\nloc.column_name AS local_column," # 2
"\nrem.table_name AS remote_table," # 3
"\nrem.column_name AS remote_column," # 4
"\nrem.owner AS remote_owner," # 5
"\nloc.position as loc_pos," # 6
"\nrem.position as rem_pos," # 7
"\nac.search_condition," # 8
"\nac.delete_rule" # 9
"\nFROM all_constraints%(dblink)s ac,"
"\nall_cons_columns%(dblink)s loc,"
"\nall_cons_columns%(dblink)s rem"
"\nWHERE ac.table_name = :table_name"
"\nAND ac.constraint_type IN ('R','P', 'U', 'C')"
)
if schema is not None:
params["owner"] = schema
text += "\nAND ac.owner = :owner"
text += (
"\nAND ac.owner = loc.owner"
"\nAND ac.constraint_name = loc.constraint_name"
"\nAND ac.r_owner = rem.owner(+)"
"\nAND ac.r_constraint_name = rem.constraint_name(+)"
"\nAND (rem.position IS NULL or loc.position=rem.position)"
"\nORDER BY ac.constraint_name, loc.position"
)
text = text % {"dblink": dblink}
rp = connection.execute(sql.text(text), **params)
constraint_data = rp.fetchall()
return constraint_data
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
resolve_synonyms = kw.get("oracle_resolve_synonyms", False)
dblink = kw.get("dblink", "")
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
pkeys = []
constraint_name = None
constraint_data = self._get_constraint_data(
connection,
table_name,
schema,
dblink,
info_cache=kw.get("info_cache"),
)
for row in constraint_data:
(
cons_name,
cons_type,
local_column,
remote_table,
remote_column,
remote_owner,
) = row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])
if cons_type == "P":
if constraint_name is None:
constraint_name = self.normalize_name(cons_name)
pkeys.append(local_column)
return {"constrained_columns": pkeys, "name": constraint_name}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
"""
kw arguments can be:
oracle_resolve_synonyms
dblink
"""
requested_schema = schema # to check later on
resolve_synonyms = kw.get("oracle_resolve_synonyms", False)
dblink = kw.get("dblink", "")
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
constraint_data = self._get_constraint_data(
connection,
table_name,
schema,
dblink,
info_cache=kw.get("info_cache"),
)
def fkey_rec():
return {
"name": None,
"constrained_columns": [],
"referred_schema": None,
"referred_table": None,
"referred_columns": [],
"options": {},
}
fkeys = util.defaultdict(fkey_rec)
for row in constraint_data:
(
cons_name,
cons_type,
local_column,
remote_table,
remote_column,
remote_owner,
) = row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])
cons_name = self.normalize_name(cons_name)
if cons_type == "R":
if remote_table is None:
# ticket 363
util.warn(
(
"Got 'None' querying 'table_name' from "
"all_cons_columns%(dblink)s - does the user have "
"proper rights to the table?"
)
% {"dblink": dblink}
)
continue
rec = fkeys[cons_name]
rec["name"] = cons_name
local_cols, remote_cols = (
rec["constrained_columns"],
rec["referred_columns"],
)
if not rec["referred_table"]:
if resolve_synonyms:
(
ref_remote_name,
ref_remote_owner,
ref_dblink,
ref_synonym,
) = self._resolve_synonym(
connection,
desired_owner=self.denormalize_name(remote_owner),
desired_table=self.denormalize_name(remote_table),
)
if ref_synonym:
remote_table = self.normalize_name(ref_synonym)
remote_owner = self.normalize_name(
ref_remote_owner
)
rec["referred_table"] = remote_table
if (
requested_schema is not None
or self.denormalize_name(remote_owner) != schema
):
rec["referred_schema"] = remote_owner
if row[9] != "NO ACTION":
rec["options"]["ondelete"] = row[9]
local_cols.append(local_column)
remote_cols.append(remote_column)
return list(fkeys.values())
@reflection.cache
def get_unique_constraints(
self, connection, table_name, schema=None, **kw
):
resolve_synonyms = kw.get("oracle_resolve_synonyms", False)
dblink = kw.get("dblink", "")
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
constraint_data = self._get_constraint_data(
connection,
table_name,
schema,
dblink,
info_cache=kw.get("info_cache"),
)
unique_keys = filter(lambda x: x[1] == "U", constraint_data)
uniques_group = groupby(unique_keys, lambda x: x[0])
index_names = {
ix["name"]
for ix in self.get_indexes(connection, table_name, schema=schema)
}
return [
{
"name": name,
"column_names": cols,
"duplicates_index": name if name in index_names else None,
}
for name, cols in [
[
self.normalize_name(i[0]),
[self.normalize_name(x[2]) for x in i[1]],
]
for i in uniques_group
]
]
@reflection.cache
def get_view_definition(
self,
connection,
view_name,
schema=None,
resolve_synonyms=False,
dblink="",
**kw
):
info_cache = kw.get("info_cache")
(view_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
view_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
params = {"view_name": view_name}
text = "SELECT text FROM all_views WHERE view_name=:view_name"
if schema is not None:
text += " AND owner = :schema"
params["schema"] = schema
rp = connection.execute(sql.text(text), **params).scalar()
if rp:
if util.py2k:
rp = rp.decode(self.encoding)
return rp
else:
return None
@reflection.cache
def get_check_constraints(
self, connection, table_name, schema=None, include_all=False, **kw
):
resolve_synonyms = kw.get("oracle_resolve_synonyms", False)
dblink = kw.get("dblink", "")
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
constraint_data = self._get_constraint_data(
connection,
table_name,
schema,
dblink,
info_cache=kw.get("info_cache"),
)
check_constraints = filter(lambda x: x[1] == "C", constraint_data)
return [
{"name": self.normalize_name(cons[0]), "sqltext": cons[8]}
for cons in check_constraints
if include_all or not re.match(r"..+?. IS NOT NULL$", cons[8])
]
class _OuterJoinColumn(sql.ClauseElement):
__visit_name__ = "outer_join_column"
def __init__(self, column):
self.column = column
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/sqlite/pysqlcipher.py
|
# sqlite/pysqlcipher.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sqlite+pysqlcipher
:name: pysqlcipher
:dbapi: pysqlcipher
:connectstring: sqlite+pysqlcipher://:passphrase/file_path[?kdf_iter=<iter>]
:url: https://pypi.python.org/pypi/pysqlcipher
``pysqlcipher`` is a fork of the standard ``pysqlite`` driver to make
use of the `SQLCipher <https://www.zetetic.net/sqlcipher>`_ backend.
``pysqlcipher3`` is a fork of ``pysqlcipher`` for Python 3. This dialect
will attempt to import it if ``pysqlcipher`` is not present.
.. versionadded:: 1.1.4 - added fallback import for pysqlcipher3
.. versionadded:: 0.9.9 - added pysqlcipher dialect
Driver
------
The driver here is the
`pysqlcipher <https://pypi.python.org/pypi/pysqlcipher>`_
driver, which makes use of the SQLCipher engine. This system essentially
introduces new PRAGMA commands to SQLite which allows the setting of a
passphrase and other encryption parameters, allowing the database
file to be encrypted.
``pysqlcipher3`` is a fork of ``pysqlcipher`` with support for Python 3;
the driver is otherwise the same.
Connect Strings
---------------
The format of the connect string is in every way the same as that
of the :mod:`~sqlalchemy.dialects.sqlite.pysqlite` driver, except that the
"password" field is now accepted, which should contain a passphrase::
e = create_engine('sqlite+pysqlcipher://:testing@/foo.db')
For an absolute file path, two leading slashes should be used for the
database name::
e = create_engine('sqlite+pysqlcipher://:testing@//path/to/foo.db')
A selection of additional encryption-related pragmas supported by SQLCipher
as documented at https://www.zetetic.net/sqlcipher/sqlcipher-api/ can be passed
in the query string, and will result in that PRAGMA being called for each
new connection. Currently, ``cipher``, ``kdf_iter``,
``cipher_page_size`` and ``cipher_use_hmac`` are supported::
e = create_engine('sqlite+pysqlcipher://:testing@/foo.db?cipher=aes-256-cfb&kdf_iter=64000')
Pooling Behavior
----------------
The driver makes a change to the default pool behavior of pysqlite
as described in :ref:`pysqlite_threading_pooling`. The pysqlcipher driver
has been observed to be significantly slower on connection than the
pysqlite driver, most likely due to the encryption overhead, so the
dialect here defaults to using the :class:`.SingletonThreadPool`
implementation,
instead of the :class:`.NullPool` pool used by pysqlite. As always, the pool
implementation is entirely configurable using the
:paramref:`_sa.create_engine.poolclass` parameter; the :class:`.StaticPool`
may
be more feasible for single-threaded use, or :class:`.NullPool` may be used
to prevent unencrypted connections from being held open for long periods of
time, at the expense of slower startup time for new connections.
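As a brief sketch of overriding the pool (assuming the pysqlcipher driver is
installed; the passphrase and file name below are placeholders)::
    from sqlalchemy import create_engine
    from sqlalchemy.pool import NullPool
    e = create_engine(
        'sqlite+pysqlcipher://:testing@/foo.db',
        poolclass=NullPool,
    )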
""" # noqa
from __future__ import absolute_import
from .pysqlite import SQLiteDialect_pysqlite
from ... import pool
from ...engine import url as _url
class SQLiteDialect_pysqlcipher(SQLiteDialect_pysqlite):
driver = "pysqlcipher"
pragmas = ("kdf_iter", "cipher", "cipher_page_size", "cipher_use_hmac")
@classmethod
def dbapi(cls):
try:
from pysqlcipher import dbapi2 as sqlcipher
except ImportError as e:
try:
from pysqlcipher3 import dbapi2 as sqlcipher
except ImportError:
raise e
return sqlcipher
@classmethod
def get_pool_class(cls, url):
return pool.SingletonThreadPool
def connect(self, *cargs, **cparams):
passphrase = cparams.pop("passphrase", "")
pragmas = dict((key, cparams.pop(key, None)) for key in self.pragmas)
conn = super(SQLiteDialect_pysqlcipher, self).connect(
*cargs, **cparams
)
conn.execute('pragma key="%s"' % passphrase)
for prag, value in pragmas.items():
if value is not None:
conn.execute('pragma %s="%s"' % (prag, value))
return conn
def create_connect_args(self, url):
super_url = _url.URL(
url.drivername,
username=url.username,
host=url.host,
database=url.database,
query=url.query,
)
c_args, opts = super(
SQLiteDialect_pysqlcipher, self
).create_connect_args(super_url)
opts["passphrase"] = url.password
return c_args, opts
dialect = SQLiteDialect_pysqlcipher
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/sqlite/__init__.py
|
# sqlite/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base # noqa
from . import pysqlcipher # noqa
from . import pysqlite # noqa
from .base import BLOB
from .base import BOOLEAN
from .base import CHAR
from .base import DATE
from .base import DATETIME
from .base import DECIMAL
from .base import FLOAT
from .base import INTEGER
from .base import JSON
from .base import NUMERIC
from .base import REAL
from .base import SMALLINT
from .base import TEXT
from .base import TIME
from .base import TIMESTAMP
from .base import VARCHAR
# default dialect
base.dialect = dialect = pysqlite.dialect
__all__ = (
"BLOB",
"BOOLEAN",
"CHAR",
"DATE",
"DATETIME",
"DECIMAL",
"FLOAT",
"INTEGER",
"JSON",
"NUMERIC",
"SMALLINT",
"TEXT",
"TIME",
"TIMESTAMP",
"VARCHAR",
"REAL",
"dialect",
)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/sqlite/provision.py
|
import os
from ...engine import url as sa_url
from ...testing.provision import create_db
from ...testing.provision import drop_db
from ...testing.provision import follower_url_from_main
from ...testing.provision import log
from ...testing.provision import post_configure_engine
from ...testing.provision import run_reap_dbs
from ...testing.provision import temp_table_keyword_args
@follower_url_from_main.for_db("sqlite")
def _sqlite_follower_url_from_main(url, ident):
url = sa_url.make_url(url)
if not url.database or url.database == ":memory:":
return url
else:
return sa_url.make_url("sqlite:///%s.db" % ident)
@post_configure_engine.for_db("sqlite")
def _sqlite_post_configure_engine(url, engine, follower_ident):
from sqlalchemy import event
@event.listens_for(engine, "connect")
def connect(dbapi_connection, connection_record):
# use file DBs in all cases, memory acts kind of strangely
# as an attached
if not follower_ident:
# note this test_schema.db gets created for all test runs.
# there's not any dedicated cleanup step for it. it in some
# ways corresponds to the "test.test_schema" schema that's
# expected to be already present, so for now it just stays
# in a given checkout directory.
dbapi_connection.execute(
'ATTACH DATABASE "test_schema.db" AS test_schema'
)
else:
dbapi_connection.execute(
'ATTACH DATABASE "%s_test_schema.db" AS test_schema'
% follower_ident
)
@create_db.for_db("sqlite")
def _sqlite_create_db(cfg, eng, ident):
pass
@drop_db.for_db("sqlite")
def _sqlite_drop_db(cfg, eng, ident):
for path in ["%s.db" % ident, "%s_test_schema.db" % ident]:
if os.path.exists(path):
log.info("deleting SQLite database file: %s" % path)
os.remove(path)
@temp_table_keyword_args.for_db("sqlite")
def _sqlite_temp_table_keyword_args(cfg, eng):
return {"prefixes": ["TEMPORARY"]}
@run_reap_dbs.for_db("sqlite")
def _reap_sqlite_dbs(url, idents):
log.info("db reaper connecting to %r", url)
log.info("identifiers in file: %s", ", ".join(idents))
for ident in idents:
# we don't have a config so we can't call _sqlite_drop_db due to the
# decorator
for path in ["%s.db" % ident, "%s_test_schema.db" % ident]:
if os.path.exists(path):
log.info("deleting SQLite database file: %s" % path)
os.remove(path)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/sqlite/json.py
|
from ... import types as sqltypes
class JSON(sqltypes.JSON):
"""SQLite JSON type.
SQLite supports JSON as of version 3.9 through its JSON1_ extension. Note
that JSON1_ is a
`loadable extension <https://www.sqlite.org/loadext.html>`_ and as such
may not be available, or may require run-time loading.
The :class:`_sqlite.JSON` type supports persistence of JSON values
as well as the core index operations provided by :class:`_types.JSON`
datatype, by adapting the operations to render the ``JSON_EXTRACT``
function wrapped in the ``JSON_QUOTE`` function at the database level.
Extracted values are quoted in order to ensure that the results are
always JSON string values.
.. versionadded:: 1.3
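A short usage sketch (the table and key names here are hypothetical)::
    from sqlalchemy import Column, Integer, MetaData, Table
    from sqlalchemy.dialects.sqlite import JSON
    metadata = MetaData()
    data_table = Table(
        'data_table', metadata,
        Column('id', Integer, primary_key=True),
        Column('data', JSON),
    )
    # index operation; rendered via JSON_EXTRACT / JSON_QUOTE as described
    expr = data_table.c.data['key1'] == 'some value'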
.. seealso::
JSON1_
.. _JSON1: https://www.sqlite.org/json1.html
"""
# Note: these objects currently match exactly those of MySQL; however, since
# they are not generalizable to all JSON implementations, they remain
# separately implemented for each dialect.
class _FormatTypeMixin(object):
def _format_value(self, value):
raise NotImplementedError()
def bind_processor(self, dialect):
super_proc = self.string_bind_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
def literal_processor(self, dialect):
super_proc = self.string_literal_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
class JSONIndexType(_FormatTypeMixin, sqltypes.JSON.JSONIndexType):
def _format_value(self, value):
if isinstance(value, int):
value = "$[%s]" % value
else:
value = '$."%s"' % value
return value
class JSONPathType(_FormatTypeMixin, sqltypes.JSON.JSONPathType):
def _format_value(self, value):
return "$%s" % (
"".join(
[
"[%s]" % elem if isinstance(elem, int) else '."%s"' % elem
for elem in value
]
)
)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/sqlite/base.py
|
# sqlite/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: sqlite
:name: SQLite
.. _sqlite_datetime:
Date and Time Types
-------------------
SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does
not provide out of the box functionality for translating values between Python
`datetime` objects and a SQLite-supported format. SQLAlchemy's own
:class:`~sqlalchemy.types.DateTime` and related types provide date formatting
and parsing functionality when SQLite is used. The implementation classes are
:class:`_sqlite.DATETIME`, :class:`_sqlite.DATE` and :class:`_sqlite.TIME`.
These types represent dates and times as ISO formatted strings, which also
nicely support ordering. There's no reliance on typical "libc" internals for
these functions so historical dates are fully supported.
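As a minimal sketch, the generic :class:`~sqlalchemy.types.DateTime` type can
be used directly and the value is persisted as an ISO-formatted string on
SQLite (the table and column names here are illustrative only)::
    from sqlalchemy import Column, DateTime, Integer, MetaData, Table
    metadata = MetaData()
    log_table = Table(
        'log', metadata,
        Column('id', Integer, primary_key=True),
        # stored as e.g. "2011-03-15 12:05:57.105580"
        Column('created_at', DateTime),
    )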
Ensuring Text affinity
^^^^^^^^^^^^^^^^^^^^^^
The DDL rendered for these types is the standard ``DATE``, ``TIME``
and ``DATETIME`` indicators. However, custom storage formats can also be
applied to these types. When the
storage format is detected as containing only numeric characters, the DDL for
these types is rendered as ``DATE_CHAR``, ``TIME_CHAR``, and ``DATETIME_CHAR``,
so that the column continues to have textual affinity.
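For example, a :class:`_sqlite.DATE` configured with a purely numeric storage
format would otherwise imply numeric affinity, so its DDL is emitted as
``DATE_CHAR`` (a sketch; the format and regexp here are illustrative)::
    from sqlalchemy.dialects.sqlite import DATE
    d = DATE(
        storage_format="%(year)04d%(month)02d%(day)02d",
        regexp=r"(\d{4})(\d{2})(\d{2})",
    )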
.. seealso::
`Type Affinity <http://www.sqlite.org/datatype3.html#affinity>`_ -
in the SQLite documentation
.. _sqlite_autoincrement:
SQLite Auto Incrementing Behavior
----------------------------------
Background on SQLite's autoincrement is at: http://sqlite.org/autoinc.html
Key concepts:
* SQLite has an implicit "auto increment" feature that takes place for any
non-composite primary-key column that is specifically created using
"INTEGER PRIMARY KEY" for the type + primary key.
* SQLite also has an explicit "AUTOINCREMENT" keyword, that is **not**
equivalent to the implicit autoincrement feature; this keyword is not
recommended for general use. SQLAlchemy does not render this keyword
unless a special SQLite-specific directive is used (see below). However,
it still requires that the column's type is named "INTEGER".
Using the AUTOINCREMENT Keyword
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To specifically render the AUTOINCREMENT keyword on the primary key column
when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table
construct::
Table('sometable', metadata,
Column('id', Integer, primary_key=True),
sqlite_autoincrement=True)
Allowing autoincrement behavior with SQLAlchemy types other than Integer/INTEGER
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
SQLite's typing model is based on naming conventions. Among other things, this
means that any type name which contains the substring ``"INT"`` will be
determined to be of "integer affinity". A type named ``"BIGINT"``,
``"SPECIAL_INT"`` or even ``"XYZINTQPR"``, will be considered by SQLite to be
of "integer" affinity. However, **the SQLite autoincrement feature, whether
implicitly or explicitly enabled, requires that the name of the column's type
is exactly the string "INTEGER"**. Therefore, if an application uses a type
like :class:`.BigInteger` for a primary key, on SQLite this type will need to
be rendered as the name ``"INTEGER"`` when emitting the initial ``CREATE
TABLE`` statement in order for the autoincrement behavior to be available.
One approach to achieve this is to use :class:`.Integer` on SQLite
only using :meth:`.TypeEngine.with_variant`::
table = Table(
"my_table", metadata,
Column("id", BigInteger().with_variant(Integer, "sqlite"), primary_key=True)
)
Another is to use a subclass of :class:`.BigInteger` that overrides its DDL
name to be ``INTEGER`` when compiled against SQLite::
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
class SLBigInteger(BigInteger):
pass
@compiles(SLBigInteger, 'sqlite')
def bi_c(element, compiler, **kw):
return "INTEGER"
@compiles(SLBigInteger)
def bi_c(element, compiler, **kw):
return compiler.visit_BIGINT(element, **kw)
table = Table(
"my_table", metadata,
Column("id", SLBigInteger(), primary_key=True)
)
.. seealso::
:meth:`.TypeEngine.with_variant`
:ref:`sqlalchemy.ext.compiler_toplevel`
`Datatypes In SQLite Version 3 <http://sqlite.org/datatype3.html>`_
.. _sqlite_concurrency:
Database Locking Behavior / Concurrency
---------------------------------------
SQLite is not designed for a high level of write concurrency. The database
itself, being a file, is locked completely during write operations within
transactions, meaning exactly one "connection" (in reality a file handle)
has exclusive access to the database during this period - all other
"connections" will be blocked during this time.
The Python DBAPI specification also calls for a connection model that is
always in a transaction; there is no ``connection.begin()`` method,
only ``connection.commit()`` and ``connection.rollback()``, upon which a
new transaction is to be begun immediately. This may seem to imply
that the SQLite driver would in theory allow only a single filehandle on a
particular database file at any time; however, there are several
factors both within SQLite itself as well as within the pysqlite driver
which loosen this restriction significantly.
However, no matter what locking modes are used, SQLite will still always
lock the database file once a transaction is started and DML (e.g. INSERT,
UPDATE, DELETE) has at least been emitted, and this will block
other transactions at least at the point that they also attempt to emit DML.
By default, the length of time on this block is very short before it times out
with an error.
This behavior becomes more critical when used in conjunction with the
SQLAlchemy ORM. SQLAlchemy's :class:`.Session` object by default runs
within a transaction, and with its autoflush model, may emit DML preceding
any SELECT statement. This may lead to a SQLite database that locks
more quickly than is expected. The locking mode of SQLite and the pysqlite
driver can be manipulated to some degree, however it should be noted that
achieving a high degree of write-concurrency with SQLite is a losing battle.
For more information on SQLite's lack of write concurrency by design, please
see
`Situations Where Another RDBMS May Work Better - High Concurrency
<http://www.sqlite.org/whentouse.html>`_ near the bottom of the page.
The following subsections introduce areas that are impacted by SQLite's
file-based architecture and additionally will usually require workarounds to
work when using the pysqlite driver.
.. _sqlite_isolation_level:
Transaction Isolation Level / Autocommit
----------------------------------------
SQLite supports "transaction isolation" in a non-standard way, along two
axes. One is that of the
`PRAGMA read_uncommitted <http://www.sqlite.org/pragma.html#pragma_read_uncommitted>`_
instruction. This setting can essentially switch SQLite between its
default mode of ``SERIALIZABLE`` isolation, and a "dirty read" isolation
mode normally referred to as ``READ UNCOMMITTED``.
SQLAlchemy ties into this PRAGMA statement using the
:paramref:`_sa.create_engine.isolation_level` parameter of
:func:`_sa.create_engine`.
Valid values for this parameter when used with SQLite are ``"SERIALIZABLE"``
and ``"READ UNCOMMITTED"`` corresponding to a value of 0 and 1, respectively.
SQLite defaults to ``SERIALIZABLE``, however its behavior is impacted by
the pysqlite driver's default behavior.
When using the pysqlite driver, the ``"AUTOCOMMIT"`` isolation level is also
available, which will alter the pysqlite connection using the ``.isolation_level``
attribute on the DBAPI connection and set it to None for the duration
of the setting.
.. versionadded:: 1.3.16 added support for SQLite AUTOCOMMIT isolation level
when using the pysqlite / sqlite3 SQLite driver.
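For example, a brief sketch selecting the "dirty read" mode (the database
path below is a placeholder)::
    from sqlalchemy import create_engine
    eng = create_engine(
        "sqlite:///some.db", isolation_level="READ UNCOMMITTED"
    )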
The other axis along which SQLite's transactional locking is impacted is
via the nature of the ``BEGIN`` statement used. The three varieties
are "deferred", "immediate", and "exclusive", as described at
`BEGIN TRANSACTION <http://sqlite.org/lang_transaction.html>`_. A straight
``BEGIN`` statement uses the "deferred" mode, where the database file is
not locked until the first read or write operation, and read access remains
open to other transactions until the first write operation. But again,
it is critical to note that the pysqlite driver interferes with this behavior
by *not even emitting BEGIN* until the first write operation.
.. warning::
SQLite's transactional scope is impacted by unresolved
issues in the pysqlite driver, which defers BEGIN statements to a greater
degree than is often feasible. See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
SAVEPOINT Support
----------------------------
SQLite supports SAVEPOINTs, which only function once a transaction is
begun. SQLAlchemy's SAVEPOINT support is available using the
:meth:`_engine.Connection.begin_nested` method at the Core level, and
:meth:`.Session.begin_nested` at the ORM level. However, SAVEPOINTs
won't work at all with pysqlite unless workarounds are taken.
.. warning::
SQLite's SAVEPOINT feature is impacted by unresolved
issues in the pysqlite driver, which defers BEGIN statements to a greater
degree than is often feasible. See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
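As a rough sketch of Core-level SAVEPOINT usage (``engine`` and ``some_table``
are assumed to exist, and the pysqlite workarounds referenced above are
assumed to be in place)::
    with engine.connect() as conn:
        trans = conn.begin()
        savepoint = conn.begin_nested()   # emits SAVEPOINT
        conn.execute(some_table.insert(), {"data": "value"})
        savepoint.rollback()              # emits ROLLBACK TO SAVEPOINT
        trans.commit()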
Transactional DDL
----------------------------
The SQLite database supports transactional :term:`DDL` as well.
In this case, the pysqlite driver is not only failing to start transactions,
it also is ending any existing transaction when DDL is detected, so again,
workarounds are required.
.. warning::
SQLite's transactional DDL is impacted by unresolved issues
in the pysqlite driver, which fails to emit BEGIN and additionally
forces a COMMIT to cancel any transaction when DDL is encountered.
See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
.. _sqlite_foreign_keys:
Foreign Key Support
-------------------
SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables,
however by default these constraints have no effect on the operation of the
table.
Constraint checking on SQLite has three prerequisites:
* At least version 3.6.19 of SQLite must be in use
* The SQLite library must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY
or SQLITE_OMIT_TRIGGER symbols enabled.
* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all
connections before use.
SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically for
new connections through the usage of events::
from sqlalchemy.engine import Engine
from sqlalchemy import event
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
.. warning::
When SQLite foreign keys are enabled, it is **not possible**
to emit CREATE or DROP statements for tables that contain
mutually-dependent foreign key constraints;
to emit the DDL for these tables requires that ALTER TABLE be used to
create or drop these constraints separately, for which SQLite has
no support.
.. seealso::
`SQLite Foreign Key Support <http://www.sqlite.org/foreignkeys.html>`_
- on the SQLite web site.
:ref:`event_toplevel` - SQLAlchemy event API.
:ref:`use_alter` - more information on SQLAlchemy's facilities for handling
mutually-dependent foreign key constraints.
.. _sqlite_on_conflict_ddl:
ON CONFLICT support for constraints
-----------------------------------
SQLite supports a non-standard clause known as ON CONFLICT which can be applied
to primary key, unique, check, and not null constraints. In DDL, it is
rendered either within the "CONSTRAINT" clause or within the column definition
itself depending on the location of the target constraint. To render this
clause within DDL, the extension parameter ``sqlite_on_conflict`` can be
specified with a string conflict resolution algorithm within the
:class:`.PrimaryKeyConstraint`, :class:`.UniqueConstraint`,
:class:`.CheckConstraint` objects. Within the :class:`_schema.Column` object,
there
are individual parameters ``sqlite_on_conflict_not_null``,
``sqlite_on_conflict_primary_key``, ``sqlite_on_conflict_unique`` which each
correspond to the three types of relevant constraint types that can be
indicated from a :class:`_schema.Column` object.
.. seealso::
`ON CONFLICT <https://www.sqlite.org/lang_conflict.html>`_ - in the SQLite
documentation
.. versionadded:: 1.3
The ``sqlite_on_conflict`` parameters accept a string argument which is just
the resolution name to be chosen, which on SQLite can be one of ROLLBACK,
ABORT, FAIL, IGNORE, and REPLACE. For example, to add a UNIQUE constraint
that specifies the IGNORE algorithm::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer),
UniqueConstraint('id', 'data', sqlite_on_conflict='IGNORE')
)
The above renders CREATE TABLE DDL as::
CREATE TABLE some_table (
id INTEGER NOT NULL,
data INTEGER,
PRIMARY KEY (id),
UNIQUE (id, data) ON CONFLICT IGNORE
)
When using the :paramref:`_schema.Column.unique`
flag to add a UNIQUE constraint
to a single column, the ``sqlite_on_conflict_unique`` parameter can
be added to the :class:`_schema.Column` as well, which will be added to the
UNIQUE constraint in the DDL::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer, unique=True,
sqlite_on_conflict_unique='IGNORE')
)
rendering::
CREATE TABLE some_table (
id INTEGER NOT NULL,
data INTEGER,
PRIMARY KEY (id),
UNIQUE (data) ON CONFLICT IGNORE
)
To apply the FAIL algorithm for a NOT NULL constraint,
``sqlite_on_conflict_not_null`` is used::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer, nullable=False,
sqlite_on_conflict_not_null='FAIL')
)
this renders the column inline ON CONFLICT phrase::
CREATE TABLE some_table (
id INTEGER NOT NULL,
data INTEGER NOT NULL ON CONFLICT FAIL,
PRIMARY KEY (id)
)
Similarly, for an inline primary key, use ``sqlite_on_conflict_primary_key``::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True,
sqlite_on_conflict_primary_key='FAIL')
)
SQLAlchemy renders the PRIMARY KEY constraint separately, so the conflict
resolution algorithm is applied to the constraint itself::
CREATE TABLE some_table (
id INTEGER NOT NULL,
PRIMARY KEY (id) ON CONFLICT FAIL
)
.. _sqlite_type_reflection:
Type Reflection
---------------
SQLite types are unlike those of most other database backends, in that
the string name of the type usually does not correspond to a "type" in a
one-to-one fashion. Instead, SQLite links per-column typing behavior
to one of five so-called "type affinities" based on a string matching
pattern for the type.
SQLAlchemy's reflection process, when inspecting types, uses a simple
lookup table to link the keywords returned to provided SQLAlchemy types.
This lookup table is present within the SQLite dialect as it is for all
other dialects. However, the SQLite dialect has a different "fallback"
routine for when a particular type name is not located in the lookup map;
it instead implements the SQLite "type affinity" scheme located at
http://www.sqlite.org/datatype3.html section 2.1.
The provided typemap will make direct associations from an exact string
name match for the following types:
:class:`_types.BIGINT`, :class:`_types.BLOB`,
:class:`_types.BOOLEAN`, :class:`_types.BOOLEAN`,
:class:`_types.CHAR`, :class:`_types.DATE`,
:class:`_types.DATETIME`, :class:`_types.FLOAT`,
:class:`_types.DECIMAL`, :class:`_types.FLOAT`,
:class:`_types.INTEGER`, :class:`_types.INTEGER`,
:class:`_types.NUMERIC`, :class:`_types.REAL`,
:class:`_types.SMALLINT`, :class:`_types.TEXT`,
:class:`_types.TIME`, :class:`_types.TIMESTAMP`,
:class:`_types.VARCHAR`, :class:`_types.NVARCHAR`,
:class:`_types.NCHAR`
When a type name does not match one of the above types, the "type affinity"
lookup is used instead:
* :class:`_types.INTEGER` is returned if the type name includes the
string ``INT``
* :class:`_types.TEXT` is returned if the type name includes the
string ``CHAR``, ``CLOB`` or ``TEXT``
* :class:`_types.NullType` is returned if the type name includes the
string ``BLOB``
* :class:`_types.REAL` is returned if the type name includes the string
``REAL``, ``FLOA`` or ``DOUB``.
* Otherwise, the :class:`_types.NUMERIC` type is used.
.. versionadded:: 0.9.3 Support for SQLite type affinity rules when reflecting
columns.
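As an illustrative sketch of the affinity fallback (the table and type names
below are made up)::
    from sqlalchemy import create_engine, inspect
    eng = create_engine("sqlite://")
    eng.execute("CREATE TABLE t (x MY_SPECIAL_INT, y SOMEDOUB)")
    insp = inspect(eng)
    cols = insp.get_columns("t")
    # "MY_SPECIAL_INT" contains "INT"  -> reflected as INTEGER
    # "SOMEDOUB" contains "DOUB"       -> reflected as REAL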
.. _sqlite_partial_index:
Partial Indexes
---------------
A partial index, e.g. one which uses a WHERE clause, can be specified
with the DDL system using the argument ``sqlite_where``::
tbl = Table('testtbl', m, Column('data', Integer))
idx = Index('test_idx1', tbl.c.data,
sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10))
The index will be rendered at create time as::
CREATE INDEX test_idx1 ON testtbl (data)
WHERE data > 5 AND data < 10
.. versionadded:: 0.9.9
.. _sqlite_dotted_column_names:
Dotted Column Names
-------------------
Using table or column names that explicitly have periods in them is
**not recommended**. While this is generally a bad idea for relational
databases in general, as the dot is a syntactically significant character,
the SQLite driver up until version **3.10.0** of SQLite has a bug which
requires that SQLAlchemy filter out these dots in result sets.
.. versionchanged:: 1.1
The following SQLite issue has been resolved as of version 3.10.0
of SQLite. SQLAlchemy as of **1.1** automatically disables its internal
workarounds based on detection of this version.
The bug, entirely outside of SQLAlchemy, can be illustrated thusly::
import sqlite3
assert sqlite3.sqlite_version_info < (3, 10, 0), "bug is fixed in this version"
conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
cursor.execute("create table x (a integer, b integer)")
cursor.execute("insert into x (a, b) values (1, 1)")
cursor.execute("insert into x (a, b) values (2, 2)")
cursor.execute("select x.a, x.b from x")
assert [c[0] for c in cursor.description] == ['a', 'b']
cursor.execute('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert [c[0] for c in cursor.description] == ['a', 'b'], \
[c[0] for c in cursor.description]
The second assertion fails::
Traceback (most recent call last):
File "test.py", line 19, in <module>
[c[0] for c in cursor.description]
AssertionError: ['x.a', 'x.b']
Where above, the driver incorrectly reports the names of the columns
including the name of the table, which is entirely inconsistent vs.
when the UNION is not present.
SQLAlchemy relies upon column names being predictable in how they match
to the original statement, so the SQLAlchemy dialect has no choice but
to filter these out::
from sqlalchemy import create_engine
eng = create_engine("sqlite://")
conn = eng.connect()
conn.execute("create table x (a integer, b integer)")
conn.execute("insert into x (a, b) values (1, 1)")
conn.execute("insert into x (a, b) values (2, 2)")
result = conn.execute("select x.a, x.b from x")
assert result.keys() == ["a", "b"]
result = conn.execute('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert result.keys() == ["a", "b"]
Note that above, even though SQLAlchemy filters out the dots, *both
names are still addressable*::
>>> row = result.first()
>>> row["a"]
1
>>> row["x.a"]
1
>>> row["b"]
1
>>> row["x.b"]
1
Therefore, the workaround applied by SQLAlchemy only impacts
:meth:`_engine.ResultProxy.keys` and :meth:`.RowProxy.keys()`
in the public API. In
the very specific case where an application is forced to use column names that
contain dots, and the functionality of :meth:`_engine.ResultProxy.keys` and
:meth:`.RowProxy.keys()` is required to return these dotted names unmodified,
the ``sqlite_raw_colnames`` execution option may be provided, either on a
per-:class:`_engine.Connection` basis::
result = conn.execution_options(sqlite_raw_colnames=True).execute('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert result.keys() == ["x.a", "x.b"]
or on a per-:class:`_engine.Engine` basis::
engine = create_engine("sqlite://", execution_options={"sqlite_raw_colnames": True})
When using the per-:class:`_engine.Engine` execution option, note that
**Core and ORM queries that use UNION may not function properly**.
""" # noqa
import datetime
import numbers
import re
from .json import JSON
from .json import JSONIndexType
from .json import JSONPathType
from ... import exc
from ... import processors
from ... import schema as sa_schema
from ... import sql
from ... import types as sqltypes
from ... import util
from ...engine import default
from ...engine import reflection
from ...sql import ColumnElement
from ...sql import compiler
from ...types import BLOB # noqa
from ...types import BOOLEAN # noqa
from ...types import CHAR # noqa
from ...types import DECIMAL # noqa
from ...types import FLOAT # noqa
from ...types import INTEGER # noqa
from ...types import NUMERIC # noqa
from ...types import REAL # noqa
from ...types import SMALLINT # noqa
from ...types import TEXT # noqa
from ...types import TIMESTAMP # noqa
from ...types import VARCHAR # noqa
class _SQliteJson(JSON):
def result_processor(self, dialect, coltype):
default_processor = super(_SQliteJson, self).result_processor(
dialect, coltype
)
def process(value):
try:
return default_processor(value)
except TypeError:
if isinstance(value, numbers.Number):
return value
else:
raise
return process
class _DateTimeMixin(object):
_reg = None
_storage_format = None
def __init__(self, storage_format=None, regexp=None, **kw):
super(_DateTimeMixin, self).__init__(**kw)
if regexp is not None:
self._reg = re.compile(regexp)
if storage_format is not None:
self._storage_format = storage_format
@property
def format_is_text_affinity(self):
"""return True if the storage format will automatically imply
a TEXT affinity.
If the storage format contains no non-numeric characters,
it will imply a NUMERIC storage format on SQLite; in this case,
the type will generate its DDL as DATE_CHAR, DATETIME_CHAR,
TIME_CHAR.
.. versionadded:: 1.0.0
"""
spec = self._storage_format % {
"year": 0,
"month": 0,
"day": 0,
"hour": 0,
"minute": 0,
"second": 0,
"microsecond": 0,
}
return bool(re.search(r"[^0-9]", spec))
def adapt(self, cls, **kw):
if issubclass(cls, _DateTimeMixin):
if self._storage_format:
kw["storage_format"] = self._storage_format
if self._reg:
kw["regexp"] = self._reg
return super(_DateTimeMixin, self).adapt(cls, **kw)
def literal_processor(self, dialect):
bp = self.bind_processor(dialect)
def process(value):
return "'%s'" % bp(value)
return process
class DATETIME(_DateTimeMixin, sqltypes.DateTime):
r"""Represent a Python datetime object in SQLite using a string.
The default string storage format is::
"%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
e.g.::
2011-03-15 12:05:57.10558
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import DATETIME
dt = DATETIME(storage_format="%(year)04d/%(month)02d/%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d",
regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)"
)
:param storage_format: format string which will be applied to the dict
with keys year, month, day, hour, minute, second, and microsecond.
:param regexp: regular expression which will be applied to incoming result
rows. If the regexp contains named groups, the resulting match dict is
applied to the Python datetime() constructor as keyword arguments.
Otherwise, if positional groups are used, the datetime() constructor
is called with positional arguments via
``*map(int, match_obj.groups(0))``.
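The ``truncate_microseconds`` parameter may also be passed to omit the
microseconds portion from the default storage format; it cannot be combined
with ``storage_format`` or ``regexp`` (a brief sketch)::
    from sqlalchemy.dialects.sqlite import DATETIME
    dt = DATETIME(truncate_microseconds=True)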
""" # noqa
_storage_format = (
"%(year)04d-%(month)02d-%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
)
def __init__(self, *args, **kwargs):
truncate_microseconds = kwargs.pop("truncate_microseconds", False)
super(DATETIME, self).__init__(*args, **kwargs)
if truncate_microseconds:
assert "storage_format" not in kwargs, (
"You can specify only "
"one of truncate_microseconds or storage_format."
)
assert "regexp" not in kwargs, (
"You can specify only one of "
"truncate_microseconds or regexp."
)
self._storage_format = (
"%(year)04d-%(month)02d-%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d"
)
def bind_processor(self, dialect):
datetime_datetime = datetime.datetime
datetime_date = datetime.date
format_ = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_datetime):
return format_ % {
"year": value.year,
"month": value.month,
"day": value.day,
"hour": value.hour,
"minute": value.minute,
"second": value.second,
"microsecond": value.microsecond,
}
elif isinstance(value, datetime_date):
return format_ % {
"year": value.year,
"month": value.month,
"day": value.day,
"hour": 0,
"minute": 0,
"second": 0,
"microsecond": 0,
}
else:
raise TypeError(
"SQLite DateTime type only accepts Python "
"datetime and date objects as input."
)
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.datetime
)
else:
return processors.str_to_datetime
class DATE(_DateTimeMixin, sqltypes.Date):
r"""Represent a Python date object in SQLite using a string.
The default string storage format is::
"%(year)04d-%(month)02d-%(day)02d"
e.g.::
2011-03-15
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import DATE
d = DATE(
storage_format="%(month)02d/%(day)02d/%(year)04d",
regexp=re.compile("(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)")
)
:param storage_format: format string which will be applied to the
dict with keys year, month, and day.
:param regexp: regular expression which will be applied to
incoming result rows. If the regexp contains named groups, the
resulting match dict is applied to the Python date() constructor
as keyword arguments. Otherwise, if positional groups are used, the
date() constructor is called with positional arguments via
``*map(int, match_obj.groups(0))``.
"""
_storage_format = "%(year)04d-%(month)02d-%(day)02d"
def bind_processor(self, dialect):
datetime_date = datetime.date
format_ = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_date):
return format_ % {
"year": value.year,
"month": value.month,
"day": value.day,
}
else:
raise TypeError(
"SQLite Date type only accepts Python "
"date objects as input."
)
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.date
)
else:
return processors.str_to_date
class TIME(_DateTimeMixin, sqltypes.Time):
r"""Represent a Python time object in SQLite using a string.
The default string storage format is::
"%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
e.g.::
12:05:57.10558
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import TIME
t = TIME(storage_format="%(hour)02d-%(minute)02d-"
"%(second)02d-%(microsecond)06d",
regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?")
)
:param storage_format: format string which will be applied to the dict
with keys hour, minute, second, and microsecond.
:param regexp: regular expression which will be applied to incoming result
rows. If the regexp contains named groups, the resulting match dict is
applied to the Python time() constructor as keyword arguments. Otherwise,
if positional groups are used, the time() constructor is called with
positional arguments via ``*map(int, match_obj.groups(0))``.
"""
_storage_format = "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
def __init__(self, *args, **kwargs):
truncate_microseconds = kwargs.pop("truncate_microseconds", False)
super(TIME, self).__init__(*args, **kwargs)
if truncate_microseconds:
assert "storage_format" not in kwargs, (
"You can specify only "
"one of truncate_microseconds or storage_format."
)
assert "regexp" not in kwargs, (
"You can specify only one of "
"truncate_microseconds or regexp."
)
self._storage_format = "%(hour)02d:%(minute)02d:%(second)02d"
def bind_processor(self, dialect):
datetime_time = datetime.time
format_ = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_time):
return format_ % {
"hour": value.hour,
"minute": value.minute,
"second": value.second,
"microsecond": value.microsecond,
}
else:
raise TypeError(
"SQLite Time type only accepts Python "
"time objects as input."
)
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.time
)
else:
return processors.str_to_time
colspecs = {
sqltypes.Date: DATE,
sqltypes.DateTime: DATETIME,
sqltypes.JSON: _SQliteJson,
sqltypes.JSON.JSONIndexType: JSONIndexType,
sqltypes.JSON.JSONPathType: JSONPathType,
sqltypes.Time: TIME,
}
ischema_names = {
"BIGINT": sqltypes.BIGINT,
"BLOB": sqltypes.BLOB,
"BOOL": sqltypes.BOOLEAN,
"BOOLEAN": sqltypes.BOOLEAN,
"CHAR": sqltypes.CHAR,
"DATE": sqltypes.DATE,
"DATE_CHAR": sqltypes.DATE,
"DATETIME": sqltypes.DATETIME,
"DATETIME_CHAR": sqltypes.DATETIME,
"DOUBLE": sqltypes.FLOAT,
"DECIMAL": sqltypes.DECIMAL,
"FLOAT": sqltypes.FLOAT,
"INT": sqltypes.INTEGER,
"INTEGER": sqltypes.INTEGER,
"JSON": JSON,
"NUMERIC": sqltypes.NUMERIC,
"REAL": sqltypes.REAL,
"SMALLINT": sqltypes.SMALLINT,
"TEXT": sqltypes.TEXT,
"TIME": sqltypes.TIME,
"TIME_CHAR": sqltypes.TIME,
"TIMESTAMP": sqltypes.TIMESTAMP,
"VARCHAR": sqltypes.VARCHAR,
"NVARCHAR": sqltypes.NVARCHAR,
"NCHAR": sqltypes.NCHAR,
}
class SQLiteCompiler(compiler.SQLCompiler):
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
"month": "%m",
"day": "%d",
"year": "%Y",
"second": "%S",
"hour": "%H",
"doy": "%j",
"minute": "%M",
"epoch": "%s",
"dow": "%w",
"week": "%W",
},
)
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_localtimestamp_func(self, func, **kw):
return 'DATETIME(CURRENT_TIMESTAMP, "localtime")'
def visit_true(self, expr, **kw):
return "1"
def visit_false(self, expr, **kw):
return "0"
def visit_char_length_func(self, fn, **kw):
return "length%s" % self.function_argspec(fn)
def visit_cast(self, cast, **kwargs):
if self.dialect.supports_cast:
return super(SQLiteCompiler, self).visit_cast(cast, **kwargs)
else:
return self.process(cast.clause, **kwargs)
def visit_extract(self, extract, **kw):
try:
return "CAST(STRFTIME('%s', %s) AS INTEGER)" % (
self.extract_map[extract.field],
self.process(extract.expr, **kw),
)
except KeyError as err:
util.raise_(
exc.CompileError(
"%s is not a valid extract argument." % extract.field
),
replace_context=err,
)
def limit_clause(self, select, **kw):
text = ""
if select._limit_clause is not None:
text += "\n LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
text += "\n LIMIT " + self.process(sql.literal(-1))
text += " OFFSET " + self.process(select._offset_clause, **kw)
else:
text += " OFFSET " + self.process(sql.literal(0), **kw)
return text
def for_update_clause(self, select, **kw):
# sqlite has no "FOR UPDATE" AFAICT
return ""
def visit_is_distinct_from_binary(self, binary, operator, **kw):
return "%s IS NOT %s" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_isnot_distinct_from_binary(self, binary, operator, **kw):
return "%s IS %s" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_json_getitem_op_binary(self, binary, operator, **kw):
if binary.type._type_affinity is sqltypes.JSON:
expr = "JSON_QUOTE(JSON_EXTRACT(%s, %s))"
else:
expr = "JSON_EXTRACT(%s, %s)"
return expr % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
if binary.type._type_affinity is sqltypes.JSON:
expr = "JSON_QUOTE(JSON_EXTRACT(%s, %s))"
else:
expr = "JSON_EXTRACT(%s, %s)"
return expr % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_empty_set_expr(self, element_types):
return "SELECT %s FROM (SELECT %s) WHERE 1!=1" % (
", ".join("1" for type_ in element_types or [INTEGER()]),
", ".join("1" for type_ in element_types or [INTEGER()]),
)
class SQLiteDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
coltype = self.dialect.type_compiler.process(
column.type, type_expression=column
)
colspec = self.preparer.format_column(column) + " " + coltype
default = self.get_column_default_string(column)
if default is not None:
if isinstance(column.server_default.arg, ColumnElement):
default = "(" + default + ")"
colspec += " DEFAULT " + default
if not column.nullable:
colspec += " NOT NULL"
on_conflict_clause = column.dialect_options["sqlite"][
"on_conflict_not_null"
]
if on_conflict_clause is not None:
colspec += " ON CONFLICT " + on_conflict_clause
if column.primary_key:
if (
column.autoincrement is True
and len(column.table.primary_key.columns) != 1
):
raise exc.CompileError(
"SQLite does not support autoincrement for "
"composite primary keys"
)
if (
column.table.dialect_options["sqlite"]["autoincrement"]
and len(column.table.primary_key.columns) == 1
and issubclass(column.type._type_affinity, sqltypes.Integer)
and not column.foreign_keys
):
colspec += " PRIMARY KEY"
on_conflict_clause = column.dialect_options["sqlite"][
"on_conflict_primary_key"
]
if on_conflict_clause is not None:
colspec += " ON CONFLICT " + on_conflict_clause
colspec += " AUTOINCREMENT"
if column.computed is not None:
colspec += " " + self.process(column.computed)
return colspec
def visit_primary_key_constraint(self, constraint):
# for columns with sqlite_autoincrement=True,
# the PRIMARY KEY constraint can only be inline
# with the column itself.
if len(constraint.columns) == 1:
c = list(constraint)[0]
if (
c.primary_key
and c.table.dialect_options["sqlite"]["autoincrement"]
and issubclass(c.type._type_affinity, sqltypes.Integer)
and not c.foreign_keys
):
return None
text = super(SQLiteDDLCompiler, self).visit_primary_key_constraint(
constraint
)
on_conflict_clause = constraint.dialect_options["sqlite"][
"on_conflict"
]
if on_conflict_clause is None and len(constraint.columns) == 1:
on_conflict_clause = list(constraint)[0].dialect_options["sqlite"][
"on_conflict_primary_key"
]
if on_conflict_clause is not None:
text += " ON CONFLICT " + on_conflict_clause
return text
def visit_unique_constraint(self, constraint):
text = super(SQLiteDDLCompiler, self).visit_unique_constraint(
constraint
)
on_conflict_clause = constraint.dialect_options["sqlite"][
"on_conflict"
]
if on_conflict_clause is None and len(constraint.columns) == 1:
on_conflict_clause = list(constraint)[0].dialect_options["sqlite"][
"on_conflict_unique"
]
if on_conflict_clause is not None:
text += " ON CONFLICT " + on_conflict_clause
return text
def visit_check_constraint(self, constraint):
text = super(SQLiteDDLCompiler, self).visit_check_constraint(
constraint
)
on_conflict_clause = constraint.dialect_options["sqlite"][
"on_conflict"
]
if on_conflict_clause is not None:
text += " ON CONFLICT " + on_conflict_clause
return text
def visit_column_check_constraint(self, constraint):
text = super(SQLiteDDLCompiler, self).visit_column_check_constraint(
constraint
)
if constraint.dialect_options["sqlite"]["on_conflict"] is not None:
raise exc.CompileError(
"SQLite does not support on conflict clause for "
"column check constraint"
)
return text
def visit_foreign_key_constraint(self, constraint):
local_table = constraint.elements[0].parent.table
remote_table = constraint.elements[0].column.table
if local_table.schema != remote_table.schema:
return None
else:
return super(SQLiteDDLCompiler, self).visit_foreign_key_constraint(
constraint
)
def define_constraint_remote_table(self, constraint, table, preparer):
"""Format the remote table clause of a CREATE CONSTRAINT clause."""
return preparer.format_table(table, use_schema=False)
def visit_create_index(
self, create, include_schema=False, include_table_schema=True
):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
text += "INDEX %s ON %s (%s)" % (
self._prepared_index_name(index, include_schema=True),
preparer.format_table(index.table, use_schema=False),
", ".join(
self.sql_compiler.process(
expr, include_table=False, literal_binds=True
)
for expr in index.expressions
),
)
whereclause = index.dialect_options["sqlite"]["where"]
if whereclause is not None:
where_compiled = self.sql_compiler.process(
whereclause, include_table=False, literal_binds=True
)
text += " WHERE " + where_compiled
return text
class SQLiteTypeCompiler(compiler.GenericTypeCompiler):
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_)
def visit_DATETIME(self, type_, **kw):
if (
not isinstance(type_, _DateTimeMixin)
or type_.format_is_text_affinity
):
return super(SQLiteTypeCompiler, self).visit_DATETIME(type_)
else:
return "DATETIME_CHAR"
def visit_DATE(self, type_, **kw):
if (
not isinstance(type_, _DateTimeMixin)
or type_.format_is_text_affinity
):
return super(SQLiteTypeCompiler, self).visit_DATE(type_)
else:
return "DATE_CHAR"
def visit_TIME(self, type_, **kw):
if (
not isinstance(type_, _DateTimeMixin)
or type_.format_is_text_affinity
):
return super(SQLiteTypeCompiler, self).visit_TIME(type_)
else:
return "TIME_CHAR"
def visit_JSON(self, type_, **kw):
# note this name provides NUMERIC affinity, not TEXT.
# should not be an issue unless the JSON value consists of a single
# numeric value. JSONTEXT can be used if this case is required.
return "JSON"
class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = set(
[
"add",
"after",
"all",
"alter",
"analyze",
"and",
"as",
"asc",
"attach",
"autoincrement",
"before",
"begin",
"between",
"by",
"cascade",
"case",
"cast",
"check",
"collate",
"column",
"commit",
"conflict",
"constraint",
"create",
"cross",
"current_date",
"current_time",
"current_timestamp",
"database",
"default",
"deferrable",
"deferred",
"delete",
"desc",
"detach",
"distinct",
"drop",
"each",
"else",
"end",
"escape",
"except",
"exclusive",
"exists",
"explain",
"false",
"fail",
"for",
"foreign",
"from",
"full",
"glob",
"group",
"having",
"if",
"ignore",
"immediate",
"in",
"index",
"indexed",
"initially",
"inner",
"insert",
"instead",
"intersect",
"into",
"is",
"isnull",
"join",
"key",
"left",
"like",
"limit",
"match",
"natural",
"not",
"notnull",
"null",
"of",
"offset",
"on",
"or",
"order",
"outer",
"plan",
"pragma",
"primary",
"query",
"raise",
"references",
"reindex",
"rename",
"replace",
"restrict",
"right",
"rollback",
"row",
"select",
"set",
"table",
"temp",
"temporary",
"then",
"to",
"transaction",
"trigger",
"true",
"union",
"unique",
"update",
"using",
"vacuum",
"values",
"view",
"virtual",
"when",
"where",
]
)
class SQLiteExecutionContext(default.DefaultExecutionContext):
@util.memoized_property
def _preserve_raw_colnames(self):
return (
not self.dialect._broken_dotted_colnames
or self.execution_options.get("sqlite_raw_colnames", False)
)
def _translate_colname(self, colname):
# TODO: detect SQLite version 3.10.0 or greater;
# see [ticket:3633]
# adjust for dotted column names. SQLite
# in the case of UNION may store col names as
# "tablename.colname", or if using an attached database,
# "database.tablename.colname", in cursor.description
if not self._preserve_raw_colnames and "." in colname:
return colname.split(".")[-1], colname
else:
return colname, None
class SQLiteDialect(default.DefaultDialect):
name = "sqlite"
supports_alter = False
supports_unicode_statements = True
supports_unicode_binds = True
supports_default_values = True
supports_empty_insert = False
supports_cast = True
supports_multivalues_insert = True
tuple_in_values = True
default_paramstyle = "qmark"
execution_ctx_cls = SQLiteExecutionContext
statement_compiler = SQLiteCompiler
ddl_compiler = SQLiteDDLCompiler
type_compiler = SQLiteTypeCompiler
preparer = SQLiteIdentifierPreparer
ischema_names = ischema_names
colspecs = colspecs
isolation_level = None
construct_arguments = [
(sa_schema.Table, {"autoincrement": False}),
(sa_schema.Index, {"where": None}),
(
sa_schema.Column,
{
"on_conflict_primary_key": None,
"on_conflict_not_null": None,
"on_conflict_unique": None,
},
),
(sa_schema.Constraint, {"on_conflict": None}),
]
_broken_fk_pragma_quotes = False
_broken_dotted_colnames = False
@util.deprecated_params(
_json_serializer=(
"1.3.7",
"The _json_serializer argument to the SQLite dialect has "
"been renamed to the correct name of json_serializer. The old "
"argument name will be removed in a future release.",
),
_json_deserializer=(
"1.3.7",
"The _json_deserializer argument to the SQLite dialect has "
"been renamed to the correct name of json_deserializer. The old "
"argument name will be removed in a future release.",
),
)
def __init__(
self,
isolation_level=None,
native_datetime=False,
json_serializer=None,
json_deserializer=None,
_json_serializer=None,
_json_deserializer=None,
**kwargs
):
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
if _json_serializer:
json_serializer = _json_serializer
if _json_deserializer:
json_deserializer = _json_deserializer
self._json_serializer = json_serializer
self._json_deserializer = json_deserializer
# this flag used by pysqlite dialect, and perhaps others in the
# future, to indicate the driver is handling date/timestamp
# conversions (and perhaps datetime/time as well on some hypothetical
# driver ?)
self.native_datetime = native_datetime
if self.dbapi is not None:
self.supports_right_nested_joins = (
self.dbapi.sqlite_version_info >= (3, 7, 16)
)
self._broken_dotted_colnames = self.dbapi.sqlite_version_info < (
3,
10,
0,
)
self.supports_default_values = self.dbapi.sqlite_version_info >= (
3,
3,
8,
)
self.supports_cast = self.dbapi.sqlite_version_info >= (3, 2, 3)
self.supports_multivalues_insert = (
# http://www.sqlite.org/releaselog/3_7_11.html
self.dbapi.sqlite_version_info
>= (3, 7, 11)
)
# see http://www.sqlalchemy.org/trac/ticket/2568
# as well as http://www.sqlite.org/src/info/600482d161
self._broken_fk_pragma_quotes = self.dbapi.sqlite_version_info < (
3,
6,
14,
)
_isolation_lookup = {"READ UNCOMMITTED": 1, "SERIALIZABLE": 0}
def set_isolation_level(self, connection, level):
try:
isolation_level = self._isolation_lookup[level.replace("_", " ")]
except KeyError as err:
util.raise_(
exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s"
% (level, self.name, ", ".join(self._isolation_lookup))
),
replace_context=err,
)
cursor = connection.cursor()
cursor.execute("PRAGMA read_uncommitted = %d" % isolation_level)
cursor.close()
def get_isolation_level(self, connection):
cursor = connection.cursor()
cursor.execute("PRAGMA read_uncommitted")
res = cursor.fetchone()
if res:
value = res[0]
else:
# http://www.sqlite.org/changes.html#version_3_3_3
# "Optional READ UNCOMMITTED isolation (instead of the
# default isolation level of SERIALIZABLE) and
# table level locking when database connections
# share a common cache."
# pre-SQLite 3.3.0 default to 0
value = 0
cursor.close()
if value == 0:
return "SERIALIZABLE"
elif value == 1:
return "READ UNCOMMITTED"
else:
assert False, "Unknown isolation level %s" % value
def on_connect(self):
if self.isolation_level is not None:
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
return connect
else:
return None
@reflection.cache
def get_schema_names(self, connection, **kw):
s = "PRAGMA database_list"
dl = connection.execute(s)
return [db[1] for db in dl if db[1] != "temp"]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = "%s.sqlite_master" % qschema
else:
master = "sqlite_master"
s = ("SELECT name FROM %s " "WHERE type='table' ORDER BY name") % (
master,
)
rs = connection.execute(s)
return [row[0] for row in rs]
@reflection.cache
def get_temp_table_names(self, connection, **kw):
s = (
"SELECT name FROM sqlite_temp_master "
"WHERE type='table' ORDER BY name "
)
rs = connection.execute(s)
return [row[0] for row in rs]
@reflection.cache
def get_temp_view_names(self, connection, **kw):
s = (
"SELECT name FROM sqlite_temp_master "
"WHERE type='view' ORDER BY name "
)
rs = connection.execute(s)
return [row[0] for row in rs]
def has_table(self, connection, table_name, schema=None):
info = self._get_table_pragma(
connection, "table_info", table_name, schema=schema
)
return bool(info)
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = "%s.sqlite_master" % qschema
else:
master = "sqlite_master"
s = ("SELECT name FROM %s " "WHERE type='view' ORDER BY name") % (
master,
)
rs = connection.execute(s)
return [row[0] for row in rs]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = "%s.sqlite_master" % qschema
s = ("SELECT sql FROM %s WHERE name = '%s'" "AND type='view'") % (
master,
view_name,
)
rs = connection.execute(s)
else:
try:
s = (
"SELECT sql FROM "
" (SELECT * FROM sqlite_master UNION ALL "
" SELECT * FROM sqlite_temp_master) "
"WHERE name = '%s' "
"AND type='view'"
) % view_name
rs = connection.execute(s)
except exc.DBAPIError:
s = (
"SELECT sql FROM sqlite_master WHERE name = '%s' "
"AND type='view'"
) % view_name
rs = connection.execute(s)
result = rs.fetchall()
if result:
return result[0].sql
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
pragma = "table_info"
# computed columns are treated as hidden; they require table_xinfo
if self.server_version_info >= (3, 31):
pragma = "table_xinfo"
info = self._get_table_pragma(
connection, pragma, table_name, schema=schema
)
columns = []
tablesql = None
for row in info:
name = row[1]
type_ = row[2].upper()
nullable = not row[3]
default = row[4]
primary_key = row[5]
hidden = row[6] if pragma == "table_xinfo" else 0
# hidden has value 0 for normal columns, 1 for hidden columns,
# 2 for computed virtual columns and 3 for computed stored columns
# https://www.sqlite.org/src/info/069351b85f9a706f60d3e98fbc8aaf40c374356b967c0464aede30ead3d9d18b
if hidden == 1:
continue
generated = bool(hidden)
persisted = hidden == 3
if tablesql is None and generated:
tablesql = self._get_table_sql(
connection, table_name, schema, **kw
)
columns.append(
self._get_column_info(
name,
type_,
nullable,
default,
primary_key,
generated,
persisted,
tablesql,
)
)
return columns
def _get_column_info(
self,
name,
type_,
nullable,
default,
primary_key,
generated,
persisted,
tablesql,
):
if generated:
# the type of a column "cc INTEGER GENERATED ALWAYS AS (1 + 42)"
# somehow is "INTEGER GENERATED ALWAYS"
type_ = re.sub("generated", "", type_, flags=re.IGNORECASE)
type_ = re.sub("always", "", type_, flags=re.IGNORECASE).strip()
coltype = self._resolve_type_affinity(type_)
if default is not None:
default = util.text_type(default)
colspec = {
"name": name,
"type": coltype,
"nullable": nullable,
"default": default,
"autoincrement": "auto",
"primary_key": primary_key,
}
if generated:
sqltext = ""
if tablesql:
pattern = r"[^,]*\s+AS\s+\(([^,]*)\)\s*(?:virtual|stored)?"
match = re.search(
re.escape(name) + pattern, tablesql, re.IGNORECASE
)
if match:
sqltext = match.group(1)
colspec["computed"] = {"sqltext": sqltext, "persisted": persisted}
return colspec
def _resolve_type_affinity(self, type_):
"""Return a data type from a reflected column, using affinity tules.
SQLite's goal for universal compatibility introduces some complexity
during reflection, as a column's defined type might not actually be a
type that SQLite understands - or indeed, may not be defined *at all*.
Internally, SQLite handles this with a 'data type affinity' for each
column definition, mapping to one of 'TEXT', 'NUMERIC', 'INTEGER',
'REAL', or 'NONE' (raw bits). The algorithm that determines this is
listed in http://www.sqlite.org/datatype3.html section 2.1.
This method allows SQLAlchemy to support that algorithm, while still
providing access to smarter reflection utilities by recognizing
column definitions that SQLite only supports through affinity (like
DATE and DOUBLE).
"""
match = re.match(r"([\w ]+)(\(.*?\))?", type_)
if match:
coltype = match.group(1)
args = match.group(2)
else:
coltype = ""
args = ""
if coltype in self.ischema_names:
coltype = self.ischema_names[coltype]
elif "INT" in coltype:
coltype = sqltypes.INTEGER
elif "CHAR" in coltype or "CLOB" in coltype or "TEXT" in coltype:
coltype = sqltypes.TEXT
elif "BLOB" in coltype or not coltype:
coltype = sqltypes.NullType
elif "REAL" in coltype or "FLOA" in coltype or "DOUB" in coltype:
coltype = sqltypes.REAL
else:
coltype = sqltypes.NUMERIC
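# For illustration (not executed here): a declared type such as "XYZINT"
# is absent from ischema_names but contains "INT", so it maps to INTEGER
# affinity; an entirely unknown name such as "WIDGET" falls through to
# NUMERIC, per the rules above.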
if args is not None:
args = re.findall(r"(\d+)", args)
try:
coltype = coltype(*[int(a) for a in args])
except TypeError:
util.warn(
"Could not instantiate type %s with "
"reflected arguments %s; using no arguments."
% (coltype, args)
)
coltype = coltype()
else:
coltype = coltype()
return coltype
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
constraint_name = None
table_data = self._get_table_sql(connection, table_name, schema=schema)
if table_data:
PK_PATTERN = r"CONSTRAINT (\w+) PRIMARY KEY"
result = re.search(PK_PATTERN, table_data, re.I)
constraint_name = result.group(1) if result else None
cols = self.get_columns(connection, table_name, schema, **kw)
pkeys = []
for col in cols:
if col["primary_key"]:
pkeys.append(col["name"])
return {"constrained_columns": pkeys, "name": constraint_name}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
# sqlite makes this *extremely difficult*.
# First, use the pragma to get the actual FKs.
pragma_fks = self._get_table_pragma(
connection, "foreign_key_list", table_name, schema=schema
)
fks = {}
for row in pragma_fks:
(numerical_id, rtbl, lcol, rcol) = (row[0], row[2], row[3], row[4])
if not rcol:
# no referred column, which means it was not named in the
# original DDL. The referred columns of the foreign key
# constraint are therefore the primary key of the referred
# table.
referred_pk = self.get_pk_constraint(
connection, rtbl, schema=schema, **kw
)
# note that if the table doesn't exist, we still get back a record,
# just it has no columns in it
referred_columns = referred_pk["constrained_columns"]
else:
# note we use this list only if this is the first column
# in the constraint. for subsequent columns we ignore the
# list and append "rcol" if present.
referred_columns = []
if self._broken_fk_pragma_quotes:
rtbl = re.sub(r"^[\"\[`\']|[\"\]`\']$", "", rtbl)
if numerical_id in fks:
fk = fks[numerical_id]
else:
fk = fks[numerical_id] = {
"name": None,
"constrained_columns": [],
"referred_schema": schema,
"referred_table": rtbl,
"referred_columns": referred_columns,
"options": {},
}
fks[numerical_id] = fk
fk["constrained_columns"].append(lcol)
if rcol:
fk["referred_columns"].append(rcol)
def fk_sig(constrained_columns, referred_table, referred_columns):
return (
tuple(constrained_columns)
+ (referred_table,)
+ tuple(referred_columns)
)
# then, parse the actual SQL and attempt to find DDL that matches
# the names as well. SQLite saves the DDL in whatever format
# it was typed in as, so we need to be liberal here.
keys_by_signature = dict(
(
fk_sig(
fk["constrained_columns"],
fk["referred_table"],
fk["referred_columns"],
),
fk,
)
for fk in fks.values()
)
table_data = self._get_table_sql(connection, table_name, schema=schema)
if table_data is None:
# system tables, etc.
return []
def parse_fks():
FK_PATTERN = (
r"(?:CONSTRAINT (\w+) +)?"
r"FOREIGN KEY *\( *(.+?) *\) +"
r'REFERENCES +(?:(?:"(.+?)")|([a-z0-9_]+)) *\((.+?)\) *'
r"((?:ON (?:DELETE|UPDATE) "
r"(?:SET NULL|SET DEFAULT|CASCADE|RESTRICT|NO ACTION) *)*)"
)
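# For illustration (roughly): a line such as
# "FOREIGN KEY(parent_id) REFERENCES parent (id) ON DELETE CASCADE"
# yields groups (None, "parent_id", None, "parent", "id", "ON DELETE CASCADE")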
for match in re.finditer(FK_PATTERN, table_data, re.I):
(
constraint_name,
constrained_columns,
referred_quoted_name,
referred_name,
referred_columns,
onupdatedelete,
) = match.group(1, 2, 3, 4, 5, 6)
constrained_columns = list(
self._find_cols_in_sig(constrained_columns)
)
if not referred_columns:
referred_columns = constrained_columns
else:
referred_columns = list(
self._find_cols_in_sig(referred_columns)
)
referred_name = referred_quoted_name or referred_name
options = {}
for token in re.split(r" *\bON\b *", onupdatedelete.upper()):
if token.startswith("DELETE"):
options["ondelete"] = token[6:].strip()
elif token.startswith("UPDATE"):
options["onupdate"] = token[6:].strip()
yield (
constraint_name,
constrained_columns,
referred_name,
referred_columns,
options,
)
fkeys = []
for (
constraint_name,
constrained_columns,
referred_name,
referred_columns,
options,
) in parse_fks():
sig = fk_sig(constrained_columns, referred_name, referred_columns)
if sig not in keys_by_signature:
util.warn(
"WARNING: SQL-parsed foreign key constraint "
"'%s' could not be located in PRAGMA "
"foreign_keys for table %s" % (sig, table_name)
)
continue
key = keys_by_signature.pop(sig)
key["name"] = constraint_name
key["options"] = options
fkeys.append(key)
# assume the remainders are the unnamed, inline constraints, just
# use them as is as it's extremely difficult to parse inline
# constraints
fkeys.extend(keys_by_signature.values())
return fkeys
def _find_cols_in_sig(self, sig):
for match in re.finditer(r'(?:"(.+?)")|([a-z0-9_]+)', sig, re.I):
yield match.group(1) or match.group(2)
@reflection.cache
def get_unique_constraints(
self, connection, table_name, schema=None, **kw
):
auto_index_by_sig = {}
for idx in self.get_indexes(
connection,
table_name,
schema=schema,
include_auto_indexes=True,
**kw
):
if not idx["name"].startswith("sqlite_autoindex"):
continue
sig = tuple(idx["column_names"])
auto_index_by_sig[sig] = idx
table_data = self._get_table_sql(
connection, table_name, schema=schema, **kw
)
if not table_data:
return []
unique_constraints = []
def parse_uqs():
UNIQUE_PATTERN = r'(?:CONSTRAINT "?(.+?)"? +)?UNIQUE *\((.+?)\)'
INLINE_UNIQUE_PATTERN = (
r'(?:(".+?")|([a-z0-9]+)) ' r"+[a-z0-9_ ]+? +UNIQUE"
)
for match in re.finditer(UNIQUE_PATTERN, table_data, re.I):
name, cols = match.group(1, 2)
yield name, list(self._find_cols_in_sig(cols))
# we need to match inlines as well, as we seek to differentiate
# a UNIQUE constraint from a UNIQUE INDEX, even though these
# are kind of the same thing :)
for match in re.finditer(INLINE_UNIQUE_PATTERN, table_data, re.I):
cols = list(
self._find_cols_in_sig(match.group(1) or match.group(2))
)
yield None, cols
for name, cols in parse_uqs():
sig = tuple(cols)
if sig in auto_index_by_sig:
auto_index_by_sig.pop(sig)
parsed_constraint = {"name": name, "column_names": cols}
unique_constraints.append(parsed_constraint)
# NOTE: auto_index_by_sig might not be empty here,
# the PRIMARY KEY may have an entry.
return unique_constraints
@reflection.cache
def get_check_constraints(self, connection, table_name, schema=None, **kw):
table_data = self._get_table_sql(
connection, table_name, schema=schema, **kw
)
if not table_data:
return []
CHECK_PATTERN = r"(?:CONSTRAINT (\w+) +)?" r"CHECK *\( *(.+) *\),? *"
check_constraints = []
# NOTE: we aren't using re.S here because we actually are
# taking advantage of each CHECK constraint being all on one
# line in the table definition in order to delineate. This
# necessarily makes assumptions as to how the CREATE TABLE
# was emitted.
for match in re.finditer(CHECK_PATTERN, table_data, re.I):
check_constraints.append(
{"sqltext": match.group(2), "name": match.group(1)}
)
return check_constraints
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
pragma_indexes = self._get_table_pragma(
connection, "index_list", table_name, schema=schema
)
indexes = []
include_auto_indexes = kw.pop("include_auto_indexes", False)
for row in pragma_indexes:
# ignore implicit primary key index.
# http://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html
if not include_auto_indexes and row[1].startswith(
"sqlite_autoindex"
):
continue
indexes.append(dict(name=row[1], column_names=[], unique=row[2]))
# loop thru unique indexes to get the column names.
for idx in list(indexes):
pragma_index = self._get_table_pragma(
connection, "index_info", idx["name"]
)
for row in pragma_index:
if row[2] is None:
util.warn(
"Skipped unsupported reflection of "
"expression-based index %s" % idx["name"]
)
indexes.remove(idx)
break
else:
idx["column_names"].append(row[2])
return indexes
@reflection.cache
def _get_table_sql(self, connection, table_name, schema=None, **kw):
if schema:
schema_expr = "%s." % (
self.identifier_preparer.quote_identifier(schema)
)
else:
schema_expr = ""
try:
s = (
"SELECT sql FROM "
" (SELECT * FROM %(schema)ssqlite_master UNION ALL "
" SELECT * FROM %(schema)ssqlite_temp_master) "
"WHERE name = '%(table)s' "
"AND type = 'table'"
% {"schema": schema_expr, "table": table_name}
)
rs = connection.execute(s)
except exc.DBAPIError:
s = (
"SELECT sql FROM %(schema)ssqlite_master "
"WHERE name = '%(table)s' "
"AND type = 'table'"
% {"schema": schema_expr, "table": table_name}
)
rs = connection.execute(s)
return rs.scalar()
def _get_table_pragma(self, connection, pragma, table_name, schema=None):
quote = self.identifier_preparer.quote_identifier
if schema is not None:
statements = ["PRAGMA %s." % quote(schema)]
else:
# because PRAGMA looks in all attached databases if no schema
# given, need to specify "main" schema, however since we want
# 'temp' tables in the same namespace as 'main', need to run
# the PRAGMA twice
statements = ["PRAGMA main.", "PRAGMA temp."]
qtable = quote(table_name)
for statement in statements:
statement = "%s%s(%s)" % (statement, pragma, qtable)
cursor = connection.execute(statement)
if not cursor._soft_closed:
# work around SQLite issue whereby cursor.description
# is blank when PRAGMA returns no rows:
# http://www.sqlite.org/cvstrac/tktview?tn=1884
result = cursor.fetchall()
else:
result = []
if result:
return result
else:
return []
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/sqlite/pysqlite.py
|
# sqlite/pysqlite.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: sqlite+pysqlite
:name: pysqlite
:dbapi: sqlite3
:connectstring: sqlite+pysqlite:///file_path
:url: http://docs.python.org/library/sqlite3.html
Note that ``pysqlite`` is the same driver as the ``sqlite3``
module included with the Python distribution.
Driver
------
The ``sqlite3`` Python DBAPI is standard on all modern Python versions;
for cPython and Pypy, no additional installation is necessary.
Connect Strings
---------------
The file specification for the SQLite database is taken as the "database"
portion of the URL. Note that the format of a SQLAlchemy url is::
driver://user:pass@host/database
This means that the actual filename to be used starts with the characters to
the **right** of the third slash. So connecting to a relative filepath
looks like::
# relative path
e = create_engine('sqlite:///path/to/database.db')
An absolute path, which is denoted by starting with a slash, means you
need **four** slashes::
# absolute path
e = create_engine('sqlite:////path/to/database.db')
To use a Windows path, regular drive specifications and backslashes can be
used. Double backslashes are probably needed::
# absolute path on Windows
e = create_engine('sqlite:///C:\\path\\to\\database.db')
The sqlite ``:memory:`` identifier is the default if no filepath is
present. Specify ``sqlite://`` and nothing else::
# in-memory database
e = create_engine('sqlite://')
.. _pysqlite_uri_connections:
URI Connections
^^^^^^^^^^^^^^^
Modern versions of SQLite support an alternative system of connecting using a
`driver level URI <https://www.sqlite.org/uri.html>`_, which has the advantage
that additional driver-level arguments can be passed including options such as
"read only". The Python sqlite3 driver supports this mode under modern Python
3 versions. The SQLAlchemy pysqlite driver supports this mode of use by
specifing "uri=true" in the URL query string. The SQLite-level "URI" is kept
as the "database" portion of the SQLAlchemy url (that is, following a slash)::
e = create_engine("sqlite:///file:path/to/database?mode=ro&uri=true")
.. note:: The "uri=true" parameter must appear in the **query string**
of the URL. It will not currently work as expected if it is only
present in the :paramref:`_sa.create_engine.connect_args`
parameter dictionary.
The logic reconciles the simultaneous presence of SQLAlchemy's query string and
SQLite's query string by separating out the parameters that belong to the
Python sqlite3 driver vs. those that belong to the SQLite URI. This is
achieved through the use of a fixed list of parameters known to be accepted by
the Python side of the driver. For example, to include a URL that indicates
the Python sqlite3 "timeout" and "check_same_thread" parameters, along with the
SQLite "mode" and "nolock" parameters, they can all be passed together on the
query string::
e = create_engine(
"sqlite:///file:path/to/database?"
"check_same_thread=true&timeout=10&mode=ro&nolock=1&uri=true"
)
Above, the pysqlite / sqlite3 DBAPI would be passed arguments as::
sqlite3.connect(
"file:path/to/database?mode=ro&nolock=1",
check_same_thread=True, timeout=10, uri=True
)
Regarding future parameters added to either the Python or native drivers: new
parameter names added to the SQLite URI scheme should be automatically
accommodated by this scheme. New parameter names added to the Python driver
side can be accommodated by specifying them in the
:paramref:`_sa.create_engine.connect_args` dictionary,
until dialect support is
added by SQLAlchemy. For the less likely case that the native SQLite driver
adds a new parameter name that overlaps with one of the existing, known Python
driver parameters (such as "timeout" perhaps), SQLAlchemy's dialect would
require adjustment for the URL scheme to continue to support this.
As is always the case for all SQLAlchemy dialects, the entire "URL" process
can be bypassed in :func:`_sa.create_engine` through the use of the
:paramref:`_sa.create_engine.creator`
parameter which allows for a custom callable
that creates a Python sqlite3 driver level connection directly.
.. versionadded:: 1.3.9
.. seealso::
`Uniform Resource Identifiers <https://www.sqlite.org/uri.html>`_ - in
the SQLite documentation
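As a rough sketch of the ``creator`` approach mentioned above (the file path
and query parameters here are purely illustrative)::
    import sqlite3
    def creator():
        return sqlite3.connect("file:path/to/database?mode=ro", uri=True)
    e = create_engine("sqlite://", creator=creator)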
Compatibility with sqlite3 "native" date and datetime types
-----------------------------------------------------------
The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and
sqlite3.PARSE_COLNAMES options, which have the effect that any column
or expression explicitly cast as "date" or "timestamp" will be converted
to a Python date or datetime object. The date and datetime types provided
with the pysqlite dialect are not currently compatible with these options,
since they render the ISO date/datetime including microseconds, which
pysqlite's driver does not. Additionally, SQLAlchemy does not at
this time automatically render the "cast" syntax required for the
freestanding functions "current_timestamp" and "current_date" to return
datetime/date types natively. Unfortunately, pysqlite
does not provide the standard DBAPI types in ``cursor.description``,
leaving SQLAlchemy with no way to detect these types on the fly
without expensive per-row type checks.
Keeping in mind that pysqlite's parsing option is not recommended,
nor should be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
can be forced if one configures "native_datetime=True" on create_engine()::
engine = create_engine('sqlite://',
connect_args={'detect_types':
sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
native_datetime=True
)
With this flag enabled, the DATE and TIMESTAMP types (but note - not the
DATETIME or TIME types...confused yet ?) will not perform any bind parameter
or result processing. Execution of "func.current_date()" will return a string.
"func.current_timestamp()" is registered as returning a DATETIME type in
SQLAlchemy, so this function still receives SQLAlchemy-level result
processing.
.. _pysqlite_threading_pooling:
Threading/Pooling Behavior
---------------------------
Pysqlite's default behavior is to prohibit the usage of a single connection
in more than one thread. This is originally intended to work with older
versions of SQLite that did not support multithreaded operation under
various circumstances. In particular, older SQLite versions
did not allow a ``:memory:`` database to be used in multiple threads
under any circumstances.
Pysqlite does include a now-undocumented flag known as
``check_same_thread`` which will disable this check, however note that
pysqlite connections are still not safe to use concurrently in multiple
threads. In particular, any statement execution calls would need to be
externally mutexed, as Pysqlite does not provide for thread-safe propagation
of error messages among other things. So while even ``:memory:`` databases
can be shared among threads in modern SQLite, Pysqlite doesn't provide enough
thread-safety to make this usage worth it.
SQLAlchemy sets up pooling to work with Pysqlite's default behavior:
* When a ``:memory:`` SQLite database is specified, the dialect by default
will use :class:`.SingletonThreadPool`. This pool maintains a single
connection per thread, so that all access to the engine within the current
thread uses the same ``:memory:`` database - other threads would access a
different ``:memory:`` database.
* When a file-based database is specified, the dialect will use
:class:`.NullPool` as the source of connections. This pool closes and
discards connections which are returned to the pool immediately. SQLite
file-based connections have extremely low overhead, so pooling is not
necessary. The scheme also prevents a connection from being used again in
a different thread and works best with SQLite's coarse-grained file locking.
Using a Memory Database in Multiple Threads
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To use a ``:memory:`` database in a multithreaded scenario, the same
connection object must be shared among threads, since the database exists
only within the scope of that connection. The
:class:`.StaticPool` implementation will maintain a single connection
globally, and the ``check_same_thread`` flag can be passed to Pysqlite
as ``False``::
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite://',
connect_args={'check_same_thread':False},
poolclass=StaticPool)
Note that using a ``:memory:`` database in multiple threads requires a recent
version of SQLite.
Using Temporary Tables with SQLite
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Due to the way SQLite deals with temporary tables, if you wish to use a
temporary table in a file-based SQLite database across multiple checkouts
from the connection pool, such as when using an ORM :class:`.Session` where
the temporary table should continue to remain after :meth:`.Session.commit` or
:meth:`.Session.rollback` is called, a pool which maintains a single
connection must be used. Use :class:`.SingletonThreadPool` if the scope is
only needed within the current thread, or :class:`.StaticPool` if scope is
needed within multiple threads for this case::
# maintain the same connection per thread
from sqlalchemy.pool import SingletonThreadPool
engine = create_engine('sqlite:///mydb.db',
poolclass=SingletonThreadPool)
# maintain the same connection across all threads
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite:///mydb.db',
poolclass=StaticPool)
Note that :class:`.SingletonThreadPool` should be configured for the number
of threads that are to be used; beyond that number, connections will be
closed out in a non deterministic way.
Unicode
-------
The pysqlite driver only returns Python ``unicode`` objects in result sets,
never plain strings, and accommodates ``unicode`` objects within bound
parameter values in all cases. Regardless of the SQLAlchemy string type in
use, string-based result values will be Python ``unicode`` in Python 2.
The :class:`.Unicode` type should still be used to indicate those columns that
require unicode, however, so that non-``unicode`` values passed inadvertently
will emit a warning. Pysqlite will emit an error if a non-``unicode`` string
is passed containing non-ASCII characters.
Dealing with Mixed String / Binary Columns in Python 3
------------------------------------------------------
The SQLite database is weakly typed, and as such it is possible when using
binary values, which in Python 3 are represented as ``b'some string'``, that a
particular SQLite database can have data values within different rows where
some of them will be returned as a ``b''`` value by the Pysqlite driver, and
others will be returned as Python strings, e.g. ``''`` values. This situation
is not known to occur if the SQLAlchemy :class:`.LargeBinary` datatype is used
consistently, however if a particular SQLite database has data that was
inserted using the Pysqlite driver directly, or when using the SQLAlchemy
:class:`.String` type which was later changed to :class:`.LargeBinary`, the
table will not be consistently readable because SQLAlchemy's
:class:`.LargeBinary` datatype does not handle strings so it has no way of
"encoding" a value that is in string format.
To deal with a SQLite table that has mixed string / binary data in the
same column, use a custom type that will check each row individually::
# note this is Python 3 only
from sqlalchemy import String
from sqlalchemy import TypeDecorator
class MixedBinary(TypeDecorator):
impl = String
def process_result_value(self, value, dialect):
if isinstance(value, str):
value = bytes(value, 'utf-8')
elif value is not None:
value = bytes(value)
return value
Then use the above ``MixedBinary`` datatype in the place where
:class:`.LargeBinary` would normally be used.
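A hypothetical table definition using the recipe might look like the
following sketch (table and column names are illustrative only)::
    from sqlalchemy import Column, Integer, MetaData, Table
    metadata = MetaData()
    data = Table(
        "data", metadata,
        Column("id", Integer, primary_key=True),
        Column("payload", MixedBinary()),
    )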
.. _pysqlite_serializable:
Serializable isolation / Savepoints / Transactional DDL
-------------------------------------------------------
In the section :ref:`sqlite_concurrency`, we refer to the pysqlite
driver's assortment of issues that prevent several features of SQLite
from working correctly. The pysqlite DBAPI driver has several
long-standing bugs which impact the correctness of its transactional
behavior. In its default mode of operation, SQLite features such as
SERIALIZABLE isolation, transactional DDL, and SAVEPOINT support are
non-functional, and in order to use these features, workarounds must
be taken.
The issue is essentially that the driver attempts to second-guess the user's
intent, failing to start transactions and sometimes ending them prematurely, in
an effort to minimize the SQLite database's file locking behavior, even
though SQLite itself uses "shared" locks for read-only activities.
SQLAlchemy chooses to not alter this behavior by default, as it is the
long-expected behavior of the pysqlite driver; if and when the pysqlite
driver attempts to repair these issues, that will be more of a driver
towards changing these defaults in SQLAlchemy.
The good news is that with a few events, we can implement transactional
support fully, by disabling pysqlite's feature entirely and emitting BEGIN
ourselves. This is achieved using two event listeners::
from sqlalchemy import create_engine, event
engine = create_engine("sqlite:///myfile.db")
@event.listens_for(engine, "connect")
def do_connect(dbapi_connection, connection_record):
# disable pysqlite's emitting of the BEGIN statement entirely.
# also stops it from emitting COMMIT before any DDL.
dbapi_connection.isolation_level = None
@event.listens_for(engine, "begin")
def do_begin(conn):
# emit our own BEGIN
conn.execute("BEGIN")
.. warning:: When using the above recipe, it is advised to not use the
:paramref:`.Connection.execution_options.isolation_level` setting on
:class:`_engine.Connection` and :func:`_sa.create_engine`
with the SQLite driver,
as this function necessarily will also alter the ".isolation_level" setting.
Above, we intercept a new pysqlite connection and disable any transactional
integration. Then, at the point at which SQLAlchemy knows that transaction
scope is to begin, we emit ``"BEGIN"`` ourselves.
When we take control of ``"BEGIN"``, we can also control directly SQLite's
locking modes, introduced at
`BEGIN TRANSACTION <http://sqlite.org/lang_transaction.html>`_,
by adding the desired locking mode to our ``"BEGIN"``::
@event.listens_for(engine, "begin")
def do_begin(conn):
conn.execute("BEGIN EXCLUSIVE")
.. seealso::
`BEGIN TRANSACTION <http://sqlite.org/lang_transaction.html>`_ -
on the SQLite site
`sqlite3 SELECT does not BEGIN a transaction <http://bugs.python.org/issue9924>`_ -
on the Python bug tracker
`sqlite3 module breaks transactions and potentially corrupts data <http://bugs.python.org/issue10740>`_ -
on the Python bug tracker
""" # noqa
import os
from .base import DATE
from .base import DATETIME
from .base import SQLiteDialect
from ... import exc
from ... import pool
from ... import types as sqltypes
from ... import util
class _SQLite_pysqliteTimeStamp(DATETIME):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATETIME.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATETIME.result_processor(self, dialect, coltype)
class _SQLite_pysqliteDate(DATE):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATE.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATE.result_processor(self, dialect, coltype)
class SQLiteDialect_pysqlite(SQLiteDialect):
default_paramstyle = "qmark"
colspecs = util.update_copy(
SQLiteDialect.colspecs,
{
sqltypes.Date: _SQLite_pysqliteDate,
sqltypes.TIMESTAMP: _SQLite_pysqliteTimeStamp,
},
)
if not util.py2k:
description_encoding = None
driver = "pysqlite"
@classmethod
def dbapi(cls):
if util.py2k:
try:
from pysqlite2 import dbapi2 as sqlite
except ImportError:
try:
from sqlite3 import dbapi2 as sqlite
except ImportError as e:
raise e
else:
from sqlite3 import dbapi2 as sqlite
return sqlite
@classmethod
def _is_url_file_db(cls, url):
if url.database and url.database != ":memory:":
return True
else:
return False
@classmethod
def get_pool_class(cls, url):
if cls._is_url_file_db(url):
return pool.NullPool
else:
return pool.SingletonThreadPool
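# For illustration: "sqlite:///foo.db" (file-based) selects NullPool above,
# while "sqlite://" or "sqlite:///:memory:" selects SingletonThreadPool.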
def _get_server_version_info(self, connection):
return self.dbapi.sqlite_version_info
def set_isolation_level(self, connection, level):
if hasattr(connection, "connection"):
dbapi_connection = connection.connection
else:
dbapi_connection = connection
if level == "AUTOCOMMIT":
dbapi_connection.isolation_level = None
else:
dbapi_connection.isolation_level = ""
return super(SQLiteDialect_pysqlite, self).set_isolation_level(
connection, level
)
def create_connect_args(self, url):
if url.username or url.password or url.host or url.port:
raise exc.ArgumentError(
"Invalid SQLite URL: %s\n"
"Valid SQLite URL forms are:\n"
" sqlite:///:memory: (or, sqlite://)\n"
" sqlite:///relative/path/to/file.db\n"
" sqlite:////absolute/path/to/file.db" % (url,)
)
# theoretically, this list can be augmented, at least as far as
# parameter names accepted by sqlite3/pysqlite, using
# inspect.getfullargspec(). for the moment this seems like overkill
# as these parameters don't change very often, and as always,
# parameters passed to connect_args will always go to the
# sqlite3/pysqlite driver.
pysqlite_args = [
("uri", bool),
("timeout", float),
("isolation_level", str),
("detect_types", int),
("check_same_thread", bool),
("cached_statements", int),
]
opts = url.query
pysqlite_opts = {}
for key, type_ in pysqlite_args:
util.coerce_kw_type(opts, key, type_, dest=pysqlite_opts)
if pysqlite_opts.get("uri", False):
uri_opts = opts.copy()
# here, we are actually separating the parameters that go to
# sqlite3/pysqlite vs. those that go the SQLite URI. What if
# two names conflict? again, this seems to be not the case right
# now, and in the case that new names are added to
# either side which overlap, again the sqlite3/pysqlite parameters
# can be passed through connect_args instead of in the URL.
# If SQLite native URIs add a parameter like "timeout" that
# we already have listed here for the python driver, then we need
# to adjust for that here.
for key, type_ in pysqlite_args:
uri_opts.pop(key, None)
filename = url.database
if uri_opts:
# sorting of keys is for unit test support
filename += "?" + (
"&".join(
"%s=%s" % (key, uri_opts[key])
for key in sorted(uri_opts)
)
)
else:
filename = url.database or ":memory:"
if filename != ":memory:":
filename = os.path.abspath(filename)
return ([filename], pysqlite_opts)
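# For illustration (roughly): "sqlite:///foo.db?timeout=10" produces
# connect args of ([<absolute path to foo.db>], {"timeout": 10.0})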
def is_disconnect(self, e, connection, cursor):
return isinstance(
e, self.dbapi.ProgrammingError
) and "Cannot operate on a closed database." in str(e)
dialect = SQLiteDialect_pysqlite
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mysql/enumerated.py
|
# mysql/enumerated.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
from .types import _StringType
from ... import exc
from ... import sql
from ... import util
from ...sql import sqltypes
class _EnumeratedValues(_StringType):
def _init_values(self, values, kw):
self.quoting = kw.pop("quoting", "auto")
if self.quoting == "auto" and len(values):
# What quoting character are we using?
q = None
for e in values:
if len(e) == 0:
self.quoting = "unquoted"
break
elif q is None:
q = e[0]
if len(e) == 1 or e[0] != q or e[-1] != q:
self.quoting = "unquoted"
break
else:
self.quoting = "quoted"
if self.quoting == "quoted":
util.warn_deprecated(
"Manually quoting %s value literals is deprecated. Supply "
"unquoted values and use the quoting= option in cases of "
"ambiguity." % self.__class__.__name__
)
values = self._strip_values(values)
self._enumerated_values = values
length = max([len(v) for v in values] + [0])
return values, length
@classmethod
def _strip_values(cls, values):
strip_values = []
for a in values:
if a[0:1] == '"' or a[0:1] == "'":
# strip enclosing quotes and unquote interior
a = a[1:-1].replace(a[0] * 2, a[0])
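# e.g. "'b''c'" becomes "b'c": the enclosing quotes are removed and
# the doubled interior quote is collapsed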
strip_values.append(a)
return strip_values
class ENUM(sqltypes.NativeForEmulated, sqltypes.Enum, _EnumeratedValues):
"""MySQL ENUM type."""
__visit_name__ = "ENUM"
native_enum = True
def __init__(self, *enums, **kw):
"""Construct an ENUM.
E.g.::
Column('myenum', ENUM("foo", "bar", "baz"))
:param enums: The range of valid values for this ENUM. Values will be
quoted when generating the schema according to the quoting flag (see
below). This object may also be a PEP-435-compliant enumerated
type.
.. versionadded:: 1.1 added support for PEP-435-compliant enumerated
types.
:param strict: This flag has no effect.
.. versionchanged:: The MySQL ENUM type as well as the base Enum
type now validates all Python data values.
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence to 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
:param quoting: Defaults to 'auto': automatically determine enum value
quoting. If all enum values are surrounded by the same quoting
character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
'quoted': values in enums are already quoted, they will be used
directly when generating the schema - this usage is deprecated.
'unquoted': values in enums are not quoted, they will be escaped and
surrounded by single quotes when generating the schema.
Previous versions of this type always required manually quoted
values to be supplied; future versions will always quote the string
literals for you. This is a transitional option.
"""
kw.pop("strict", None)
self._enum_init(enums, kw)
_StringType.__init__(self, length=self.length, **kw)
@classmethod
def adapt_emulated_to_native(cls, impl, **kw):
"""Produce a MySQL native :class:`.mysql.ENUM` from plain
:class:`.Enum`.
"""
kw.setdefault("validate_strings", impl.validate_strings)
kw.setdefault("values_callable", impl.values_callable)
return cls(**kw)
def _setup_for_values(self, values, objects, kw):
values, length = self._init_values(values, kw)
return super(ENUM, self)._setup_for_values(values, objects, kw)
def _object_value_for_elem(self, elem):
# mysql sends back a blank string for any value that
# was persisted that was not in the enums; that is, it does no
# validation on the incoming data, it "truncates" it to be
# the blank string. Return it straight.
if elem == "":
return elem
else:
return super(ENUM, self)._object_value_for_elem(elem)
def __repr__(self):
return util.generic_repr(
self, to_inspect=[ENUM, _StringType, sqltypes.Enum]
)
class SET(_EnumeratedValues):
"""MySQL SET type."""
__visit_name__ = "SET"
def __init__(self, *values, **kw):
"""Construct a SET.
E.g.::
Column('myset', SET("foo", "bar", "baz"))
The list of potential values is required in the case that this
set will be used to generate DDL for a table, or if the
:paramref:`.SET.retrieve_as_bitwise` flag is set to True.
:param values: The range of valid values for this SET.
:param convert_unicode: Same flag as that of
:paramref:`.String.convert_unicode`.
:param collation: same as that of :paramref:`.String.collation`
:param charset: same as that of :paramref:`.VARCHAR.charset`.
:param ascii: same as that of :paramref:`.VARCHAR.ascii`.
:param unicode: same as that of :paramref:`.VARCHAR.unicode`.
:param binary: same as that of :paramref:`.VARCHAR.binary`.
:param quoting: Defaults to 'auto': automatically determine set value
quoting. If all values are surrounded by the same quoting
character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
'quoted': values in enums are already quoted, they will be used
directly when generating the schema - this usage is deprecated.
'unquoted': values in enums are not quoted, they will be escaped and
surrounded by single quotes when generating the schema.
Previous versions of this type always required manually quoted
values to be supplied; future versions will always quote the string
literals for you. This is a transitional option.
.. versionadded:: 0.9.0
:param retrieve_as_bitwise: if True, the data for the set type will be
persisted and selected using an integer value, where a set is coerced
into a bitwise mask for persistence. MySQL allows this mode which
has the advantage of being able to store values unambiguously,
such as the blank string ``''``. The datatype will appear
as the expression ``col + 0`` in a SELECT statement, so that the
value is coerced into an integer value in result sets.
This flag is required if one wishes
to persist a set that can store the blank string ``''`` as a value.
.. warning::
When using :paramref:`.mysql.SET.retrieve_as_bitwise`, it is
essential that the list of set values is expressed in the
**exact same order** as exists on the MySQL database.
.. versionadded:: 1.0.0
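For example, a SET column intended to store the blank string ``''`` could
be declared as follows (an illustrative sketch; the column and value names
are arbitrary)::
    Column('myset', SET("", "secure", "hidden", retrieve_as_bitwise=True))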
"""
self.retrieve_as_bitwise = kw.pop("retrieve_as_bitwise", False)
values, length = self._init_values(values, kw)
self.values = tuple(values)
if not self.retrieve_as_bitwise and "" in values:
raise exc.ArgumentError(
"Can't use the blank value '' in a SET without "
"setting retrieve_as_bitwise=True"
)
if self.retrieve_as_bitwise:
self._bitmap = dict(
(value, 2 ** idx) for idx, value in enumerate(self.values)
)
self._bitmap.update(
(2 ** idx, value) for idx, value in enumerate(self.values)
)
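# For illustration: given values ("a", "b", "c"), _bitmap holds both
# directions, i.e. {"a": 1, "b": 2, "c": 4, 1: "a", 2: "b", 4: "c"}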
kw.setdefault("length", length)
super(SET, self).__init__(**kw)
def column_expression(self, colexpr):
if self.retrieve_as_bitwise:
return sql.type_coerce(
sql.type_coerce(colexpr, sqltypes.Integer) + 0, self
)
else:
return colexpr
def result_processor(self, dialect, coltype):
if self.retrieve_as_bitwise:
def process(value):
if value is not None:
value = int(value)
return set(util.map_bits(self._bitmap.__getitem__, value))
else:
return None
else:
super_convert = super(SET, self).result_processor(dialect, coltype)
def process(value):
if isinstance(value, util.string_types):
# MySQLdb returns a string, let's parse
if super_convert:
value = super_convert(value)
return set(re.findall(r"[^,]+", value))
else:
# mysql-connector-python does a naive
# split(",") which throws in an empty string
if value is not None:
value.discard("")
return value
return process
def bind_processor(self, dialect):
super_convert = super(SET, self).bind_processor(dialect)
if self.retrieve_as_bitwise:
def process(value):
if value is None:
return None
elif isinstance(value, util.int_types + util.string_types):
if super_convert:
return super_convert(value)
else:
return value
else:
int_value = 0
for v in value:
int_value |= self._bitmap[v]
return int_value
else:
def process(value):
# accept strings and int (actually bitflag) values directly
if value is not None and not isinstance(
value, util.int_types + util.string_types
):
value = ",".join(value)
if super_convert:
return super_convert(value)
else:
return value
return process
def adapt(self, impltype, **kw):
kw["retrieve_as_bitwise"] = self.retrieve_as_bitwise
return util.constructor_copy(self, impltype, *self.values, **kw)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mysql/oursql.py
|
# mysql/oursql.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+oursql
:name: OurSQL
:dbapi: oursql
:connectstring: mysql+oursql://<user>:<password>@<host>[:<port>]/<dbname>
:url: http://packages.python.org/oursql/
.. note::
The OurSQL MySQL dialect is legacy and is no longer supported upstream,
and is **not tested as part of SQLAlchemy's continuous integration**.
The recommended MySQL dialects are mysqlclient and PyMySQL.
Unicode
-------
Please see :ref:`mysql_unicode` for current recommendations on unicode
handling.
"""
from .base import BIT
from .base import MySQLDialect
from .base import MySQLExecutionContext
from ... import types as sqltypes
from ... import util
class _oursqlBIT(BIT):
def result_processor(self, dialect, coltype):
"""oursql already converts mysql bits, so."""
return None
class MySQLExecutionContext_oursql(MySQLExecutionContext):
@property
def plain_query(self):
return self.execution_options.get("_oursql_plain_query", False)
class MySQLDialect_oursql(MySQLDialect):
driver = "oursql"
if util.py2k:
supports_unicode_binds = True
supports_unicode_statements = True
supports_native_decimal = True
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
execution_ctx_cls = MySQLExecutionContext_oursql
colspecs = util.update_copy(
MySQLDialect.colspecs, {sqltypes.Time: sqltypes.Time, BIT: _oursqlBIT}
)
@classmethod
def dbapi(cls):
return __import__("oursql")
def do_execute(self, cursor, statement, parameters, context=None):
"""Provide an implementation of
*cursor.execute(statement, parameters)*."""
if context and context.plain_query:
cursor.execute(statement, plain_query=True)
else:
cursor.execute(statement, parameters)
def do_begin(self, connection):
connection.cursor().execute("BEGIN", plain_query=True)
def _xa_query(self, connection, query, xid):
if util.py2k:
arg = connection.connection._escape_string(xid)
else:
charset = self._connection_charset
arg = connection.connection._escape_string(
xid.encode(charset)
).decode(charset)
arg = "'%s'" % arg
connection.execution_options(_oursql_plain_query=True).execute(
query % arg
)
# Because mysql is bad, these methods have to be
# reimplemented to use _PlainQuery. Basically, some queries
# refuse to return any data if they're run through
# the parameterized query API, or refuse to be parameterized
# in the first place.
def do_begin_twophase(self, connection, xid):
self._xa_query(connection, "XA BEGIN %s", xid)
def do_prepare_twophase(self, connection, xid):
self._xa_query(connection, "XA END %s", xid)
self._xa_query(connection, "XA PREPARE %s", xid)
def do_rollback_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if not is_prepared:
self._xa_query(connection, "XA END %s", xid)
self._xa_query(connection, "XA ROLLBACK %s", xid)
def do_commit_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if not is_prepared:
self.do_prepare_twophase(connection, xid)
self._xa_query(connection, "XA COMMIT %s", xid)
# Q: why didn't we need all these "plain_query" overrides earlier ?
# am i on a newer/older version of OurSQL ?
def has_table(self, connection, table_name, schema=None):
return MySQLDialect.has_table(
self,
connection.connect().execution_options(_oursql_plain_query=True),
table_name,
schema,
)
def get_table_options(self, connection, table_name, schema=None, **kw):
return MySQLDialect.get_table_options(
self,
connection.connect().execution_options(_oursql_plain_query=True),
table_name,
schema=schema,
**kw
)
def get_columns(self, connection, table_name, schema=None, **kw):
return MySQLDialect.get_columns(
self,
connection.connect().execution_options(_oursql_plain_query=True),
table_name,
schema=schema,
**kw
)
def get_view_names(self, connection, schema=None, **kw):
return MySQLDialect.get_view_names(
self,
connection.connect().execution_options(_oursql_plain_query=True),
schema=schema,
**kw
)
def get_table_names(self, connection, schema=None, **kw):
return MySQLDialect.get_table_names(
self,
connection.connect().execution_options(_oursql_plain_query=True),
schema,
)
def get_schema_names(self, connection, **kw):
return MySQLDialect.get_schema_names(
self,
connection.connect().execution_options(_oursql_plain_query=True),
**kw
)
def initialize(self, connection):
return MySQLDialect.initialize(
self, connection.execution_options(_oursql_plain_query=True)
)
def _show_create_table(
self, connection, table, charset=None, full_name=None
):
return MySQLDialect._show_create_table(
self,
connection._contextual_connect(
close_with_result=True
).execution_options(_oursql_plain_query=True),
table,
charset,
full_name,
)
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.ProgrammingError):
return (
e.errno is None
and "cursor" not in e.args[1]
and e.args[1].endswith("closed")
)
else:
return e.errno in (2006, 2013, 2014, 2045, 2055)
def create_connect_args(self, url):
opts = url.translate_connect_args(
database="db", username="user", password="passwd"
)
opts.update(url.query)
util.coerce_kw_type(opts, "port", int)
util.coerce_kw_type(opts, "compress", bool)
util.coerce_kw_type(opts, "autoping", bool)
util.coerce_kw_type(opts, "raise_on_warnings", bool)
util.coerce_kw_type(opts, "default_charset", bool)
if opts.pop("default_charset", False):
opts["charset"] = None
else:
util.coerce_kw_type(opts, "charset", str)
opts["use_unicode"] = opts.get("use_unicode", True)
util.coerce_kw_type(opts, "use_unicode", bool)
# FOUND_ROWS must be set in CLIENT_FLAGS to enable
# supports_sane_rowcount.
opts.setdefault("found_rows", True)
ssl = {}
for key in [
"ssl_ca",
"ssl_key",
"ssl_cert",
"ssl_capath",
"ssl_cipher",
]:
if key in opts:
ssl[key[4:]] = opts[key]
util.coerce_kw_type(ssl, key[4:], str)
del opts[key]
if ssl:
opts["ssl"] = ssl
return [[], opts]
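# For illustration: a URL query such as ?ssl_ca=/path/ca.pem is translated
# above into opts["ssl"] = {"ca": "/path/ca.pem"} before being returned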
def _extract_error_code(self, exception):
return exception.errno
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
return connection.connection.charset
def _compat_fetchall(self, rp, charset=None):
"""oursql isn't super-broken like MySQLdb, yaaay."""
return rp.fetchall()
def _compat_fetchone(self, rp, charset=None):
"""oursql isn't super-broken like MySQLdb, yaaay."""
return rp.fetchone()
def _compat_first(self, rp, charset=None):
return rp.first()
dialect = MySQLDialect_oursql
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mysql/mysqlconnector.py
|
# mysql/mysqlconnector.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: mysql+mysqlconnector
:name: MySQL Connector/Python
:dbapi: myconnpy
:connectstring: mysql+mysqlconnector://<user>:<password>@<host>[:<port>]/<dbname>
:url: https://pypi.org/project/mysql-connector-python/
.. note::
The MySQL Connector/Python DBAPI has had many issues since its release,
some of which may remain unresolved, and the mysqlconnector dialect is
**not tested as part of SQLAlchemy's continuous integration**.
The recommended MySQL dialects are mysqlclient and PyMySQL.
""" # noqa
import re
from .base import BIT
from .base import MySQLCompiler
from .base import MySQLDialect
from .base import MySQLExecutionContext
from .base import MySQLIdentifierPreparer
from ... import processors
from ... import util
class MySQLExecutionContext_mysqlconnector(MySQLExecutionContext):
def get_lastrowid(self):
return self.cursor.lastrowid
class MySQLCompiler_mysqlconnector(MySQLCompiler):
def visit_mod_binary(self, binary, operator, **kw):
if self.dialect._mysqlconnector_double_percents:
return (
self.process(binary.left, **kw)
+ " %% "
+ self.process(binary.right, **kw)
)
else:
return (
self.process(binary.left, **kw)
+ " % "
+ self.process(binary.right, **kw)
)
def post_process_text(self, text):
if self.dialect._mysqlconnector_double_percents:
return text.replace("%", "%%")
else:
return text
def escape_literal_column(self, text):
if self.dialect._mysqlconnector_double_percents:
return text.replace("%", "%%")
else:
return text
class MySQLIdentifierPreparer_mysqlconnector(MySQLIdentifierPreparer):
@property
def _double_percents(self):
return self.dialect._mysqlconnector_double_percents
@_double_percents.setter
def _double_percents(self, value):
pass
def _escape_identifier(self, value):
value = value.replace(self.escape_quote, self.escape_to_quote)
if self.dialect._mysqlconnector_double_percents:
return value.replace("%", "%%")
else:
return value
class _myconnpyBIT(BIT):
def result_processor(self, dialect, coltype):
"""MySQL-connector already converts mysql bits, so."""
return None
class MySQLDialect_mysqlconnector(MySQLDialect):
driver = "mysqlconnector"
supports_unicode_binds = True
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
supports_native_decimal = True
default_paramstyle = "format"
execution_ctx_cls = MySQLExecutionContext_mysqlconnector
statement_compiler = MySQLCompiler_mysqlconnector
preparer = MySQLIdentifierPreparer_mysqlconnector
colspecs = util.update_copy(MySQLDialect.colspecs, {BIT: _myconnpyBIT})
def __init__(self, *arg, **kw):
super(MySQLDialect_mysqlconnector, self).__init__(*arg, **kw)
# hack description encoding since mysqlconnector randomly
# returns bytes or not
self._description_decoder = (
processors.to_conditional_unicode_processor_factory
)(self.description_encoding)
def _check_unicode_description(self, connection):
# hack description encoding since mysqlconnector randomly
# returns bytes or not
return False
@property
def description_encoding(self):
# total guess
return "latin-1"
@util.memoized_property
def supports_unicode_statements(self):
return util.py3k or self._mysqlconnector_version_info > (2, 0)
@classmethod
def dbapi(cls):
from mysql import connector
return connector
def do_ping(self, dbapi_connection):
try:
dbapi_connection.ping(False)
except self.dbapi.Error as err:
if self.is_disconnect(err, dbapi_connection, None):
return False
else:
raise
else:
return True
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
opts.update(url.query)
util.coerce_kw_type(opts, "allow_local_infile", bool)
util.coerce_kw_type(opts, "autocommit", bool)
util.coerce_kw_type(opts, "buffered", bool)
util.coerce_kw_type(opts, "compress", bool)
util.coerce_kw_type(opts, "connection_timeout", int)
util.coerce_kw_type(opts, "connect_timeout", int)
util.coerce_kw_type(opts, "consume_results", bool)
util.coerce_kw_type(opts, "force_ipv6", bool)
util.coerce_kw_type(opts, "get_warnings", bool)
util.coerce_kw_type(opts, "pool_reset_session", bool)
util.coerce_kw_type(opts, "pool_size", int)
util.coerce_kw_type(opts, "raise_on_warnings", bool)
util.coerce_kw_type(opts, "raw", bool)
util.coerce_kw_type(opts, "ssl_verify_cert", bool)
util.coerce_kw_type(opts, "use_pure", bool)
util.coerce_kw_type(opts, "use_unicode", bool)
# unfortunately, MySQL/connector python refuses to release a
# cursor without reading fully, so non-buffered isn't an option
opts.setdefault("buffered", True)
# FOUND_ROWS must be set in ClientFlag to enable
# supports_sane_rowcount.
if self.dbapi is not None:
try:
from mysql.connector.constants import ClientFlag
client_flags = opts.get(
"client_flags", ClientFlag.get_default()
)
client_flags |= ClientFlag.FOUND_ROWS
opts["client_flags"] = client_flags
except Exception:
pass
return [[], opts]
@util.memoized_property
def _mysqlconnector_version_info(self):
if self.dbapi and hasattr(self.dbapi, "__version__"):
m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", self.dbapi.__version__)
if m:
return tuple(int(x) for x in m.group(1, 2, 3) if x is not None)
@util.memoized_property
def _mysqlconnector_double_percents(self):
return not util.py3k and self._mysqlconnector_version_info < (2, 0)
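# For illustration: True only on Python 2 with a pre-2.0 connector, where
# literal percent signs must be doubled; on Python 3 or connector 2.0+
# this is False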
def _detect_charset(self, connection):
return connection.connection.charset
def _extract_error_code(self, exception):
return exception.errno
def is_disconnect(self, e, connection, cursor):
errnos = (2006, 2013, 2014, 2045, 2055, 2048)
exceptions = (self.dbapi.OperationalError, self.dbapi.InterfaceError)
if isinstance(e, exceptions):
return (
e.errno in errnos
or "MySQL Connection not available." in str(e)
or "Connection to MySQL is not available" in str(e)
)
else:
return False
def _compat_fetchall(self, rp, charset=None):
return rp.fetchall()
def _compat_fetchone(self, rp, charset=None):
return rp.fetchone()
_isolation_lookup = set(
[
"SERIALIZABLE",
"READ UNCOMMITTED",
"READ COMMITTED",
"REPEATABLE READ",
"AUTOCOMMIT",
]
)
def _set_isolation_level(self, connection, level):
if level == "AUTOCOMMIT":
connection.autocommit = True
else:
connection.autocommit = False
super(MySQLDialect_mysqlconnector, self)._set_isolation_level(
connection, level
)
dialect = MySQLDialect_mysqlconnector
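# Illustrative sketch (not part of the dialect): connecting with query-string
# options.  create_connect_args() above coerces string values such as "true"
# and "10" to the bool/int types mysql-connector-python expects, and ORs
# ClientFlag.FOUND_ROWS into client_flags.  Assumes mysql-connector-python is
# installed; host, credentials and database are placeholders.
def _example_mysqlconnector_engine():  # pragma: no cover - documentation only
    from sqlalchemy import create_engine
    return create_engine(
        "mysql+mysqlconnector://user:pass@localhost/test"
        "?connection_timeout=10&raise_on_warnings=false"
    )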
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mysql/reflection.py
|
# mysql/reflection.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
from .enumerated import _EnumeratedValues
from .enumerated import SET
from .types import DATETIME
from .types import TIME
from .types import TIMESTAMP
from ... import log
from ... import types as sqltypes
from ... import util
class ReflectedState(object):
"""Stores raw information about a SHOW CREATE TABLE statement."""
def __init__(self):
self.columns = []
self.table_options = {}
self.table_name = None
self.keys = []
self.fk_constraints = []
self.ck_constraints = []
@log.class_logger
class MySQLTableDefinitionParser(object):
"""Parses the results of a SHOW CREATE TABLE statement."""
def __init__(self, dialect, preparer):
self.dialect = dialect
self.preparer = preparer
self._prep_regexes()
def parse(self, show_create, charset):
state = ReflectedState()
state.charset = charset
for line in re.split(r"\r?\n", show_create):
if line.startswith("  " + self.preparer.initial_quote):
self._parse_column(line, state)
# a regular table options line
elif line.startswith(") "):
self._parse_table_options(line, state)
# an ANSI-mode table options line
elif line == ")":
pass
elif line.startswith("CREATE "):
self._parse_table_name(line, state)
# Not present in real reflection, but may be present
# if loading from a file.
elif not line:
pass
else:
type_, spec = self._parse_constraints(line)
if type_ is None:
util.warn("Unknown schema content: %r" % line)
elif type_ == "key":
state.keys.append(spec)
elif type_ == "fk_constraint":
state.fk_constraints.append(spec)
elif type_ == "ck_constraint":
state.ck_constraints.append(spec)
else:
pass
return state
def _parse_constraints(self, line):
"""Parse a KEY or CONSTRAINT line.
:param line: A line of SHOW CREATE TABLE output
"""
# KEY
m = self._re_key.match(line)
if m:
spec = m.groupdict()
# convert columns into name, length pairs
# NOTE: we may want to consider SHOW INDEX as the
# format of indexes in MySQL becomes more complex
spec["columns"] = self._parse_keyexprs(spec["columns"])
if spec["version_sql"]:
m2 = self._re_key_version_sql.match(spec["version_sql"])
if m2 and m2.groupdict()["parser"]:
spec["parser"] = m2.groupdict()["parser"]
if spec["parser"]:
spec["parser"] = self.preparer.unformat_identifiers(
spec["parser"]
)[0]
return "key", spec
# FOREIGN KEY CONSTRAINT
m = self._re_fk_constraint.match(line)
if m:
spec = m.groupdict()
spec["table"] = self.preparer.unformat_identifiers(spec["table"])
spec["local"] = [c[0] for c in self._parse_keyexprs(spec["local"])]
spec["foreign"] = [
c[0] for c in self._parse_keyexprs(spec["foreign"])
]
return "fk_constraint", spec
# CHECK constraint
m = self._re_ck_constraint.match(line)
if m:
spec = m.groupdict()
return "ck_constraint", spec
# PARTITION and SUBPARTITION
m = self._re_partition.match(line)
if m:
# Punt!
return "partition", line
# No match.
return (None, line)
def _parse_table_name(self, line, state):
"""Extract the table name.
:param line: The first line of SHOW CREATE TABLE
"""
regex, cleanup = self._pr_name
m = regex.match(line)
if m:
state.table_name = cleanup(m.group("name"))
def _parse_table_options(self, line, state):
"""Build a dictionary of all reflected table-level options.
:param line: The final line of SHOW CREATE TABLE output.
"""
options = {}
if not line or line == ")":
pass
else:
rest_of_line = line[:]
for regex, cleanup in self._pr_options:
m = regex.search(rest_of_line)
if not m:
continue
directive, value = m.group("directive"), m.group("val")
if cleanup:
value = cleanup(value)
options[directive.lower()] = value
rest_of_line = regex.sub("", rest_of_line)
for nope in ("auto_increment", "data directory", "index directory"):
options.pop(nope, None)
for opt, val in options.items():
state.table_options["%s_%s" % (self.dialect.name, opt)] = val
def _parse_column(self, line, state):
"""Extract column details.
Falls back to a 'minimal support' variant if full parse fails.
:param line: Any column-bearing line from SHOW CREATE TABLE
"""
spec = None
m = self._re_column.match(line)
if m:
spec = m.groupdict()
spec["full"] = True
else:
m = self._re_column_loose.match(line)
if m:
spec = m.groupdict()
spec["full"] = False
if not spec:
util.warn("Unknown column definition %r" % line)
return
if not spec["full"]:
util.warn("Incomplete reflection of column definition %r" % line)
name, type_, args = spec["name"], spec["coltype"], spec["arg"]
try:
col_type = self.dialect.ischema_names[type_]
except KeyError:
util.warn(
"Did not recognize type '%s' of column '%s'" % (type_, name)
)
col_type = sqltypes.NullType
# Column type positional arguments eg. varchar(32)
if args is None or args == "":
type_args = []
elif args[0] == "'" and args[-1] == "'":
type_args = self._re_csv_str.findall(args)
else:
type_args = [int(v) for v in self._re_csv_int.findall(args)]
# Column type keyword options
type_kw = {}
if issubclass(col_type, (DATETIME, TIME, TIMESTAMP)):
if type_args:
type_kw["fsp"] = type_args.pop(0)
for kw in ("unsigned", "zerofill"):
if spec.get(kw, False):
type_kw[kw] = True
for kw in ("charset", "collate"):
if spec.get(kw, False):
type_kw[kw] = spec[kw]
if issubclass(col_type, _EnumeratedValues):
type_args = _EnumeratedValues._strip_values(type_args)
if issubclass(col_type, SET) and "" in type_args:
type_kw["retrieve_as_bitwise"] = True
type_instance = col_type(*type_args, **type_kw)
col_kw = {}
# NOT NULL
col_kw["nullable"] = True
# this can be "NULL" in the case of TIMESTAMP
if spec.get("notnull", False) == "NOT NULL":
col_kw["nullable"] = False
# AUTO_INCREMENT
if spec.get("autoincr", False):
col_kw["autoincrement"] = True
elif issubclass(col_type, sqltypes.Integer):
col_kw["autoincrement"] = False
# DEFAULT
default = spec.get("default", None)
if default == "NULL":
# eliminates the need to deal with this later.
default = None
comment = spec.get("comment", None)
if comment is not None:
comment = comment.replace("\\\\", "\\").replace("''", "'")
sqltext = spec.get("generated")
if sqltext is not None:
computed = dict(sqltext=sqltext)
persisted = spec.get("persistence")
if persisted is not None:
computed["persisted"] = persisted == "STORED"
col_kw["computed"] = computed
col_d = dict(
name=name, type=type_instance, default=default, comment=comment
)
col_d.update(col_kw)
state.columns.append(col_d)
def _describe_to_create(self, table_name, columns):
"""Re-format DESCRIBE output as a SHOW CREATE TABLE string.
DESCRIBE is a much simpler reflection and is sufficient for
reflecting views for runtime use. This method formats DDL
for columns only; keys are omitted.
:param columns: A sequence of DESCRIBE or SHOW COLUMNS 6-tuples.
SHOW FULL COLUMNS FROM rows must be rearranged for use with
this function.
"""
buffer = []
for row in columns:
(name, col_type, nullable, default, extra) = [
row[i] for i in (0, 1, 2, 4, 5)
]
line = [" "]
line.append(self.preparer.quote_identifier(name))
line.append(col_type)
if not nullable:
line.append("NOT NULL")
if default:
if "auto_increment" in default:
pass
elif col_type.startswith("timestamp") and default.startswith(
"C"
):
line.append("DEFAULT")
line.append(default)
elif default == "NULL":
line.append("DEFAULT")
line.append(default)
else:
line.append("DEFAULT")
line.append("'%s'" % default.replace("'", "''"))
if extra:
line.append(extra)
buffer.append(" ".join(line))
return "".join(
[
(
"CREATE TABLE %s (\n"
% self.preparer.quote_identifier(table_name)
),
",\n".join(buffer),
"\n) ",
]
)
def _parse_keyexprs(self, identifiers):
"""Unpack '"col"(2),"col" ASC'-ish strings into components."""
return self._re_keyexprs.findall(identifiers)
def _prep_regexes(self):
"""Pre-compile regular expressions."""
self._re_columns = []
self._pr_options = []
_final = self.preparer.final_quote
quotes = dict(
zip(
("iq", "fq", "esc_fq"),
[
re.escape(s)
for s in (
self.preparer.initial_quote,
_final,
self.preparer._escape_identifier(_final),
)
],
)
)
self._pr_name = _pr_compile(
r"^CREATE (?:\w+ +)?TABLE +"
r"%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($" % quotes,
self.preparer._unescape_identifier,
)
# `col`,`col2`(32),`col3`(15) DESC
#
self._re_keyexprs = _re_compile(
r"(?:"
r"(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)"
r"(?:\((\d+)\))?(?: +(ASC|DESC))?(?=\,|$))+" % quotes
)
# 'foo' or 'foo','bar' or 'fo,o','ba''a''r'
self._re_csv_str = _re_compile(r"\x27(?:\x27\x27|[^\x27])*\x27")
# 123 or 123,456
self._re_csv_int = _re_compile(r"\d+")
# `colname` <type> [type opts]
# (NOT NULL | NULL)
# DEFAULT ('value' | CURRENT_TIMESTAMP...)
# COMMENT 'comment'
# COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT)
# STORAGE (DISK|MEMORY)
self._re_column = _re_compile(
r" "
r"%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +"
r"(?P<coltype>\w+)"
r"(?:\((?P<arg>(?:\d+|\d+,\d+|"
r"(?:'(?:''|[^'])*',?)+))\))?"
r"(?: +(?P<unsigned>UNSIGNED))?"
r"(?: +(?P<zerofill>ZEROFILL))?"
r"(?: +CHARACTER SET +(?P<charset>[\w_]+))?"
r"(?: +COLLATE +(?P<collate>[\w_]+))?"
r"(?: +(?P<notnull>(?:NOT )?NULL))?"
r"(?: +DEFAULT +(?P<default>"
r"(?:NULL|'(?:''|[^'])*'|[\w\(\)]+"
r"(?: +ON UPDATE [\w\(\)]+)?)"
r"))?"
r"(?: +(?:GENERATED ALWAYS)? ?AS +(?P<generated>\("
r".*\))? ?(?P<persistence>VIRTUAL|STORED)?)?"
r"(?: +(?P<autoincr>AUTO_INCREMENT))?"
r"(?: +COMMENT +'(?P<comment>(?:''|[^'])*)')?"
r"(?: +COLUMN_FORMAT +(?P<colfmt>\w+))?"
r"(?: +STORAGE +(?P<storage>\w+))?"
r"(?: +(?P<extra>.*))?"
r",?$" % quotes
)
# Fallback, try to parse as little as possible
self._re_column_loose = _re_compile(
r" "
r"%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +"
r"(?P<coltype>\w+)"
r"(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?"
r".*?(?P<notnull>(?:NOT )NULL)?" % quotes
)
# (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))?
# (`col` (ASC|DESC)?, `col` (ASC|DESC)?)
# KEY_BLOCK_SIZE size | WITH PARSER name /*!50100 WITH PARSER name */
self._re_key = _re_compile(
r" "
r"(?:(?P<type>\S+) )?KEY"
r"(?: +%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?"
r"(?: +USING +(?P<using_pre>\S+))?"
r" +\((?P<columns>.+?)\)"
r"(?: +USING +(?P<using_post>\S+))?"
r"(?: +KEY_BLOCK_SIZE *[ =]? *(?P<keyblock>\S+))?"
r"(?: +WITH PARSER +(?P<parser>\S+))?"
r"(?: +COMMENT +(?P<comment>(\x27\x27|\x27([^\x27])*?\x27)+))?"
r"(?: +/\*(?P<version_sql>.+)\*/ +)?"
r",?$" % quotes
)
# https://forums.mysql.com/read.php?20,567102,567111#msg-567111
# It means if the MySQL version >= \d+, execute what's in the comment
self._re_key_version_sql = _re_compile(
r"\!\d+ " r"(?: *WITH PARSER +(?P<parser>\S+) *)?"
)
# CONSTRAINT `name` FOREIGN KEY (`local_col`)
# REFERENCES `remote` (`remote_col`)
# MATCH FULL | MATCH PARTIAL | MATCH SIMPLE
# ON DELETE CASCADE ON UPDATE RESTRICT
#
# unique constraints come back as KEYs
kw = quotes.copy()
kw["on"] = "RESTRICT|CASCADE|SET NULL|NOACTION"
self._re_fk_constraint = _re_compile(
r" "
r"CONSTRAINT +"
r"%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +"
r"FOREIGN KEY +"
r"\((?P<local>[^\)]+?)\) REFERENCES +"
r"(?P<table>%(iq)s[^%(fq)s]+%(fq)s"
r"(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +"
r"\((?P<foreign>[^\)]+?)\)"
r"(?: +(?P<match>MATCH \w+))?"
r"(?: +ON DELETE (?P<ondelete>%(on)s))?"
r"(?: +ON UPDATE (?P<onupdate>%(on)s))?" % kw
)
# CONSTRAINT `CONSTRAINT_1` CHECK (`x` > 5)'
# testing on MariaDB 10.2 shows that the CHECK constraint
# is returned on a line by itself, so to match without worrying
about parentheses in the expression we go to the end of the line
self._re_ck_constraint = _re_compile(
r" "
r"CONSTRAINT +"
r"%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +"
r"CHECK +"
r"\((?P<sqltext>.+)\),?" % kw
)
# PARTITION
#
# punt!
self._re_partition = _re_compile(r"(?:.*)(?:SUB)?PARTITION(?:.*)")
# Table-level options (COLLATE, ENGINE, etc.)
# Do the string options first, since they have quoted
# strings we need to get rid of.
for option in _options_of_type_string:
self._add_option_string(option)
for option in (
"ENGINE",
"TYPE",
"AUTO_INCREMENT",
"AVG_ROW_LENGTH",
"CHARACTER SET",
"DEFAULT CHARSET",
"CHECKSUM",
"COLLATE",
"DELAY_KEY_WRITE",
"INSERT_METHOD",
"MAX_ROWS",
"MIN_ROWS",
"PACK_KEYS",
"ROW_FORMAT",
"KEY_BLOCK_SIZE",
):
self._add_option_word(option)
self._add_option_regex("UNION", r"\([^\)]+\)")
self._add_option_regex("TABLESPACE", r".*? STORAGE DISK")
self._add_option_regex(
"RAID_TYPE",
r"\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+",
)
_optional_equals = r"(?:\s*(?:=\s*)|\s+)"
def _add_option_string(self, directive):
regex = r"(?P<directive>%s)%s" r"'(?P<val>(?:[^']|'')*?)'(?!')" % (
re.escape(directive),
self._optional_equals,
)
self._pr_options.append(
_pr_compile(
regex, lambda v: v.replace("\\\\", "\\").replace("''", "'")
)
)
def _add_option_word(self, directive):
regex = r"(?P<directive>%s)%s" r"(?P<val>\w+)" % (
re.escape(directive),
self._optional_equals,
)
self._pr_options.append(_pr_compile(regex))
def _add_option_regex(self, directive, regex):
regex = r"(?P<directive>%s)%s" r"(?P<val>%s)" % (
re.escape(directive),
self._optional_equals,
regex,
)
self._pr_options.append(_pr_compile(regex))
_options_of_type_string = (
"COMMENT",
"DATA DIRECTORY",
"INDEX DIRECTORY",
"PASSWORD",
"CONNECTION",
)
def _pr_compile(regex, cleanup=None):
"""Prepare a 2-tuple of compiled regex and callable."""
return (_re_compile(regex), cleanup)
def _re_compile(regex):
"""Compile a string to regex, I and UNICODE."""
return re.compile(regex, re.I | re.UNICODE)
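# Illustrative sketch (not part of this module): feeding SHOW CREATE TABLE
# text through the parser above.  The dialect/preparer pair normally comes
# from a live Engine; here a bare MySQL dialect is constructed only to supply
# quoting rules, and the DDL string is a hand-written sample.
def _example_parse_show_create():  # pragma: no cover - documentation only
    from sqlalchemy.dialects import mysql
    dialect = mysql.dialect()
    parser = MySQLTableDefinitionParser(dialect, dialect.identifier_preparer)
    ddl = (
        "CREATE TABLE `t` (\n"
        "  `id` int(11) NOT NULL AUTO_INCREMENT,\n"
        "  `name` varchar(32) DEFAULT NULL,\n"
        "  PRIMARY KEY (`id`)\n"
        ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4"
    )
    state = parser.parse(ddl, charset="utf8mb4")
    # state.table_name == "t"; state.columns holds per-column dicts
    return state.table_name, [col["name"] for col in state.columns]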
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mysql/zxjdbc.py
|
# mysql/zxjdbc.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: mysql+zxjdbc
:name: zxjdbc for Jython
:dbapi: zxjdbc
:connectstring: mysql+zxjdbc://<user>:<password>@<hostname>[:<port>]/<database>
:driverurl: http://dev.mysql.com/downloads/connector/j/
.. note:: Jython is not supported by current versions of SQLAlchemy. The
zxjdbc dialect should be considered as experimental.
Character Sets
--------------
SQLAlchemy zxjdbc dialects pass unicode straight through to the
zxjdbc/JDBC layer. To allow multiple character sets to be sent from the
MySQL Connector/J JDBC driver, by default SQLAlchemy sets its
``characterEncoding`` connection property to ``UTF-8``. It may be
overridden via a ``create_engine`` URL parameter.
""" # noqa
import re
from .base import BIT
from .base import MySQLDialect
from .base import MySQLExecutionContext
from ... import types as sqltypes
from ... import util
from ...connectors.zxJDBC import ZxJDBCConnector
class _ZxJDBCBit(BIT):
def result_processor(self, dialect, coltype):
"""Converts boolean or byte arrays from MySQL Connector/J to longs."""
def process(value):
if value is None:
return value
if isinstance(value, bool):
return int(value)
v = 0
for i in value:
v = v << 8 | (i & 0xFF)
value = v
return value
return process
class MySQLExecutionContext_zxjdbc(MySQLExecutionContext):
def get_lastrowid(self):
cursor = self.create_cursor()
cursor.execute("SELECT LAST_INSERT_ID()")
lastrowid = cursor.fetchone()[0]
cursor.close()
return lastrowid
class MySQLDialect_zxjdbc(ZxJDBCConnector, MySQLDialect):
jdbc_db_name = "mysql"
jdbc_driver_name = "com.mysql.jdbc.Driver"
execution_ctx_cls = MySQLExecutionContext_zxjdbc
colspecs = util.update_copy(
MySQLDialect.colspecs, {sqltypes.Time: sqltypes.Time, BIT: _ZxJDBCBit}
)
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
# Prefer 'character_set_results' for the current connection over the
# value in the driver. SET NAMES or individual variable SETs will
# change the charset without updating the driver's view of the world.
#
# If it's decided that issuing that sort of SQL leaves you SOL, then
# this can prefer the driver value.
rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
opts = {row[0]: row[1] for row in self._compat_fetchall(rs)}
for key in ("character_set_connection", "character_set"):
if opts.get(key, None):
return opts[key]
util.warn(
"Could not detect the connection character set. "
"Assuming latin1."
)
return "latin1"
def _driver_kwargs(self):
"""return kw arg dict to be sent to connect()."""
return dict(characterEncoding="UTF-8", yearIsDateType="false")
def _extract_error_code(self, exception):
# e.g.: DBAPIError: (Error) Table 'test.u2' doesn't exist
# [SQLCode: 1146], [SQLState: 42S02] 'DESCRIBE `u2`' ()
m = re.compile(r"\[SQLCode\: (\d+)\]").search(str(exception.args))
c = m.group(1) if m else None
if c:
return int(c)
def _get_server_version_info(self, connection):
dbapi_con = connection.connection
version = []
r = re.compile(r"[.\-]")
for n in r.split(dbapi_con.dbversion):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
dialect = MySQLDialect_zxjdbc
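# Illustrative sketch (not part of the dialect): overriding the default
# characterEncoding described in the module docstring via a URL parameter.
# Requires Jython with zxjdbc and Connector/J; URL values are placeholders.
def _example_zxjdbc_engine():  # pragma: no cover - documentation only
    from sqlalchemy import create_engine
    return create_engine(
        "mysql+zxjdbc://user:pass@localhost/test?characterEncoding=latin1"
    )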
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mysql/__init__.py
|
# mysql/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base # noqa
from . import cymysql # noqa
from . import gaerdbms # noqa
from . import mysqlconnector # noqa
from . import mysqldb # noqa
from . import oursql # noqa
from . import pymysql # noqa
from . import pyodbc # noqa
from . import zxjdbc # noqa
from .base import BIGINT
from .base import BINARY
from .base import BIT
from .base import BLOB
from .base import BOOLEAN
from .base import CHAR
from .base import DATE
from .base import DATETIME
from .base import DECIMAL
from .base import DOUBLE
from .base import ENUM
from .base import FLOAT
from .base import INTEGER
from .base import JSON
from .base import LONGBLOB
from .base import LONGTEXT
from .base import MEDIUMBLOB
from .base import MEDIUMINT
from .base import MEDIUMTEXT
from .base import NCHAR
from .base import NUMERIC
from .base import NVARCHAR
from .base import REAL
from .base import SET
from .base import SMALLINT
from .base import TEXT
from .base import TIME
from .base import TIMESTAMP
from .base import TINYBLOB
from .base import TINYINT
from .base import TINYTEXT
from .base import VARBINARY
from .base import VARCHAR
from .base import YEAR
from .dml import Insert
from .dml import insert
# default dialect
base.dialect = dialect = mysqldb.dialect
__all__ = (
"BIGINT",
"BINARY",
"BIT",
"BLOB",
"BOOLEAN",
"CHAR",
"DATE",
"DATETIME",
"DECIMAL",
"DOUBLE",
"ENUM",
"DECIMAL",
"FLOAT",
"INTEGER",
"INTEGER",
"JSON",
"LONGBLOB",
"LONGTEXT",
"MEDIUMBLOB",
"MEDIUMINT",
"MEDIUMTEXT",
"NCHAR",
"NVARCHAR",
"NUMERIC",
"SET",
"SMALLINT",
"REAL",
"TEXT",
"TIME",
"TIMESTAMP",
"TINYBLOB",
"TINYINT",
"TINYTEXT",
"VARBINARY",
"VARCHAR",
"YEAR",
"dialect",
"insert",
"Insert",
)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mysql/types.py
|
# mysql/types.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import datetime
from ... import exc
from ... import types as sqltypes
from ... import util
class _NumericType(object):
"""Base for MySQL numeric types.
This is the base for both NUMERIC and INTEGER, hence
it's a mixin.
"""
def __init__(self, unsigned=False, zerofill=False, **kw):
self.unsigned = unsigned
self.zerofill = zerofill
super(_NumericType, self).__init__(**kw)
def __repr__(self):
return util.generic_repr(
self, to_inspect=[_NumericType, sqltypes.Numeric]
)
class _FloatType(_NumericType, sqltypes.Float):
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
if isinstance(self, (REAL, DOUBLE)) and (
(precision is None and scale is not None)
or (precision is not None and scale is None)
):
raise exc.ArgumentError(
"You must specify both precision and scale or omit "
"both altogether."
)
super(_FloatType, self).__init__(
precision=precision, asdecimal=asdecimal, **kw
)
self.scale = scale
def __repr__(self):
return util.generic_repr(
self, to_inspect=[_FloatType, _NumericType, sqltypes.Float]
)
class _IntegerType(_NumericType, sqltypes.Integer):
def __init__(self, display_width=None, **kw):
self.display_width = display_width
super(_IntegerType, self).__init__(**kw)
def __repr__(self):
return util.generic_repr(
self, to_inspect=[_IntegerType, _NumericType, sqltypes.Integer]
)
class _StringType(sqltypes.String):
"""Base for MySQL string types."""
def __init__(
self,
charset=None,
collation=None,
ascii=False, # noqa
binary=False,
unicode=False,
national=False,
**kw
):
self.charset = charset
# allow collate= or collation=
kw.setdefault("collation", kw.pop("collate", collation))
self.ascii = ascii
self.unicode = unicode
self.binary = binary
self.national = national
super(_StringType, self).__init__(**kw)
def __repr__(self):
return util.generic_repr(
self, to_inspect=[_StringType, sqltypes.String]
)
class _MatchType(sqltypes.Float, sqltypes.MatchType):
def __init__(self, **kw):
# TODO: float arguments?
sqltypes.Float.__init__(self)
sqltypes.MatchType.__init__(self)
class NUMERIC(_NumericType, sqltypes.NUMERIC):
"""MySQL NUMERIC type."""
__visit_name__ = "NUMERIC"
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a NUMERIC.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(NUMERIC, self).__init__(
precision=precision, scale=scale, asdecimal=asdecimal, **kw
)
class DECIMAL(_NumericType, sqltypes.DECIMAL):
"""MySQL DECIMAL type."""
__visit_name__ = "DECIMAL"
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a DECIMAL.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(DECIMAL, self).__init__(
precision=precision, scale=scale, asdecimal=asdecimal, **kw
)
class DOUBLE(_FloatType):
"""MySQL DOUBLE type."""
__visit_name__ = "DOUBLE"
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a DOUBLE.
.. note::
The :class:`.DOUBLE` type by default converts from float
to Decimal, using a truncation that defaults to 10 digits.
Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
to change this scale, or ``asdecimal=False`` to return values
directly as Python floating points.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(DOUBLE, self).__init__(
precision=precision, scale=scale, asdecimal=asdecimal, **kw
)
class REAL(_FloatType, sqltypes.REAL):
"""MySQL REAL type."""
__visit_name__ = "REAL"
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a REAL.
.. note::
The :class:`.REAL` type by default converts from float
to Decimal, using a truncation that defaults to 10 digits.
Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
to change this scale, or ``asdecimal=False`` to return values
directly as Python floating points.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(REAL, self).__init__(
precision=precision, scale=scale, asdecimal=asdecimal, **kw
)
class FLOAT(_FloatType, sqltypes.FLOAT):
"""MySQL FLOAT type."""
__visit_name__ = "FLOAT"
def __init__(self, precision=None, scale=None, asdecimal=False, **kw):
"""Construct a FLOAT.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(FLOAT, self).__init__(
precision=precision, scale=scale, asdecimal=asdecimal, **kw
)
def bind_processor(self, dialect):
return None
class INTEGER(_IntegerType, sqltypes.INTEGER):
"""MySQL INTEGER type."""
__visit_name__ = "INTEGER"
def __init__(self, display_width=None, **kw):
"""Construct an INTEGER.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(INTEGER, self).__init__(display_width=display_width, **kw)
class BIGINT(_IntegerType, sqltypes.BIGINT):
"""MySQL BIGINTEGER type."""
__visit_name__ = "BIGINT"
def __init__(self, display_width=None, **kw):
"""Construct a BIGINTEGER.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(BIGINT, self).__init__(display_width=display_width, **kw)
class MEDIUMINT(_IntegerType):
"""MySQL MEDIUMINTEGER type."""
__visit_name__ = "MEDIUMINT"
def __init__(self, display_width=None, **kw):
"""Construct a MEDIUMINTEGER
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(MEDIUMINT, self).__init__(display_width=display_width, **kw)
class TINYINT(_IntegerType):
"""MySQL TINYINT type."""
__visit_name__ = "TINYINT"
def __init__(self, display_width=None, **kw):
"""Construct a TINYINT.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(TINYINT, self).__init__(display_width=display_width, **kw)
class SMALLINT(_IntegerType, sqltypes.SMALLINT):
"""MySQL SMALLINTEGER type."""
__visit_name__ = "SMALLINT"
def __init__(self, display_width=None, **kw):
"""Construct a SMALLINTEGER.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(SMALLINT, self).__init__(display_width=display_width, **kw)
class BIT(sqltypes.TypeEngine):
"""MySQL BIT type.
This type requires MySQL 5.0.3 or greater for MyISAM tables, and 5.0.5
or greater for MEMORY, InnoDB and BDB tables. For older versions, use a
MSTinyInteger() type.
"""
__visit_name__ = "BIT"
def __init__(self, length=None):
"""Construct a BIT.
:param length: Optional, number of bits.
"""
self.length = length
def result_processor(self, dialect, coltype):
"""Convert a MySQL's 64 bit, variable length binary string to a long.
TODO: this is MySQL-db, pyodbc specific. OurSQL and mysqlconnector
already do this, so this logic should be moved to those dialects.
"""
def process(value):
if value is not None:
v = 0
for i in value:
if not isinstance(i, int):
i = ord(i) # convert byte to int on Python 2
v = v << 8 | i
return v
return value
return process
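# Illustrative sketch (not part of this module): the processor above folds a
# BIT value returned as raw bytes into an integer, most significant byte
# first.  The byte string is a made-up sample.
def _example_bit_processor():  # pragma: no cover - documentation only
    process = BIT(16).result_processor(dialect=None, coltype=None)
    return process(b"\x02\x01")  # 0x0201 == 513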
class TIME(sqltypes.TIME):
"""MySQL TIME type. """
__visit_name__ = "TIME"
def __init__(self, timezone=False, fsp=None):
"""Construct a MySQL TIME type.
:param timezone: not used by the MySQL dialect.
:param fsp: fractional seconds precision value.
MySQL 5.6 supports storage of fractional seconds;
this parameter will be used when emitting DDL
for the TIME type.
.. note::
DBAPI driver support for fractional seconds may
be limited; current support includes
MySQL Connector/Python.
"""
super(TIME, self).__init__(timezone=timezone)
self.fsp = fsp
def result_processor(self, dialect, coltype):
time = datetime.time
def process(value):
# convert from a timedelta value
if value is not None:
microseconds = value.microseconds
seconds = value.seconds
minutes = seconds // 60
return time(
minutes // 60,
minutes % 60,
seconds - minutes * 60,
microsecond=microseconds,
)
else:
return None
return process
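# Illustrative sketch (not part of this module): DBAPIs hand back MySQL TIME
# values as datetime.timedelta; the processor above converts them to
# datetime.time.  The sample duration is arbitrary.
def _example_time_processor():  # pragma: no cover - documentation only
    process = TIME().result_processor(dialect=None, coltype=None)
    # returns datetime.time(5, 30, 12)
    return process(datetime.timedelta(hours=5, minutes=30, seconds=12))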
class TIMESTAMP(sqltypes.TIMESTAMP):
"""MySQL TIMESTAMP type.
"""
__visit_name__ = "TIMESTAMP"
def __init__(self, timezone=False, fsp=None):
"""Construct a MySQL TIMESTAMP type.
:param timezone: not used by the MySQL dialect.
:param fsp: fractional seconds precision value.
MySQL 5.6.4 supports storage of fractional seconds;
this parameter will be used when emitting DDL
for the TIMESTAMP type.
.. note::
DBAPI driver support for fractional seconds may
be limited; current support includes
MySQL Connector/Python.
"""
super(TIMESTAMP, self).__init__(timezone=timezone)
self.fsp = fsp
class DATETIME(sqltypes.DATETIME):
"""MySQL DATETIME type.
"""
__visit_name__ = "DATETIME"
def __init__(self, timezone=False, fsp=None):
"""Construct a MySQL DATETIME type.
:param timezone: not used by the MySQL dialect.
:param fsp: fractional seconds precision value.
MySQL 5.6.4 supports storage of fractional seconds;
this parameter will be used when emitting DDL
for the DATETIME type.
.. note::
DBAPI driver support for fractional seconds may
be limited; current support includes
MySQL Connector/Python.
"""
super(DATETIME, self).__init__(timezone=timezone)
self.fsp = fsp
class YEAR(sqltypes.TypeEngine):
"""MySQL YEAR type, for single byte storage of years 1901-2155."""
__visit_name__ = "YEAR"
def __init__(self, display_width=None):
self.display_width = display_width
class TEXT(_StringType, sqltypes.TEXT):
"""MySQL TEXT type, for text up to 2^16 characters."""
__visit_name__ = "TEXT"
def __init__(self, length=None, **kw):
"""Construct a TEXT.
:param length: Optional, if provided the server may optimize storage
by substituting the smallest TEXT type sufficient to store
``length`` characters.
:param charset: Optional, a column-level character set for this string
value. Takes precedence over 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence over 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(TEXT, self).__init__(length=length, **kw)
class TINYTEXT(_StringType):
"""MySQL TINYTEXT type, for text up to 2^8 characters."""
__visit_name__ = "TINYTEXT"
def __init__(self, **kwargs):
"""Construct a TINYTEXT.
:param charset: Optional, a column-level character set for this string
value. Takes precedence over 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence over 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(TINYTEXT, self).__init__(**kwargs)
class MEDIUMTEXT(_StringType):
"""MySQL MEDIUMTEXT type, for text up to 2^24 characters."""
__visit_name__ = "MEDIUMTEXT"
def __init__(self, **kwargs):
"""Construct a MEDIUMTEXT.
:param charset: Optional, a column-level character set for this string
value. Takes precedence over 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence over 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(MEDIUMTEXT, self).__init__(**kwargs)
class LONGTEXT(_StringType):
"""MySQL LONGTEXT type, for text up to 2^32 characters."""
__visit_name__ = "LONGTEXT"
def __init__(self, **kwargs):
"""Construct a LONGTEXT.
:param charset: Optional, a column-level character set for this string
value. Takes precedence over 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence over 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(LONGTEXT, self).__init__(**kwargs)
class VARCHAR(_StringType, sqltypes.VARCHAR):
"""MySQL VARCHAR type, for variable-length character data."""
__visit_name__ = "VARCHAR"
def __init__(self, length=None, **kwargs):
"""Construct a VARCHAR.
:param charset: Optional, a column-level character set for this string
value. Takes precedence over 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence over 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(VARCHAR, self).__init__(length=length, **kwargs)
class CHAR(_StringType, sqltypes.CHAR):
"""MySQL CHAR type, for fixed-length character data."""
__visit_name__ = "CHAR"
def __init__(self, length=None, **kwargs):
"""Construct a CHAR.
:param length: Maximum data length, in characters.
:param binary: Optional, use the default binary collation for the
national character set. This does not affect the type of data
stored, use a BINARY type for binary data.
:param collation: Optional, request a particular collation. Must be
compatible with the national character set.
"""
super(CHAR, self).__init__(length=length, **kwargs)
@classmethod
def _adapt_string_for_cast(self, type_):
# copy the given string type into a CHAR
# for the purposes of rendering a CAST expression
type_ = sqltypes.to_instance(type_)
if isinstance(type_, sqltypes.CHAR):
return type_
elif isinstance(type_, _StringType):
return CHAR(
length=type_.length,
charset=type_.charset,
collation=type_.collation,
ascii=type_.ascii,
binary=type_.binary,
unicode=type_.unicode,
national=False, # not supported in CAST
)
else:
return CHAR(length=type_.length)
class NVARCHAR(_StringType, sqltypes.NVARCHAR):
"""MySQL NVARCHAR type.
For variable-length character data in the server's configured national
character set.
"""
__visit_name__ = "NVARCHAR"
def __init__(self, length=None, **kwargs):
"""Construct an NVARCHAR.
:param length: Maximum data length, in characters.
:param binary: Optional, use the default binary collation for the
national character set. This does not affect the type of data
stored, use a BINARY type for binary data.
:param collation: Optional, request a particular collation. Must be
compatible with the national character set.
"""
kwargs["national"] = True
super(NVARCHAR, self).__init__(length=length, **kwargs)
class NCHAR(_StringType, sqltypes.NCHAR):
"""MySQL NCHAR type.
For fixed-length character data in the server's configured national
character set.
"""
__visit_name__ = "NCHAR"
def __init__(self, length=None, **kwargs):
"""Construct an NCHAR.
:param length: Maximum data length, in characters.
:param binary: Optional, use the default binary collation for the
national character set. This does not affect the type of data
stored, use a BINARY type for binary data.
:param collation: Optional, request a particular collation. Must be
compatible with the national character set.
"""
kwargs["national"] = True
super(NCHAR, self).__init__(length=length, **kwargs)
class TINYBLOB(sqltypes._Binary):
"""MySQL TINYBLOB type, for binary data up to 2^8 bytes."""
__visit_name__ = "TINYBLOB"
class MEDIUMBLOB(sqltypes._Binary):
"""MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes."""
__visit_name__ = "MEDIUMBLOB"
class LONGBLOB(sqltypes._Binary):
"""MySQL LONGBLOB type, for binary data up to 2^32 bytes."""
__visit_name__ = "LONGBLOB"
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mysql/provision.py
|
from ...testing.provision import configure_follower
from ...testing.provision import create_db
from ...testing.provision import drop_db
from ...testing.provision import temp_table_keyword_args
@create_db.for_db("mysql")
def _mysql_create_db(cfg, eng, ident):
with eng.connect() as conn:
try:
_mysql_drop_db(cfg, conn, ident)
except Exception:
pass
conn.execute("CREATE DATABASE %s CHARACTER SET utf8mb4" % ident)
conn.execute(
"CREATE DATABASE %s_test_schema CHARACTER SET utf8mb4" % ident
)
conn.execute(
"CREATE DATABASE %s_test_schema_2 CHARACTER SET utf8mb4" % ident
)
@configure_follower.for_db("mysql")
def _mysql_configure_follower(config, ident):
config.test_schema = "%s_test_schema" % ident
config.test_schema_2 = "%s_test_schema_2" % ident
@drop_db.for_db("mysql")
def _mysql_drop_db(cfg, eng, ident):
with eng.connect() as conn:
conn.execute("DROP DATABASE %s_test_schema" % ident)
conn.execute("DROP DATABASE %s_test_schema_2" % ident)
conn.execute("DROP DATABASE %s" % ident)
@temp_table_keyword_args.for_db("mysql")
def _mysql_temp_table_keyword_args(cfg, eng):
return {"prefixes": ["TEMPORARY"]}
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mysql/mysqldb.py
|
# mysql/mysqldb.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+mysqldb
:name: mysqlclient (maintained fork of MySQL-Python)
:dbapi: mysqldb
:connectstring: mysql+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>
:url: https://pypi.org/project/mysqlclient/
Driver Status
-------------
The mysqlclient DBAPI is a maintained fork of the
`MySQL-Python <http://sourceforge.net/projects/mysql-python>`_ DBAPI,
which is no longer maintained. `mysqlclient`_ supports Python 2 and Python 3
and is very stable.
.. _mysqlclient: https://github.com/PyMySQL/mysqlclient-python
.. _mysqldb_unicode:
Unicode
-------
Please see :ref:`mysql_unicode` for current recommendations on unicode
handling.
Using MySQLdb with Google Cloud SQL
-----------------------------------
Google Cloud SQL now recommends use of the MySQLdb dialect. Connect
using a URL like the following::
mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename>
Server Side Cursors
-------------------
The mysqldb dialect supports server-side cursors. See :ref:`mysql_ss_cursors`.
"""
import re
from .base import MySQLCompiler
from .base import MySQLDialect
from .base import MySQLExecutionContext
from .base import MySQLIdentifierPreparer
from .base import TEXT
from ... import sql
from ... import util
class MySQLExecutionContext_mysqldb(MySQLExecutionContext):
@property
def rowcount(self):
if hasattr(self, "_rowcount"):
return self._rowcount
else:
return self.cursor.rowcount
class MySQLCompiler_mysqldb(MySQLCompiler):
pass
class MySQLIdentifierPreparer_mysqldb(MySQLIdentifierPreparer):
pass
class MySQLDialect_mysqldb(MySQLDialect):
driver = "mysqldb"
supports_unicode_statements = True
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
supports_native_decimal = True
default_paramstyle = "format"
execution_ctx_cls = MySQLExecutionContext_mysqldb
statement_compiler = MySQLCompiler_mysqldb
preparer = MySQLIdentifierPreparer_mysqldb
def __init__(self, server_side_cursors=False, **kwargs):
super(MySQLDialect_mysqldb, self).__init__(**kwargs)
self.server_side_cursors = server_side_cursors
self._mysql_dbapi_version = (
self._parse_dbapi_version(self.dbapi.__version__)
if self.dbapi is not None and hasattr(self.dbapi, "__version__")
else (0, 0, 0)
)
def _parse_dbapi_version(self, version):
m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", version)
if m:
return tuple(int(x) for x in m.group(1, 2, 3) if x is not None)
else:
return (0, 0, 0)
@util.langhelpers.memoized_property
def supports_server_side_cursors(self):
try:
cursors = __import__("MySQLdb.cursors").cursors
self._sscursor = cursors.SSCursor
return True
except (ImportError, AttributeError):
return False
@classmethod
def dbapi(cls):
return __import__("MySQLdb")
def on_connect(self):
super_ = super(MySQLDialect_mysqldb, self).on_connect()
def on_connect(conn):
if super_ is not None:
super_(conn)
charset_name = conn.character_set_name()
if charset_name is not None:
cursor = conn.cursor()
cursor.execute("SET NAMES %s" % charset_name)
cursor.close()
return on_connect
def do_ping(self, dbapi_connection):
try:
dbapi_connection.ping(False)
except self.dbapi.Error as err:
if self.is_disconnect(err, dbapi_connection, None):
return False
else:
raise
else:
return True
def do_executemany(self, cursor, statement, parameters, context=None):
rowcount = cursor.executemany(statement, parameters)
if context is not None:
context._rowcount = rowcount
def _check_unicode_returns(self, connection):
# work around issue fixed in
# https://github.com/farcepest/MySQLdb1/commit/cd44524fef63bd3fcb71947392326e9742d520e8
# specific issue w/ the utf8mb4_bin collation and unicode returns
has_utf8mb4_bin = self.server_version_info > (
5,
) and connection.scalar(
"show collation where %s = 'utf8mb4' and %s = 'utf8mb4_bin'"
% (
self.identifier_preparer.quote("Charset"),
self.identifier_preparer.quote("Collation"),
)
)
if has_utf8mb4_bin:
additional_tests = [
sql.collate(
sql.cast(
sql.literal_column("'test collated returns'"),
TEXT(charset="utf8mb4"),
),
"utf8mb4_bin",
)
]
else:
additional_tests = []
return super(MySQLDialect_mysqldb, self)._check_unicode_returns(
connection, additional_tests
)
def create_connect_args(self, url):
opts = url.translate_connect_args(
database="db", username="user", password="passwd"
)
opts.update(url.query)
util.coerce_kw_type(opts, "compress", bool)
util.coerce_kw_type(opts, "connect_timeout", int)
util.coerce_kw_type(opts, "read_timeout", int)
util.coerce_kw_type(opts, "write_timeout", int)
util.coerce_kw_type(opts, "client_flag", int)
util.coerce_kw_type(opts, "local_infile", int)
# Note: using either of the below will cause all strings to be
# returned as Unicode, both in raw SQL operations and with column
# types like String and MSString.
util.coerce_kw_type(opts, "use_unicode", bool)
util.coerce_kw_type(opts, "charset", str)
# Rich values 'cursorclass' and 'conv' are not supported via
# query string.
ssl = {}
keys = ["ssl_ca", "ssl_key", "ssl_cert", "ssl_capath", "ssl_cipher"]
for key in keys:
if key in opts:
ssl[key[4:]] = opts[key]
util.coerce_kw_type(ssl, key[4:], str)
del opts[key]
if ssl:
opts["ssl"] = ssl
# FOUND_ROWS must be set in CLIENT_FLAGS to enable
# supports_sane_rowcount.
client_flag = opts.get("client_flag", 0)
if self.dbapi is not None:
try:
CLIENT_FLAGS = __import__(
self.dbapi.__name__ + ".constants.CLIENT"
).constants.CLIENT
client_flag |= CLIENT_FLAGS.FOUND_ROWS
except (AttributeError, ImportError):
self.supports_sane_rowcount = False
opts["client_flag"] = client_flag
return [[], opts]
def _extract_error_code(self, exception):
return exception.args[0]
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
try:
# note: the SQL here would be
# "SHOW VARIABLES LIKE 'character_set%%'"
cset_name = connection.connection.character_set_name
except AttributeError:
util.warn(
"No 'character_set_name' can be detected with "
"this MySQL-Python version; "
"please upgrade to a recent version of MySQL-Python. "
"Assuming latin1."
)
return "latin1"
else:
return cset_name()
_isolation_lookup = set(
[
"SERIALIZABLE",
"READ UNCOMMITTED",
"READ COMMITTED",
"REPEATABLE READ",
"AUTOCOMMIT",
]
)
def _set_isolation_level(self, connection, level):
if level == "AUTOCOMMIT":
connection.autocommit(True)
else:
connection.autocommit(False)
super(MySQLDialect_mysqldb, self)._set_isolation_level(
connection, level
)
dialect = MySQLDialect_mysqldb
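# Illustrative sketch (not part of the dialect): the ssl_* query arguments
# recognized by create_connect_args() above are gathered into the "ssl" dict
# passed to MySQLdb.connect().  Assumes mysqlclient is installed; host,
# credentials and file paths are placeholders.
def _example_mysqldb_engine():  # pragma: no cover - documentation only
    from sqlalchemy import create_engine
    return create_engine(
        "mysql+mysqldb://user:pass@db.example.com/appdb"
        "?charset=utf8mb4&ssl_ca=/etc/ssl/certs/ca.pem"
    )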
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mysql/pymysql.py
|
# mysql/pymysql.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: mysql+pymysql
:name: PyMySQL
:dbapi: pymysql
:connectstring: mysql+pymysql://<username>:<password>@<host>/<dbname>[?<options>]
:url: https://pymysql.readthedocs.io/
Unicode
-------
Please see :ref:`mysql_unicode` for current recommendations on unicode
handling.
MySQL-Python Compatibility
--------------------------
The pymysql DBAPI is a pure Python port of the MySQL-python (MySQLdb) driver,
and targets 100% compatibility. Most behavioral notes for MySQL-python apply
to the pymysql driver as well.
""" # noqa
from .mysqldb import MySQLDialect_mysqldb
from ...util import langhelpers
from ...util import py3k
class MySQLDialect_pymysql(MySQLDialect_mysqldb):
driver = "pymysql"
description_encoding = None
# generally, these two values should be both True
# or both False. PyMySQL unicode tests pass all the way back
# to 0.4 either way. See [ticket:3337]
supports_unicode_statements = True
supports_unicode_binds = True
def __init__(self, server_side_cursors=False, **kwargs):
super(MySQLDialect_pymysql, self).__init__(**kwargs)
self.server_side_cursors = server_side_cursors
@langhelpers.memoized_property
def supports_server_side_cursors(self):
try:
cursors = __import__("pymysql.cursors").cursors
self._sscursor = cursors.SSCursor
return True
except (ImportError, AttributeError):
return False
@classmethod
def dbapi(cls):
return __import__("pymysql")
def is_disconnect(self, e, connection, cursor):
if super(MySQLDialect_pymysql, self).is_disconnect(
e, connection, cursor
):
return True
elif isinstance(e, self.dbapi.Error):
str_e = str(e).lower()
return (
"already closed" in str_e or "connection was killed" in str_e
)
else:
return False
if py3k:
def _extract_error_code(self, exception):
if isinstance(exception.args[0], Exception):
exception = exception.args[0]
return exception.args[0]
dialect = MySQLDialect_pymysql
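# Illustrative sketch (not part of the dialect): streaming a large result set
# with the server-side (SSCursor) support detected above.  Assumes PyMySQL is
# installed and the placeholder database/table are reachable.
def _example_pymysql_streaming():  # pragma: no cover - documentation only
    from sqlalchemy import create_engine
    engine = create_engine("mysql+pymysql://user:pass@localhost/test")
    with engine.connect() as conn:
        result = conn.execution_options(stream_results=True).execute(
            "SELECT * FROM big_table"
        )
        for row in result:
            pass  # rows are fetched incrementally rather than fully buffered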
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mysql/gaerdbms.py
|
# mysql/gaerdbms.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: mysql+gaerdbms
:name: Google Cloud SQL
:dbapi: rdbms
:connectstring: mysql+gaerdbms:///<dbname>?instance=<instancename>
:url: https://developers.google.com/appengine/docs/python/cloud-sql/developers-guide
This dialect is based primarily on the :mod:`.mysql.mysqldb` dialect with
minimal changes.
.. deprecated:: 1.0 This dialect is **no longer necessary** for
Google Cloud SQL; the MySQLdb dialect can be used directly.
Cloud SQL now recommends creating connections via the
mysql dialect using the URL format
``mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename>``
Pooling
-------
Google App Engine connections appear to be randomly recycled,
so the dialect does not pool connections. The :class:`.NullPool`
implementation is installed within the :class:`_engine.Engine` by
default.
""" # noqa
import os
import re
from sqlalchemy.util import warn_deprecated
from .mysqldb import MySQLDialect_mysqldb
from ...pool import NullPool
def _is_dev_environment():
return os.environ.get("SERVER_SOFTWARE", "").startswith("Development/")
class MySQLDialect_gaerdbms(MySQLDialect_mysqldb):
@classmethod
def dbapi(cls):
warn_deprecated(
"Google Cloud SQL now recommends creating connections via the "
"MySQLdb dialect directly, using the URL format "
"mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/"
"<projectid>:<instancename>"
)
# from django:
# http://code.google.com/p/googleappengine/source/
# browse/trunk/python/google/storage/speckle/
# python/django/backend/base.py#118
# see also [ticket:2649]
# see also http://stackoverflow.com/q/14224679/34549
from google.appengine.api import apiproxy_stub_map
if _is_dev_environment():
from google.appengine.api import rdbms_mysqldb
return rdbms_mysqldb
elif apiproxy_stub_map.apiproxy.GetStub("rdbms"):
from google.storage.speckle.python.api import rdbms_apiproxy
return rdbms_apiproxy
else:
from google.storage.speckle.python.api import rdbms_googleapi
return rdbms_googleapi
@classmethod
def get_pool_class(cls, url):
# Cloud SQL connections die at any moment
return NullPool
def create_connect_args(self, url):
opts = url.translate_connect_args()
if not _is_dev_environment():
# 'dsn' and 'instance' are because we are skipping
# the traditional google.api.rdbms wrapper
opts["dsn"] = ""
opts["instance"] = url.query["instance"]
return [], opts
def _extract_error_code(self, exception):
match = re.compile(r"^(\d+)L?:|^\((\d+)L?,").match(str(exception))
# The rdbms api will wrap then re-raise some types of errors
# making this regex return no matches.
code = match.group(1) or match.group(2) if match else None
if code:
return int(code)
dialect = MySQLDialect_gaerdbms
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mysql/dml.py
|
from ... import exc
from ... import util
from ...sql.base import _generative
from ...sql.dml import Insert as StandardInsert
from ...sql.elements import ClauseElement
from ...sql.expression import alias
from ...util.langhelpers import public_factory
__all__ = ("Insert", "insert")
class Insert(StandardInsert):
"""MySQL-specific implementation of INSERT.
Adds methods for MySQL-specific syntaxes such as ON DUPLICATE KEY UPDATE.
The :class:`~.mysql.Insert` object is created using the
:func:`sqlalchemy.dialects.mysql.insert` function.
.. versionadded:: 1.2
"""
@property
def inserted(self):
"""Provide the "inserted" namespace for an ON DUPLICATE KEY UPDATE statement
MySQL's ON DUPLICATE KEY UPDATE clause allows reference to the row
that would be inserted, via a special function called ``VALUES()``.
This attribute provides all columns in this row to be referenceable
such that they will render within a ``VALUES()`` function inside the
ON DUPLICATE KEY UPDATE clause. The attribute is named ``.inserted``
so as not to conflict with the existing
:meth:`_expression.Insert.values` method.
.. seealso::
:ref:`mysql_insert_on_duplicate_key_update` - example of how
to use :attr:`_expression.Insert.inserted`
"""
return self.inserted_alias.columns
@util.memoized_property
def inserted_alias(self):
return alias(self.table, name="inserted")
@_generative
def on_duplicate_key_update(self, *args, **kw):
r"""
Specifies the ON DUPLICATE KEY UPDATE clause.
:param \**kw: Column keys linked to UPDATE values. The
values may be any SQL expression or supported literal Python
values.
.. warning:: This dictionary does **not** take into account
Python-specified default UPDATE values or generation functions,
e.g. those specified using :paramref:`_schema.Column.onupdate`.
These values will not be exercised for an ON DUPLICATE KEY UPDATE
style of UPDATE, unless values are manually specified here.
:param \*args: As an alternative to passing key/value parameters,
a dictionary or list of 2-tuples can be passed as a single positional
argument.
Passing a single dictionary is equivalent to the keyword argument
form::
insert().on_duplicate_key_update({"name": "some name"})
Passing a list of 2-tuples indicates that the parameter assignments
in the UPDATE clause should be ordered as sent, in a manner similar
to that described for the :class:`_expression.Update`
construct overall
in :ref:`updates_order_parameters`::
insert().on_duplicate_key_update(
[("name", "some name"), ("value", "some value")])
.. versionchanged:: 1.3 parameters can be specified as a dictionary
or list of 2-tuples; the latter form provides for parameter
ordering.
.. versionadded:: 1.2
.. seealso::
:ref:`mysql_insert_on_duplicate_key_update`
"""
if args and kw:
raise exc.ArgumentError(
"Can't pass kwargs and positional arguments simultaneously"
)
if args:
if len(args) > 1:
raise exc.ArgumentError(
"Only a single dictionary or list of tuples "
"is accepted positionally."
)
values = args[0]
else:
values = kw
inserted_alias = getattr(self, "inserted_alias", None)
self._post_values_clause = OnDuplicateClause(inserted_alias, values)
return self
insert = public_factory(
Insert, ".dialects.mysql.insert", ".dialects.mysql.Insert"
)
class OnDuplicateClause(ClauseElement):
__visit_name__ = "on_duplicate_key_update"
_parameter_ordering = None
def __init__(self, inserted_alias, update):
self.inserted_alias = inserted_alias
# auto-detect that parameters should be ordered. This is copied from
# Update._process_colparams(), however we don't look for a special flag
# in this case since we are not disambiguating from other use cases as
# we are in Update.values().
if isinstance(update, list) and (
update and isinstance(update[0], tuple)
):
self._parameter_ordering = [key for key, value in update]
update = dict(update)
if not update or not isinstance(update, dict):
raise ValueError("update parameter must be a non-empty dictionary")
self.update = update
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mysql/json.py
|
# mysql/json.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
from ... import types as sqltypes
class JSON(sqltypes.JSON):
"""MySQL JSON type.
MySQL supports JSON as of version 5.7. Note that MariaDB does **not**
support JSON at the time of this writing.
The :class:`.mysql.JSON` type supports persistence of JSON values
as well as the core index operations provided by :class:`_types.JSON`
datatype, by adapting the operations to render the ``JSON_EXTRACT``
function at the database level.
.. versionadded:: 1.1
"""
pass
class _FormatTypeMixin(object):
def _format_value(self, value):
raise NotImplementedError()
def bind_processor(self, dialect):
super_proc = self.string_bind_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
def literal_processor(self, dialect):
super_proc = self.string_literal_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
class JSONIndexType(_FormatTypeMixin, sqltypes.JSON.JSONIndexType):
def _format_value(self, value):
if isinstance(value, int):
value = "$[%s]" % value
else:
value = '$."%s"' % value
return value
class JSONPathType(_FormatTypeMixin, sqltypes.JSON.JSONPathType):
def _format_value(self, value):
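# e.g. a path tuple such as (1, "key") renders as the MySQL JSON path
# string '$[1]."key"' (illustrative example)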
return "$%s" % (
"".join(
[
"[%s]" % elem if isinstance(elem, int) else '."%s"' % elem
for elem in value
]
)
)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mysql/pyodbc.py
|
# mysql/pyodbc.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: mysql+pyodbc
:name: PyODBC
:dbapi: pyodbc
:connectstring: mysql+pyodbc://<username>:<password>@<dsnname>
:url: http://pypi.python.org/pypi/pyodbc/
.. note:: The PyODBC for MySQL dialect is not well supported, and
is subject to unresolved character encoding issues
which exist within the current ODBC drivers available.
(see http://code.google.com/p/pyodbc/issues/detail?id=25).
Other dialects for MySQL are recommended.
Pass through an exact pyodbc connection string::
import urllib
connection_string = (
'DRIVER=MySQL ODBC 8.0 ANSI Driver;'
'SERVER=localhost;'
'PORT=3307;'
'DATABASE=mydb;'
'UID=root;'
'PWD=(whatever);'
'charset=utf8mb4;'
)
params = urllib.parse.quote_plus(connection_string)
connection_uri = "mysql+pyodbc:///?odbc_connect=%s" % params
""" # noqa
import re
from .base import MySQLDialect
from .base import MySQLExecutionContext
from .types import TIME
from ... import util
from ...connectors.pyodbc import PyODBCConnector
from ...sql.sqltypes import Time
class _pyodbcTIME(TIME):
def result_processor(self, dialect, coltype):
def process(value):
# pyodbc returns a datetime.time object; no need to convert
return value
return process
class MySQLExecutionContext_pyodbc(MySQLExecutionContext):
def get_lastrowid(self):
cursor = self.create_cursor()
cursor.execute("SELECT LAST_INSERT_ID()")
lastrowid = cursor.fetchone()[0]
cursor.close()
return lastrowid
class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect):
colspecs = util.update_copy(MySQLDialect.colspecs, {Time: _pyodbcTIME})
supports_unicode_statements = False
execution_ctx_cls = MySQLExecutionContext_pyodbc
pyodbc_driver_name = "MySQL"
def __init__(self, **kw):
# deal with http://code.google.com/p/pyodbc/issues/detail?id=25
kw.setdefault("convert_unicode", True)
super(MySQLDialect_pyodbc, self).__init__(**kw)
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
# Prefer 'character_set_results' for the current connection over the
# value in the driver. SET NAMES or individual variable SETs will
# change the charset without updating the driver's view of the world.
#
# If it's decided that issuing that sort of SQL leaves you SOL, then
# this can prefer the driver value.
rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
opts = {row[0]: row[1] for row in self._compat_fetchall(rs)}
for key in ("character_set_connection", "character_set"):
if opts.get(key, None):
return opts[key]
util.warn(
"Could not detect the connection character set. "
"Assuming latin1."
)
return "latin1"
def _extract_error_code(self, exception):
m = re.compile(r"\((\d+)\)").search(str(exception.args))
c = m.group(1)
if c:
return int(c)
else:
return None
dialect = MySQLDialect_pyodbc
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mysql/base.py
|
# mysql/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: mysql
:name: MySQL
Supported Versions and Features
-------------------------------
SQLAlchemy supports MySQL starting with version 4.1 through modern releases.
However, no heroic measures are taken to work around major missing
SQL features - if your server version does not support sub-selects, for
example, they won't work in SQLAlchemy either.
See the official MySQL documentation for detailed information about features
supported in any given server release.
.. _mysql_connection_timeouts:
Connection Timeouts and Disconnects
-----------------------------------
MySQL features an automatic connection close behavior, for connections that
have been idle for a fixed period of time, defaulting to eight hours.
To circumvent this issue, use
the :paramref:`_sa.create_engine.pool_recycle` option which ensures that
a connection will be discarded and replaced with a new one if it has been
present in the pool for a fixed number of seconds::
engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)
For more comprehensive disconnect detection of pooled connections, including
accommodation of server restarts and network issues, a pre-ping approach may
be employed. See :ref:`pool_disconnects` for current approaches.
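For example, a pessimistic "pre-ping" sketch using the
:paramref:`_sa.create_engine.pool_pre_ping` flag, available as of SQLAlchemy
1.2::
engine = create_engine("mysql+mysqldb://...", pool_pre_ping=True)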
.. seealso::
:ref:`pool_disconnects` - Background on several techniques for dealing
with timed out connections as well as database restarts.
.. _mysql_storage_engines:
CREATE TABLE arguments including Storage Engines
------------------------------------------------
MySQL's CREATE TABLE syntax includes a wide array of special options,
including ``ENGINE``, ``CHARSET``, ``MAX_ROWS``, ``ROW_FORMAT``,
``INSERT_METHOD``, and many more.
To accommodate the rendering of these arguments, specify the form
``mysql_argument_name="value"``. For example, to specify a table with
``ENGINE`` of ``InnoDB``, ``CHARSET`` of ``utf8mb4``, and ``KEY_BLOCK_SIZE``
of ``1024``::
Table('mytable', metadata,
Column('data', String(32)),
mysql_engine='InnoDB',
mysql_charset='utf8mb4',
mysql_key_block_size="1024"
)
The MySQL dialect will normally transfer any keyword specified as
``mysql_keyword_name`` to be rendered as ``KEYWORD_NAME`` in the
``CREATE TABLE`` statement. A handful of these names will render with a space
instead of an underscore; to support this, the MySQL dialect has awareness of
these particular names, which include ``DATA DIRECTORY``
(e.g. ``mysql_data_directory``), ``CHARACTER SET`` (e.g.
``mysql_character_set``) and ``INDEX DIRECTORY`` (e.g.
``mysql_index_directory``).
The most common argument is ``mysql_engine``, which refers to the storage
engine for the table. Historically, MySQL server installations would default
to ``MyISAM`` for this value, although newer versions may be defaulting
to ``InnoDB``. The ``InnoDB`` engine is typically preferred for its support
of transactions and foreign keys.
A :class:`_schema.Table`
that is created in a MySQL database with a storage engine
of ``MyISAM`` will be essentially non-transactional, meaning any
INSERT/UPDATE/DELETE statement referring to this table will be invoked as
autocommit. It also will have no support for foreign key constraints; while
the ``CREATE TABLE`` statement accepts foreign key options, when using the
``MyISAM`` storage engine these arguments are discarded. Reflecting such a
table will also produce no foreign key constraint information.
For fully atomic transactions as well as support for foreign key
constraints, all participating ``CREATE TABLE`` statements must specify a
transactional engine, which in the vast majority of cases is ``InnoDB``.
.. seealso::
`The InnoDB Storage Engine
<http://dev.mysql.com/doc/refman/5.0/en/innodb-storage-engine.html>`_ -
on the MySQL website.
Case Sensitivity and Table Reflection
-------------------------------------
MySQL has inconsistent support for case-sensitive identifier
names, basing support on specific details of the underlying
operating system. However, it has been observed that no matter
what case sensitivity behavior is present, the names of tables in
foreign key declarations are *always* received from the database
as all-lower case, making it impossible to accurately reflect a
schema where inter-related tables use mixed-case identifier names.
Therefore it is strongly advised that table names be declared as
all lower case both within SQLAlchemy as well as on the MySQL
database itself, especially if database reflection features are
to be used.
.. _mysql_isolation_level:
Transaction Isolation Level
---------------------------
All MySQL dialects support setting of transaction isolation level both via a
dialect-specific parameter :paramref:`_sa.create_engine.isolation_level`
accepted
by :func:`_sa.create_engine`, as well as the
:paramref:`.Connection.execution_options.isolation_level` argument as passed to
:meth:`_engine.Connection.execution_options`.
This feature works by issuing the
command ``SET SESSION TRANSACTION ISOLATION LEVEL <level>`` for each new
connection. For the special AUTOCOMMIT isolation level, DBAPI-specific
techniques are used.
To set isolation level using :func:`_sa.create_engine`::
engine = create_engine(
"mysql://scott:tiger@localhost/test",
isolation_level="READ UNCOMMITTED"
)
To set using per-connection execution options::
connection = engine.connect()
connection = connection.execution_options(
isolation_level="READ COMMITTED"
)
Valid values for ``isolation_level`` include:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT``
The special ``AUTOCOMMIT`` value makes use of the various "autocommit"
attributes provided by specific DBAPIs, and is currently supported by
MySQLdb, MySQL-Client, MySQL-Connector Python, and PyMySQL. Using it,
the MySQL connection will return true for the value of
``SELECT @@autocommit;``.
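A short sketch of selecting AUTOCOMMIT for an individual connection
(``some_statement`` is illustrative)::
connection = engine.connect()
connection = connection.execution_options(isolation_level="AUTOCOMMIT")
connection.execute(some_statement)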
.. versionadded:: 1.1 - added support for the AUTOCOMMIT isolation level.
AUTO_INCREMENT Behavior
-----------------------
When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on
the first :class:`.Integer` primary key column which is not marked as a
foreign key::
>>> t = Table('mytable', metadata,
... Column('mytable_id', Integer, primary_key=True)
... )
>>> t.create()
CREATE TABLE mytable (
mytable_id INTEGER NOT NULL AUTO_INCREMENT,
PRIMARY KEY (mytable_id)
)
You can disable this behavior by passing ``False`` to the
:paramref:`_schema.Column.autoincrement` argument of :class:`_schema.Column`.
This flag
can also be used to enable auto-increment on a secondary column in a
multi-column key for some storage engines::
Table('mytable', metadata,
Column('gid', Integer, primary_key=True, autoincrement=False),
Column('id', Integer, primary_key=True)
)
.. _mysql_ss_cursors:
Server Side Cursors
-------------------
Server-side cursor support is available for the MySQLdb and PyMySQL dialects.
From a MySQL point of view this means that the ``MySQLdb.cursors.SSCursor`` or
``pymysql.cursors.SSCursor`` class is used when building up the cursor which
will receive results. The most typical way of invoking this feature is via the
:paramref:`.Connection.execution_options.stream_results` connection execution
option. Server side cursors can also be enabled for all SELECT statements
unconditionally by passing ``server_side_cursors=True`` to
:func:`_sa.create_engine`.
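A brief sketch of the per-statement execution option form (``large_table``
and ``process`` are illustrative)::
conn = engine.connect()
result = conn.execution_options(stream_results=True).execute(
    large_table.select())
for row in result:
    process(row)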
.. versionadded:: 1.1.4 - added server-side cursor support.
.. _mysql_unicode:
Unicode
-------
Charset Selection
~~~~~~~~~~~~~~~~~
Most MySQL DBAPIs offer the option to set the client character set for
a connection. This is typically delivered using the ``charset`` parameter
in the URL, such as::
e = create_engine(
"mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4")
This charset is the **client character set** for the connection. Some
MySQL DBAPIs will default this to a value such as ``latin1``, and some
will make use of the ``default-character-set`` setting in the ``my.cnf``
file as well. Documentation for the DBAPI in use should be consulted
for specific behavior.
The encoding used for Unicode has traditionally been ``'utf8'``. However,
for MySQL versions 5.5.3 on forward, a new MySQL-specific encoding
``'utf8mb4'`` has been introduced, and as of MySQL 8.0 a warning is emitted
by the server if plain ``utf8`` is specified within any server-side
directives, with ``utf8mb3`` used in its place. The rationale for this new encoding
is due to the fact that MySQL's legacy utf-8 encoding only supports
codepoints up to three bytes instead of four. Therefore,
when communicating with a MySQL database
that includes codepoints more than three bytes in size,
this new charset is preferred, if supported by both the database as well
as the client DBAPI, as in::
e = create_engine(
"mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4")
All modern DBAPIs should support the ``utf8mb4`` charset.
In order to use ``utf8mb4`` encoding for a schema that was created with legacy
``utf8``, changes to the MySQL schema and/or server configuration may be
required.
.. seealso::
`The utf8mb4 Character Set \
<http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html>`_ - \
in the MySQL documentation
.. _mysql_binary_introducer:
Dealing with Binary Data Warnings and Unicode
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MySQL versions 5.6, 5.7 and later (not MariaDB at the time of this writing) now
emit a warning when attempting to pass binary data to the database, while a
character set encoding is also in place, when the binary data itself is not
valid for that encoding::
default.py:509: Warning: (1300, "Invalid utf8mb4 character string:
'F9876A'")
cursor.execute(statement, parameters)
This warning is due to the fact that the MySQL client library is attempting to
interpret the binary string as a unicode object even if a datatype such
as :class:`.LargeBinary` is in use. To resolve this, the SQL statement requires
a binary "character set introducer" be present before any non-NULL value
that renders like this::
INSERT INTO table (data) VALUES (_binary %s)
These character set introducers are provided by the DBAPI driver, assuming the
use of mysqlclient or PyMySQL (both of which are recommended). Add the query
string parameter ``binary_prefix=true`` to the URL to repair this warning::
# mysqlclient
engine = create_engine(
"mysql+mysqldb://scott:tiger@localhost/test?charset=utf8mb4&binary_prefix=true")
# PyMySQL
engine = create_engine(
"mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4&binary_prefix=true")
The ``binary_prefix`` flag may or may not be supported by other MySQL drivers.
SQLAlchemy itself cannot render this ``_binary`` prefix reliably, as it does
not work with the NULL value, which is valid to be sent as a bound parameter.
As the MySQL driver renders parameters directly into the SQL string, it's the
most efficient place for this additional keyword to be passed.
.. seealso::
`Character set introducers <https://dev.mysql.com/doc/refman/5.7/en/charset-introducer.html>`_ - on the MySQL website
ANSI Quoting Style
------------------
MySQL features two varieties of identifier "quoting style", one using
backticks and the other using quotes, e.g. ```some_identifier``` vs.
``"some_identifier"``. All MySQL dialects detect which version
is in use by checking the value of ``sql_mode`` when a connection is first
established with a particular :class:`_engine.Engine`.
This quoting style comes
into play when rendering table and column names as well as when reflecting
existing database structures. The detection is entirely automatic and
no special configuration is needed to use either quoting style.
MySQL SQL Extensions
--------------------
Many of the MySQL SQL extensions are handled through SQLAlchemy's generic
function and operator support::
table.select(table.c.password==func.md5('plaintext'))
table.select(table.c.username.op('regexp')('^[a-d]'))
And of course any valid MySQL statement can be executed as a string as well.
Some limited direct support for MySQL extensions to SQL is currently
available.
* INSERT..ON DUPLICATE KEY UPDATE: See
:ref:`mysql_insert_on_duplicate_key_update`
* SELECT pragma, use :meth:`_expression.Select.prefix_with` and
:meth:`_query.Query.prefix_with`::
select(...).prefix_with(['HIGH_PRIORITY', 'SQL_SMALL_RESULT'])
* UPDATE with LIMIT::
update(..., mysql_limit=10)
* optimizer hints, use :meth:`_expression.Select.prefix_with` and
:meth:`_query.Query.prefix_with`::
select(...).prefix_with("/*+ NO_RANGE_OPTIMIZATION(t4 PRIMARY) */")
* index hints, use :meth:`_expression.Select.with_hint` and
:meth:`_query.Query.with_hint`::
select(...).with_hint(some_table, "USE INDEX xyz")
.. _mysql_insert_on_duplicate_key_update:
INSERT...ON DUPLICATE KEY UPDATE (Upsert)
------------------------------------------
MySQL allows "upserts" (update or insert)
of rows into a table via the ``ON DUPLICATE KEY UPDATE`` clause of the
``INSERT`` statement. A candidate row will only be inserted if that row does
not match an existing primary or unique key in the table; otherwise, an UPDATE
will be performed. The statement allows for separate specification of the
values to INSERT versus the values for UPDATE.
SQLAlchemy provides ``ON DUPLICATE KEY UPDATE`` support via the MySQL-specific
:func:`.mysql.insert()` function, which provides
the generative method :meth:`~.mysql.Insert.on_duplicate_key_update`::
from sqlalchemy.dialects.mysql import insert
insert_stmt = insert(my_table).values(
id='some_existing_id',
data='inserted value')
on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
data=insert_stmt.inserted.data,
status='U'
)
conn.execute(on_duplicate_key_stmt)
Unlike PostgreSQL's "ON CONFLICT" phrase, the "ON DUPLICATE KEY UPDATE"
phrase will always match on any primary key or unique key, and will always
perform an UPDATE if there's a match; there are no options for it to raise
an error or to skip performing an UPDATE.
``ON DUPLICATE KEY UPDATE`` is used to perform an update of the already
existing row, using any combination of new values as well as values
from the proposed insertion. These values are normally specified using
keyword arguments passed to the
:meth:`~.mysql.Insert.on_duplicate_key_update` method, given column key
values (usually the name of the column, unless it specifies
:paramref:`_schema.Column.key`) as keys and literal or SQL expressions
as values::
on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
data="some data",
updated_at=func.current_timestamp(),
)
In a manner similar to that of :meth:`.UpdateBase.values`, other parameter
forms are accepted, including a single dictionary::
on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
{"data": "some data", "updated_at": func.current_timestamp()},
)
as well as a list of 2-tuples, which will automatically provide
a parameter-ordered UPDATE statement in a manner similar to that described
at :ref:`updates_order_parameters`. Unlike the :class:`_expression.Update`
object,
no special flag is needed to specify the intent since the argument form in
this context is unambiguous::
on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
[
("data", "some data"),
("updated_at", func.current_timestamp()),
],
)
.. versionchanged:: 1.3 support for parameter-ordered UPDATE clause within
MySQL ON DUPLICATE KEY UPDATE
.. warning::
The :meth:`_expression.Insert.on_duplicate_key_update`
method does **not** take into
account Python-side default UPDATE values or generation functions, e.g.
those specified using :paramref:`_schema.Column.onupdate`.
These values will not be exercised for an ON DUPLICATE KEY style of UPDATE,
unless they are manually specified explicitly in the parameters.
In order to refer to the proposed insertion row, the special alias
:attr:`~.mysql.Insert.inserted` is available as an attribute on
the :class:`.mysql.Insert` object; this object is a
:class:`_expression.ColumnCollection` which contains all columns of the target
table::
from sqlalchemy.dialects.mysql import insert
stmt = insert(my_table).values(
id='some_id',
data='inserted value',
author='jlh')
do_update_stmt = stmt.on_duplicate_key_update(
data="updated value",
author=stmt.inserted.author
)
conn.execute(do_update_stmt)
When rendered, the "inserted" namespace will produce the expression
``VALUES(<columnname>)``.
.. versionadded:: 1.2 Added support for MySQL ON DUPLICATE KEY UPDATE clause
rowcount Support
----------------
SQLAlchemy standardizes the DBAPI ``cursor.rowcount`` attribute to be the
usual definition of "number of rows matched by an UPDATE or DELETE statement".
This is in contradiction to the default setting on most MySQL DBAPI drivers,
which is "number of rows actually modified/deleted". For this reason, the
SQLAlchemy MySQL dialects always add the ``constants.CLIENT.FOUND_ROWS``
flag, or whatever is equivalent for the target dialect, upon connection.
This setting is currently hardcoded.
.. seealso::
:attr:`_engine.ResultProxy.rowcount`
CAST Support
------------
MySQL documents the CAST operator as available in version 4.0.2. When using
the SQLAlchemy :func:`.cast` function, SQLAlchemy
will not render the CAST token on MySQL before this version, based on server
version detection, instead rendering the internal expression directly.
CAST may still not be desirable on an early MySQL version post-4.0.2, as it
didn't add all datatype support until 4.1.1. If your application falls into
this narrow area, the behavior of CAST can be controlled using the
:ref:`sqlalchemy.ext.compiler_toplevel` system, as per the recipe below::
from sqlalchemy.sql.expression import Cast
from sqlalchemy.ext.compiler import compiles
@compiles(Cast, 'mysql')
def _check_mysql_version(element, compiler, **kw):
if compiler.dialect.server_version_info < (4, 1, 0):
return compiler.process(element.clause, **kw)
else:
return compiler.visit_cast(element, **kw)
The above function, which only needs to be declared once
within an application, overrides the compilation of the
:func:`.cast` construct to check for version 4.1.0 before
fully rendering CAST; else the internal element of the
construct is rendered directly.
.. _mysql_indexes:
MySQL Specific Index Options
----------------------------
MySQL-specific extensions to the :class:`.Index` construct are available.
Index Length
~~~~~~~~~~~~~
MySQL provides an option to create index entries with a certain length, where
"length" refers to the number of characters or bytes in each value which will
become part of the index. SQLAlchemy provides this feature via the
``mysql_length`` parameter::
Index('my_index', my_table.c.data, mysql_length=10)
Index('a_b_idx', my_table.c.a, my_table.c.b, mysql_length={'a': 4,
'b': 9})
Prefix lengths are given in characters for nonbinary string types and in bytes
for binary string types. The value passed to the keyword argument *must* be
either an integer (and, thus, specify the same prefix length value for all
columns of the index) or a dict in which keys are column names and values are
prefix length values for corresponding columns. MySQL only allows a length for
an index column if it is of type CHAR, VARCHAR, TEXT, BINARY, VARBINARY, or
BLOB.
Index Prefixes
~~~~~~~~~~~~~~
MySQL storage engines permit you to specify an index prefix when creating
an index. SQLAlchemy provides this feature via the
``mysql_prefix`` parameter on :class:`.Index`::
Index('my_index', my_table.c.data, mysql_prefix='FULLTEXT')
The value passed to the keyword argument will be simply passed through to the
underlying CREATE INDEX, so it *must* be a valid index prefix for your MySQL
storage engine.
.. versionadded:: 1.1.5
.. seealso::
`CREATE INDEX <http://dev.mysql.com/doc/refman/5.0/en/create-index.html>`_ - MySQL documentation
Index Types
~~~~~~~~~~~~~
Some MySQL storage engines permit you to specify an index type when creating
an index or primary key constraint. SQLAlchemy provides this feature via the
``mysql_using`` parameter on :class:`.Index`::
Index('my_index', my_table.c.data, mysql_using='hash')
As well as the ``mysql_using`` parameter on :class:`.PrimaryKeyConstraint`::
PrimaryKeyConstraint("data", mysql_using='hash')
The value passed to the keyword argument will be simply passed through to the
underlying CREATE INDEX or PRIMARY KEY clause, so it *must* be a valid index
type for your MySQL storage engine.
More information can be found at:
http://dev.mysql.com/doc/refman/5.0/en/create-index.html
http://dev.mysql.com/doc/refman/5.0/en/create-table.html
Index Parsers
~~~~~~~~~~~~~
CREATE FULLTEXT INDEX in MySQL also supports a "WITH PARSER" option. This
is available using the keyword argument ``mysql_with_parser``::
Index(
'my_index', my_table.c.data,
mysql_prefix='FULLTEXT', mysql_with_parser="ngram")
.. versionadded:: 1.3
.. _mysql_foreign_keys:
MySQL Foreign Keys
------------------
MySQL's behavior regarding foreign keys has some important caveats.
Foreign Key Arguments to Avoid
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MySQL does not support the foreign key arguments "DEFERRABLE", "INITIALLY",
or "MATCH". Using the ``deferrable`` or ``initially`` keyword argument with
:class:`_schema.ForeignKeyConstraint` or :class:`_schema.ForeignKey`
will have the effect of
these keywords being rendered in a DDL expression, which will then raise an
error on MySQL. In order to use these keywords on a foreign key while having
them ignored on a MySQL backend, use a custom compile rule::
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.schema import ForeignKeyConstraint
@compiles(ForeignKeyConstraint, "mysql")
def process(element, compiler, **kw):
element.deferrable = element.initially = None
return compiler.visit_foreign_key_constraint(element, **kw)
.. versionchanged:: 0.9.0 - the MySQL backend no longer silently ignores
the ``deferrable`` or ``initially`` keyword arguments of
:class:`_schema.ForeignKeyConstraint` and :class:`_schema.ForeignKey`.
The "MATCH" keyword is in fact more insidious, and is explicitly disallowed
by SQLAlchemy in conjunction with the MySQL backend. This argument is
silently ignored by MySQL, but in addition has the effect of ON UPDATE and ON
DELETE options also being ignored by the backend. Therefore MATCH should
never be used with the MySQL backend; as is the case with DEFERRABLE and
INITIALLY, custom compilation rules can be used to correct a MySQL
ForeignKeyConstraint at DDL definition time.
.. versionadded:: 0.9.0 - the MySQL backend will raise a
:class:`.CompileError` when the ``match`` keyword is used with
:class:`_schema.ForeignKeyConstraint` or :class:`_schema.ForeignKey`.
Reflection of Foreign Key Constraints
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Not all MySQL storage engines support foreign keys. When using the
very common ``MyISAM`` MySQL storage engine, the information loaded by table
reflection will not include foreign keys. For these tables, you may supply a
:class:`~sqlalchemy.ForeignKeyConstraint` at reflection time::
Table('mytable', metadata,
ForeignKeyConstraint(['other_id'], ['othertable.other_id']),
autoload=True
)
.. seealso::
:ref:`mysql_storage_engines`
.. _mysql_unique_constraints:
MySQL Unique Constraints and Reflection
---------------------------------------
SQLAlchemy supports both the :class:`.Index` construct with the
flag ``unique=True``, indicating a UNIQUE index, as well as the
:class:`.UniqueConstraint` construct, representing a UNIQUE constraint.
Both objects/syntaxes are supported by MySQL when emitting DDL to create
these constraints. However, MySQL does not have a unique constraint
construct that is separate from a unique index; that is, the "UNIQUE"
constraint on MySQL is equivalent to creating a "UNIQUE INDEX".
When reflecting these constructs, the
:meth:`_reflection.Inspector.get_indexes`
and the :meth:`_reflection.Inspector.get_unique_constraints`
methods will **both**
return an entry for a UNIQUE index in MySQL. However, when performing
full table reflection using ``Table(..., autoload=True)``,
the :class:`.UniqueConstraint` construct is
**not** part of the fully reflected :class:`_schema.Table` construct under any
circumstances; this construct is always represented by a :class:`.Index`
with the ``unique=True`` setting present in the :attr:`_schema.Table.indexes`
collection.
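A short inspection sketch illustrating this duplicated reporting (``engine``
is assumed to refer to a MySQL database containing a table ``mytable`` with
a UNIQUE index)::
from sqlalchemy import inspect
insp = inspect(engine)
insp.get_indexes("mytable")             # the UNIQUE index is reported here
insp.get_unique_constraints("mytable")  # ...and is reported here as well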
.. _mysql_timestamp_null:
TIMESTAMP Columns and NULL
--------------------------
MySQL historically enforces that a column which specifies the
TIMESTAMP datatype implicitly includes a default value of
CURRENT_TIMESTAMP, even though this is not stated, and additionally
sets the column as NOT NULL, the opposite behavior vs. that of all
other datatypes::
mysql> CREATE TABLE ts_test (
-> a INTEGER,
-> b INTEGER NOT NULL,
-> c TIMESTAMP,
-> d TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-> e TIMESTAMP NULL);
Query OK, 0 rows affected (0.03 sec)
mysql> SHOW CREATE TABLE ts_test;
+---------+-----------------------------------------------------
| Table | Create Table
+---------+-----------------------------------------------------
| ts_test | CREATE TABLE `ts_test` (
`a` int(11) DEFAULT NULL,
`b` int(11) NOT NULL,
`c` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`d` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`e` timestamp NULL DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
Above, we see that an INTEGER column defaults to NULL, unless it is specified
with NOT NULL. But when the column is of type TIMESTAMP, an implicit
default of CURRENT_TIMESTAMP is generated which also coerces the column
to be a NOT NULL, even though we did not specify it as such.
This behavior of MySQL can be changed on the MySQL side using the
`explicit_defaults_for_timestamp
<http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html
#sysvar_explicit_defaults_for_timestamp>`_ configuration flag introduced in
MySQL 5.6. With this server setting enabled, TIMESTAMP columns behave like
any other datatype on the MySQL side with regards to defaults and nullability.
However, to accommodate the vast majority of MySQL databases that do not
specify this new flag, SQLAlchemy emits the "NULL" specifier explicitly with
any TIMESTAMP column that does not specify ``nullable=False``. In order to
accommodate newer databases that specify ``explicit_defaults_for_timestamp``,
SQLAlchemy also emits NOT NULL for TIMESTAMP columns that do specify
``nullable=False``. The following example illustrates::
from sqlalchemy import MetaData, Integer, Table, Column, text
from sqlalchemy.dialects.mysql import TIMESTAMP
m = MetaData()
t = Table('ts_test', m,
Column('a', Integer),
Column('b', Integer, nullable=False),
Column('c', TIMESTAMP),
Column('d', TIMESTAMP, nullable=False)
)
from sqlalchemy import create_engine
e = create_engine("mysql://scott:tiger@localhost/test", echo=True)
m.create_all(e)
output::
CREATE TABLE ts_test (
a INTEGER,
b INTEGER NOT NULL,
c TIMESTAMP NULL,
d TIMESTAMP NOT NULL
)
.. versionchanged:: 1.0.0 - SQLAlchemy now renders NULL or NOT NULL in all
cases for TIMESTAMP columns, to accommodate
``explicit_defaults_for_timestamp``. Prior to this version, it will
not render "NOT NULL" for a TIMESTAMP column that is ``nullable=False``.
""" # noqa
from array import array as _array
from collections import defaultdict
import re
import sys
from sqlalchemy import literal_column
from sqlalchemy.sql import visitors
from . import reflection as _reflection
from .enumerated import ENUM
from .enumerated import SET
from .json import JSON
from .json import JSONIndexType
from .json import JSONPathType
from .types import _FloatType
from .types import _IntegerType
from .types import _MatchType
from .types import _NumericType
from .types import _StringType
from .types import BIGINT
from .types import BIT
from .types import CHAR
from .types import DATETIME
from .types import DECIMAL
from .types import DOUBLE
from .types import FLOAT
from .types import INTEGER
from .types import LONGBLOB
from .types import LONGTEXT
from .types import MEDIUMBLOB
from .types import MEDIUMINT
from .types import MEDIUMTEXT
from .types import NCHAR
from .types import NUMERIC
from .types import NVARCHAR
from .types import REAL
from .types import SMALLINT
from .types import TEXT
from .types import TIME
from .types import TIMESTAMP
from .types import TINYBLOB
from .types import TINYINT
from .types import TINYTEXT
from .types import VARCHAR
from .types import YEAR
from ... import exc
from ... import log
from ... import schema as sa_schema
from ... import sql
from ... import types as sqltypes
from ... import util
from ...engine import default
from ...engine import reflection
from ...sql import compiler
from ...sql import elements
from ...sql import util as sql_util
from ...types import BINARY
from ...types import BLOB
from ...types import BOOLEAN
from ...types import DATE
from ...types import VARBINARY
from ...util import topological
RESERVED_WORDS = set(
[
"accessible",
"accessible",
"add",
"admin",
"all",
"alter",
"analyze",
"and",
"array", # 8.0
"as",
"asc",
"asensitive",
"before",
"between",
"bigint",
"binary",
"blob",
"both",
"by",
"call",
"cascade",
"case",
"change",
"char",
"character",
"check",
"collate",
"column",
"columns",
"condition",
"constraint",
"continue",
"convert",
"create",
"cross",
"cume_dist",
"current_date",
"current_time",
"current_timestamp",
"current_user",
"cursor",
"database",
"databases",
"day_hour",
"day_microsecond",
"day_minute",
"day_second",
"dec",
"decimal",
"declare",
"default",
"delayed",
"delete",
"desc",
"describe",
"deterministic",
"distinct",
"distinctrow",
"div",
"double",
"drop",
"dual",
"each",
"else",
"elseif",
"empty",
"enclosed",
"escaped",
"except",
"exists",
"exit",
"explain",
"false",
"fetch",
"fields",
"first_value",
"float",
"float4",
"float8",
"for",
"force",
"foreign",
"from",
"fulltext",
"function",
"general",
"generated",
"get",
"grant",
"group",
"grouping",
"groups",
"having",
"high_priority",
"hour_microsecond",
"hour_minute",
"hour_second",
"if",
"ignore",
"ignore_server_ids",
"in",
"index",
"infile",
"inner",
"inout",
"insensitive",
"insert",
"int",
"int1",
"int2",
"int3",
"int4",
"int8",
"integer",
"interval",
"into",
"io_after_gtids",
"io_before_gtids",
"is",
"iterate",
"join",
"json_table",
"key",
"keys",
"kill",
"last_value",
"leading",
"leave",
"left",
"like",
"limit",
"linear",
"linear",
"lines",
"load",
"localtime",
"localtimestamp",
"lock",
"long",
"longblob",
"longtext",
"loop",
"low_priority",
"master_bind",
"master_heartbeat_period",
"master_ssl_verify_server_cert",
"master_ssl_verify_server_cert",
"match",
"maxvalue",
"mediumblob",
"mediumint",
"mediumtext",
"member", # 8.0
"middleint",
"minute_microsecond",
"minute_second",
"mod",
"modifies",
"natural",
"no_write_to_binlog",
"not",
"nth_value",
"ntile",
"null",
"numeric",
"of",
"on",
"one_shot",
"optimize",
"optimizer_costs",
"option",
"optionally",
"or",
"order",
"out",
"outer",
"outfile",
"over",
"partition",
"percent_rank",
"persist",
"persist_only",
"precision",
"primary",
"privileges",
"procedure",
"purge",
"range",
"range",
"rank",
"read",
"read_only",
"read_only",
"read_write",
"read_write", # 5.1
"reads",
"real",
"recursive",
"references",
"regexp",
"release",
"rename",
"repeat",
"replace",
"require",
"resignal",
"restrict",
"return",
"revoke",
"right",
"rlike",
"role",
"row",
"row_number",
"rows",
"schema",
"schemas",
"second_microsecond",
"select",
"sensitive",
"separator",
"set",
"show",
"signal",
"slow", # 5.5
"smallint",
"soname",
"spatial",
"specific",
"sql",
"sql_after_gtids",
"sql_before_gtids", # 5.6
"sql_big_result",
"sql_calc_found_rows",
"sql_small_result",
"sqlexception",
"sqlstate",
"sqlwarning",
"ssl",
"starting",
"stored",
"straight_join",
"system",
"table",
"tables", # 4.1
"terminated",
"then",
"tinyblob",
"tinyint",
"tinytext",
"to",
"trailing",
"trigger",
"true",
"undo",
"union",
"unique",
"unlock",
"unsigned",
"update",
"usage",
"use",
"using",
"utc_date",
"utc_time",
"utc_timestamp",
"values",
"varbinary",
"varchar",
"varcharacter",
"varying",
"virtual", # 5.7
"when",
"where",
"while",
"window", # 8.0
"with",
"write",
"x509",
"xor",
"year_month",
"zerofill", # 5.0
]
)
AUTOCOMMIT_RE = re.compile(
r"\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|LOAD +DATA|REPLACE)",
re.I | re.UNICODE,
)
SET_RE = re.compile(
r"\s*SET\s+(?:(?:GLOBAL|SESSION)\s+)?\w", re.I | re.UNICODE
)
# old names
MSTime = TIME
MSSet = SET
MSEnum = ENUM
MSLongBlob = LONGBLOB
MSMediumBlob = MEDIUMBLOB
MSTinyBlob = TINYBLOB
MSBlob = BLOB
MSBinary = BINARY
MSVarBinary = VARBINARY
MSNChar = NCHAR
MSNVarChar = NVARCHAR
MSChar = CHAR
MSString = VARCHAR
MSLongText = LONGTEXT
MSMediumText = MEDIUMTEXT
MSTinyText = TINYTEXT
MSText = TEXT
MSYear = YEAR
MSTimeStamp = TIMESTAMP
MSBit = BIT
MSSmallInteger = SMALLINT
MSTinyInteger = TINYINT
MSMediumInteger = MEDIUMINT
MSBigInteger = BIGINT
MSNumeric = NUMERIC
MSDecimal = DECIMAL
MSDouble = DOUBLE
MSReal = REAL
MSFloat = FLOAT
MSInteger = INTEGER
colspecs = {
_IntegerType: _IntegerType,
_NumericType: _NumericType,
_FloatType: _FloatType,
sqltypes.Numeric: NUMERIC,
sqltypes.Float: FLOAT,
sqltypes.Time: TIME,
sqltypes.Enum: ENUM,
sqltypes.MatchType: _MatchType,
sqltypes.JSON: JSON,
sqltypes.JSON.JSONIndexType: JSONIndexType,
sqltypes.JSON.JSONPathType: JSONPathType,
}
# Everything 3.23 through 5.1 excepting OpenGIS types.
ischema_names = {
"bigint": BIGINT,
"binary": BINARY,
"bit": BIT,
"blob": BLOB,
"boolean": BOOLEAN,
"char": CHAR,
"date": DATE,
"datetime": DATETIME,
"decimal": DECIMAL,
"double": DOUBLE,
"enum": ENUM,
"fixed": DECIMAL,
"float": FLOAT,
"int": INTEGER,
"integer": INTEGER,
"json": JSON,
"longblob": LONGBLOB,
"longtext": LONGTEXT,
"mediumblob": MEDIUMBLOB,
"mediumint": MEDIUMINT,
"mediumtext": MEDIUMTEXT,
"nchar": NCHAR,
"nvarchar": NVARCHAR,
"numeric": NUMERIC,
"set": SET,
"smallint": SMALLINT,
"text": TEXT,
"time": TIME,
"timestamp": TIMESTAMP,
"tinyblob": TINYBLOB,
"tinyint": TINYINT,
"tinytext": TINYTEXT,
"varbinary": VARBINARY,
"varchar": VARCHAR,
"year": YEAR,
}
class MySQLExecutionContext(default.DefaultExecutionContext):
def should_autocommit_text(self, statement):
return AUTOCOMMIT_RE.match(statement)
def create_server_side_cursor(self):
if self.dialect.supports_server_side_cursors:
return self._dbapi_connection.cursor(self.dialect._sscursor)
else:
raise NotImplementedError()
class MySQLCompiler(compiler.SQLCompiler):
render_table_with_column_in_update_from = True
"""Overridden from base SQLCompiler value"""
extract_map = compiler.SQLCompiler.extract_map.copy()
extract_map.update({"milliseconds": "millisecond"})
def visit_random_func(self, fn, **kw):
return "rand%s" % self.function_argspec(fn)
def visit_sysdate_func(self, fn, **kw):
return "SYSDATE()"
def _render_json_extract_from_binary(self, binary, operator, **kw):
# note we are intentionally calling upon the process() calls in the
# order in which they appear in the SQL String as this is used
# by positional parameter rendering
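# As an illustration, an Integer-typed extraction renders roughly as:
#   CASE JSON_EXTRACT(col, path) WHEN 'null' THEN NULL
#   ELSE CAST(JSON_EXTRACT(col, path) AS SIGNED INTEGER) END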
if binary.type._type_affinity is sqltypes.JSON:
return "JSON_EXTRACT(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
# for non-JSON, MySQL doesn't handle JSON null at all so it has to
# be explicit
case_expression = "CASE JSON_EXTRACT(%s, %s) WHEN 'null' THEN NULL" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
if binary.type._type_affinity is sqltypes.Integer:
type_expression = (
"ELSE CAST(JSON_EXTRACT(%s, %s) AS SIGNED INTEGER)"
% (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
)
elif binary.type._type_affinity is sqltypes.Numeric:
# FLOAT / REAL not added in MySQL until 8.0.17
type_expression = (
"ELSE CAST(JSON_EXTRACT(%s, %s) AS DECIMAL(10, 6))"
% (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
)
elif binary.type._type_affinity is sqltypes.Boolean:
# the NULL handling is particularly weird with boolean, so
# explicitly return true/false constants
type_expression = "WHEN true THEN true ELSE false"
elif binary.type._type_affinity is sqltypes.String:
# this fails with a JSON value that's a four byte unicode
# string. SQLite has the same problem at the moment
type_expression = "ELSE JSON_UNQUOTE(JSON_EXTRACT(%s, %s))" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
else:
# other affinity....this is not expected right now
type_expression = "ELSE JSON_EXTRACT(%s, %s)"
return case_expression + " " + type_expression + " END"
def visit_json_getitem_op_binary(self, binary, operator, **kw):
return self._render_json_extract_from_binary(binary, operator, **kw)
def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
return self._render_json_extract_from_binary(binary, operator, **kw)
def visit_on_duplicate_key_update(self, on_duplicate, **kw):
if on_duplicate._parameter_ordering:
parameter_ordering = [
elements._column_as_key(key)
for key in on_duplicate._parameter_ordering
]
ordered_keys = set(parameter_ordering)
cols = [
self.statement.table.c[key]
for key in parameter_ordering
if key in self.statement.table.c
] + [
c for c in self.statement.table.c if c.key not in ordered_keys
]
else:
cols = self.statement.table.c
clauses = []
# traverses through all table columns to preserve table column order
for column in (col for col in cols if col.key in on_duplicate.update):
val = on_duplicate.update[column.key]
if elements._is_literal(val):
val = elements.BindParameter(None, val, type_=column.type)
value_text = self.process(val.self_group(), use_schema=False)
else:
def replace(obj):
if (
isinstance(obj, elements.BindParameter)
and obj.type._isnull
):
obj = obj._clone()
obj.type = column.type
return obj
elif (
isinstance(obj, elements.ColumnClause)
and obj.table is on_duplicate.inserted_alias
):
obj = literal_column(
"VALUES(" + self.preparer.quote(column.name) + ")"
)
return obj
else:
# element is not replaced
return None
val = visitors.replacement_traverse(val, {}, replace)
value_text = self.process(val.self_group(), use_schema=False)
name_text = self.preparer.quote(column.name)
clauses.append("%s = %s" % (name_text, value_text))
non_matching = set(on_duplicate.update) - set(c.key for c in cols)
if non_matching:
util.warn(
"Additional column names not matching "
"any column keys in table '%s': %s"
% (
self.statement.table.name,
(", ".join("'%s'" % c for c in non_matching)),
)
)
return "ON DUPLICATE KEY UPDATE " + ", ".join(clauses)
def visit_concat_op_binary(self, binary, operator, **kw):
return "concat(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_match_op_binary(self, binary, operator, **kw):
return "MATCH (%s) AGAINST (%s IN BOOLEAN MODE)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def get_from_hint_text(self, table, text):
return text
def visit_typeclause(self, typeclause, type_=None, **kw):
if type_ is None:
type_ = typeclause.type.dialect_impl(self.dialect)
if isinstance(type_, sqltypes.TypeDecorator):
return self.visit_typeclause(typeclause, type_.impl, **kw)
elif isinstance(type_, sqltypes.Integer):
if getattr(type_, "unsigned", False):
return "UNSIGNED INTEGER"
else:
return "SIGNED INTEGER"
elif isinstance(type_, sqltypes.TIMESTAMP):
return "DATETIME"
elif isinstance(
type_,
(
sqltypes.DECIMAL,
sqltypes.DateTime,
sqltypes.Date,
sqltypes.Time,
),
):
return self.dialect.type_compiler.process(type_)
elif isinstance(type_, sqltypes.String) and not isinstance(
type_, (ENUM, SET)
):
adapted = CHAR._adapt_string_for_cast(type_)
return self.dialect.type_compiler.process(adapted)
elif isinstance(type_, sqltypes._Binary):
return "BINARY"
elif isinstance(type_, sqltypes.JSON):
return "JSON"
elif isinstance(type_, sqltypes.NUMERIC):
return self.dialect.type_compiler.process(type_).replace(
"NUMERIC", "DECIMAL"
)
else:
return None
def visit_cast(self, cast, **kw):
# No cast until 4, no decimals until 5.
if not self.dialect._supports_cast:
util.warn(
"Current MySQL version does not support "
"CAST; the CAST will be skipped."
)
return self.process(cast.clause.self_group(), **kw)
type_ = self.process(cast.typeclause)
if type_ is None:
util.warn(
"Datatype %s does not support CAST on MySQL; "
"the CAST will be skipped."
% self.dialect.type_compiler.process(cast.typeclause.type)
)
return self.process(cast.clause.self_group(), **kw)
return "CAST(%s AS %s)" % (self.process(cast.clause, **kw), type_)
def render_literal_value(self, value, type_):
value = super(MySQLCompiler, self).render_literal_value(value, type_)
if self.dialect._backslash_escapes:
value = value.replace("\\", "\\\\")
return value
# override native_boolean=False behavior here, as
# MySQL still supports native boolean
def visit_true(self, element, **kw):
return "true"
def visit_false(self, element, **kw):
return "false"
def get_select_precolumns(self, select, **kw):
"""Add special MySQL keywords in place of DISTINCT.
.. note::
this usage is deprecated. :meth:`_expression.Select.prefix_with`
should be used for special keywords at the start
of a SELECT.
"""
if isinstance(select._distinct, util.string_types):
return select._distinct.upper() + " "
elif select._distinct:
return "DISTINCT "
else:
return ""
def visit_join(self, join, asfrom=False, **kwargs):
if join.full:
join_type = " FULL OUTER JOIN "
elif join.isouter:
join_type = " LEFT OUTER JOIN "
else:
join_type = " INNER JOIN "
return "".join(
(
self.process(join.left, asfrom=True, **kwargs),
join_type,
self.process(join.right, asfrom=True, **kwargs),
" ON ",
self.process(join.onclause, **kwargs),
)
)
def for_update_clause(self, select, **kw):
if select._for_update_arg.read:
tmp = " LOCK IN SHARE MODE"
else:
tmp = " FOR UPDATE"
if select._for_update_arg.of and self.dialect.supports_for_update_of:
tables = util.OrderedSet()
for c in select._for_update_arg.of:
tables.update(sql_util.surface_selectables_only(c))
tmp += " OF " + ", ".join(
self.process(table, ashint=True, use_schema=False, **kw)
for table in tables
)
if select._for_update_arg.nowait:
tmp += " NOWAIT"
if select._for_update_arg.skip_locked and self.dialect._is_mysql:
tmp += " SKIP LOCKED"
return tmp
def limit_clause(self, select, **kw):
# MySQL supports:
# LIMIT <limit>
# LIMIT <offset>, <limit>
# and in server versions > 3.3:
# LIMIT <limit> OFFSET <offset>
# The latter is more readable for offsets but we're stuck with the
# former until we can refine dialects by server revision.
limit_clause, offset_clause = (
select._limit_clause,
select._offset_clause,
)
if limit_clause is None and offset_clause is None:
return ""
elif offset_clause is not None:
# As suggested by the MySQL docs, need to apply an
# artificial limit if one wasn't provided
# http://dev.mysql.com/doc/refman/5.0/en/select.html
if limit_clause is None:
# hardwire the upper limit. Currently
# needed by OurSQL with Python 3
# (https://bugs.launchpad.net/oursql/+bug/686232),
# but also is consistent with the usage of the upper
# bound as part of MySQL's "syntax" for OFFSET with
# no LIMIT
return " \n LIMIT %s, %s" % (
self.process(offset_clause, **kw),
"18446744073709551615",
)
else:
return " \n LIMIT %s, %s" % (
self.process(offset_clause, **kw),
self.process(limit_clause, **kw),
)
else:
# No offset provided, so just use the limit
return " \n LIMIT %s" % (self.process(limit_clause, **kw),)
def update_limit_clause(self, update_stmt):
limit = update_stmt.kwargs.get("%s_limit" % self.dialect.name, None)
if limit:
return "LIMIT %s" % limit
else:
return None
def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw):
return ", ".join(
t._compiler_dispatch(self, asfrom=True, **kw)
for t in [from_table] + list(extra_froms)
)
def update_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
return None
def delete_table_clause(self, delete_stmt, from_table, extra_froms):
"""If we have extra froms make sure we render any alias as hint."""
ashint = False
if extra_froms:
ashint = True
return from_table._compiler_dispatch(
self, asfrom=True, iscrud=True, ashint=ashint
)
def delete_extra_from_clause(
self, delete_stmt, from_table, extra_froms, from_hints, **kw
):
"""Render the DELETE .. USING clause specific to MySQL."""
return "USING " + ", ".join(
t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
for t in [from_table] + extra_froms
)
def visit_empty_set_expr(self, element_types):
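# renders e.g. "SELECT _in_0 FROM (SELECT 1 AS _in_0) as _empty_set
# WHERE 1!=1" for a single element type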
return (
"SELECT %(outer)s FROM (SELECT %(inner)s) "
"as _empty_set WHERE 1!=1"
% {
"inner": ", ".join(
"1 AS _in_%s" % idx
for idx, type_ in enumerate(element_types)
),
"outer": ", ".join(
"_in_%s" % idx for idx, type_ in enumerate(element_types)
),
}
)
def visit_is_distinct_from_binary(self, binary, operator, **kw):
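# MySQL's "<=>" is the NULL-safe equality operator, so IS DISTINCT FROM
# is rendered as its negation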
return "NOT (%s <=> %s)" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_isnot_distinct_from_binary(self, binary, operator, **kw):
return "%s <=> %s" % (
self.process(binary.left),
self.process(binary.right),
)
class MySQLDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kw):
"""Builds column DDL."""
colspec = [
self.preparer.format_column(column),
self.dialect.type_compiler.process(
column.type, type_expression=column
),
]
if column.computed is not None:
colspec.append(self.process(column.computed))
is_timestamp = isinstance(
column.type._unwrapped_dialect_impl(self.dialect),
sqltypes.TIMESTAMP,
)
if not column.nullable:
colspec.append("NOT NULL")
# see: http://docs.sqlalchemy.org/en/latest/dialects/
# mysql.html#mysql_timestamp_null
elif column.nullable and is_timestamp:
colspec.append("NULL")
default = self.get_column_default_string(column)
if default is not None:
colspec.append("DEFAULT " + default)
comment = column.comment
if comment is not None:
literal = self.sql_compiler.render_literal_value(
comment, sqltypes.String()
)
colspec.append("COMMENT " + literal)
if (
column.table is not None
and column is column.table._autoincrement_column
and column.server_default is None
):
colspec.append("AUTO_INCREMENT")
return " ".join(colspec)
def post_create_table(self, table):
"""Build table-level CREATE options like ENGINE and COLLATE."""
table_opts = []
opts = dict(
(k[len(self.dialect.name) + 1 :].upper(), v)
for k, v in table.kwargs.items()
if k.startswith("%s_" % self.dialect.name)
)
if table.comment is not None:
opts["COMMENT"] = table.comment
partition_options = [
"PARTITION_BY",
"PARTITIONS",
"SUBPARTITIONS",
"SUBPARTITION_BY",
]
nonpart_options = set(opts).difference(partition_options)
part_options = set(opts).intersection(partition_options)
for opt in topological.sort(
[
("DEFAULT_CHARSET", "COLLATE"),
("DEFAULT_CHARACTER_SET", "COLLATE"),
],
nonpart_options,
):
arg = opts[opt]
if opt in _reflection._options_of_type_string:
arg = self.sql_compiler.render_literal_value(
arg, sqltypes.String()
)
if opt in (
"DATA_DIRECTORY",
"INDEX_DIRECTORY",
"DEFAULT_CHARACTER_SET",
"CHARACTER_SET",
"DEFAULT_CHARSET",
"DEFAULT_COLLATE",
):
opt = opt.replace("_", " ")
joiner = "="
if opt in (
"TABLESPACE",
"DEFAULT CHARACTER SET",
"CHARACTER SET",
"COLLATE",
):
joiner = " "
table_opts.append(joiner.join((opt, arg)))
for opt in topological.sort(
[
("PARTITION_BY", "PARTITIONS"),
("PARTITION_BY", "SUBPARTITION_BY"),
("PARTITION_BY", "SUBPARTITIONS"),
("PARTITIONS", "SUBPARTITIONS"),
("PARTITIONS", "SUBPARTITION_BY"),
("SUBPARTITION_BY", "SUBPARTITIONS"),
],
part_options,
):
arg = opts[opt]
if opt in _reflection._options_of_type_string:
arg = self.sql_compiler.render_literal_value(
arg, sqltypes.String()
)
opt = opt.replace("_", " ")
joiner = " "
table_opts.append(joiner.join((opt, arg)))
return " ".join(table_opts)
def visit_create_index(self, create, **kw):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
table = preparer.format_table(index.table)
columns = [
self.sql_compiler.process(
expr, include_table=False, literal_binds=True
)
for expr in index.expressions
]
name = self._prepared_index_name(index)
text = "CREATE "
if index.unique:
text += "UNIQUE "
index_prefix = index.kwargs.get("mysql_prefix", None)
if index_prefix:
text += index_prefix + " "
text += "INDEX %s ON %s " % (name, table)
length = index.dialect_options["mysql"]["length"]
if length is not None:
if isinstance(length, dict):
# length value can be a (column_name --> integer value)
# mapping specifying the prefix length for each column of the
# index
columns = ", ".join(
"%s(%d)" % (expr, length[col.name])
if col.name in length
else (
"%s(%d)" % (expr, length[expr])
if expr in length
else "%s" % expr
)
for col, expr in zip(index.expressions, columns)
)
else:
# or can be an integer value specifying the same
# prefix length for all columns of the index
columns = ", ".join(
"%s(%d)" % (col, length) for col in columns
)
else:
columns = ", ".join(columns)
text += "(%s)" % columns
parser = index.dialect_options["mysql"]["with_parser"]
if parser is not None:
text += " WITH PARSER %s" % (parser,)
using = index.dialect_options["mysql"]["using"]
if using is not None:
text += " USING %s" % (preparer.quote(using))
return text
def visit_primary_key_constraint(self, constraint):
text = super(MySQLDDLCompiler, self).visit_primary_key_constraint(
constraint
)
using = constraint.dialect_options["mysql"]["using"]
if using:
text += " USING %s" % (self.preparer.quote(using))
return text
def visit_drop_index(self, drop):
index = drop.element
return "\nDROP INDEX %s ON %s" % (
self._prepared_index_name(index, include_schema=False),
self.preparer.format_table(index.table),
)
def visit_drop_constraint(self, drop):
constraint = drop.element
if isinstance(constraint, sa_schema.ForeignKeyConstraint):
qual = "FOREIGN KEY "
const = self.preparer.format_constraint(constraint)
elif isinstance(constraint, sa_schema.PrimaryKeyConstraint):
qual = "PRIMARY KEY "
const = ""
elif isinstance(constraint, sa_schema.UniqueConstraint):
qual = "INDEX "
const = self.preparer.format_constraint(constraint)
elif isinstance(constraint, sa_schema.CheckConstraint):
if self.dialect._is_mariadb:
qual = "CONSTRAINT "
else:
qual = "CHECK "
const = self.preparer.format_constraint(constraint)
else:
qual = ""
const = self.preparer.format_constraint(constraint)
return "ALTER TABLE %s DROP %s%s" % (
self.preparer.format_table(constraint.table),
qual,
const,
)
def define_constraint_match(self, constraint):
if constraint.match is not None:
raise exc.CompileError(
"MySQL ignores the 'MATCH' keyword while at the same time "
"causes ON UPDATE/ON DELETE clauses to be ignored."
)
return ""
def visit_set_table_comment(self, create):
return "ALTER TABLE %s COMMENT %s" % (
self.preparer.format_table(create.element),
self.sql_compiler.render_literal_value(
create.element.comment, sqltypes.String()
),
)
def visit_drop_table_comment(self, create):
return "ALTER TABLE %s COMMENT ''" % (
self.preparer.format_table(create.element)
)
def visit_set_column_comment(self, create):
return "ALTER TABLE %s CHANGE %s %s" % (
self.preparer.format_table(create.element.table),
self.preparer.format_column(create.element),
self.get_column_specification(create.element),
)
class MySQLTypeCompiler(compiler.GenericTypeCompiler):
def _extend_numeric(self, type_, spec):
"Extend a numeric-type declaration with MySQL specific extensions."
if not self._mysql_type(type_):
return spec
if type_.unsigned:
spec += " UNSIGNED"
if type_.zerofill:
spec += " ZEROFILL"
return spec
def _extend_string(self, type_, defaults, spec):
"""Extend a string-type declaration with standard SQL CHARACTER SET /
COLLATE annotations and MySQL specific extensions.
"""
def attr(name):
return getattr(type_, name, defaults.get(name))
if attr("charset"):
charset = "CHARACTER SET %s" % attr("charset")
elif attr("ascii"):
charset = "ASCII"
elif attr("unicode"):
charset = "UNICODE"
else:
charset = None
if attr("collation"):
collation = "COLLATE %s" % type_.collation
elif attr("binary"):
collation = "BINARY"
else:
collation = None
if attr("national"):
# NATIONAL (aka NCHAR/NVARCHAR) trumps charsets.
return " ".join(
[c for c in ("NATIONAL", spec, collation) if c is not None]
)
return " ".join(
[c for c in (spec, charset, collation) if c is not None]
)
def _mysql_type(self, type_):
return isinstance(type_, (_StringType, _NumericType))
def visit_NUMERIC(self, type_, **kw):
if type_.precision is None:
return self._extend_numeric(type_, "NUMERIC")
elif type_.scale is None:
return self._extend_numeric(
type_,
"NUMERIC(%(precision)s)" % {"precision": type_.precision},
)
else:
return self._extend_numeric(
type_,
"NUMERIC(%(precision)s, %(scale)s)"
% {"precision": type_.precision, "scale": type_.scale},
)
def visit_DECIMAL(self, type_, **kw):
if type_.precision is None:
return self._extend_numeric(type_, "DECIMAL")
elif type_.scale is None:
return self._extend_numeric(
type_,
"DECIMAL(%(precision)s)" % {"precision": type_.precision},
)
else:
return self._extend_numeric(
type_,
"DECIMAL(%(precision)s, %(scale)s)"
% {"precision": type_.precision, "scale": type_.scale},
)
def visit_DOUBLE(self, type_, **kw):
if type_.precision is not None and type_.scale is not None:
return self._extend_numeric(
type_,
"DOUBLE(%(precision)s, %(scale)s)"
% {"precision": type_.precision, "scale": type_.scale},
)
else:
return self._extend_numeric(type_, "DOUBLE")
def visit_REAL(self, type_, **kw):
if type_.precision is not None and type_.scale is not None:
return self._extend_numeric(
type_,
"REAL(%(precision)s, %(scale)s)"
% {"precision": type_.precision, "scale": type_.scale},
)
else:
return self._extend_numeric(type_, "REAL")
def visit_FLOAT(self, type_, **kw):
if (
self._mysql_type(type_)
and type_.scale is not None
and type_.precision is not None
):
return self._extend_numeric(
type_, "FLOAT(%s, %s)" % (type_.precision, type_.scale)
)
elif type_.precision is not None:
return self._extend_numeric(
type_, "FLOAT(%s)" % (type_.precision,)
)
else:
return self._extend_numeric(type_, "FLOAT")
def visit_INTEGER(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_,
"INTEGER(%(display_width)s)"
% {"display_width": type_.display_width},
)
else:
return self._extend_numeric(type_, "INTEGER")
def visit_BIGINT(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_,
"BIGINT(%(display_width)s)"
% {"display_width": type_.display_width},
)
else:
return self._extend_numeric(type_, "BIGINT")
def visit_MEDIUMINT(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_,
"MEDIUMINT(%(display_width)s)"
% {"display_width": type_.display_width},
)
else:
return self._extend_numeric(type_, "MEDIUMINT")
def visit_TINYINT(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_, "TINYINT(%s)" % type_.display_width
)
else:
return self._extend_numeric(type_, "TINYINT")
def visit_SMALLINT(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_,
"SMALLINT(%(display_width)s)"
% {"display_width": type_.display_width},
)
else:
return self._extend_numeric(type_, "SMALLINT")
def visit_BIT(self, type_, **kw):
if type_.length is not None:
return "BIT(%s)" % type_.length
else:
return "BIT"
def visit_DATETIME(self, type_, **kw):
if getattr(type_, "fsp", None):
return "DATETIME(%d)" % type_.fsp
else:
return "DATETIME"
def visit_DATE(self, type_, **kw):
return "DATE"
def visit_TIME(self, type_, **kw):
if getattr(type_, "fsp", None):
return "TIME(%d)" % type_.fsp
else:
return "TIME"
def visit_TIMESTAMP(self, type_, **kw):
if getattr(type_, "fsp", None):
return "TIMESTAMP(%d)" % type_.fsp
else:
return "TIMESTAMP"
def visit_YEAR(self, type_, **kw):
if type_.display_width is None:
return "YEAR"
else:
return "YEAR(%s)" % type_.display_width
def visit_TEXT(self, type_, **kw):
if type_.length:
return self._extend_string(type_, {}, "TEXT(%d)" % type_.length)
else:
return self._extend_string(type_, {}, "TEXT")
def visit_TINYTEXT(self, type_, **kw):
return self._extend_string(type_, {}, "TINYTEXT")
def visit_MEDIUMTEXT(self, type_, **kw):
return self._extend_string(type_, {}, "MEDIUMTEXT")
def visit_LONGTEXT(self, type_, **kw):
return self._extend_string(type_, {}, "LONGTEXT")
def visit_VARCHAR(self, type_, **kw):
if type_.length:
return self._extend_string(type_, {}, "VARCHAR(%d)" % type_.length)
else:
raise exc.CompileError(
"VARCHAR requires a length on dialect %s" % self.dialect.name
)
def visit_CHAR(self, type_, **kw):
if type_.length:
return self._extend_string(
type_, {}, "CHAR(%(length)s)" % {"length": type_.length}
)
else:
return self._extend_string(type_, {}, "CHAR")
def visit_NVARCHAR(self, type_, **kw):
# We'll actually generate the equiv. "NATIONAL VARCHAR" instead
# of "NVARCHAR".
if type_.length:
return self._extend_string(
type_,
{"national": True},
"VARCHAR(%(length)s)" % {"length": type_.length},
)
else:
raise exc.CompileError(
"NVARCHAR requires a length on dialect %s" % self.dialect.name
)
def visit_NCHAR(self, type_, **kw):
# We'll actually generate the equiv.
# "NATIONAL CHAR" instead of "NCHAR".
if type_.length:
return self._extend_string(
type_,
{"national": True},
"CHAR(%(length)s)" % {"length": type_.length},
)
else:
return self._extend_string(type_, {"national": True}, "CHAR")
def visit_VARBINARY(self, type_, **kw):
return "VARBINARY(%d)" % type_.length
def visit_JSON(self, type_, **kw):
return "JSON"
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_)
def visit_enum(self, type_, **kw):
if not type_.native_enum:
return super(MySQLTypeCompiler, self).visit_enum(type_)
else:
return self._visit_enumerated_values("ENUM", type_, type_.enums)
def visit_BLOB(self, type_, **kw):
if type_.length:
return "BLOB(%d)" % type_.length
else:
return "BLOB"
def visit_TINYBLOB(self, type_, **kw):
return "TINYBLOB"
def visit_MEDIUMBLOB(self, type_, **kw):
return "MEDIUMBLOB"
def visit_LONGBLOB(self, type_, **kw):
return "LONGBLOB"
def _visit_enumerated_values(self, name, type_, enumerated_values):
quoted_enums = []
for e in enumerated_values:
quoted_enums.append("'%s'" % e.replace("'", "''"))
return self._extend_string(
type_, {}, "%s(%s)" % (name, ",".join(quoted_enums))
)
def visit_ENUM(self, type_, **kw):
return self._visit_enumerated_values(
"ENUM", type_, type_._enumerated_values
)
def visit_SET(self, type_, **kw):
return self._visit_enumerated_values(
"SET", type_, type_._enumerated_values
)
def visit_BOOLEAN(self, type_, **kw):
return "BOOL"
class MySQLIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
def __init__(self, dialect, server_ansiquotes=False, **kw):
if not server_ansiquotes:
quote = "`"
else:
quote = '"'
super(MySQLIdentifierPreparer, self).__init__(
dialect, initial_quote=quote, escape_quote=quote
)
def _quote_free_identifiers(self, *ids):
"""Unilaterally identifier-quote any number of strings."""
return tuple([self.quote_identifier(i) for i in ids if i is not None])
@log.class_logger
class MySQLDialect(default.DefaultDialect):
"""Details of the MySQL dialect.
Not used directly in application code.
"""
name = "mysql"
supports_alter = True
# MySQL has no true "boolean" type; we
# allow for the "true" and "false" keywords, however
supports_native_boolean = False
# identifiers are 64, however aliases can be 255...
max_identifier_length = 255
max_index_name_length = 64
supports_native_enum = True
supports_for_update_of = False # default for MySQL ...
# ... may be updated to True for MySQL 8+ in initialize()
supports_sane_rowcount = True
supports_sane_multi_rowcount = False
supports_multivalues_insert = True
supports_comments = True
inline_comments = True
default_paramstyle = "format"
colspecs = colspecs
cte_follows_insert = True
statement_compiler = MySQLCompiler
ddl_compiler = MySQLDDLCompiler
type_compiler = MySQLTypeCompiler
ischema_names = ischema_names
preparer = MySQLIdentifierPreparer
# default SQL compilation settings -
# these are modified upon initialize(),
# i.e. first connect
_backslash_escapes = True
_server_ansiquotes = False
construct_arguments = [
(sa_schema.Table, {"*": None}),
(sql.Update, {"limit": None}),
(sa_schema.PrimaryKeyConstraint, {"using": None}),
(
sa_schema.Index,
{
"using": None,
"length": None,
"prefix": None,
"with_parser": None,
},
),
]
def __init__(
self,
isolation_level=None,
json_serializer=None,
json_deserializer=None,
**kwargs
):
kwargs.pop("use_ansiquotes", None) # legacy
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
self._json_serializer = json_serializer
self._json_deserializer = json_deserializer
def on_connect(self):
if self.isolation_level is not None:
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
return connect
else:
return None
_isolation_lookup = set(
[
"SERIALIZABLE",
"READ UNCOMMITTED",
"READ COMMITTED",
"REPEATABLE READ",
]
)
def set_isolation_level(self, connection, level):
level = level.replace("_", " ")
# adjust for ConnectionFairy being present
# allows attribute set e.g. "connection.autocommit = True"
# to work properly
if hasattr(connection, "connection"):
connection = connection.connection
self._set_isolation_level(connection, level)
def _set_isolation_level(self, connection, level):
if level not in self._isolation_lookup:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s"
% (level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
cursor.execute("SET SESSION TRANSACTION ISOLATION LEVEL %s" % level)
cursor.execute("COMMIT")
cursor.close()
def get_isolation_level(self, connection):
cursor = connection.cursor()
if self._is_mysql and self.server_version_info >= (5, 7, 20):
cursor.execute("SELECT @@transaction_isolation")
else:
cursor.execute("SELECT @@tx_isolation")
row = cursor.fetchone()
if row is None:
util.warn(
"Could not retrieve transaction isolation level for MySQL "
"connection."
)
raise NotImplementedError()
val = row[0]
cursor.close()
if util.py3k and isinstance(val, bytes):
val = val.decode()
return val.upper().replace("-", " ")
def _get_server_version_info(self, connection):
# get database server version info explicitly over the wire
# to avoid proxy servers like MaxScale getting in the
# way with their own values, see #4205
dbapi_con = connection.connection
cursor = dbapi_con.cursor()
cursor.execute("SELECT VERSION()")
val = cursor.fetchone()[0]
cursor.close()
if util.py3k and isinstance(val, bytes):
val = val.decode()
return self._parse_server_version(val)
def _parse_server_version(self, val):
version = []
r = re.compile(r"[.\-]")
for n in r.split(val):
try:
version.append(int(n))
except ValueError:
mariadb = re.match(r"(.*)(MariaDB)(.*)", n)
if mariadb:
version.extend(g for g in mariadb.groups() if g)
else:
version.append(n)
return tuple(version)
def do_commit(self, dbapi_connection):
"""Execute a COMMIT."""
# COMMIT/ROLLBACK were introduced in 3.23.15.
# Yes, we have at least one user who has to talk to these old
# versions!
#
# Ignore commit/rollback if support isn't present, otherwise even
# basic operations via autocommit fail.
try:
dbapi_connection.commit()
except Exception:
if self.server_version_info < (3, 23, 15):
args = sys.exc_info()[1].args
if args and args[0] == 1064:
return
raise
def do_rollback(self, dbapi_connection):
"""Execute a ROLLBACK."""
try:
dbapi_connection.rollback()
except Exception:
if self.server_version_info < (3, 23, 15):
args = sys.exc_info()[1].args
if args and args[0] == 1064:
return
raise
def do_begin_twophase(self, connection, xid):
connection.execute(sql.text("XA BEGIN :xid"), xid=xid)
def do_prepare_twophase(self, connection, xid):
connection.execute(sql.text("XA END :xid"), xid=xid)
connection.execute(sql.text("XA PREPARE :xid"), xid=xid)
def do_rollback_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if not is_prepared:
connection.execute(sql.text("XA END :xid"), xid=xid)
connection.execute(sql.text("XA ROLLBACK :xid"), xid=xid)
def do_commit_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if not is_prepared:
self.do_prepare_twophase(connection, xid)
connection.execute(sql.text("XA COMMIT :xid"), xid=xid)
def do_recover_twophase(self, connection):
resultset = connection.execute("XA RECOVER")
return [row["data"][0 : row["gtrid_length"]] for row in resultset]
def is_disconnect(self, e, connection, cursor):
if isinstance(
e, (self.dbapi.OperationalError, self.dbapi.ProgrammingError)
):
return self._extract_error_code(e) in (
2006,
2013,
2014,
2045,
2055,
)
elif isinstance(
e, (self.dbapi.InterfaceError, self.dbapi.InternalError)
):
# if underlying connection is closed,
# this is the error you get
return "(0, '')" in str(e)
else:
return False
def _compat_fetchall(self, rp, charset=None):
"""Proxy result rows to smooth over MySQL-Python driver
inconsistencies."""
return [_DecodingRowProxy(row, charset) for row in rp.fetchall()]
def _compat_fetchone(self, rp, charset=None):
"""Proxy a result row to smooth over MySQL-Python driver
inconsistencies."""
row = rp.fetchone()
if row:
return _DecodingRowProxy(row, charset)
else:
return None
def _compat_first(self, rp, charset=None):
"""Proxy a result row to smooth over MySQL-Python driver
inconsistencies."""
row = rp.first()
if row:
return _DecodingRowProxy(row, charset)
else:
return None
def _extract_error_code(self, exception):
raise NotImplementedError()
def _get_default_schema_name(self, connection):
return connection.execute("SELECT DATABASE()").scalar()
def has_table(self, connection, table_name, schema=None):
# SHOW TABLE STATUS LIKE and SHOW TABLES LIKE do not function properly
# on macosx (and maybe win?) with multibyte table names.
#
# TODO: if this is not a problem on win, make the strategy swappable
# based on platform. DESCRIBE is slower.
# [ticket:726]
# full_name = self.identifier_preparer.format_table(table,
# use_schema=True)
full_name = ".".join(
self.identifier_preparer._quote_free_identifiers(
schema, table_name
)
)
st = "DESCRIBE %s" % full_name
rs = None
try:
try:
rs = connection.execution_options(
skip_user_error_events=True
).execute(st)
have = rs.fetchone() is not None
rs.close()
return have
except exc.DBAPIError as e:
if self._extract_error_code(e.orig) == 1146:
return False
raise
finally:
if rs:
rs.close()
def initialize(self, connection):
self._connection_charset = self._detect_charset(connection)
self._detect_sql_mode(connection)
self._detect_ansiquotes(connection)
self._detect_casing(connection)
if self._server_ansiquotes:
# if ansiquotes == True, build a new IdentifierPreparer
# with the new setting
self.identifier_preparer = self.preparer(
self, server_ansiquotes=self._server_ansiquotes
)
default.DefaultDialect.initialize(self, connection)
self.supports_for_update_of = (
self._is_mysql and self.server_version_info >= (8,)
)
self._needs_correct_for_88718_96365 = (
not self._is_mariadb and self.server_version_info >= (8,)
)
self._warn_for_known_db_issues()
def _warn_for_known_db_issues(self):
if self._is_mariadb:
mdb_version = self._mariadb_normalized_version_info
if mdb_version > (10, 2) and mdb_version < (10, 2, 9):
util.warn(
"MariaDB %r before 10.2.9 has known issues regarding "
"CHECK constraints, which impact handling of NULL values "
"with SQLAlchemy's boolean datatype (MDEV-13596). An "
"additional issue prevents proper migrations of columns "
"with CHECK constraints (MDEV-11114). Please upgrade to "
"MariaDB 10.2.9 or greater, or use the MariaDB 10.1 "
"series, to avoid these issues." % (mdb_version,)
)
@property
def _is_mariadb(self):
return (
self.server_version_info and "MariaDB" in self.server_version_info
)
@property
def _is_mysql(self):
return not self._is_mariadb
@property
def _is_mariadb_102(self):
return self._is_mariadb and self._mariadb_normalized_version_info > (
10,
2,
)
@property
def _mariadb_normalized_version_info(self):
# MariaDB's wire-protocol prepends the server_version with
# the string "5.5"; now that we use @@version we no longer see this.
if self._is_mariadb:
idx = self.server_version_info.index("MariaDB")
return self.server_version_info[idx - 3 : idx]
else:
return self.server_version_info
@property
def _supports_cast(self):
return (
self.server_version_info is None
or self.server_version_info >= (4, 0, 2)
)
@reflection.cache
def get_schema_names(self, connection, **kw):
rp = connection.execute("SHOW schemas")
return [r[0] for r in rp]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
"""Return a Unicode SHOW TABLES from a given schema."""
if schema is not None:
current_schema = schema
else:
current_schema = self.default_schema_name
charset = self._connection_charset
if self.server_version_info < (5, 0, 2):
rp = connection.execute(
"SHOW TABLES FROM %s"
% self.identifier_preparer.quote_identifier(current_schema)
)
return [
row[0] for row in self._compat_fetchall(rp, charset=charset)
]
else:
rp = connection.execute(
"SHOW FULL TABLES FROM %s"
% self.identifier_preparer.quote_identifier(current_schema)
)
return [
row[0]
for row in self._compat_fetchall(rp, charset=charset)
if row[1] == "BASE TABLE"
]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if self.server_version_info < (5, 0, 2):
raise NotImplementedError
if schema is None:
schema = self.default_schema_name
if self.server_version_info < (5, 0, 2):
return self.get_table_names(connection, schema)
charset = self._connection_charset
rp = connection.execute(
"SHOW FULL TABLES FROM %s"
% self.identifier_preparer.quote_identifier(schema)
)
return [
row[0]
for row in self._compat_fetchall(rp, charset=charset)
if row[1] in ("VIEW", "SYSTEM VIEW")
]
@reflection.cache
def get_table_options(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
return parsed_state.table_options
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
return parsed_state.columns
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
for key in parsed_state.keys:
if key["type"] == "PRIMARY":
# There can be only one.
cols = [s[0] for s in key["columns"]]
return {"constrained_columns": cols, "name": None}
return {"constrained_columns": [], "name": None}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
default_schema = None
fkeys = []
for spec in parsed_state.fk_constraints:
ref_name = spec["table"][-1]
ref_schema = len(spec["table"]) > 1 and spec["table"][-2] or schema
if not ref_schema:
if default_schema is None:
default_schema = connection.dialect.default_schema_name
if schema == default_schema:
ref_schema = schema
loc_names = spec["local"]
ref_names = spec["foreign"]
con_kw = {}
for opt in ("onupdate", "ondelete"):
if spec.get(opt, False):
con_kw[opt] = spec[opt]
fkey_d = {
"name": spec["name"],
"constrained_columns": loc_names,
"referred_schema": ref_schema,
"referred_table": ref_name,
"referred_columns": ref_names,
"options": con_kw,
}
fkeys.append(fkey_d)
if self._needs_correct_for_88718_96365:
self._correct_for_mysql_bugs_88718_96365(fkeys, connection)
return fkeys
def _correct_for_mysql_bugs_88718_96365(self, fkeys, connection):
# Foreign key is always in lower case (MySQL 8.0)
# https://bugs.mysql.com/bug.php?id=88718
# issue #4344 for SQLAlchemy
# table name also for MySQL 8.0
# https://bugs.mysql.com/bug.php?id=96365
# issue #4751 for SQLAlchemy
# for lower_case_table_names=2, information_schema.columns
# preserves the original table/schema casing, but SHOW CREATE
# TABLE does not. this problem is not in lower_case_table_names=1,
# but use case-insensitive matching for these two modes in any case.
if self._casing in (1, 2):
def lower(s):
return s.lower()
else:
# if on case sensitive, there can be two tables referenced
# with the same name different casing, so we need to use
# case-sensitive matching.
def lower(s):
return s
default_schema_name = connection.dialect.default_schema_name
col_tuples = [
(
lower(rec["referred_schema"] or default_schema_name),
lower(rec["referred_table"]),
col_name,
)
for rec in fkeys
for col_name in rec["referred_columns"]
]
if col_tuples:
correct_for_wrong_fk_case = connection.execute(
sql.text(
"""
select table_schema, table_name, column_name
from information_schema.columns
where (table_schema, table_name, lower(column_name)) in
:table_data;
"""
).bindparams(sql.bindparam("table_data", expanding=True)),
table_data=col_tuples,
)
# in casing=0, table name and schema name come back in their
# exact case.
# in casing=1, table name and schema name come back in lower
# case.
# in casing=2, table name and schema name come back from the
# information_schema.columns view in the case
# that was used in CREATE DATABASE and CREATE TABLE, but
# SHOW CREATE TABLE converts them to *lower case*, therefore
# not matching. So for this case, case-insensitive lookup
# is necessary
d = defaultdict(dict)
for schema, tname, cname in correct_for_wrong_fk_case:
d[(lower(schema), lower(tname))]["SCHEMANAME"] = schema
d[(lower(schema), lower(tname))]["TABLENAME"] = tname
d[(lower(schema), lower(tname))][cname.lower()] = cname
for fkey in fkeys:
rec = d[
(
lower(fkey["referred_schema"] or default_schema_name),
lower(fkey["referred_table"]),
)
]
fkey["referred_table"] = rec["TABLENAME"]
if fkey["referred_schema"] is not None:
fkey["referred_schema"] = rec["SCHEMANAME"]
fkey["referred_columns"] = [
rec[col.lower()] for col in fkey["referred_columns"]
]
@reflection.cache
def get_check_constraints(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
return [
{"name": spec["name"], "sqltext": spec["sqltext"]}
for spec in parsed_state.ck_constraints
]
@reflection.cache
def get_table_comment(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
return {"text": parsed_state.table_options.get("mysql_comment", None)}
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
indexes = []
for spec in parsed_state.keys:
dialect_options = {}
unique = False
flavor = spec["type"]
if flavor == "PRIMARY":
continue
if flavor == "UNIQUE":
unique = True
elif flavor in ("FULLTEXT", "SPATIAL"):
dialect_options["mysql_prefix"] = flavor
elif flavor is None:
pass
else:
self.logger.info(
"Converting unknown KEY type %s to a plain KEY", flavor
)
if spec["parser"]:
dialect_options["mysql_with_parser"] = spec["parser"]
index_d = {}
if dialect_options:
index_d["dialect_options"] = dialect_options
index_d["name"] = spec["name"]
index_d["column_names"] = [s[0] for s in spec["columns"]]
index_d["unique"] = unique
if flavor:
index_d["type"] = flavor
indexes.append(index_d)
return indexes
@reflection.cache
def get_unique_constraints(
self, connection, table_name, schema=None, **kw
):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
return [
{
"name": key["name"],
"column_names": [col[0] for col in key["columns"]],
"duplicates_index": key["name"],
}
for key in parsed_state.keys
if key["type"] == "UNIQUE"
]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
charset = self._connection_charset
full_name = ".".join(
self.identifier_preparer._quote_free_identifiers(schema, view_name)
)
sql = self._show_create_table(
connection, None, charset, full_name=full_name
)
return sql
def _parsed_state_or_create(
self, connection, table_name, schema=None, **kw
):
return self._setup_parser(
connection,
table_name,
schema,
info_cache=kw.get("info_cache", None),
)
@util.memoized_property
def _tabledef_parser(self):
"""return the MySQLTableDefinitionParser, generate if needed.
The deferred creation ensures that the dialect has
retrieved server version information first.
"""
if self.server_version_info < (4, 1) and self._server_ansiquotes:
# ANSI_QUOTES doesn't affect SHOW CREATE TABLE on < 4.1
preparer = self.preparer(self, server_ansiquotes=False)
else:
preparer = self.identifier_preparer
return _reflection.MySQLTableDefinitionParser(self, preparer)
@reflection.cache
def _setup_parser(self, connection, table_name, schema=None, **kw):
charset = self._connection_charset
parser = self._tabledef_parser
full_name = ".".join(
self.identifier_preparer._quote_free_identifiers(
schema, table_name
)
)
sql = self._show_create_table(
connection, None, charset, full_name=full_name
)
if re.match(r"^CREATE (?:ALGORITHM)?.* VIEW", sql):
# Adapt views to something table-like.
columns = self._describe_table(
connection, None, charset, full_name=full_name
)
sql = parser._describe_to_create(table_name, columns)
return parser.parse(sql, charset)
def _detect_charset(self, connection):
raise NotImplementedError()
def _detect_casing(self, connection):
"""Sniff out identifier case sensitivity.
Cached per-connection. This value cannot change without a server
restart.
"""
# http://dev.mysql.com/doc/refman/5.0/en/name-case-sensitivity.html
charset = self._connection_charset
row = self._compat_first(
connection.execute("SHOW VARIABLES LIKE 'lower_case_table_names'"),
charset=charset,
)
if not row:
cs = 0
else:
# 4.0.15 returns OFF or ON according to [ticket:489]
# 3.23 doesn't, 4.0.27 doesn't..
if row[1] == "OFF":
cs = 0
elif row[1] == "ON":
cs = 1
else:
cs = int(row[1])
self._casing = cs
return cs
def _detect_collations(self, connection):
"""Pull the active COLLATIONS list from the server.
Cached per-connection.
"""
collations = {}
if self.server_version_info < (4, 1, 0):
pass
else:
charset = self._connection_charset
rs = connection.execute("SHOW COLLATION")
for row in self._compat_fetchall(rs, charset):
collations[row[0]] = row[1]
return collations
def _detect_sql_mode(self, connection):
row = self._compat_first(
connection.execute("SHOW VARIABLES LIKE 'sql_mode'"),
charset=self._connection_charset,
)
if not row:
util.warn(
"Could not retrieve SQL_MODE; please ensure the "
"MySQL user has permissions to SHOW VARIABLES"
)
self._sql_mode = ""
else:
self._sql_mode = row[1] or ""
def _detect_ansiquotes(self, connection):
"""Detect and adjust for the ANSI_QUOTES sql mode."""
mode = self._sql_mode
if not mode:
mode = ""
elif mode.isdigit():
mode_no = int(mode)
mode = (mode_no | 4 == mode_no) and "ANSI_QUOTES" or ""
self._server_ansiquotes = "ANSI_QUOTES" in mode
# as of MySQL 5.0.1
self._backslash_escapes = "NO_BACKSLASH_ESCAPES" not in mode
def _show_create_table(
self, connection, table, charset=None, full_name=None
):
"""Run SHOW CREATE TABLE for a ``Table``."""
if full_name is None:
full_name = self.identifier_preparer.format_table(table)
st = "SHOW CREATE TABLE %s" % full_name
rp = None
try:
rp = connection.execution_options(
skip_user_error_events=True
).execute(st)
except exc.DBAPIError as e:
if self._extract_error_code(e.orig) == 1146:
util.raise_(exc.NoSuchTableError(full_name), replace_context=e)
else:
raise
row = self._compat_first(rp, charset=charset)
if not row:
raise exc.NoSuchTableError(full_name)
return row[1].strip()
def _describe_table(self, connection, table, charset=None, full_name=None):
"""Run DESCRIBE for a ``Table`` and return processed rows."""
if full_name is None:
full_name = self.identifier_preparer.format_table(table)
st = "DESCRIBE %s" % full_name
rp, rows = None, None
try:
try:
rp = connection.execution_options(
skip_user_error_events=True
).execute(st)
except exc.DBAPIError as e:
code = self._extract_error_code(e.orig)
if code == 1146:
util.raise_(
exc.NoSuchTableError(full_name), replace_context=e
)
elif code == 1356:
util.raise_(
exc.UnreflectableTableError(
"Table or view named %s could not be "
"reflected: %s" % (full_name, e)
),
replace_context=e,
)
else:
raise
rows = self._compat_fetchall(rp, charset=charset)
finally:
if rp:
rp.close()
return rows
class _DecodingRowProxy(object):
"""Return unicode-decoded values based on type inspection.
Smooth over data type issues (esp. with alpha driver versions) and
normalize strings as Unicode regardless of user-configured driver
encoding settings.
"""
# Some MySQL-python versions can return some columns as
# sets.Set(['value']) (seriously) but thankfully that doesn't
# seem to come up in DDL queries.
_encoding_compat = {
"koi8r": "koi8_r",
"koi8u": "koi8_u",
"utf16": "utf-16-be", # MySQL's uft16 is always bigendian
"utf8mb4": "utf8", # real utf8
"eucjpms": "ujis",
}
def __init__(self, rowproxy, charset):
self.rowproxy = rowproxy
self.charset = self._encoding_compat.get(charset, charset)
def __getitem__(self, index):
item = self.rowproxy[index]
if isinstance(item, _array):
item = item.tostring()
if self.charset and isinstance(item, util.binary_type):
return item.decode(self.charset)
else:
return item
def __getattr__(self, attr):
item = getattr(self.rowproxy, attr)
if isinstance(item, _array):
item = item.tostring()
if self.charset and isinstance(item, util.binary_type):
return item.decode(self.charset)
else:
return item
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mysql/cymysql.py
|
# mysql/cymysql.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: mysql+cymysql
:name: CyMySQL
:dbapi: cymysql
:connectstring: mysql+cymysql://<username>:<password>@<host>/<dbname>[?<options>]
:url: https://github.com/nakagami/CyMySQL
.. note::
The CyMySQL dialect is **not tested as part of SQLAlchemy's continuous
integration** and may have unresolved issues. The recommended MySQL
dialects are mysqlclient and PyMySQL.
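A minimal connection following the ``connectstring`` format above
(credentials, host and database name are placeholders)::

    engine = create_engine("mysql+cymysql://scott:tiger@localhost/test")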
""" # noqa
from .base import BIT
from .base import MySQLDialect
from .mysqldb import MySQLDialect_mysqldb
from ... import util
class _cymysqlBIT(BIT):
def result_processor(self, dialect, coltype):
"""Convert a MySQL's 64 bit, variable length binary string to a long.
"""
def process(value):
if value is not None:
v = 0
for i in util.iterbytes(value):
v = v << 8 | i
return v
return value
return process
class MySQLDialect_cymysql(MySQLDialect_mysqldb):
driver = "cymysql"
description_encoding = None
supports_sane_rowcount = True
supports_sane_multi_rowcount = False
supports_unicode_statements = True
colspecs = util.update_copy(MySQLDialect.colspecs, {BIT: _cymysqlBIT})
@classmethod
def dbapi(cls):
return __import__("cymysql")
def _detect_charset(self, connection):
return connection.connection.charset
def _extract_error_code(self, exception):
return exception.errno
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.OperationalError):
return self._extract_error_code(e) in (
2006,
2013,
2014,
2045,
2055,
)
elif isinstance(e, self.dbapi.InterfaceError):
# if underlying connection is closed,
# this is the error you get
return True
else:
return False
dialect = MySQLDialect_cymysql
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mssql/mxodbc.py
|
# mssql/mxodbc.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+mxodbc
:name: mxODBC
:dbapi: mxodbc
:connectstring: mssql+mxodbc://<username>:<password>@<dsnname>
:url: http://www.egenix.com/
Execution Modes
---------------
mxODBC features two styles of statement execution, using the
``cursor.execute()`` and ``cursor.executedirect()`` methods (the second being
an extension to the DBAPI specification). The former makes use of a particular
API call specific to the SQL Server Native Client ODBC driver known as
SQLDescribeParam, while the latter does not.
mxODBC apparently only makes repeated use of a single prepared statement
when SQLDescribeParam is used. The advantage to prepared statement reuse is
one of performance. The disadvantage is that SQLDescribeParam has a limited
set of scenarios in which bind parameters are understood, including that they
cannot be placed within the argument lists of function calls, anywhere outside
the FROM, or even within subqueries within the FROM clause - making the usage
of bind parameters within SELECT statements impossible for all but the most
simplistic statements.
For this reason, the mxODBC dialect uses the "native" mode by default only for
INSERT, UPDATE, and DELETE statements, and uses the escaped string mode for
all other statements.
This behavior can be controlled via
:meth:`~sqlalchemy.sql.expression.Executable.execution_options` using the
``native_odbc_execute`` flag with a value of ``True`` or ``False``, where a
value of ``True`` will unconditionally use native bind parameters and a value
of ``False`` will unconditionally use string-escaped parameters.
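For example, a single statement can be forced to use string-escaped
parameters (``my_table`` below is purely illustrative)::

    stmt = my_table.select().execution_options(native_odbc_execute=False)
    conn.execute(stmt)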
"""
from .base import _MSDate
from .base import _MSDateTime
from .base import _MSTime
from .base import MSDialect
from .base import VARBINARY
from .pyodbc import _MSNumeric_pyodbc
from .pyodbc import MSExecutionContext_pyodbc
from ... import types as sqltypes
from ...connectors.mxodbc import MxODBCConnector
class _MSNumeric_mxodbc(_MSNumeric_pyodbc):
"""Include pyodbc's numeric processor.
"""
class _MSDate_mxodbc(_MSDate):
def bind_processor(self, dialect):
def process(value):
if value is not None:
return "%s-%s-%s" % (value.year, value.month, value.day)
else:
return None
return process
class _MSTime_mxodbc(_MSTime):
def bind_processor(self, dialect):
def process(value):
if value is not None:
return "%s:%s:%s" % (value.hour, value.minute, value.second)
else:
return None
return process
class _VARBINARY_mxodbc(VARBINARY):
"""
mxODBC Support for VARBINARY column types.
This handles the special case for null VARBINARY values,
which maps None values to the mx.ODBC.Manager.BinaryNull symbol.
"""
def bind_processor(self, dialect):
if dialect.dbapi is None:
return None
DBAPIBinary = dialect.dbapi.Binary
def process(value):
if value is not None:
return DBAPIBinary(value)
else:
# should pull from mx.ODBC.Manager.BinaryNull
return dialect.dbapi.BinaryNull
return process
class MSExecutionContext_mxodbc(MSExecutionContext_pyodbc):
"""
The pyodbc execution context is useful for enabling
SELECT SCOPE_IDENTITY in cases where OUTPUT clause
does not work (tables with insert triggers).
"""
# todo - investigate whether the pyodbc execution context
# is really only being used in cases where OUTPUT
# won't work.
class MSDialect_mxodbc(MxODBCConnector, MSDialect):
# this is only needed if "native ODBC" mode is used,
# which is now disabled by default.
# statement_compiler = MSSQLStrictCompiler
execution_ctx_cls = MSExecutionContext_mxodbc
# flag used by _MSNumeric_mxodbc
_need_decimal_fix = True
colspecs = {
sqltypes.Numeric: _MSNumeric_mxodbc,
sqltypes.DateTime: _MSDateTime,
sqltypes.Date: _MSDate_mxodbc,
sqltypes.Time: _MSTime_mxodbc,
VARBINARY: _VARBINARY_mxodbc,
sqltypes.LargeBinary: _VARBINARY_mxodbc,
}
def __init__(self, description_encoding=None, **params):
super(MSDialect_mxodbc, self).__init__(**params)
self.description_encoding = description_encoding
dialect = MSDialect_mxodbc
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mssql/zxjdbc.py
|
# mssql/zxjdbc.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: mssql+zxjdbc://user:pass@host:port/dbname[?key=value&key=value...]
:driverurl: http://jtds.sourceforge.net/
.. note:: Jython is not supported by current versions of SQLAlchemy. The
zxjdbc dialect should be considered as experimental.
""" # noqa
from .base import MSDialect
from .base import MSExecutionContext
from ... import engine
from ...connectors.zxJDBC import ZxJDBCConnector
class MSExecutionContext_zxjdbc(MSExecutionContext):
_embedded_scope_identity = False
def pre_exec(self):
super(MSExecutionContext_zxjdbc, self).pre_exec()
# scope_identity after the fact returns null in jTDS so we must
# embed it
if self._select_lastrowid and self.dialect.use_scope_identity:
self._embedded_scope_identity = True
self.statement += "; SELECT scope_identity()"
def post_exec(self):
if self._embedded_scope_identity:
while True:
try:
row = self.cursor.fetchall()[0]
break
except self.dialect.dbapi.Error:
self.cursor.nextset()
self._lastrowid = int(row[0])
if (
self.isinsert or self.isupdate or self.isdelete
) and self.compiled.returning:
self._result_proxy = engine.FullyBufferedResultProxy(self)
if self._enable_identity_insert:
table = self.dialect.identifier_preparer.format_table(
self.compiled.statement.table
)
self.cursor.execute("SET IDENTITY_INSERT %s OFF" % table)
class MSDialect_zxjdbc(ZxJDBCConnector, MSDialect):
jdbc_db_name = "jtds:sqlserver"
jdbc_driver_name = "net.sourceforge.jtds.jdbc.Driver"
execution_ctx_cls = MSExecutionContext_zxjdbc
def _get_server_version_info(self, connection):
return tuple(
int(x) for x in connection.connection.dbversion.split(".")
)
dialect = MSDialect_zxjdbc
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mssql/__init__.py
|
# mssql/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import adodbapi # noqa
from . import base # noqa
from . import mxodbc # noqa
from . import pymssql # noqa
from . import pyodbc # noqa
from . import zxjdbc # noqa
from .base import BIGINT
from .base import BINARY
from .base import BIT
from .base import CHAR
from .base import DATE
from .base import DATETIME
from .base import DATETIME2
from .base import DATETIMEOFFSET
from .base import DECIMAL
from .base import FLOAT
from .base import IMAGE
from .base import INTEGER
from .base import MONEY
from .base import NCHAR
from .base import NTEXT
from .base import NUMERIC
from .base import NVARCHAR
from .base import REAL
from .base import ROWVERSION
from .base import SMALLDATETIME
from .base import SMALLINT
from .base import SMALLMONEY
from .base import SQL_VARIANT
from .base import TEXT
from .base import TIME
from .base import TIMESTAMP
from .base import TINYINT
from .base import try_cast
from .base import UNIQUEIDENTIFIER
from .base import VARBINARY
from .base import VARCHAR
from .base import XML
base.dialect = dialect = pyodbc.dialect
__all__ = (
"INTEGER",
"BIGINT",
"SMALLINT",
"TINYINT",
"VARCHAR",
"NVARCHAR",
"CHAR",
"NCHAR",
"TEXT",
"NTEXT",
"DECIMAL",
"NUMERIC",
"FLOAT",
"DATETIME",
"DATETIME2",
"DATETIMEOFFSET",
"DATE",
"TIME",
"SMALLDATETIME",
"BINARY",
"VARBINARY",
"BIT",
"REAL",
"IMAGE",
"TIMESTAMP",
"ROWVERSION",
"MONEY",
"SMALLMONEY",
"UNIQUEIDENTIFIER",
"SQL_VARIANT",
"XML",
"dialect",
"try_cast",
)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mssql/pymssql.py
|
# mssql/pymssql.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+pymssql
:name: pymssql
:dbapi: pymssql
:connectstring: mssql+pymssql://<username>:<password>@<freetds_name>/?charset=utf8
pymssql is a Python module that provides a Python DBAPI interface around
`FreeTDS <http://www.freetds.org/>`_.
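A typical connection following the ``connectstring`` format above
(credentials, FreeTDS server name and charset are placeholders)::

    engine = create_engine(
        "mssql+pymssql://scott:tiger@some_freetds_name/dbname?charset=utf8"
    )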
.. note::
pymssql is currently not included in SQLAlchemy's continuous integration
(CI) testing.
Modern versions of this driver worked very well with SQL Server and FreeTDS
from Linux and were highly recommended. However, pymssql is currently
unmaintained and has fallen behind the progress of the Microsoft ODBC driver in
its support for newer features of SQL Server. The latest official release of
pymssql at the time of this document is version 2.1.4 (August, 2018) and it
lacks support for:
1. table-valued parameters (TVPs),
2. ``datetimeoffset`` columns using timezone-aware ``datetime`` objects
(values are sent and retrieved as strings), and
3. encrypted connections (e.g., to Azure SQL), when pymssql is installed from
the pre-built wheels. Support for encrypted connections requires building
pymssql from source, which can be a nuisance, especially under Windows.
The above features are all supported by mssql+pyodbc when using Microsoft's
ODBC Driver for SQL Server (msodbcsql), which is now available for Windows,
(several flavors of) Linux, and macOS.
""" # noqa
import re
from .base import MSDialect
from .base import MSIdentifierPreparer
from ... import processors
from ... import types as sqltypes
from ... import util
class _MSNumeric_pymssql(sqltypes.Numeric):
def result_processor(self, dialect, type_):
if not self.asdecimal:
return processors.to_float
else:
return sqltypes.Numeric.result_processor(self, dialect, type_)
class MSIdentifierPreparer_pymssql(MSIdentifierPreparer):
def __init__(self, dialect):
super(MSIdentifierPreparer_pymssql, self).__init__(dialect)
# pymssql has the very unusual behavior that it uses pyformat
# yet does not require that percent signs be doubled
self._double_percents = False
class MSDialect_pymssql(MSDialect):
supports_native_decimal = True
driver = "pymssql"
preparer = MSIdentifierPreparer_pymssql
colspecs = util.update_copy(
MSDialect.colspecs,
{sqltypes.Numeric: _MSNumeric_pymssql, sqltypes.Float: sqltypes.Float},
)
@classmethod
def dbapi(cls):
module = __import__("pymssql")
# pymssql < 2.1.1 doesn't have a Binary method; we use string instead
client_ver = tuple(int(x) for x in module.__version__.split("."))
if client_ver < (2, 1, 1):
# TODO: monkeypatching here is less than ideal
module.Binary = lambda x: x if hasattr(x, "decode") else str(x)
if client_ver < (1,):
util.warn(
"The pymssql dialect expects at least "
"the 1.0 series of the pymssql DBAPI."
)
return module
def _get_server_version_info(self, connection):
vers = connection.scalar("select @@version")
m = re.match(r"Microsoft .*? - (\d+).(\d+).(\d+).(\d+)", vers)
if m:
return tuple(int(x) for x in m.group(1, 2, 3, 4))
else:
return None
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
opts.update(url.query)
port = opts.pop("port", None)
if port and "host" in opts:
opts["host"] = "%s:%s" % (opts["host"], port)
return [[], opts]
def is_disconnect(self, e, connection, cursor):
for msg in (
"Adaptive Server connection timed out",
"Net-Lib error during Connection reset by peer",
"message 20003", # connection timeout
"Error 10054",
"Not connected to any MS SQL server",
"Connection is closed",
"message 20006", # Write to the server failed
"message 20017", # Unexpected EOF from the server
"message 20047", # DBPROCESS is dead or not enabled
):
if msg in str(e):
return True
else:
return False
def set_isolation_level(self, connection, level):
if level == "AUTOCOMMIT":
connection.autocommit(True)
else:
connection.autocommit(False)
super(MSDialect_pymssql, self).set_isolation_level(
connection, level
)
dialect = MSDialect_pymssql
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mssql/adodbapi.py
|
# mssql/adodbapi.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+adodbapi
:name: adodbapi
:dbapi: adodbapi
:connectstring: mssql+adodbapi://<username>:<password>@<dsnname>
:url: http://adodbapi.sourceforge.net/
.. note::
The adodbapi dialect is not implemented in SQLAlchemy versions 0.6 and
above at this time.
"""
import datetime
import sys
from sqlalchemy import types as sqltypes
from sqlalchemy import util
from sqlalchemy.dialects.mssql.base import MSDateTime
from sqlalchemy.dialects.mssql.base import MSDialect
class MSDateTime_adodbapi(MSDateTime):
def result_processor(self, dialect, coltype):
def process(value):
# adodbapi will return datetimes with empty time
# values as datetime.date() objects.
# Promote them back to full datetime.datetime()
if type(value) is datetime.date:
return datetime.datetime(value.year, value.month, value.day)
return value
return process
class MSDialect_adodbapi(MSDialect):
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
supports_unicode = sys.maxunicode == 65535
supports_unicode_statements = True
driver = "adodbapi"
@classmethod
def import_dbapi(cls):
import adodbapi as module
return module
colspecs = util.update_copy(
MSDialect.colspecs, {sqltypes.DateTime: MSDateTime_adodbapi}
)
def create_connect_args(self, url):
def check_quote(token):
if ";" in str(token):
token = "'%s'" % token
return token
keys = dict((k, check_quote(v)) for k, v in url.query.items())
connectors = ["Provider=SQLOLEDB"]
if "port" in keys:
connectors.append(
"Data Source=%s, %s" % (keys.get("host"), keys.get("port"))
)
else:
connectors.append("Data Source=%s" % keys.get("host"))
connectors.append("Initial Catalog=%s" % keys.get("database"))
user = keys.get("user")
if user:
connectors.append("User Id=%s" % user)
connectors.append("Password=%s" % keys.get("password", ""))
else:
connectors.append("Integrated Security=SSPI")
return [[";".join(connectors)], {}]
def is_disconnect(self, e, connection, cursor):
return isinstance(
e, self.dbapi.adodbapi.DatabaseError
) and "'connection failure'" in str(e)
dialect = MSDialect_adodbapi
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mssql/provision.py
|
from ... import create_engine
from ... import exc
from ...testing.provision import create_db
from ...testing.provision import drop_db
from ...testing.provision import log
from ...testing.provision import run_reap_dbs
from ...testing.provision import update_db_opts
@update_db_opts.for_db("mssql")
def _mssql_update_db_opts(db_url, db_opts):
db_opts["legacy_schema_aliasing"] = False
@create_db.for_db("mssql")
def _mssql_create_db(cfg, eng, ident):
with eng.connect().execution_options(isolation_level="AUTOCOMMIT") as conn:
conn.execute("create database %s" % ident)
conn.execute(
"ALTER DATABASE %s SET ALLOW_SNAPSHOT_ISOLATION ON" % ident
)
conn.execute(
"ALTER DATABASE %s SET READ_COMMITTED_SNAPSHOT ON" % ident
)
conn.execute("use %s" % ident)
conn.execute("create schema test_schema")
conn.execute("create schema test_schema_2")
@drop_db.for_db("mssql")
def _mssql_drop_db(cfg, eng, ident):
with eng.connect().execution_options(isolation_level="AUTOCOMMIT") as conn:
_mssql_drop_ignore(conn, ident)
def _mssql_drop_ignore(conn, ident):
try:
# typically when this happens, we can't KILL the session anyway,
# so let the cleanup process drop the DBs
# for row in conn.execute(
# "select session_id from sys.dm_exec_sessions "
# "where database_id=db_id('%s')" % ident):
# log.info("killing SQL server sesssion %s", row['session_id'])
# conn.execute("kill %s" % row['session_id'])
conn.execute("drop database %s" % ident)
log.info("Reaped db: %s", ident)
return True
except exc.DatabaseError as err:
log.warning("couldn't drop db: %s", err)
return False
@run_reap_dbs.for_db("mssql")
def _reap_mssql_dbs(url, idents):
log.info("db reaper connecting to %r", url)
eng = create_engine(url)
with eng.connect().execution_options(isolation_level="AUTOCOMMIT") as conn:
log.info("identifiers in file: %s", ", ".join(idents))
to_reap = conn.execute(
"select d.name from sys.databases as d where name "
"like 'TEST_%' and not exists (select session_id "
"from sys.dm_exec_sessions "
"where database_id=d.database_id)"
)
all_names = {dbname.lower() for (dbname,) in to_reap}
to_drop = set()
for name in all_names:
if name in idents:
to_drop.add(name)
dropped = total = 0
for total, dbname in enumerate(to_drop, 1):
if _mssql_drop_ignore(conn, dbname):
dropped += 1
log.info(
"Dropped %d out of %d stale databases detected", dropped, total
)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mssql/information_schema.py
|
# mssql/information_schema.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
# TODO: should be using the sys. catalog with SQL Server, not information
# schema
from ... import cast
from ... import Column
from ... import MetaData
from ... import Table
from ... import util
from ...ext.compiler import compiles
from ...sql import expression
from ...types import Boolean
from ...types import Integer
from ...types import String
from ...types import TypeDecorator
from ...types import Unicode
ischema = MetaData()
class CoerceUnicode(TypeDecorator):
impl = Unicode
def process_bind_param(self, value, dialect):
if util.py2k and isinstance(value, util.binary_type):
value = value.decode(dialect.encoding)
return value
def bind_expression(self, bindvalue):
return _cast_on_2005(bindvalue)
class _cast_on_2005(expression.ColumnElement):
def __init__(self, bindvalue):
self.bindvalue = bindvalue
@compiles(_cast_on_2005)
def _compile(element, compiler, **kw):
from . import base
if (
compiler.dialect.server_version_info is None
or compiler.dialect.server_version_info < base.MS_2005_VERSION
):
return compiler.process(element.bindvalue, **kw)
else:
return compiler.process(cast(element.bindvalue, Unicode), **kw)
schemata = Table(
"SCHEMATA",
ischema,
Column("CATALOG_NAME", CoerceUnicode, key="catalog_name"),
Column("SCHEMA_NAME", CoerceUnicode, key="schema_name"),
Column("SCHEMA_OWNER", CoerceUnicode, key="schema_owner"),
schema="INFORMATION_SCHEMA",
)
tables = Table(
"TABLES",
ischema,
Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"),
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("TABLE_TYPE", CoerceUnicode, key="table_type"),
schema="INFORMATION_SCHEMA",
)
columns = Table(
"COLUMNS",
ischema,
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
Column("IS_NULLABLE", Integer, key="is_nullable"),
Column("DATA_TYPE", String, key="data_type"),
Column("ORDINAL_POSITION", Integer, key="ordinal_position"),
Column(
"CHARACTER_MAXIMUM_LENGTH", Integer, key="character_maximum_length"
),
Column("NUMERIC_PRECISION", Integer, key="numeric_precision"),
Column("NUMERIC_SCALE", Integer, key="numeric_scale"),
Column("COLUMN_DEFAULT", Integer, key="column_default"),
Column("COLLATION_NAME", String, key="collation_name"),
schema="INFORMATION_SCHEMA",
)
constraints = Table(
"TABLE_CONSTRAINTS",
ischema,
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
Column("CONSTRAINT_TYPE", CoerceUnicode, key="constraint_type"),
schema="INFORMATION_SCHEMA",
)
column_constraints = Table(
"CONSTRAINT_COLUMN_USAGE",
ischema,
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
schema="INFORMATION_SCHEMA",
)
key_constraints = Table(
"KEY_COLUMN_USAGE",
ischema,
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
Column("CONSTRAINT_SCHEMA", CoerceUnicode, key="constraint_schema"),
Column("ORDINAL_POSITION", Integer, key="ordinal_position"),
schema="INFORMATION_SCHEMA",
)
ref_constraints = Table(
"REFERENTIAL_CONSTRAINTS",
ischema,
Column("CONSTRAINT_CATALOG", CoerceUnicode, key="constraint_catalog"),
Column("CONSTRAINT_SCHEMA", CoerceUnicode, key="constraint_schema"),
Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
# TODO: is CATLOG misspelled ?
Column(
"UNIQUE_CONSTRAINT_CATLOG",
CoerceUnicode,
key="unique_constraint_catalog",
),
Column(
"UNIQUE_CONSTRAINT_SCHEMA",
CoerceUnicode,
key="unique_constraint_schema",
),
Column(
"UNIQUE_CONSTRAINT_NAME", CoerceUnicode, key="unique_constraint_name"
),
Column("MATCH_OPTION", String, key="match_option"),
Column("UPDATE_RULE", String, key="update_rule"),
Column("DELETE_RULE", String, key="delete_rule"),
schema="INFORMATION_SCHEMA",
)
views = Table(
"VIEWS",
ischema,
Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"),
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("VIEW_DEFINITION", CoerceUnicode, key="view_definition"),
Column("CHECK_OPTION", String, key="check_option"),
Column("IS_UPDATABLE", String, key="is_updatable"),
schema="INFORMATION_SCHEMA",
)
computed_columns = Table(
"computed_columns",
ischema,
Column("object_id", Integer),
Column("name", CoerceUnicode),
Column("is_computed", Boolean),
Column("is_persisted", Boolean),
Column("definition", CoerceUnicode),
schema="sys",
)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mssql/pyodbc.py
|
# mssql/pyodbc.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: mssql+pyodbc
:name: PyODBC
:dbapi: pyodbc
:connectstring: mssql+pyodbc://<username>:<password>@<dsnname>
:url: http://pypi.python.org/pypi/pyodbc/
Connecting to PyODBC
--------------------
The URL here is to be translated to PyODBC connection strings, as
detailed in `ConnectionStrings <https://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_.
DSN Connections
^^^^^^^^^^^^^^^
A DSN connection in ODBC means that a pre-existing ODBC datasource is
configured on the client machine. The application then specifies the name
of this datasource, which encompasses details such as the specific ODBC driver
in use as well as the network address of the database. Assuming a datasource
is configured on the client, a basic DSN-based connection looks like::
engine = create_engine("mssql+pyodbc://scott:tiger@some_dsn")
The above will pass the following connection string to PyODBC::
dsn=mydsn;UID=user;PWD=pass
If the username and password are omitted, the DSN form will also add
the ``Trusted_Connection=yes`` directive to the ODBC string.
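For example, a DSN connection without credentials, relying on the implicit
``Trusted_Connection=yes`` behavior described above (``some_dsn`` is a
placeholder)::

    engine = create_engine("mssql+pyodbc://some_dsn")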
Hostname Connections
^^^^^^^^^^^^^^^^^^^^
Hostname-based connections are also supported by pyodbc. These are often
easier to use than a DSN and have the additional advantage that the specific
database name to connect towards may be specified locally in the URL, rather
than it being fixed as part of a datasource configuration.
When using a hostname connection, the driver name must also be specified in the
query parameters of the URL. As these names usually have spaces in them, the
name must be URL encoded which means using plus signs for spaces::
engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0")
Other keywords interpreted by the Pyodbc dialect to be passed to
``pyodbc.connect()`` in both the DSN and hostname cases include:
``odbc_autotranslate``, ``ansi``, ``unicode_results``, ``autocommit``.
Note that in order for the dialect to recognize these keywords
(including the ``driver`` keyword above) they must be all lowercase.
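As a minimal sketch (hypothetical DSN name; the keyword values shown are
illustrative only), such keywords may be supplied in the URL query string::
engine = create_engine(
"mssql+pyodbc://scott:tiger@some_dsn?autocommit=true&ansi=true"
)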
Pass through exact Pyodbc string
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A PyODBC connection string can also be sent in pyodbc's format directly, as
specified in `ConnectionStrings
<https://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_ into the driver
using the parameter ``odbc_connect``. The delimiters must be URL encoded, as
illustrated below using ``urllib.parse.quote_plus``::
import urllib
params = urllib.parse.quote_plus("DRIVER={SQL Server Native Client 10.0};SERVER=dagger;DATABASE=test;UID=user;PWD=password")
engine = create_engine("mssql+pyodbc:///?odbc_connect=%s" % params)
Driver / Unicode Support
-------------------------
PyODBC works best with Microsoft ODBC drivers, particularly in the area
of Unicode support on both Python 2 and Python 3.
Using the FreeTDS ODBC drivers on Linux or OSX with PyODBC is **not**
recommended; there have been historically many Unicode-related issues
in this area, including before Microsoft offered ODBC drivers for Linux
and OSX. Now that Microsoft offers drivers for all platforms, these are
recommended for PyODBC support. FreeTDS remains relevant for
non-ODBC drivers such as pymssql where it works very well.
Rowcount Support
----------------
Pyodbc only has partial support for rowcount. See the notes at
:ref:`mssql_rowcount_versioning` for important notes when using ORM
versioning.
.. _mssql_pyodbc_fastexecutemany:
Fast Executemany Mode
---------------------
The Pyodbc driver has added support for a "fast executemany" mode of execution
which greatly reduces round trips for a DBAPI ``executemany()`` call when using
Microsoft ODBC drivers. The feature is enabled by setting the flag
``.fast_executemany`` on the DBAPI cursor when an executemany call is to be
used. The SQLAlchemy pyodbc SQL Server dialect supports setting this flag
automatically when the ``.fast_executemany`` flag is passed to
:func:`_sa.create_engine`
; note that the ODBC driver must be the Microsoft driver
in order to use this flag::
engine = create_engine(
"mssql+pyodbc://scott:tiger@mssql2017:1433/test?driver=ODBC+Driver+13+for+SQL+Server",
fast_executemany=True)
.. versionadded:: 1.3
.. seealso::
`fast executemany <https://github.com/mkleehammer/pyodbc/wiki/Features-beyond-the-DB-API#fast_executemany>`_
- on github
""" # noqa
import datetime
import decimal
import re
import struct
from .base import BINARY
from .base import DATETIMEOFFSET
from .base import MSDialect
from .base import MSExecutionContext
from .base import VARBINARY
from ... import exc
from ... import types as sqltypes
from ... import util
from ...connectors.pyodbc import PyODBCConnector
class _ms_numeric_pyodbc(object):
"""Turns Decimals with adjusted() < 0 or > 7 into strings.
The routines here are needed for older pyodbc versions
as well as current mxODBC versions.
"""
def bind_processor(self, dialect):
super_process = super(_ms_numeric_pyodbc, self).bind_processor(dialect)
if not dialect._need_decimal_fix:
return super_process
def process(value):
if self.asdecimal and isinstance(value, decimal.Decimal):
adjusted = value.adjusted()
if adjusted < 0:
return self._small_dec_to_string(value)
elif adjusted > 7:
return self._large_dec_to_string(value)
if super_process:
return super_process(value)
else:
return value
return process
# these routines needed for older versions of pyodbc.
# as of 2.1.8 this logic is integrated.
def _small_dec_to_string(self, value):
return "%s0.%s%s" % (
(value < 0 and "-" or ""),
"0" * (abs(value.adjusted()) - 1),
"".join([str(nint) for nint in value.as_tuple()[1]]),
)
def _large_dec_to_string(self, value):
_int = value.as_tuple()[1]
if "E" in str(value):
result = "%s%s%s" % (
(value < 0 and "-" or ""),
"".join([str(s) for s in _int]),
"0" * (value.adjusted() - (len(_int) - 1)),
)
else:
if (len(_int) - 1) > value.adjusted():
result = "%s%s.%s" % (
(value < 0 and "-" or ""),
"".join([str(s) for s in _int][0 : value.adjusted() + 1]),
"".join([str(s) for s in _int][value.adjusted() + 1 :]),
)
else:
result = "%s%s" % (
(value < 0 and "-" or ""),
"".join([str(s) for s in _int][0 : value.adjusted() + 1]),
)
return result
class _MSNumeric_pyodbc(_ms_numeric_pyodbc, sqltypes.Numeric):
pass
class _MSFloat_pyodbc(_ms_numeric_pyodbc, sqltypes.Float):
pass
class _ms_binary_pyodbc(object):
"""Wraps binary values in dialect-specific Binary wrapper.
If the value is null, return a pyodbc-specific BinaryNull
object to prevent pyODBC [and FreeTDS] from defaulting binary
NULL types to SQLWCHAR and causing implicit conversion errors.
"""
def bind_processor(self, dialect):
if dialect.dbapi is None:
return None
DBAPIBinary = dialect.dbapi.Binary
def process(value):
if value is not None:
return DBAPIBinary(value)
else:
# pyodbc-specific
return dialect.dbapi.BinaryNull
return process
class _ODBCDateTimeOffset(DATETIMEOFFSET):
def bind_processor(self, dialect):
def process(value):
if value is None:
return None
elif isinstance(value, util.string_types):
# if a string was passed directly, allow it through
return value
else:
# Convert to string format required by T-SQL
dto_string = value.strftime("%Y-%m-%d %H:%M:%S.%f %z")
# offset needs a colon, e.g., -0700 -> -07:00
# "UTC offset in the form (+-)HHMM[SS[.ffffff]]"
# backend currently rejects seconds / fractional seconds
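# e.g. (illustrative) "2020-01-01 12:00:00.000000 -0700"
# becomes "2020-01-01 12:00:00.000000 -07:00"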
dto_string = re.sub(
r"([\+\-]\d{2})([\d\.]+)$", r"\1:\2", dto_string
)
return dto_string
return process
class _VARBINARY_pyodbc(_ms_binary_pyodbc, VARBINARY):
pass
class _BINARY_pyodbc(_ms_binary_pyodbc, BINARY):
pass
class MSExecutionContext_pyodbc(MSExecutionContext):
_embedded_scope_identity = False
def pre_exec(self):
"""where appropriate, issue "select scope_identity()" in the same
statement.
Background on why "scope_identity()" is preferable to "@@identity":
http://msdn.microsoft.com/en-us/library/ms190315.aspx
Background on why we attempt to embed "scope_identity()" into the same
statement as the INSERT:
http://code.google.com/p/pyodbc/wiki/FAQs#How_do_I_retrieve_autogenerated/identity_values?
"""
super(MSExecutionContext_pyodbc, self).pre_exec()
# don't embed the scope_identity select into an
# "INSERT .. DEFAULT VALUES"
if (
self._select_lastrowid
and self.dialect.use_scope_identity
and len(self.parameters[0])
):
self._embedded_scope_identity = True
self.statement += "; select scope_identity()"
def post_exec(self):
if self._embedded_scope_identity:
# Fetch the last inserted id from the manipulated statement
# We may have to skip over a number of result sets with
# no data (due to triggers, etc.)
while True:
try:
# fetchall() ensures the cursor is consumed
# without closing it (FreeTDS particularly)
row = self.cursor.fetchall()[0]
break
except self.dialect.dbapi.Error:
# no way around this - nextset() consumes the previous set
# so we need to just keep flipping
self.cursor.nextset()
self._lastrowid = int(row[0])
else:
super(MSExecutionContext_pyodbc, self).post_exec()
class MSDialect_pyodbc(PyODBCConnector, MSDialect):
# mssql still has problems with this on Linux
supports_sane_rowcount_returning = False
execution_ctx_cls = MSExecutionContext_pyodbc
colspecs = util.update_copy(
MSDialect.colspecs,
{
sqltypes.Numeric: _MSNumeric_pyodbc,
sqltypes.Float: _MSFloat_pyodbc,
BINARY: _BINARY_pyodbc,
DATETIMEOFFSET: _ODBCDateTimeOffset,
# SQL Server dialect has a VARBINARY that is just to support
# "deprecate_large_types" w/ VARBINARY(max), but also we must
# handle the usual SQL standard VARBINARY
VARBINARY: _VARBINARY_pyodbc,
sqltypes.VARBINARY: _VARBINARY_pyodbc,
sqltypes.LargeBinary: _VARBINARY_pyodbc,
},
)
def __init__(
self, description_encoding=None, fast_executemany=False, **params
):
if "description_encoding" in params:
self.description_encoding = params.pop("description_encoding")
super(MSDialect_pyodbc, self).__init__(**params)
self.use_scope_identity = (
self.use_scope_identity
and self.dbapi
and hasattr(self.dbapi.Cursor, "nextset")
)
self._need_decimal_fix = self.dbapi and self._dbapi_version() < (
2,
1,
8,
)
self.fast_executemany = fast_executemany
def _get_server_version_info(self, connection):
try:
# "Version of the instance of SQL Server, in the form
# of 'major.minor.build.revision'"
raw = connection.scalar(
"SELECT CAST(SERVERPROPERTY('ProductVersion') AS VARCHAR)"
)
except exc.DBAPIError:
# SQL Server docs indicate this function isn't present prior to
# 2008. Before we had the VARCHAR cast above, pyodbc would also
# fail on this query.
return super(MSDialect_pyodbc, self)._get_server_version_info(
connection, allow_chars=False
)
else:
version = []
r = re.compile(r"[.\-]")
for n in r.split(raw):
try:
version.append(int(n))
except ValueError:
pass
return tuple(version)
def on_connect(self):
super_ = super(MSDialect_pyodbc, self).on_connect()
def on_connect(conn):
if super_ is not None:
super_(conn)
self._setup_timestampoffset_type(conn)
return on_connect
def _setup_timestampoffset_type(self, connection):
# output converter function for datetimeoffset
def _handle_datetimeoffset(dto_value):
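# per the unpacking below, the "<6hI2h" layout corresponds to
# (year, month, day, hour, minute, second, fraction in nanoseconds,
# timezone hour offset, timezone minute offset)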
tup = struct.unpack("<6hI2h", dto_value)
return datetime.datetime(
tup[0],
tup[1],
tup[2],
tup[3],
tup[4],
tup[5],
tup[6] // 1000,
util.timezone(
datetime.timedelta(hours=tup[7], minutes=tup[8])
),
)
odbc_SQL_SS_TIMESTAMPOFFSET = -155 # as defined in SQLNCLI.h
connection.add_output_converter(
odbc_SQL_SS_TIMESTAMPOFFSET, _handle_datetimeoffset
)
def do_executemany(self, cursor, statement, parameters, context=None):
if self.fast_executemany:
cursor.fast_executemany = True
super(MSDialect_pyodbc, self).do_executemany(
cursor, statement, parameters, context=context
)
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.Error):
code = e.args[0]
if code in (
"08S01",
"01002",
"08003",
"08007",
"08S02",
"08001",
"HYT00",
"HY010",
"10054",
):
return True
return super(MSDialect_pyodbc, self).is_disconnect(
e, connection, cursor
)
dialect = MSDialect_pyodbc
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/mssql/base.py
# mssql/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql
:name: Microsoft SQL Server
.. _mssql_identity:
Auto Increment Behavior / IDENTITY Columns
------------------------------------------
SQL Server provides so-called "auto incrementing" behavior using the
``IDENTITY`` construct, which can be placed on any single integer column in a
table. SQLAlchemy considers ``IDENTITY`` within its default "autoincrement"
behavior for an integer primary key column, described at
:paramref:`_schema.Column.autoincrement`. This means that by default,
the first
integer primary key column in a :class:`_schema.Table`
will be considered to be the
identity column and will generate DDL as such::
from sqlalchemy import Table, MetaData, Column, Integer
m = MetaData()
t = Table('t', m,
Column('id', Integer, primary_key=True),
Column('x', Integer))
m.create_all(engine)
The above example will generate DDL as:
.. sourcecode:: sql
CREATE TABLE t (
id INTEGER NOT NULL IDENTITY(1,1),
x INTEGER NULL,
PRIMARY KEY (id)
)
For the case where this default generation of ``IDENTITY`` is not desired,
specify ``False`` for the :paramref:`_schema.Column.autoincrement` flag,
on the first integer primary key column::
m = MetaData()
t = Table('t', m,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('x', Integer))
m.create_all(engine)
To add the ``IDENTITY`` keyword to a non-primary key column, specify
``True`` for the :paramref:`_schema.Column.autoincrement` flag on the desired
:class:`_schema.Column` object, and ensure that
:paramref:`_schema.Column.autoincrement`
is set to ``False`` on any integer primary key column::
m = MetaData()
t = Table('t', m,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('x', Integer, autoincrement=True))
m.create_all(engine)
.. versionchanged:: 1.3 Added ``mssql_identity_start`` and
``mssql_identity_increment`` parameters to :class:`_schema.Column`.
These replace
the use of the :class:`.Sequence` object in order to specify these values.
.. deprecated:: 1.3
The use of :class:`.Sequence` to specify IDENTITY characteristics is
deprecated and will be removed in a future release. Please use
the ``mssql_identity_start`` and ``mssql_identity_increment`` parameters
documented at :ref:`mssql_identity`.
.. note::
There can only be one IDENTITY column on the table. When using
``autoincrement=True`` to enable the IDENTITY keyword, SQLAlchemy does not
guard against multiple columns specifying the option simultaneously. The
SQL Server database will instead reject the ``CREATE TABLE`` statement.
.. note::
An INSERT statement which attempts to provide a value for a column that is
marked with IDENTITY will be rejected by SQL Server. In order for the
value to be accepted, a session-level option "SET IDENTITY_INSERT" must be
enabled. The SQLAlchemy SQL Server dialect will perform this operation
automatically when using a core :class:`_expression.Insert`
construct; if the
execution specifies a value for the IDENTITY column, the "IDENTITY_INSERT"
option will be enabled for the span of that statement's invocation. However,
this scenario is not high performing and should not be relied upon for
normal use. If a table doesn't actually require IDENTITY behavior in its
integer primary key column, the keyword should be disabled when creating
the table by ensuring that ``autoincrement=False`` is set.
Controlling "Start" and "Increment"
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Specific control over the "start" and "increment" values for
the ``IDENTITY`` generator are provided using the
``mssql_identity_start`` and ``mssql_identity_increment`` parameters
passed to the :class:`_schema.Column` object::
from sqlalchemy import Table, Integer, Column
test = Table(
'test', metadata,
Column(
'id', Integer, primary_key=True, mssql_identity_start=100,
mssql_identity_increment=10
),
Column('name', String(20))
)
The CREATE TABLE for the above :class:`_schema.Table` object would be:
.. sourcecode:: sql
CREATE TABLE test (
id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
name VARCHAR(20) NULL
)
.. versionchanged:: 1.3 The ``mssql_identity_start`` and
``mssql_identity_increment`` parameters are now used to affect the
``IDENTITY`` generator for a :class:`_schema.Column` under SQL Server.
Previously, the :class:`.Sequence` object was used. As SQL Server now
supports real sequences as a separate construct, :class:`.Sequence` will be
functional in the normal way in a future SQLAlchemy version.
INSERT behavior
^^^^^^^^^^^^^^^^
Handling of the ``IDENTITY`` column at INSERT time involves two key
techniques. The most common is being able to fetch the "last inserted value"
for a given ``IDENTITY`` column, a process which SQLAlchemy performs
implicitly in many cases, most importantly within the ORM.
The process for fetching this value has several variants:
* In the vast majority of cases, RETURNING is used in conjunction with INSERT
statements on SQL Server in order to get newly generated primary key values:
.. sourcecode:: sql
INSERT INTO t (x) OUTPUT inserted.id VALUES (?)
* When RETURNING is not available or has been disabled via
``implicit_returning=False``, either the ``scope_identity()`` function or
the ``@@identity`` variable is used; behavior varies by backend:
* when using PyODBC, the phrase ``; select scope_identity()`` will be
appended to the end of the INSERT statement; a second result set will be
fetched in order to receive the value. Given a table as::
t = Table('t', m, Column('id', Integer, primary_key=True),
Column('x', Integer),
implicit_returning=False)
an INSERT will look like:
.. sourcecode:: sql
INSERT INTO t (x) VALUES (?); select scope_identity()
* Other dialects such as pymssql will call upon
``SELECT scope_identity() AS lastrowid`` subsequent to an INSERT
statement. If the flag ``use_scope_identity=False`` is passed to
:func:`_sa.create_engine`,
the statement ``SELECT @@identity AS lastrowid``
is used instead.
A table that contains an ``IDENTITY`` column will prohibit an INSERT statement
that refers to the identity column explicitly. The SQLAlchemy dialect will
detect when an INSERT construct, created using a core
:func:`_expression.insert`
construct (not a plain string SQL), refers to the identity column, and
in this case will emit ``SET IDENTITY_INSERT ON`` prior to the insert
statement proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the
execution. Given this example::
m = MetaData()
t = Table('t', m, Column('id', Integer, primary_key=True),
Column('x', Integer))
m.create_all(engine)
with engine.begin() as conn:
conn.execute(t.insert(), {'id': 1, 'x':1}, {'id':2, 'x':2})
The above column will be created with IDENTITY, however the INSERT statement
we emit is specifying explicit values. In the echo output we can see
how SQLAlchemy handles this:
.. sourcecode:: sql
CREATE TABLE t (
id INTEGER NOT NULL IDENTITY(1,1),
x INTEGER NULL,
PRIMARY KEY (id)
)
COMMIT
SET IDENTITY_INSERT t ON
INSERT INTO t (id, x) VALUES (?, ?)
((1, 1), (2, 2))
SET IDENTITY_INSERT t OFF
COMMIT
This
is an auxiliary use case suitable for testing and bulk insert scenarios.
MAX on VARCHAR / NVARCHAR
-------------------------
SQL Server supports the special string "MAX" within the
:class:`_types.VARCHAR` and :class:`_types.NVARCHAR` datatypes,
to indicate "maximum length possible". The dialect currently handles this as
a length of "None" in the base type, rather than supplying a
dialect-specific version of these types, so that a base type
specified such as ``VARCHAR(None)`` can assume "unlengthed" behavior on
more than one backend without using dialect-specific types.
To build a SQL Server VARCHAR or NVARCHAR with MAX length, use None::
my_table = Table(
'my_table', metadata,
Column('my_data', VARCHAR(None)),
Column('my_n_data', NVARCHAR(None))
)
Collation Support
-----------------
Character collations are supported by the base string types,
specified by the string argument "collation"::
from sqlalchemy import VARCHAR
Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))
When such a column is associated with a :class:`_schema.Table`, the
CREATE TABLE statement for this column will yield::
login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL
LIMIT/OFFSET Support
--------------------
MSSQL has no support for the LIMIT or OFFSET keywords themselves. LIMIT is
supported directly through the ``TOP`` Transact-SQL keyword::
select.limit
will yield::
SELECT TOP n
If using SQL Server 2005 or above, LIMIT with OFFSET
support is available through the ``ROW_NUMBER OVER`` construct.
For versions below 2005, LIMIT with OFFSET usage will fail.
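As a minimal sketch (1.x Core syntax; table and column names are
hypothetical), a statement using LIMIT together with OFFSET must also
include an ORDER BY, as the ``ROW_NUMBER OVER`` wrapping requires it::
stmt = select([some_table]).order_by(some_table.c.id).limit(10).offset(20)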
.. _mssql_isolation_level:
Transaction Isolation Level
---------------------------
All SQL Server dialects support setting of transaction isolation level
both via a dialect-specific parameter
:paramref:`_sa.create_engine.isolation_level`
accepted by :func:`_sa.create_engine`,
as well as the :paramref:`.Connection.execution_options.isolation_level`
argument as passed to
:meth:`_engine.Connection.execution_options`.
This feature works by issuing the
command ``SET TRANSACTION ISOLATION LEVEL <level>`` for
each new connection.
To set isolation level using :func:`_sa.create_engine`::
engine = create_engine(
"mssql+pyodbc://scott:tiger@ms_2008",
isolation_level="REPEATABLE READ"
)
To set using per-connection execution options::
connection = engine.connect()
connection = connection.execution_options(
isolation_level="READ COMMITTED"
)
Valid values for ``isolation_level`` include:
* ``AUTOCOMMIT`` - pyodbc / pymssql-specific
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``SNAPSHOT`` - specific to SQL Server
.. versionadded:: 1.1 support for isolation level setting on Microsoft
SQL Server.
.. versionadded:: 1.2 added AUTOCOMMIT isolation level setting
Nullability
-----------
MSSQL has support for three levels of column nullability. The default
nullability allows nulls and is explicit in the CREATE TABLE
construct::
name VARCHAR(20) NULL
If ``nullable=None`` is specified then no specification is made. In
other words the database's configured default is used. This will
render::
name VARCHAR(20)
If ``nullable`` is ``True`` or ``False`` then the column will be
``NULL`` or ``NOT NULL`` respectively.
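For illustration (hypothetical column names)::
Column('a', String(20), nullable=True)   # renders "a VARCHAR(20) NULL"
Column('b', String(20), nullable=False)  # renders "b VARCHAR(20) NOT NULL"
Column('c', String(20), nullable=None)   # renders "c VARCHAR(20)"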
Date / Time Handling
--------------------
DATE and TIME are supported. Bind parameters are converted
to datetime.datetime() objects as required by most MSSQL drivers,
and results are processed from strings if needed.
The DATE and TIME types are not available for MSSQL 2005 and
previous - if a server version below 2008 is detected, DDL
for these types will be issued as DATETIME.
.. _mssql_large_type_deprecation:
Large Text/Binary Type Deprecation
----------------------------------
Per
`SQL Server 2012/2014 Documentation <http://technet.microsoft.com/en-us/library/ms187993.aspx>`_,
the ``NTEXT``, ``TEXT`` and ``IMAGE`` datatypes are to be removed from SQL
Server in a future release. SQLAlchemy normally relates these types to the
:class:`.UnicodeText`, :class:`_expression.TextClause` and
:class:`.LargeBinary` datatypes.
In order to accommodate this change, a new flag ``deprecate_large_types``
is added to the dialect, which will be automatically set based on detection
of the server version in use, if not otherwise set by the user. The
behavior of this flag is as follows:
* When this flag is ``True``, the :class:`.UnicodeText`,
:class:`_expression.TextClause` and
:class:`.LargeBinary` datatypes, when used to render DDL, will render the
types ``NVARCHAR(max)``, ``VARCHAR(max)``, and ``VARBINARY(max)``,
respectively. This is a new behavior as of the addition of this flag.
* When this flag is ``False``, the :class:`.UnicodeText`,
:class:`_expression.TextClause` and
:class:`.LargeBinary` datatypes, when used to render DDL, will render the
types ``NTEXT``, ``TEXT``, and ``IMAGE``,
respectively. This is the long-standing behavior of these types.
* The flag begins with the value ``None``, before a database connection is
established. If the dialect is used to render DDL without the flag being
set, it is interpreted the same as ``False``.
* On first connection, the dialect detects if SQL Server version 2012 or
greater is in use; if the flag is still at ``None``, it sets it to ``True``
or ``False`` based on whether 2012 or greater is detected.
* The flag can be set to either ``True`` or ``False`` when the dialect
is created, typically via :func:`_sa.create_engine`::
eng = create_engine("mssql+pymssql://user:pass@host/db",
deprecate_large_types=True)
* Complete control over whether the "old" or "new" types are rendered is
available in all SQLAlchemy versions by using the UPPERCASE type objects
instead: :class:`_types.NVARCHAR`, :class:`_types.VARCHAR`,
:class:`_types.VARBINARY`, :class:`_types.TEXT`, :class:`_mssql.NTEXT`,
:class:`_mssql.IMAGE`
will always remain fixed and always output exactly that
type.
.. versionadded:: 1.0.0
.. _multipart_schema_names:
Multipart Schema Names
----------------------
SQL Server schemas sometimes require multiple parts to their "schema"
qualifier, that is, including the database name and owner name as separate
tokens, such as ``mydatabase.dbo.some_table``. These multipart names can be set
at once using the :paramref:`_schema.Table.schema` argument of
:class:`_schema.Table`::
Table(
"some_table", metadata,
Column("q", String(50)),
schema="mydatabase.dbo"
)
When performing operations such as table or component reflection, a schema
argument that contains a dot will be split into separate
"database" and "owner" components in order to correctly query the SQL
Server information schema tables, as these two values are stored separately.
Additionally, when rendering the schema name for DDL or SQL, the two
components will be quoted separately for case sensitive names and other
special characters. Given an argument as below::
Table(
"some_table", metadata,
Column("q", String(50)),
schema="MyDataBase.dbo"
)
The above schema would be rendered as ``[MyDataBase].dbo``, and also in
reflection, would be reflected using "dbo" as the owner and "MyDataBase"
as the database name.
To control how the schema name is broken into database / owner,
specify brackets (which in SQL Server are quoting characters) in the name.
Below, the "owner" will be considered as ``MyDataBase.dbo`` and the
"database" will be None::
Table(
"some_table", metadata,
Column("q", String(50)),
schema="[MyDataBase.dbo]"
)
To individually specify both database and owner name with special characters
or embedded dots, use two sets of brackets::
Table(
"some_table", metadata,
Column("q", String(50)),
schema="[MyDataBase.Period].[MyOwner.Dot]"
)
.. versionchanged:: 1.2 the SQL Server dialect now treats brackets as
identifier delimiters, splitting the schema into separate database
and owner tokens, to allow dots within either name itself.
.. _legacy_schema_rendering:
Legacy Schema Mode
------------------
Very old versions of the MSSQL dialect introduced the behavior such that a
schema-qualified table would be auto-aliased when used in a
SELECT statement; given a table::
account_table = Table(
'account', metadata,
Column('id', Integer, primary_key=True),
Column('info', String(100)),
schema="customer_schema"
)
this legacy mode of rendering would assume that "customer_schema.account"
would not be accepted by all parts of the SQL statement, as illustrated
below::
>>> eng = create_engine("mssql+pymssql://mydsn", legacy_schema_aliasing=True)
>>> print(account_table.select().compile(eng))
SELECT account_1.id, account_1.info
FROM customer_schema.account AS account_1
This mode of behavior is now off by default, as it appears to have served
no purpose; however in the case that legacy applications rely upon it,
it is available using the ``legacy_schema_aliasing`` argument to
:func:`_sa.create_engine` as illustrated above.
.. versionchanged:: 1.1 the ``legacy_schema_aliasing`` flag introduced
in version 1.0.5 to allow disabling of legacy mode for schemas now
defaults to False.
.. _mssql_indexes:
Clustered Index Support
-----------------------
The MSSQL dialect supports clustered indexes (and primary keys) via the
``mssql_clustered`` option. This option is available to :class:`.Index`,
:class:`.UniqueConstraint`, and :class:`.PrimaryKeyConstraint`.
To generate a clustered index::
Index("my_index", table.c.x, mssql_clustered=True)
which renders the index as ``CREATE CLUSTERED INDEX my_index ON table (x)``.
To generate a clustered primary key use::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=True))
which will render the table, for example, as::
CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
PRIMARY KEY CLUSTERED (x, y))
Similarly, we can generate a clustered unique constraint using::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x"),
UniqueConstraint("y", mssql_clustered=True),
)
To explicitly request a non-clustered primary key (for example, when
a separate clustered index is desired), use::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=False))
which will render the table, for example, as::
CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
PRIMARY KEY NONCLUSTERED (x, y))
.. versionchanged:: 1.1 the ``mssql_clustered`` option now defaults
to None, rather than False. ``mssql_clustered=False`` now explicitly
renders the NONCLUSTERED clause, whereas None omits the CLUSTERED
clause entirely, allowing SQL Server defaults to take effect.
MSSQL-Specific Index Options
-----------------------------
In addition to clustering, the MSSQL dialect supports other special options
for :class:`.Index`.
INCLUDE
^^^^^^^
The ``mssql_include`` option renders INCLUDE(colname) for the given string
names::
Index("my_index", table.c.x, mssql_include=['y'])
would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)``
.. _mssql_index_where:
Filtered Indexes
^^^^^^^^^^^^^^^^
The ``mssql_where`` option renders WHERE(condition) for the given string
names::
Index("my_index", table.c.x, mssql_where=table.c.x > 10)
would render the index as ``CREATE INDEX my_index ON table (x) WHERE x > 10``.
.. versionadded:: 1.3.4
Index ordering
^^^^^^^^^^^^^^
Index ordering is available via functional expressions, such as::
Index("my_index", table.c.x.desc())
would render the index as ``CREATE INDEX my_index ON table (x DESC)``
.. seealso::
:ref:`schema_indexes_functional`
Compatibility Levels
--------------------
MSSQL supports the notion of setting compatibility levels at the
database level. This allows, for instance, running a database that
is compatible with SQL2000 while running on a SQL2005 database
server. ``server_version_info`` will always return the database
server version information (in this case SQL2005) and not the
compatibility level information. Because of this, if running under
a backwards compatibility mode SQLAlchemy may attempt to use T-SQL
statements that are unable to be parsed by the database server.
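As a sketch only, the version detected from the server (not the
compatibility level) is available after the first connection via
``engine.dialect.server_version_info``::
engine = create_engine("mssql+pyodbc://scott:tiger@some_dsn")
with engine.connect() as conn:
print(engine.dialect.server_version_info)  # e.g. (10, 50, 1600, 1)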
Triggers
--------
SQLAlchemy by default uses OUTPUT INSERTED to get at newly
generated primary key values via IDENTITY columns or other
server side defaults. MS-SQL does not
allow the usage of OUTPUT INSERTED on tables that have triggers.
To disable the usage of OUTPUT INSERTED on a per-table basis,
specify ``implicit_returning=False`` for each :class:`_schema.Table`
which has triggers::
Table('mytable', metadata,
Column('id', Integer, primary_key=True),
# ...,
implicit_returning=False
)
Declarative form::
class MyClass(Base):
# ...
__table_args__ = {'implicit_returning':False}
This option can also be specified engine-wide using the
``implicit_returning=False`` argument on :func:`_sa.create_engine`.
.. _mssql_rowcount_versioning:
Rowcount Support / ORM Versioning
---------------------------------
The SQL Server drivers may have limited ability to return the number
of rows updated from an UPDATE or DELETE statement.
As of this writing, the PyODBC driver is not able to return a rowcount when
OUTPUT INSERTED is used. This impacts the SQLAlchemy ORM's versioning feature
in many cases where server-side value generators are in use in that while the
versioning operations can succeed, the ORM cannot always check that an UPDATE
or DELETE statement matched the number of rows expected, which is how it
verifies that the version identifier matched. When this condition occurs, a
warning will be emitted but the operation will proceed.
The use of OUTPUT INSERTED can be disabled by setting the
:paramref:`_schema.Table.implicit_returning` flag to ``False`` on a particular
:class:`_schema.Table`, which in declarative looks like::
class MyTable(Base):
__tablename__ = 'mytable'
id = Column(Integer, primary_key=True)
stuff = Column(String(10))
timestamp = Column(TIMESTAMP(), default=text('DEFAULT'))
__mapper_args__ = {
'version_id_col': timestamp,
'version_id_generator': False,
}
__table_args__ = {
'implicit_returning': False
}
Enabling Snapshot Isolation
---------------------------
SQL Server has a default transaction
isolation mode that locks entire tables, and causes even mildly concurrent
applications to have long held locks and frequent deadlocks.
Enabling snapshot isolation for the database as a whole is recommended
for modern levels of concurrency support. This is accomplished via the
following ALTER DATABASE commands executed at the SQL prompt::
ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON
ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON
Background on SQL Server snapshot isolation is available at
http://msdn.microsoft.com/en-us/library/ms175095.aspx.
""" # noqa
import codecs
import datetime
import operator
import re
from . import information_schema as ischema
from ... import engine
from ... import exc
from ... import schema as sa_schema
from ... import sql
from ... import types as sqltypes
from ... import util
from ...engine import default
from ...engine import reflection
from ...sql import compiler
from ...sql import expression
from ...sql import func
from ...sql import quoted_name
from ...sql import util as sql_util
from ...types import BIGINT
from ...types import BINARY
from ...types import CHAR
from ...types import DATE
from ...types import DATETIME
from ...types import DECIMAL
from ...types import FLOAT
from ...types import INTEGER
from ...types import NCHAR
from ...types import NUMERIC
from ...types import NVARCHAR
from ...types import SMALLINT
from ...types import TEXT
from ...types import VARCHAR
from ...util import update_wrapper
from ...util.langhelpers import public_factory
# http://sqlserverbuilds.blogspot.com/
MS_2016_VERSION = (13,)
MS_2014_VERSION = (12,)
MS_2012_VERSION = (11,)
MS_2008_VERSION = (10,)
MS_2005_VERSION = (9,)
MS_2000_VERSION = (8,)
RESERVED_WORDS = set(
[
"add",
"all",
"alter",
"and",
"any",
"as",
"asc",
"authorization",
"backup",
"begin",
"between",
"break",
"browse",
"bulk",
"by",
"cascade",
"case",
"check",
"checkpoint",
"close",
"clustered",
"coalesce",
"collate",
"column",
"commit",
"compute",
"constraint",
"contains",
"containstable",
"continue",
"convert",
"create",
"cross",
"current",
"current_date",
"current_time",
"current_timestamp",
"current_user",
"cursor",
"database",
"dbcc",
"deallocate",
"declare",
"default",
"delete",
"deny",
"desc",
"disk",
"distinct",
"distributed",
"double",
"drop",
"dump",
"else",
"end",
"errlvl",
"escape",
"except",
"exec",
"execute",
"exists",
"exit",
"external",
"fetch",
"file",
"fillfactor",
"for",
"foreign",
"freetext",
"freetexttable",
"from",
"full",
"function",
"goto",
"grant",
"group",
"having",
"holdlock",
"identity",
"identity_insert",
"identitycol",
"if",
"in",
"index",
"inner",
"insert",
"intersect",
"into",
"is",
"join",
"key",
"kill",
"left",
"like",
"lineno",
"load",
"merge",
"national",
"nocheck",
"nonclustered",
"not",
"null",
"nullif",
"of",
"off",
"offsets",
"on",
"open",
"opendatasource",
"openquery",
"openrowset",
"openxml",
"option",
"or",
"order",
"outer",
"over",
"percent",
"pivot",
"plan",
"precision",
"primary",
"print",
"proc",
"procedure",
"public",
"raiserror",
"read",
"readtext",
"reconfigure",
"references",
"replication",
"restore",
"restrict",
"return",
"revert",
"revoke",
"right",
"rollback",
"rowcount",
"rowguidcol",
"rule",
"save",
"schema",
"securityaudit",
"select",
"session_user",
"set",
"setuser",
"shutdown",
"some",
"statistics",
"system_user",
"table",
"tablesample",
"textsize",
"then",
"to",
"top",
"tran",
"transaction",
"trigger",
"truncate",
"tsequal",
"union",
"unique",
"unpivot",
"update",
"updatetext",
"use",
"user",
"values",
"varying",
"view",
"waitfor",
"when",
"where",
"while",
"with",
"writetext",
]
)
class REAL(sqltypes.REAL):
__visit_name__ = "REAL"
def __init__(self, **kw):
# REAL is a synonym for FLOAT(24) on SQL server.
# it is only accepted as the word "REAL" in DDL, the numeric
# precision value is not allowed to be present
kw.setdefault("precision", 24)
super(REAL, self).__init__(**kw)
class TINYINT(sqltypes.Integer):
__visit_name__ = "TINYINT"
# MSSQL DATE/TIME types have varied behavior, sometimes returning
# strings. MSDate/TIME check for everything, and always
# filter bind parameters into datetime objects (required by pyodbc,
# not sure about other dialects).
class _MSDate(sqltypes.Date):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
_reg = re.compile(r"(\d+)-(\d+)-(\d+)")
def result_processor(self, dialect, coltype):
def process(value):
if isinstance(value, datetime.datetime):
return value.date()
elif isinstance(value, util.string_types):
m = self._reg.match(value)
if not m:
raise ValueError(
"could not parse %r as a date value" % (value,)
)
return datetime.date(*[int(x or 0) for x in m.groups()])
else:
return value
return process
class TIME(sqltypes.TIME):
def __init__(self, precision=None, **kwargs):
self.precision = precision
super(TIME, self).__init__()
__zero_date = datetime.date(1900, 1, 1)
def bind_processor(self, dialect):
def process(value):
if isinstance(value, datetime.datetime):
value = datetime.datetime.combine(
self.__zero_date, value.time()
)
elif isinstance(value, datetime.time):
""" issue #5339
per: https://github.com/mkleehammer/pyodbc/wiki/Tips-and-Tricks-by-Database-Platform#time-columns
pass TIME value as string
""" # noqa
value = str(value)
return value
return process
_reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d{0,6}))?")
def result_processor(self, dialect, coltype):
def process(value):
if isinstance(value, datetime.datetime):
return value.time()
elif isinstance(value, util.string_types):
m = self._reg.match(value)
if not m:
raise ValueError(
"could not parse %r as a time value" % (value,)
)
return datetime.time(*[int(x or 0) for x in m.groups()])
else:
return value
return process
_MSTime = TIME
class _DateTimeBase(object):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
class _MSDateTime(_DateTimeBase, sqltypes.DateTime):
pass
class SMALLDATETIME(_DateTimeBase, sqltypes.DateTime):
__visit_name__ = "SMALLDATETIME"
class DATETIME2(_DateTimeBase, sqltypes.DateTime):
__visit_name__ = "DATETIME2"
def __init__(self, precision=None, **kw):
super(DATETIME2, self).__init__(**kw)
self.precision = precision
# TODO: is this not an Interval ?
class DATETIMEOFFSET(sqltypes.TypeEngine):
__visit_name__ = "DATETIMEOFFSET"
def __init__(self, precision=None, **kwargs):
self.precision = precision
class _UnicodeLiteral(object):
def literal_processor(self, dialect):
def process(value):
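# e.g. (illustrative) the Python string "O'Brien" is rendered
# as the SQL literal N'O''Brien'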
value = value.replace("'", "''")
if dialect.identifier_preparer._double_percents:
value = value.replace("%", "%%")
return "N'%s'" % value
return process
class _MSUnicode(_UnicodeLiteral, sqltypes.Unicode):
pass
class _MSUnicodeText(_UnicodeLiteral, sqltypes.UnicodeText):
pass
class TIMESTAMP(sqltypes._Binary):
"""Implement the SQL Server TIMESTAMP type.
Note this is **completely different** than the SQL Standard
TIMESTAMP type, which is not supported by SQL Server. It
is a read-only datatype that does not support INSERT of values.
.. versionadded:: 1.2
.. seealso::
:class:`_mssql.ROWVERSION`
"""
__visit_name__ = "TIMESTAMP"
# expected by _Binary to be present
length = None
def __init__(self, convert_int=False):
"""Construct a TIMESTAMP or ROWVERSION type.
:param convert_int: if True, binary integer values will
be converted to integers on read.
.. versionadded:: 1.2
"""
self.convert_int = convert_int
def result_processor(self, dialect, coltype):
super_ = super(TIMESTAMP, self).result_processor(dialect, coltype)
if self.convert_int:
def process(value):
value = super_(value)
if value is not None:
# https://stackoverflow.com/a/30403242/34549
value = int(codecs.encode(value, "hex"), 16)
return value
return process
else:
return super_
class ROWVERSION(TIMESTAMP):
"""Implement the SQL Server ROWVERSION type.
The ROWVERSION datatype is a SQL Server synonym for the TIMESTAMP
datatype, however current SQL Server documentation suggests using
ROWVERSION for new datatypes going forward.
The ROWVERSION datatype does **not** reflect (e.g. introspect) from the
database as itself; the returned datatype will be
:class:`_mssql.TIMESTAMP`.
This is a read-only datatype that does not support INSERT of values.
.. versionadded:: 1.2
.. seealso::
:class:`_mssql.TIMESTAMP`
"""
__visit_name__ = "ROWVERSION"
class NTEXT(sqltypes.UnicodeText):
"""MSSQL NTEXT type, for variable-length unicode text up to 2^30
characters."""
__visit_name__ = "NTEXT"
class VARBINARY(sqltypes.VARBINARY, sqltypes.LargeBinary):
"""The MSSQL VARBINARY type.
This type is present to support "deprecate_large_types" mode where
either ``VARBINARY(max)`` or IMAGE is rendered. Otherwise, this type
object is redundant vs. :class:`_types.VARBINARY`.
.. versionadded:: 1.0.0
.. seealso::
:ref:`mssql_large_type_deprecation`
"""
__visit_name__ = "VARBINARY"
class IMAGE(sqltypes.LargeBinary):
__visit_name__ = "IMAGE"
class XML(sqltypes.Text):
"""MSSQL XML type.
This is a placeholder type for reflection purposes that does not include
any Python-side datatype support. It also does not currently support
additional arguments, such as "CONTENT", "DOCUMENT",
"xml_schema_collection".
.. versionadded:: 1.1.11
"""
__visit_name__ = "XML"
class BIT(sqltypes.TypeEngine):
__visit_name__ = "BIT"
class MONEY(sqltypes.TypeEngine):
__visit_name__ = "MONEY"
class SMALLMONEY(sqltypes.TypeEngine):
__visit_name__ = "SMALLMONEY"
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
__visit_name__ = "UNIQUEIDENTIFIER"
class SQL_VARIANT(sqltypes.TypeEngine):
__visit_name__ = "SQL_VARIANT"
class TryCast(sql.elements.Cast):
"""Represent a SQL Server TRY_CAST expression.
"""
__visit_name__ = "try_cast"
def __init__(self, *arg, **kw):
"""Create a TRY_CAST expression.
:class:`.TryCast` is a subclass of SQLAlchemy's :class:`.Cast`
construct, and works in the same way, except that the SQL expression
rendered is "TRY_CAST" rather than "CAST"::
from sqlalchemy import select
from sqlalchemy import Numeric
from sqlalchemy.dialects.mssql import try_cast
stmt = select([
try_cast(product_table.c.unit_price, Numeric(10, 4))
])
The above would render::
SELECT TRY_CAST (product_table.unit_price AS NUMERIC(10, 4))
FROM product_table
.. versionadded:: 1.3.7
"""
super(TryCast, self).__init__(*arg, **kw)
try_cast = public_factory(TryCast, ".dialects.mssql.try_cast")
# old names.
MSDateTime = _MSDateTime
MSDate = _MSDate
MSReal = REAL
MSTinyInteger = TINYINT
MSTime = TIME
MSSmallDateTime = SMALLDATETIME
MSDateTime2 = DATETIME2
MSDateTimeOffset = DATETIMEOFFSET
MSText = TEXT
MSNText = NTEXT
MSString = VARCHAR
MSNVarchar = NVARCHAR
MSChar = CHAR
MSNChar = NCHAR
MSBinary = BINARY
MSVarBinary = VARBINARY
MSImage = IMAGE
MSBit = BIT
MSMoney = MONEY
MSSmallMoney = SMALLMONEY
MSUniqueIdentifier = UNIQUEIDENTIFIER
MSVariant = SQL_VARIANT
ischema_names = {
"int": INTEGER,
"bigint": BIGINT,
"smallint": SMALLINT,
"tinyint": TINYINT,
"varchar": VARCHAR,
"nvarchar": NVARCHAR,
"char": CHAR,
"nchar": NCHAR,
"text": TEXT,
"ntext": NTEXT,
"decimal": DECIMAL,
"numeric": NUMERIC,
"float": FLOAT,
"datetime": DATETIME,
"datetime2": DATETIME2,
"datetimeoffset": DATETIMEOFFSET,
"date": DATE,
"time": TIME,
"smalldatetime": SMALLDATETIME,
"binary": BINARY,
"varbinary": VARBINARY,
"bit": BIT,
"real": REAL,
"image": IMAGE,
"xml": XML,
"timestamp": TIMESTAMP,
"money": MONEY,
"smallmoney": SMALLMONEY,
"uniqueidentifier": UNIQUEIDENTIFIER,
"sql_variant": SQL_VARIANT,
}
class MSTypeCompiler(compiler.GenericTypeCompiler):
def _extend(self, spec, type_, length=None):
"""Extend a string-type declaration with standard SQL
COLLATE annotations.
"""
if getattr(type_, "collation", None):
collation = "COLLATE %s" % type_.collation
else:
collation = None
if not length:
length = type_.length
if length:
spec = spec + "(%s)" % length
return " ".join([c for c in (spec, collation) if c is not None])
def visit_FLOAT(self, type_, **kw):
precision = getattr(type_, "precision", None)
if precision is None:
return "FLOAT"
else:
return "FLOAT(%(precision)s)" % {"precision": precision}
def visit_TINYINT(self, type_, **kw):
return "TINYINT"
def visit_DATETIMEOFFSET(self, type_, **kw):
if type_.precision is not None:
return "DATETIMEOFFSET(%s)" % type_.precision
else:
return "DATETIMEOFFSET"
def visit_TIME(self, type_, **kw):
precision = getattr(type_, "precision", None)
if precision is not None:
return "TIME(%s)" % precision
else:
return "TIME"
def visit_TIMESTAMP(self, type_, **kw):
return "TIMESTAMP"
def visit_ROWVERSION(self, type_, **kw):
return "ROWVERSION"
def visit_DATETIME2(self, type_, **kw):
precision = getattr(type_, "precision", None)
if precision is not None:
return "DATETIME2(%s)" % precision
else:
return "DATETIME2"
def visit_SMALLDATETIME(self, type_, **kw):
return "SMALLDATETIME"
def visit_unicode(self, type_, **kw):
return self.visit_NVARCHAR(type_, **kw)
def visit_text(self, type_, **kw):
if self.dialect.deprecate_large_types:
return self.visit_VARCHAR(type_, **kw)
else:
return self.visit_TEXT(type_, **kw)
def visit_unicode_text(self, type_, **kw):
if self.dialect.deprecate_large_types:
return self.visit_NVARCHAR(type_, **kw)
else:
return self.visit_NTEXT(type_, **kw)
def visit_NTEXT(self, type_, **kw):
return self._extend("NTEXT", type_)
def visit_TEXT(self, type_, **kw):
return self._extend("TEXT", type_)
def visit_VARCHAR(self, type_, **kw):
return self._extend("VARCHAR", type_, length=type_.length or "max")
def visit_CHAR(self, type_, **kw):
return self._extend("CHAR", type_)
def visit_NCHAR(self, type_, **kw):
return self._extend("NCHAR", type_)
def visit_NVARCHAR(self, type_, **kw):
return self._extend("NVARCHAR", type_, length=type_.length or "max")
def visit_date(self, type_, **kw):
if self.dialect.server_version_info < MS_2008_VERSION:
return self.visit_DATETIME(type_, **kw)
else:
return self.visit_DATE(type_, **kw)
def visit_time(self, type_, **kw):
if self.dialect.server_version_info < MS_2008_VERSION:
return self.visit_DATETIME(type_, **kw)
else:
return self.visit_TIME(type_, **kw)
def visit_large_binary(self, type_, **kw):
if self.dialect.deprecate_large_types:
return self.visit_VARBINARY(type_, **kw)
else:
return self.visit_IMAGE(type_, **kw)
def visit_IMAGE(self, type_, **kw):
return "IMAGE"
def visit_XML(self, type_, **kw):
return "XML"
def visit_VARBINARY(self, type_, **kw):
return self._extend("VARBINARY", type_, length=type_.length or "max")
def visit_boolean(self, type_, **kw):
return self.visit_BIT(type_)
def visit_BIT(self, type_, **kw):
return "BIT"
def visit_MONEY(self, type_, **kw):
return "MONEY"
def visit_SMALLMONEY(self, type_, **kw):
return "SMALLMONEY"
def visit_UNIQUEIDENTIFIER(self, type_, **kw):
return "UNIQUEIDENTIFIER"
def visit_SQL_VARIANT(self, type_, **kw):
return "SQL_VARIANT"
class MSExecutionContext(default.DefaultExecutionContext):
_enable_identity_insert = False
_select_lastrowid = False
_result_proxy = None
_lastrowid = None
def _opt_encode(self, statement):
if not self.dialect.supports_unicode_statements:
return self.dialect._encoder(statement)[0]
else:
return statement
def pre_exec(self):
"""Activate IDENTITY_INSERT if needed."""
if self.isinsert:
tbl = self.compiled.statement.table
seq_column = tbl._autoincrement_column
insert_has_sequence = seq_column is not None
if insert_has_sequence:
self._enable_identity_insert = (
seq_column.key in self.compiled_parameters[0]
) or (
self.compiled.statement.parameters
and (
(
self.compiled.statement._has_multi_parameters
and (
seq_column.key
in self.compiled.statement.parameters[0]
or seq_column
in self.compiled.statement.parameters[0]
)
)
or (
not self.compiled.statement._has_multi_parameters
and (
seq_column.key
in self.compiled.statement.parameters
or seq_column
in self.compiled.statement.parameters
)
)
)
)
else:
self._enable_identity_insert = False
self._select_lastrowid = (
not self.compiled.inline
and insert_has_sequence
and not self.compiled.returning
and not self._enable_identity_insert
and not self.executemany
)
if self._enable_identity_insert:
self.root_connection._cursor_execute(
self.cursor,
self._opt_encode(
"SET IDENTITY_INSERT %s ON"
% self.dialect.identifier_preparer.format_table(tbl)
),
(),
self,
)
def post_exec(self):
"""Disable IDENTITY_INSERT if enabled."""
conn = self.root_connection
if self._select_lastrowid:
if self.dialect.use_scope_identity:
conn._cursor_execute(
self.cursor,
"SELECT scope_identity() AS lastrowid",
(),
self,
)
else:
conn._cursor_execute(
self.cursor, "SELECT @@identity AS lastrowid", (), self
)
# fetchall() ensures the cursor is consumed without closing it
row = self.cursor.fetchall()[0]
self._lastrowid = int(row[0])
if (
self.isinsert or self.isupdate or self.isdelete
) and self.compiled.returning:
self._result_proxy = engine.FullyBufferedResultProxy(self)
if self._enable_identity_insert:
conn._cursor_execute(
self.cursor,
self._opt_encode(
"SET IDENTITY_INSERT %s OFF"
% self.dialect.identifier_preparer.format_table(
self.compiled.statement.table
)
),
(),
self,
)
def get_lastrowid(self):
return self._lastrowid
def handle_dbapi_exception(self, e):
if self._enable_identity_insert:
try:
self.cursor.execute(
self._opt_encode(
"SET IDENTITY_INSERT %s OFF"
% self.dialect.identifier_preparer.format_table(
self.compiled.statement.table
)
)
)
except Exception:
pass
def get_result_proxy(self):
if self._result_proxy:
return self._result_proxy
else:
return engine.ResultProxy(self)
class MSSQLCompiler(compiler.SQLCompiler):
returning_precedes_values = True
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
"doy": "dayofyear",
"dow": "weekday",
"milliseconds": "millisecond",
"microseconds": "microsecond",
},
)
def __init__(self, *args, **kwargs):
self.tablealiases = {}
super(MSSQLCompiler, self).__init__(*args, **kwargs)
def _with_legacy_schema_aliasing(fn):
def decorate(self, *arg, **kw):
if self.dialect.legacy_schema_aliasing:
return fn(self, *arg, **kw)
else:
super_ = getattr(super(MSSQLCompiler, self), fn.__name__)
return super_(*arg, **kw)
return decorate
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_current_date_func(self, fn, **kw):
return "GETDATE()"
def visit_length_func(self, fn, **kw):
return "LEN%s" % self.function_argspec(fn, **kw)
def visit_char_length_func(self, fn, **kw):
return "LEN%s" % self.function_argspec(fn, **kw)
def visit_concat_op_binary(self, binary, operator, **kw):
return "%s + %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_true(self, expr, **kw):
return "1"
def visit_false(self, expr, **kw):
return "0"
def visit_match_op_binary(self, binary, operator, **kw):
return "CONTAINS (%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def get_select_precolumns(self, select, **kw):
""" MS-SQL puts TOP, it's version of LIMIT here """
s = ""
if select._distinct:
s += "DISTINCT "
if select._simple_int_limit and (
select._offset_clause is None
or (select._simple_int_offset and select._offset == 0)
):
# ODBC drivers and possibly others
# don't support bind params in the SELECT clause on SQL Server.
# so have to use literal here.
s += "TOP %d " % select._limit
if s:
return s
else:
return compiler.SQLCompiler.get_select_precolumns(
self, select, **kw
)
def get_from_hint_text(self, table, text):
return text
def get_crud_hint_text(self, table, text):
return text
def limit_clause(self, select, **kw):
# Limit in mssql is after the select keyword
return ""
def visit_try_cast(self, element, **kw):
return "TRY_CAST (%s AS %s)" % (
self.process(element.clause, **kw),
self.process(element.typeclause, **kw),
)
def visit_select(self, select, **kwargs):
"""Look for ``LIMIT`` and OFFSET in a select statement, and if
so tries to wrap it in a subquery with ``row_number()`` criterion.
"""
if (
(not select._simple_int_limit and select._limit_clause is not None)
or (
select._offset_clause is not None
and not select._simple_int_offset
or select._offset
)
) and not getattr(select, "_mssql_visit", None):
# to use ROW_NUMBER(), an ORDER BY is required.
if not select._order_by_clause.clauses:
raise exc.CompileError(
"MSSQL requires an order_by when "
"using an OFFSET or a non-simple "
"LIMIT clause"
)
_order_by_clauses = [
sql_util.unwrap_label_reference(elem)
for elem in select._order_by_clause.clauses
]
limit_clause = select._limit_clause
offset_clause = select._offset_clause
kwargs["select_wraps_for"] = select
select = select._generate()
select._mssql_visit = True
select = (
select.column(
sql.func.ROW_NUMBER()
.over(order_by=_order_by_clauses)
.label("mssql_rn")
)
.order_by(None)
.alias()
)
mssql_rn = sql.column("mssql_rn")
limitselect = sql.select(
[c for c in select.c if c.key != "mssql_rn"]
)
if offset_clause is not None:
limitselect.append_whereclause(mssql_rn > offset_clause)
if limit_clause is not None:
limitselect.append_whereclause(
mssql_rn <= (limit_clause + offset_clause)
)
else:
limitselect.append_whereclause(mssql_rn <= (limit_clause))
return self.process(limitselect, **kwargs)
else:
return compiler.SQLCompiler.visit_select(self, select, **kwargs)
@_with_legacy_schema_aliasing
def visit_table(self, table, mssql_aliased=False, iscrud=False, **kwargs):
if mssql_aliased is table or iscrud:
return super(MSSQLCompiler, self).visit_table(table, **kwargs)
# alias schema-qualified tables
alias = self._schema_aliased_table(table)
if alias is not None:
return self.process(alias, mssql_aliased=table, **kwargs)
else:
return super(MSSQLCompiler, self).visit_table(table, **kwargs)
@_with_legacy_schema_aliasing
def visit_alias(self, alias, **kw):
# translate for schema-qualified table aliases
kw["mssql_aliased"] = alias.original
return super(MSSQLCompiler, self).visit_alias(alias, **kw)
@_with_legacy_schema_aliasing
def visit_column(self, column, add_to_result_map=None, **kw):
if (
column.table is not None
and (not self.isupdate and not self.isdelete)
or self.is_subquery()
):
# translate for schema-qualified table aliases
t = self._schema_aliased_table(column.table)
if t is not None:
converted = expression._corresponding_column_or_error(
t, column
)
if add_to_result_map is not None:
add_to_result_map(
column.name,
column.name,
(column, column.name, column.key),
column.type,
)
return super(MSSQLCompiler, self).visit_column(converted, **kw)
return super(MSSQLCompiler, self).visit_column(
column, add_to_result_map=add_to_result_map, **kw
)
def _schema_aliased_table(self, table):
if getattr(table, "schema", None) is not None:
if table not in self.tablealiases:
self.tablealiases[table] = table.alias()
return self.tablealiases[table]
else:
return None
def visit_extract(self, extract, **kw):
field = self.extract_map.get(extract.field, extract.field)
return "DATEPART(%s, %s)" % (field, self.process(extract.expr, **kw))
def visit_savepoint(self, savepoint_stmt):
return "SAVE TRANSACTION %s" % self.preparer.format_savepoint(
savepoint_stmt
)
def visit_rollback_to_savepoint(self, savepoint_stmt):
return "ROLLBACK TRANSACTION %s" % self.preparer.format_savepoint(
savepoint_stmt
)
def visit_binary(self, binary, **kwargs):
"""Move bind parameters to the right-hand side of an operator, where
possible.
"""
if (
isinstance(binary.left, expression.BindParameter)
and binary.operator == operator.eq
and not isinstance(binary.right, expression.BindParameter)
):
return self.process(
expression.BinaryExpression(
binary.right, binary.left, binary.operator
),
**kwargs
)
return super(MSSQLCompiler, self).visit_binary(binary, **kwargs)
def returning_clause(self, stmt, returning_cols):
if self.isinsert or self.isupdate:
target = stmt.table.alias("inserted")
else:
target = stmt.table.alias("deleted")
adapter = sql_util.ClauseAdapter(target)
columns = [
self._label_select_column(
None, adapter.traverse(c), True, False, {}
)
for c in expression._select_iterables(returning_cols)
]
return "OUTPUT " + ", ".join(columns)
def get_cte_preamble(self, recursive):
# SQL Server finds it too inconvenient to accept
# an entirely optional, SQL standard specified,
# "RECURSIVE" word with their "WITH",
# so here we go
return "WITH"
def label_select_column(self, select, column, asfrom):
if isinstance(column, expression.Function):
return column.label(None)
else:
return super(MSSQLCompiler, self).label_select_column(
select, column, asfrom
)
def for_update_clause(self, select):
# "FOR UPDATE" is only allowed on "DECLARE CURSOR" which
# SQLAlchemy doesn't use
return ""
def order_by_clause(self, select, **kw):
# MSSQL only allows ORDER BY in subqueries if there is a LIMIT
if self.is_subquery() and not select._limit:
# avoid processing the order by clause if we won't end up
# using it, because we don't want all the bind params tacked
# onto the positional list if that is what the dbapi requires
return ""
order_by = self.process(select._order_by_clause, **kw)
if order_by:
return " ORDER BY " + order_by
else:
return ""
def update_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
"""Render the UPDATE..FROM clause specific to MSSQL.
In MSSQL, if the UPDATE statement involves an alias of the table to
be updated, then the table itself must be added to the FROM list as
well. Otherwise, it is optional. Here, we add it regardless.
"""
return "FROM " + ", ".join(
t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
for t in [from_table] + extra_froms
)
def delete_table_clause(self, delete_stmt, from_table, extra_froms):
"""If we have extra froms make sure we render any alias as hint."""
ashint = False
if extra_froms:
ashint = True
return from_table._compiler_dispatch(
self, asfrom=True, iscrud=True, ashint=ashint
)
def delete_extra_from_clause(
self, delete_stmt, from_table, extra_froms, from_hints, **kw
):
"""Render the DELETE .. FROM clause specific to MSSQL.
Yes, it has the FROM keyword twice.
"""
return "FROM " + ", ".join(
t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
for t in [from_table] + extra_froms
)
def visit_empty_set_expr(self, type_):
return "SELECT 1 WHERE 1!=1"
def visit_is_distinct_from_binary(self, binary, operator, **kw):
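# emulate IS DISTINCT FROM with a NULL-safe INTERSECT trick: the INTERSECT
# of the two single-row SELECTs is non-empty only when the values compare
# as equal, with NULLs treated as equal to each other.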
return "NOT EXISTS (SELECT %s INTERSECT SELECT %s)" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_isnot_distinct_from_binary(self, binary, operator, **kw):
return "EXISTS (SELECT %s INTERSECT SELECT %s)" % (
self.process(binary.left),
self.process(binary.right),
)
class MSSQLStrictCompiler(MSSQLCompiler):
"""A subclass of MSSQLCompiler which disables the usage of bind
parameters where not allowed natively by MS-SQL.
A dialect may use this compiler on a platform where native
binds are used.
"""
ansi_bind_rules = True
def visit_in_op_binary(self, binary, operator, **kw):
kw["literal_binds"] = True
return "%s IN %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_notin_op_binary(self, binary, operator, **kw):
kw["literal_binds"] = True
return "%s NOT IN %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def render_literal_value(self, value, type_):
"""
For date and datetime values, convert to a string
format acceptable to MSSQL. That seems to be the
so-called ODBC canonical date format which looks
like this:
yyyy-mm-dd hh:mi:ss.mmm(24h)
For other data types, call the base class implementation.
"""
# datetime and date are both subclasses of datetime.date
if issubclass(type(value), datetime.date):
# SQL Server wants single quotes around the date string.
return "'" + str(value) + "'"
else:
return super(MSSQLStrictCompiler, self).render_literal_value(
value, type_
)
class MSDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column)
# type is not accepted in a computed column
if column.computed is not None:
colspec += " " + self.process(column.computed)
else:
colspec += " " + self.dialect.type_compiler.process(
column.type, type_expression=column
)
if column.nullable is not None:
if (
not column.nullable
or column.primary_key
or isinstance(column.default, sa_schema.Sequence)
or column.autoincrement is True
):
colspec += " NOT NULL"
elif column.computed is None:
# don't specify "NULL" for computed columns
colspec += " NULL"
if column.table is None:
raise exc.CompileError(
"mssql requires Table-bound columns "
"in order to generate DDL"
)
# install an IDENTITY Sequence if we have either a sequence or an implicit
# IDENTITY column
if isinstance(column.default, sa_schema.Sequence):
if (
column.default.start is not None
or column.default.increment is not None
or column is not column.table._autoincrement_column
):
util.warn_deprecated(
"Use of Sequence with SQL Server in order to affect the "
"parameters of the IDENTITY value is deprecated, as "
"Sequence "
"will correspond to an actual SQL Server "
"CREATE SEQUENCE in "
"a future release. Please use the mssql_identity_start "
"and mssql_identity_increment parameters."
)
if column.default.start == 0:
start = 0
else:
start = column.default.start or 1
colspec += " IDENTITY(%s,%s)" % (
start,
column.default.increment or 1,
)
elif (
column is column.table._autoincrement_column
or column.autoincrement is True
):
start = column.dialect_options["mssql"]["identity_start"]
increment = column.dialect_options["mssql"]["identity_increment"]
colspec += " IDENTITY(%s,%s)" % (start, increment)
else:
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
return colspec
def visit_create_index(self, create, include_schema=False):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
# handle clustering option
clustered = index.dialect_options["mssql"]["clustered"]
if clustered is not None:
if clustered:
text += "CLUSTERED "
else:
text += "NONCLUSTERED "
text += "INDEX %s ON %s (%s)" % (
self._prepared_index_name(index, include_schema=include_schema),
preparer.format_table(index.table),
", ".join(
self.sql_compiler.process(
expr, include_table=False, literal_binds=True
)
for expr in index.expressions
),
)
whereclause = index.dialect_options["mssql"]["where"]
if whereclause is not None:
where_compiled = self.sql_compiler.process(
whereclause, include_table=False, literal_binds=True
)
text += " WHERE " + where_compiled
# handle other included columns
if index.dialect_options["mssql"]["include"]:
inclusions = [
index.table.c[col]
if isinstance(col, util.string_types)
else col
for col in index.dialect_options["mssql"]["include"]
]
text += " INCLUDE (%s)" % ", ".join(
[preparer.quote(c.name) for c in inclusions]
)
return text
def visit_drop_index(self, drop):
return "\nDROP INDEX %s ON %s" % (
self._prepared_index_name(drop.element, include_schema=False),
self.preparer.format_table(drop.element.table),
)
def visit_primary_key_constraint(self, constraint):
if len(constraint) == 0:
return ""
text = ""
if constraint.name is not None:
text += "CONSTRAINT %s " % self.preparer.format_constraint(
constraint
)
text += "PRIMARY KEY "
clustered = constraint.dialect_options["mssql"]["clustered"]
if clustered is not None:
if clustered:
text += "CLUSTERED "
else:
text += "NONCLUSTERED "
text += "(%s)" % ", ".join(
self.preparer.quote(c.name) for c in constraint
)
text += self.define_constraint_deferrability(constraint)
return text
def visit_unique_constraint(self, constraint):
if len(constraint) == 0:
return ""
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "UNIQUE "
clustered = constraint.dialect_options["mssql"]["clustered"]
if clustered is not None:
if clustered:
text += "CLUSTERED "
else:
text += "NONCLUSTERED "
text += "(%s)" % ", ".join(
self.preparer.quote(c.name) for c in constraint
)
text += self.define_constraint_deferrability(constraint)
return text
def visit_computed_column(self, generated):
text = "AS (%s)" % self.sql_compiler.process(
generated.sqltext, include_table=False, literal_binds=True
)
# explicitly check for True|False since None means server default
if generated.persisted is True:
text += " PERSISTED"
return text
class MSIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
def __init__(self, dialect):
super(MSIdentifierPreparer, self).__init__(
dialect,
initial_quote="[",
final_quote="]",
quote_case_sensitive_collations=False,
)
def _escape_identifier(self, value):
return value
def quote_schema(self, schema, force=None):
"""Prepare a quoted table and schema name."""
# need to re-implement the deprecation warning entirely
if force is not None:
# not using the util.deprecated_params() decorator in this
# case because of the additional function call overhead on this
# very performance-critical spot.
util.warn_deprecated(
"The IdentifierPreparer.quote_schema.force parameter is "
"deprecated and will be removed in a future release. This "
"flag has no effect on the behavior of the "
"IdentifierPreparer.quote method; please refer to "
"quoted_name()."
)
dbname, owner = _schema_elements(schema)
if dbname:
result = "%s.%s" % (self.quote(dbname), self.quote(owner))
elif owner:
result = self.quote(owner)
else:
result = ""
return result
def _db_plus_owner_listing(fn):
def wrap(dialect, connection, schema=None, **kw):
dbname, owner = _owner_plus_db(dialect, schema)
return _switch_db(
dbname,
connection,
fn,
dialect,
connection,
dbname,
owner,
schema,
**kw
)
return update_wrapper(wrap, fn)
def _db_plus_owner(fn):
def wrap(dialect, connection, tablename, schema=None, **kw):
dbname, owner = _owner_plus_db(dialect, schema)
return _switch_db(
dbname,
connection,
fn,
dialect,
connection,
tablename,
dbname,
owner,
schema,
**kw
)
return update_wrapper(wrap, fn)
def _switch_db(dbname, connection, fn, *arg, **kw):
if dbname:
current_db = connection.scalar("select db_name()")
if current_db != dbname:
connection.execute(
"use %s" % connection.dialect.identifier_preparer.quote(dbname)
)
try:
return fn(*arg, **kw)
finally:
if dbname and current_db != dbname:
connection.execute(
"use %s"
% connection.dialect.identifier_preparer.quote(current_db)
)
def _owner_plus_db(dialect, schema):
if not schema:
return None, dialect.default_schema_name
elif "." in schema:
return _schema_elements(schema)
else:
return None, schema
_memoized_schema = util.LRUCache()
def _schema_elements(schema):
if isinstance(schema, quoted_name) and schema.quote:
return None, schema
if schema in _memoized_schema:
return _memoized_schema[schema]
# tests for this function are in:
# test/dialect/mssql/test_reflection.py ->
# OwnerPlusDBTest.test_owner_database_pairs
# test/dialect/mssql/test_compiler.py -> test_force_schema_*
# test/dialect/mssql/test_compiler.py -> test_schema_many_tokens_*
#
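# e.g.  "mydatabase.dbo"  -> ("mydatabase", "dbo")
#       "[abc].[def]"     -> ("abc", "def")
#       "dbo"             -> (None, "dbo")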
push = []
symbol = ""
bracket = False
has_brackets = False
for token in re.split(r"(\[|\]|\.)", schema):
if not token:
continue
if token == "[":
bracket = True
has_brackets = True
elif token == "]":
bracket = False
elif not bracket and token == ".":
if has_brackets:
push.append("[%s]" % symbol)
else:
push.append(symbol)
symbol = ""
has_brackets = False
else:
symbol += token
if symbol:
push.append(symbol)
if len(push) > 1:
dbname, owner = ".".join(push[0:-1]), push[-1]
# test for internal brackets
if re.match(r".*\].*\[.*", dbname[1:-1]):
dbname = quoted_name(dbname, quote=False)
else:
dbname = dbname.lstrip("[").rstrip("]")
elif len(push):
dbname, owner = None, push[0]
else:
dbname, owner = None, None
_memoized_schema[schema] = dbname, owner
return dbname, owner
class MSDialect(default.DefaultDialect):
name = "mssql"
supports_default_values = True
supports_empty_insert = False
execution_ctx_cls = MSExecutionContext
use_scope_identity = True
max_identifier_length = 128
schema_name = "dbo"
colspecs = {
sqltypes.DateTime: _MSDateTime,
sqltypes.Date: _MSDate,
sqltypes.Time: TIME,
sqltypes.Unicode: _MSUnicode,
sqltypes.UnicodeText: _MSUnicodeText,
}
engine_config_types = default.DefaultDialect.engine_config_types.union(
[("legacy_schema_aliasing", util.asbool)]
)
ischema_names = ischema_names
supports_native_boolean = False
non_native_boolean_check_constraint = False
supports_unicode_binds = True
postfetch_lastrowid = True
_supports_nvarchar_max = False
server_version_info = ()
statement_compiler = MSSQLCompiler
ddl_compiler = MSDDLCompiler
type_compiler = MSTypeCompiler
preparer = MSIdentifierPreparer
construct_arguments = [
(sa_schema.PrimaryKeyConstraint, {"clustered": None}),
(sa_schema.UniqueConstraint, {"clustered": None}),
(sa_schema.Index, {"clustered": None, "include": None, "where": None}),
(sa_schema.Column, {"identity_start": 1, "identity_increment": 1}),
]
def __init__(
self,
query_timeout=None,
use_scope_identity=True,
schema_name="dbo",
isolation_level=None,
deprecate_large_types=None,
legacy_schema_aliasing=False,
**opts
):
self.query_timeout = int(query_timeout or 0)
self.schema_name = schema_name
self.use_scope_identity = use_scope_identity
self.deprecate_large_types = deprecate_large_types
self.legacy_schema_aliasing = legacy_schema_aliasing
super(MSDialect, self).__init__(**opts)
self.isolation_level = isolation_level
def do_savepoint(self, connection, name):
# give the DBAPI a push
connection.execute("IF @@TRANCOUNT = 0 BEGIN TRANSACTION")
super(MSDialect, self).do_savepoint(connection, name)
def do_release_savepoint(self, connection, name):
# SQL Server does not support RELEASE SAVEPOINT
pass
_isolation_lookup = set(
[
"SERIALIZABLE",
"READ UNCOMMITTED",
"READ COMMITTED",
"REPEATABLE READ",
"SNAPSHOT",
]
)
def set_isolation_level(self, connection, level):
level = level.replace("_", " ")
if level not in self._isolation_lookup:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s"
% (level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
cursor.execute("SET TRANSACTION ISOLATION LEVEL %s" % level)
cursor.close()
if level == "SNAPSHOT":
connection.commit()
def get_isolation_level(self, connection):
if self.server_version_info < MS_2005_VERSION:
raise NotImplementedError(
"Can't fetch isolation level prior to SQL Server 2005"
)
last_error = None
views = ("sys.dm_exec_sessions", "sys.dm_pdw_nodes_exec_sessions")
for view in views:
cursor = connection.cursor()
try:
cursor.execute(
"""
SELECT CASE transaction_isolation_level
WHEN 0 THEN NULL
WHEN 1 THEN 'READ UNCOMMITTED'
WHEN 2 THEN 'READ COMMITTED'
WHEN 3 THEN 'REPEATABLE READ'
WHEN 4 THEN 'SERIALIZABLE'
WHEN 5 THEN 'SNAPSHOT' END AS TRANSACTION_ISOLATION_LEVEL
FROM %s
where session_id = @@SPID
"""
% view
)
val = cursor.fetchone()[0]
except self.dbapi.Error as err:
# Python3 scoping rules
last_error = err
continue
else:
return val.upper()
finally:
cursor.close()
else:
# note that the NotImplementedError is caught by
# DefaultDialect, so the warning here is all that displays
util.warn(
"Could not fetch transaction isolation level, "
"tried views: %s; final error was: %s" % (views, last_error)
)
raise NotImplementedError(
"Can't fetch isolation level on this particular "
"SQL Server version. tried views: %s; final error was: %s"
% (views, last_error)
)
def initialize(self, connection):
super(MSDialect, self).initialize(connection)
self._setup_version_attributes()
self._setup_supports_nvarchar_max(connection)
def on_connect(self):
if self.isolation_level is not None:
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
return connect
else:
return None
def _setup_version_attributes(self):
if self.server_version_info[0] not in list(range(8, 17)):
util.warn(
"Unrecognized server version info '%s'. Some SQL Server "
"features may not function properly."
% ".".join(str(x) for x in self.server_version_info)
)
if (
self.server_version_info >= MS_2005_VERSION
and "implicit_returning" not in self.__dict__
):
self.implicit_returning = True
if self.server_version_info >= MS_2008_VERSION:
self.supports_multivalues_insert = True
if self.deprecate_large_types is None:
self.deprecate_large_types = (
self.server_version_info >= MS_2012_VERSION
)
def _setup_supports_nvarchar_max(self, connection):
try:
connection.scalar(
sql.text("SELECT CAST('test max support' AS NVARCHAR(max))")
)
except exc.DBAPIError:
self._supports_nvarchar_max = False
else:
self._supports_nvarchar_max = True
def _get_default_schema_name(self, connection):
if self.server_version_info < MS_2005_VERSION:
return self.schema_name
else:
query = sql.text("SELECT schema_name()")
default_schema_name = connection.scalar(query)
if default_schema_name is not None:
# guard against the case where the default_schema_name is being
# fed back into a table reflection function.
return quoted_name(default_schema_name, quote=True)
else:
return self.schema_name
@_db_plus_owner
def has_table(self, connection, tablename, dbname, owner, schema):
columns = ischema.columns
whereclause = columns.c.table_name == tablename
if owner:
whereclause = sql.and_(
whereclause, columns.c.table_schema == owner
)
s = sql.select([columns], whereclause)
c = connection.execute(s)
return c.first() is not None
@reflection.cache
def get_schema_names(self, connection, **kw):
s = sql.select(
[ischema.schemata.c.schema_name],
order_by=[ischema.schemata.c.schema_name],
)
schema_names = [r[0] for r in connection.execute(s)]
return schema_names
@reflection.cache
@_db_plus_owner_listing
def get_table_names(self, connection, dbname, owner, schema, **kw):
tables = ischema.tables
s = sql.select(
[tables.c.table_name],
sql.and_(
tables.c.table_schema == owner,
tables.c.table_type == "BASE TABLE",
),
order_by=[tables.c.table_name],
)
table_names = [r[0] for r in connection.execute(s)]
return table_names
@reflection.cache
@_db_plus_owner_listing
def get_view_names(self, connection, dbname, owner, schema, **kw):
tables = ischema.tables
s = sql.select(
[tables.c.table_name],
sql.and_(
tables.c.table_schema == owner, tables.c.table_type == "VIEW"
),
order_by=[tables.c.table_name],
)
view_names = [r[0] for r in connection.execute(s)]
return view_names
@reflection.cache
@_db_plus_owner
def get_indexes(self, connection, tablename, dbname, owner, schema, **kw):
# this uses the system catalogs, so index reflection is not supported
# below MS 2005
if self.server_version_info < MS_2005_VERSION:
return []
rp = connection.execute(
sql.text(
"select ind.index_id, ind.is_unique, ind.name "
"from sys.indexes as ind join sys.tables as tab on "
"ind.object_id=tab.object_id "
"join sys.schemas as sch on sch.schema_id=tab.schema_id "
"where tab.name = :tabname "
"and sch.name=:schname "
"and ind.is_primary_key=0 and ind.type != 0"
)
.bindparams(
sql.bindparam("tabname", tablename, ischema.CoerceUnicode()),
sql.bindparam("schname", owner, ischema.CoerceUnicode()),
)
.columns(name=sqltypes.Unicode())
)
indexes = {}
for row in rp:
indexes[row["index_id"]] = {
"name": row["name"],
"unique": row["is_unique"] == 1,
"column_names": [],
}
rp = connection.execute(
sql.text(
"select ind_col.index_id, ind_col.object_id, col.name "
"from sys.columns as col "
"join sys.tables as tab on tab.object_id=col.object_id "
"join sys.index_columns as ind_col on "
"(ind_col.column_id=col.column_id and "
"ind_col.object_id=tab.object_id) "
"join sys.schemas as sch on sch.schema_id=tab.schema_id "
"where tab.name=:tabname "
"and sch.name=:schname"
)
.bindparams(
sql.bindparam("tabname", tablename, ischema.CoerceUnicode()),
sql.bindparam("schname", owner, ischema.CoerceUnicode()),
)
.columns(name=sqltypes.Unicode())
)
for row in rp:
if row["index_id"] in indexes:
indexes[row["index_id"]]["column_names"].append(row["name"])
return list(indexes.values())
@reflection.cache
@_db_plus_owner
def get_view_definition(
self, connection, viewname, dbname, owner, schema, **kw
):
rp = connection.execute(
sql.text(
"select definition from sys.sql_modules as mod, "
"sys.views as views, "
"sys.schemas as sch"
" where "
"mod.object_id=views.object_id and "
"views.schema_id=sch.schema_id and "
"views.name=:viewname and sch.name=:schname"
).bindparams(
sql.bindparam("viewname", viewname, ischema.CoerceUnicode()),
sql.bindparam("schname", owner, ischema.CoerceUnicode()),
)
)
if rp:
view_def = rp.scalar()
return view_def
@reflection.cache
@_db_plus_owner
def get_columns(self, connection, tablename, dbname, owner, schema, **kw):
# Get base columns
columns = ischema.columns
computed_cols = ischema.computed_columns
if owner:
whereclause = sql.and_(
columns.c.table_name == tablename,
columns.c.table_schema == owner,
)
table_fullname = "%s.%s" % (owner, tablename)
full_name = columns.c.table_schema + "." + columns.c.table_name
join_on = computed_cols.c.object_id == func.object_id(full_name)
else:
whereclause = columns.c.table_name == tablename
table_fullname = tablename
join_on = computed_cols.c.object_id == func.object_id(
columns.c.table_name
)
join_on = sql.and_(
join_on, columns.c.column_name == computed_cols.c.name
)
join = columns.join(computed_cols, onclause=join_on, isouter=True)
if self._supports_nvarchar_max:
computed_definition = computed_cols.c.definition
else:
# tds_version 4.2 does not support NVARCHAR(MAX)
computed_definition = sql.cast(
computed_cols.c.definition, NVARCHAR(4000)
)
s = sql.select(
[columns, computed_definition, computed_cols.c.is_persisted],
whereclause,
from_obj=join,
order_by=[columns.c.ordinal_position],
)
c = connection.execute(s)
cols = []
while True:
row = c.fetchone()
if row is None:
break
name = row[columns.c.column_name]
type_ = row[columns.c.data_type]
nullable = row[columns.c.is_nullable] == "YES"
charlen = row[columns.c.character_maximum_length]
numericprec = row[columns.c.numeric_precision]
numericscale = row[columns.c.numeric_scale]
default = row[columns.c.column_default]
collation = row[columns.c.collation_name]
definition = row[computed_definition]
is_persisted = row[computed_cols.c.is_persisted]
coltype = self.ischema_names.get(type_, None)
kwargs = {}
if coltype in (
MSString,
MSChar,
MSNVarchar,
MSNChar,
MSText,
MSNText,
MSBinary,
MSVarBinary,
sqltypes.LargeBinary,
):
if charlen == -1:
charlen = None
kwargs["length"] = charlen
if collation:
kwargs["collation"] = collation
if coltype is None:
util.warn(
"Did not recognize type '%s' of column '%s'"
% (type_, name)
)
coltype = sqltypes.NULLTYPE
else:
if issubclass(coltype, sqltypes.Numeric):
kwargs["precision"] = numericprec
if not issubclass(coltype, sqltypes.Float):
kwargs["scale"] = numericscale
coltype = coltype(**kwargs)
cdict = {
"name": name,
"type": coltype,
"nullable": nullable,
"default": default,
"autoincrement": False,
}
if definition is not None and is_persisted is not None:
cdict["computed"] = {
"sqltext": definition,
"persisted": is_persisted,
}
cols.append(cdict)
# autoincrement and identity
colmap = {}
for col in cols:
colmap[col["name"]] = col
# We also run an sp_columns to check for identity columns:
cursor = connection.execute(
"sp_columns @table_name = '%s', "
"@table_owner = '%s'" % (tablename, owner)
)
ic = None
while True:
row = cursor.fetchone()
if row is None:
break
(col_name, type_name) = row[3], row[5]
if type_name.endswith("identity") and col_name in colmap:
ic = col_name
colmap[col_name]["autoincrement"] = True
colmap[col_name]["dialect_options"] = {
"mssql_identity_start": 1,
"mssql_identity_increment": 1,
}
break
cursor.close()
if ic is not None and self.server_version_info >= MS_2005_VERSION:
table_fullname = "%s.%s" % (owner, tablename)
cursor = connection.execute(
"select ident_seed('%s'), ident_incr('%s')"
% (table_fullname, table_fullname)
)
row = cursor.first()
if row is not None and row[0] is not None:
colmap[ic]["dialect_options"].update(
{
"mssql_identity_start": int(row[0]),
"mssql_identity_increment": int(row[1]),
}
)
return cols
@reflection.cache
@_db_plus_owner
def get_pk_constraint(
self, connection, tablename, dbname, owner, schema, **kw
):
pkeys = []
TC = ischema.constraints
C = ischema.key_constraints.alias("C")
# Primary key constraints
s = sql.select(
[C.c.column_name, TC.c.constraint_type, C.c.constraint_name],
sql.and_(
TC.c.constraint_name == C.c.constraint_name,
TC.c.table_schema == C.c.table_schema,
C.c.table_name == tablename,
C.c.table_schema == owner,
),
)
c = connection.execute(s)
constraint_name = None
for row in c:
if "PRIMARY" in row[TC.c.constraint_type.name]:
pkeys.append(row[0])
if constraint_name is None:
constraint_name = row[C.c.constraint_name.name]
return {"constrained_columns": pkeys, "name": constraint_name}
@reflection.cache
@_db_plus_owner
def get_foreign_keys(
self, connection, tablename, dbname, owner, schema, **kw
):
RR = ischema.ref_constraints
C = ischema.key_constraints.alias("C")
R = ischema.key_constraints.alias("R")
# Foreign key constraints
s = sql.select(
[
C.c.column_name,
R.c.table_schema,
R.c.table_name,
R.c.column_name,
RR.c.constraint_name,
RR.c.match_option,
RR.c.update_rule,
RR.c.delete_rule,
],
sql.and_(
C.c.table_name == tablename,
C.c.table_schema == owner,
RR.c.constraint_schema == C.c.table_schema,
C.c.constraint_name == RR.c.constraint_name,
R.c.constraint_name == RR.c.unique_constraint_name,
R.c.constraint_schema == RR.c.unique_constraint_schema,
C.c.ordinal_position == R.c.ordinal_position,
),
order_by=[RR.c.constraint_name, R.c.ordinal_position],
)
# group rows by constraint ID, to handle multi-column FKs
fkeys = []
def fkey_rec():
return {
"name": None,
"constrained_columns": [],
"referred_schema": None,
"referred_table": None,
"referred_columns": [],
}
fkeys = util.defaultdict(fkey_rec)
for r in connection.execute(s).fetchall():
scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r
rec = fkeys[rfknm]
rec["name"] = rfknm
if not rec["referred_table"]:
rec["referred_table"] = rtbl
if schema is not None or owner != rschema:
if dbname:
rschema = dbname + "." + rschema
rec["referred_schema"] = rschema
local_cols, remote_cols = (
rec["constrained_columns"],
rec["referred_columns"],
)
local_cols.append(scol)
remote_cols.append(rcol)
return list(fkeys.values())
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/firebird/__init__.py
|
# firebird/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy.dialects.firebird.base import BIGINT
from sqlalchemy.dialects.firebird.base import BLOB
from sqlalchemy.dialects.firebird.base import CHAR
from sqlalchemy.dialects.firebird.base import DATE
from sqlalchemy.dialects.firebird.base import FLOAT
from sqlalchemy.dialects.firebird.base import NUMERIC
from sqlalchemy.dialects.firebird.base import SMALLINT
from sqlalchemy.dialects.firebird.base import TEXT
from sqlalchemy.dialects.firebird.base import TIME
from sqlalchemy.dialects.firebird.base import TIMESTAMP
from sqlalchemy.dialects.firebird.base import VARCHAR
from . import base # noqa
from . import fdb # noqa
from . import kinterbasdb # noqa
base.dialect = dialect = fdb.dialect
__all__ = (
"SMALLINT",
"BIGINT",
"FLOAT",
"FLOAT",
"DATE",
"TIME",
"TEXT",
"NUMERIC",
"FLOAT",
"TIMESTAMP",
"VARCHAR",
"CHAR",
"BLOB",
"dialect",
)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/firebird/fdb.py
|
# firebird/fdb.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: firebird+fdb
:name: fdb
:dbapi: fdb
:connectstring: firebird+fdb://user:password@host:port/path/to/db[?key=value&key=value...]
:url: http://pypi.python.org/pypi/fdb/
fdb is a kinterbasdb compatible DBAPI for Firebird.
.. versionchanged:: 0.9 - The fdb dialect is now the default dialect
under the ``firebird://`` URL space, as ``fdb`` is now the official
Python driver for Firebird.
Arguments
----------
The ``fdb`` dialect is based on the
:mod:`sqlalchemy.dialects.firebird.kinterbasdb` dialect; however, it does not
accept every argument that kinterbasdb does.
* ``enable_rowcount`` - True by default, setting this to False disables
the usage of "cursor.rowcount" with the
Kinterbasdb dialect, which SQLAlchemy ordinarily calls upon automatically
after any UPDATE or DELETE statement. When disabled, SQLAlchemy's
ResultProxy will return -1 for result.rowcount. The rationale here is
that Kinterbasdb requires a second round trip to the database when
.rowcount is called - since SQLA's resultproxy automatically closes
the cursor after a non-result-returning statement, rowcount must be
called, if at all, before the result object is returned. Additionally,
cursor.rowcount may not return correct results with older versions
of Firebird, and setting this flag to False will also cause the
SQLAlchemy ORM to ignore its usage. The behavior can also be controlled on a
per-execution basis using the ``enable_rowcount`` option with
:meth:`_engine.Connection.execution_options`::
conn = engine.connect().execution_options(enable_rowcount=True)
r = conn.execute(stmt)
print(r.rowcount)
* ``retaining`` - False by default. Setting this to True will pass the
``retaining=True`` keyword argument to the ``.commit()`` and ``.rollback()``
methods of the DBAPI connection, which can improve performance in some
situations, but apparently with significant caveats.
Please read the fdb and/or kinterbasdb DBAPI documentation in order to
understand the implications of this flag.
.. versionchanged:: 0.9.0 - the ``retaining`` flag defaults to ``False``.
In 0.8 it defaulted to ``True``.
.. seealso::
http://pythonhosted.org/fdb/usage-guide.html#retaining-transactions
- information on the "retaining" flag.
""" # noqa
from .kinterbasdb import FBDialect_kinterbasdb
from ... import util
class FBDialect_fdb(FBDialect_kinterbasdb):
def __init__(self, enable_rowcount=True, retaining=False, **kwargs):
super(FBDialect_fdb, self).__init__(
enable_rowcount=enable_rowcount, retaining=retaining, **kwargs
)
@classmethod
def dbapi(cls):
return __import__("fdb")
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
if opts.get("port"):
opts["host"] = "%s/%s" % (opts["host"], opts["port"])
del opts["port"]
opts.update(url.query)
util.coerce_kw_type(opts, "type_conv", int)
return ([], opts)
def _get_server_version_info(self, connection):
"""Get the version of the Firebird server used by a connection.
Returns a tuple of (`major`, `minor`, `build`), three integers
representing the version of the attached server.
"""
# This is the simpler approach (the other uses the services api),
# that for backward compatibility reasons returns a string like
# LI-V6.3.3.12981 Firebird 2.0
# where the first version is a fake one resembling the old
# Interbase signature.
isc_info_firebird_version = 103
fbconn = connection.connection
version = fbconn.db_info(isc_info_firebird_version)
return self._parse_version_info(version)
dialect = FBDialect_fdb
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/firebird/kinterbasdb.py
|
# firebird/kinterbasdb.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: firebird+kinterbasdb
:name: kinterbasdb
:dbapi: kinterbasdb
:connectstring: firebird+kinterbasdb://user:password@host:port/path/to/db[?key=value&key=value...]
:url: http://firebirdsql.org/index.php?op=devel&sub=python
Arguments
----------
The Kinterbasdb backend accepts the ``enable_rowcount`` and ``retaining``
arguments accepted by the :mod:`sqlalchemy.dialects.firebird.fdb` dialect.
In addition, it also accepts the following:
* ``type_conv`` - select the kind of mapping done on the types: by default
SQLAlchemy uses 200 with Unicode, datetime and decimal support. See
the linked documents below for further information.
* ``concurrency_level`` - set the backend policy with regards to threading
issues: by default SQLAlchemy uses policy 1. See the linked documents
below for further information.
.. seealso::
http://sourceforge.net/projects/kinterbasdb
http://kinterbasdb.sourceforge.net/dist_docs/usage.html#adv_param_conv_dynamic_type_translation
http://kinterbasdb.sourceforge.net/dist_docs/usage.html#special_issue_concurrency
""" # noqa
import decimal
from re import match
from .base import FBDialect
from .base import FBExecutionContext
from ... import types as sqltypes
from ... import util
class _kinterbasdb_numeric(object):
def bind_processor(self, dialect):
def process(value):
if isinstance(value, decimal.Decimal):
return str(value)
else:
return value
return process
class _FBNumeric_kinterbasdb(_kinterbasdb_numeric, sqltypes.Numeric):
pass
class _FBFloat_kinterbasdb(_kinterbasdb_numeric, sqltypes.Float):
pass
class FBExecutionContext_kinterbasdb(FBExecutionContext):
@property
def rowcount(self):
if self.execution_options.get(
"enable_rowcount", self.dialect.enable_rowcount
):
return self.cursor.rowcount
else:
return -1
class FBDialect_kinterbasdb(FBDialect):
driver = "kinterbasdb"
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
execution_ctx_cls = FBExecutionContext_kinterbasdb
supports_native_decimal = True
colspecs = util.update_copy(
FBDialect.colspecs,
{
sqltypes.Numeric: _FBNumeric_kinterbasdb,
sqltypes.Float: _FBFloat_kinterbasdb,
},
)
def __init__(
self,
type_conv=200,
concurrency_level=1,
enable_rowcount=True,
retaining=False,
**kwargs
):
super(FBDialect_kinterbasdb, self).__init__(**kwargs)
self.enable_rowcount = enable_rowcount
self.type_conv = type_conv
self.concurrency_level = concurrency_level
self.retaining = retaining
if enable_rowcount:
self.supports_sane_rowcount = True
@classmethod
def dbapi(cls):
return __import__("kinterbasdb")
def do_execute(self, cursor, statement, parameters, context=None):
# kinterbasdb does not accept None, but wants an empty list
# when there are no arguments.
cursor.execute(statement, parameters or [])
def do_rollback(self, dbapi_connection):
dbapi_connection.rollback(self.retaining)
def do_commit(self, dbapi_connection):
dbapi_connection.commit(self.retaining)
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
if opts.get("port"):
opts["host"] = "%s/%s" % (opts["host"], opts["port"])
del opts["port"]
opts.update(url.query)
util.coerce_kw_type(opts, "type_conv", int)
type_conv = opts.pop("type_conv", self.type_conv)
concurrency_level = opts.pop(
"concurrency_level", self.concurrency_level
)
if self.dbapi is not None:
initialized = getattr(self.dbapi, "initialized", None)
if initialized is None:
# CVS rev 1.96 changed the name of the attribute:
# http://kinterbasdb.cvs.sourceforge.net/viewvc/kinterbasdb/
# Kinterbasdb-3.0/__init__.py?r1=1.95&r2=1.96
initialized = getattr(self.dbapi, "_initialized", False)
if not initialized:
self.dbapi.init(
type_conv=type_conv, concurrency_level=concurrency_level
)
return ([], opts)
def _get_server_version_info(self, connection):
"""Get the version of the Firebird server used by a connection.
Returns a tuple of (`major`, `minor`, `build`), three integers
representing the version of the attached server.
"""
# This is the simpler approach (the other uses the services api),
# that for backward compatibility reasons returns a string like
# LI-V6.3.3.12981 Firebird 2.0
# where the first version is a fake one resembling the old
# Interbase signature.
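# e.g. "LI-V6.3.3.12981 Firebird 2.0" parses to (2, 0, 12981, 'firebird')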
fbconn = connection.connection
version = fbconn.server_version
return self._parse_version_info(version)
def _parse_version_info(self, version):
m = match(
r"\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+)( \w+ (\d+)\.(\d+))?", version
)
if not m:
raise AssertionError(
"Could not determine version from string '%s'" % version
)
if m.group(5) is not None:
return tuple([int(x) for x in m.group(6, 7, 4)] + ["firebird"])
else:
return tuple([int(x) for x in m.group(1, 2, 3)] + ["interbase"])
def is_disconnect(self, e, connection, cursor):
if isinstance(
e, (self.dbapi.OperationalError, self.dbapi.ProgrammingError)
):
msg = str(e)
return (
"Error writing data to the connection" in msg
or "Unable to complete network request to host" in msg
or "Invalid connection state" in msg
or "Invalid cursor state" in msg
or "connection shutdown" in msg
)
else:
return False
dialect = FBDialect_kinterbasdb
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/firebird/base.py
|
# firebird/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: firebird
:name: Firebird
.. note::
The Firebird dialect within SQLAlchemy **is not currently supported**.
It is not tested within continuous integration and is likely to have
many issues and caveats not currently handled. Consider using the
`external dialect <https://github.com/pauldex/sqlalchemy-firebird>`_
instead.
Firebird Dialects
-----------------
Firebird offers two distinct dialects_ (not to be confused with a
SQLAlchemy ``Dialect``):
dialect 1
This is the old syntax and behaviour, inherited from Interbase pre-6.0.
dialect 3
This is the newer and supported syntax, introduced in Interbase 6.0.
The SQLAlchemy Firebird dialect detects these versions and
adjusts its representation of SQL accordingly. However,
support for dialect 1 is not well tested and probably has
incompatibilities.
Locking Behavior
----------------
Firebird locks tables aggressively. For this reason, a DROP TABLE may
hang until other transactions are released. SQLAlchemy does its best
to release transactions as quickly as possible. The most common cause
of hanging transactions is a non-fully consumed result set, i.e.::
result = engine.execute("select * from table")
row = result.fetchone()
return
Where above, the ``ResultProxy`` has not been fully consumed. The
connection will be returned to the pool and the transactional state
rolled back once the Python garbage collector reclaims the objects
which hold onto the connection, which often occurs asynchronously.
The above use case can be alleviated by calling ``first()`` on the
``ResultProxy`` which will fetch the first row and immediately close
all remaining cursor/connection resources.
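For example, a minimal sketch of the safer pattern::
result = engine.execute("select * from table")
row = result.first()  # fetches one row and releases remaining resources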
RETURNING support
-----------------
Firebird 2.0 supports returning a result set from inserts, and 2.1
extends that to deletes and updates. This is generically exposed by
the SQLAlchemy ``returning()`` method, such as::
# INSERT..RETURNING
result = table.insert().returning(table.c.col1, table.c.col2).\
values(name='foo')
print(result.fetchall())
# UPDATE..RETURNING
raises = empl.update().returning(empl.c.id, empl.c.salary).\
where(empl.c.sales>100).\
values(dict(salary=empl.c.salary * 1.1))
print(raises.fetchall())
.. _dialects: http://mc-computing.com/Databases/Firebird/SQL_Dialect.html
"""
import datetime
from sqlalchemy import exc
from sqlalchemy import sql
from sqlalchemy import types as sqltypes
from sqlalchemy import util
from sqlalchemy.engine import default
from sqlalchemy.engine import reflection
from sqlalchemy.sql import compiler
from sqlalchemy.sql import expression
from sqlalchemy.types import BIGINT
from sqlalchemy.types import BLOB
from sqlalchemy.types import DATE
from sqlalchemy.types import FLOAT
from sqlalchemy.types import INTEGER
from sqlalchemy.types import Integer
from sqlalchemy.types import NUMERIC
from sqlalchemy.types import SMALLINT
from sqlalchemy.types import TEXT
from sqlalchemy.types import TIME
from sqlalchemy.types import TIMESTAMP
RESERVED_WORDS = set(
[
"active",
"add",
"admin",
"after",
"all",
"alter",
"and",
"any",
"as",
"asc",
"ascending",
"at",
"auto",
"avg",
"before",
"begin",
"between",
"bigint",
"bit_length",
"blob",
"both",
"by",
"case",
"cast",
"char",
"character",
"character_length",
"char_length",
"check",
"close",
"collate",
"column",
"commit",
"committed",
"computed",
"conditional",
"connect",
"constraint",
"containing",
"count",
"create",
"cross",
"cstring",
"current",
"current_connection",
"current_date",
"current_role",
"current_time",
"current_timestamp",
"current_transaction",
"current_user",
"cursor",
"database",
"date",
"day",
"dec",
"decimal",
"declare",
"default",
"delete",
"desc",
"descending",
"disconnect",
"distinct",
"do",
"domain",
"double",
"drop",
"else",
"end",
"entry_point",
"escape",
"exception",
"execute",
"exists",
"exit",
"external",
"extract",
"fetch",
"file",
"filter",
"float",
"for",
"foreign",
"from",
"full",
"function",
"gdscode",
"generator",
"gen_id",
"global",
"grant",
"group",
"having",
"hour",
"if",
"in",
"inactive",
"index",
"inner",
"input_type",
"insensitive",
"insert",
"int",
"integer",
"into",
"is",
"isolation",
"join",
"key",
"leading",
"left",
"length",
"level",
"like",
"long",
"lower",
"manual",
"max",
"maximum_segment",
"merge",
"min",
"minute",
"module_name",
"month",
"names",
"national",
"natural",
"nchar",
"no",
"not",
"null",
"numeric",
"octet_length",
"of",
"on",
"only",
"open",
"option",
"or",
"order",
"outer",
"output_type",
"overflow",
"page",
"pages",
"page_size",
"parameter",
"password",
"plan",
"position",
"post_event",
"precision",
"primary",
"privileges",
"procedure",
"protected",
"rdb$db_key",
"read",
"real",
"record_version",
"recreate",
"recursive",
"references",
"release",
"reserv",
"reserving",
"retain",
"returning_values",
"returns",
"revoke",
"right",
"rollback",
"rows",
"row_count",
"savepoint",
"schema",
"second",
"segment",
"select",
"sensitive",
"set",
"shadow",
"shared",
"singular",
"size",
"smallint",
"snapshot",
"some",
"sort",
"sqlcode",
"stability",
"start",
"starting",
"starts",
"statistics",
"sub_type",
"sum",
"suspend",
"table",
"then",
"time",
"timestamp",
"to",
"trailing",
"transaction",
"trigger",
"trim",
"uncommitted",
"union",
"unique",
"update",
"upper",
"user",
"using",
"value",
"values",
"varchar",
"variable",
"varying",
"view",
"wait",
"when",
"where",
"while",
"with",
"work",
"write",
"year",
]
)
class _StringType(sqltypes.String):
"""Base for Firebird string types."""
def __init__(self, charset=None, **kw):
self.charset = charset
super(_StringType, self).__init__(**kw)
class VARCHAR(_StringType, sqltypes.VARCHAR):
"""Firebird VARCHAR type"""
__visit_name__ = "VARCHAR"
def __init__(self, length=None, **kwargs):
super(VARCHAR, self).__init__(length=length, **kwargs)
class CHAR(_StringType, sqltypes.CHAR):
"""Firebird CHAR type"""
__visit_name__ = "CHAR"
def __init__(self, length=None, **kwargs):
super(CHAR, self).__init__(length=length, **kwargs)
class _FBDateTime(sqltypes.DateTime):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
colspecs = {sqltypes.DateTime: _FBDateTime}
ischema_names = {
"SHORT": SMALLINT,
"LONG": INTEGER,
"QUAD": FLOAT,
"FLOAT": FLOAT,
"DATE": DATE,
"TIME": TIME,
"TEXT": TEXT,
"INT64": BIGINT,
"DOUBLE": FLOAT,
"TIMESTAMP": TIMESTAMP,
"VARYING": VARCHAR,
"CSTRING": CHAR,
"BLOB": BLOB,
}
# TODO: date conversion types (should be implemented as _FBDateTime,
# _FBDate, etc. as bind/result functionality is required)
class FBTypeCompiler(compiler.GenericTypeCompiler):
def visit_boolean(self, type_, **kw):
return self.visit_SMALLINT(type_, **kw)
def visit_datetime(self, type_, **kw):
return self.visit_TIMESTAMP(type_, **kw)
def visit_TEXT(self, type_, **kw):
return "BLOB SUB_TYPE 1"
def visit_BLOB(self, type_, **kw):
return "BLOB SUB_TYPE 0"
def _extend_string(self, type_, basic):
charset = getattr(type_, "charset", None)
if charset is None:
return basic
else:
return "%s CHARACTER SET %s" % (basic, charset)
def visit_CHAR(self, type_, **kw):
basic = super(FBTypeCompiler, self).visit_CHAR(type_, **kw)
return self._extend_string(type_, basic)
def visit_VARCHAR(self, type_, **kw):
if not type_.length:
raise exc.CompileError(
"VARCHAR requires a length on dialect %s" % self.dialect.name
)
basic = super(FBTypeCompiler, self).visit_VARCHAR(type_, **kw)
return self._extend_string(type_, basic)
class FBCompiler(sql.compiler.SQLCompiler):
"""Firebird specific idiosyncrasies"""
ansi_bind_rules = True
# def visit_contains_op_binary(self, binary, operator, **kw):
# can't use CONTAINING because it's case insensitive.
# def visit_notcontains_op_binary(self, binary, operator, **kw):
# can't use NOT CONTAINING because it's case insensitive.
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_startswith_op_binary(self, binary, operator, **kw):
return "%s STARTING WITH %s" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
)
def visit_notstartswith_op_binary(self, binary, operator, **kw):
return "%s NOT STARTING WITH %s" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
)
def visit_mod_binary(self, binary, operator, **kw):
return "mod(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_alias(self, alias, asfrom=False, **kwargs):
if self.dialect._version_two:
return super(FBCompiler, self).visit_alias(
alias, asfrom=asfrom, **kwargs
)
else:
# Override to not use the AS keyword which FB 1.5 does not like
if asfrom:
alias_name = (
isinstance(alias.name, expression._truncated_label)
and self._truncated_identifier("alias", alias.name)
or alias.name
)
return (
self.process(alias.original, asfrom=asfrom, **kwargs)
+ " "
+ self.preparer.format_alias(alias, alias_name)
)
else:
return self.process(alias.original, **kwargs)
def visit_substring_func(self, func, **kw):
s = self.process(func.clauses.clauses[0])
start = self.process(func.clauses.clauses[1])
if len(func.clauses.clauses) > 2:
length = self.process(func.clauses.clauses[2])
return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
else:
return "SUBSTRING(%s FROM %s)" % (s, start)
def visit_length_func(self, function, **kw):
if self.dialect._version_two:
return "char_length" + self.function_argspec(function)
else:
return "strlen" + self.function_argspec(function)
visit_char_length_func = visit_length_func
def function_argspec(self, func, **kw):
# TODO: this probably will need to be
# narrowed to a fixed list, some no-arg functions
# may require parens - see similar example in the oracle
# dialect
if func.clauses is not None and len(func.clauses):
return self.process(func.clause_expr, **kw)
else:
return ""
def default_from(self):
return " FROM rdb$database"
def visit_sequence(self, seq, **kw):
return "gen_id(%s, 1)" % self.preparer.format_sequence(seq)
def get_select_precolumns(self, select, **kw):
"""Called when building a ``SELECT`` statement, position is just
before column list Firebird puts the limit and offset right
after the ``SELECT``...
"""
result = ""
if select._limit_clause is not None:
result += "FIRST %s " % self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
result += "SKIP %s " % self.process(select._offset_clause, **kw)
if select._distinct:
result += "DISTINCT "
return result
def limit_clause(self, select, **kw):
"""Already taken care of in the `get_select_precolumns` method."""
return ""
def returning_clause(self, stmt, returning_cols):
columns = [
self._label_select_column(None, c, True, False, {})
for c in expression._select_iterables(returning_cols)
]
return "RETURNING " + ", ".join(columns)
class FBDDLCompiler(sql.compiler.DDLCompiler):
"""Firebird syntactic idiosyncrasies"""
def visit_create_sequence(self, create):
"""Generate a ``CREATE GENERATOR`` statement for the sequence."""
# no syntax for these
# http://www.firebirdsql.org/manual/generatorguide-sqlsyntax.html
if create.element.start is not None:
raise NotImplementedError(
"Firebird SEQUENCE doesn't support START WITH"
)
if create.element.increment is not None:
raise NotImplementedError(
"Firebird SEQUENCE doesn't support INCREMENT BY"
)
if self.dialect._version_two:
return "CREATE SEQUENCE %s" % self.preparer.format_sequence(
create.element
)
else:
return "CREATE GENERATOR %s" % self.preparer.format_sequence(
create.element
)
def visit_drop_sequence(self, drop):
"""Generate a ``DROP GENERATOR`` statement for the sequence."""
if self.dialect._version_two:
return "DROP SEQUENCE %s" % self.preparer.format_sequence(
drop.element
)
else:
return "DROP GENERATOR %s" % self.preparer.format_sequence(
drop.element
)
def visit_computed_column(self, generated):
if generated.persisted is not None:
raise exc.CompileError(
"Firebird computed columns do not support a persistence "
"method setting; set the 'persisted' flag to None for "
"Firebird support."
)
return "GENERATED ALWAYS AS (%s)" % self.sql_compiler.process(
generated.sqltext, include_table=False, literal_binds=True
)
class FBIdentifierPreparer(sql.compiler.IdentifierPreparer):
"""Install Firebird specific reserved words."""
reserved_words = RESERVED_WORDS
illegal_initial_characters = compiler.ILLEGAL_INITIAL_CHARACTERS.union(
["_"]
)
def __init__(self, dialect):
super(FBIdentifierPreparer, self).__init__(dialect, omit_schema=True)
class FBExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq, type_):
"""Get the next value from the sequence using ``gen_id()``."""
return self._execute_scalar(
"SELECT gen_id(%s, 1) FROM rdb$database"
% self.dialect.identifier_preparer.format_sequence(seq),
type_,
)
class FBDialect(default.DefaultDialect):
"""Firebird dialect"""
name = "firebird"
max_identifier_length = 31
supports_sequences = True
sequences_optional = False
supports_default_values = True
postfetch_lastrowid = False
supports_native_boolean = False
requires_name_normalize = True
supports_empty_insert = False
statement_compiler = FBCompiler
ddl_compiler = FBDDLCompiler
preparer = FBIdentifierPreparer
type_compiler = FBTypeCompiler
execution_ctx_cls = FBExecutionContext
colspecs = colspecs
ischema_names = ischema_names
construct_arguments = []
# defaults to dialect ver. 3;
# will be autodetected upon
# first connect
_version_two = True
def initialize(self, connection):
super(FBDialect, self).initialize(connection)
self._version_two = (
"firebird" in self.server_version_info
and self.server_version_info >= (2,)
) or (
"interbase" in self.server_version_info
and self.server_version_info >= (6,)
)
if not self._version_two:
# TODO: whatever other pre < 2.0 stuff goes here
self.ischema_names = ischema_names.copy()
self.ischema_names["TIMESTAMP"] = sqltypes.DATE
self.colspecs = {sqltypes.DateTime: sqltypes.DATE}
self.implicit_returning = self._version_two and self.__dict__.get(
"implicit_returning", True
)
def has_table(self, connection, table_name, schema=None):
"""Return ``True`` if the given table exists, ignoring
the `schema`."""
tblqry = """
SELECT 1 AS has_table FROM rdb$database
WHERE EXISTS (SELECT rdb$relation_name
FROM rdb$relations
WHERE rdb$relation_name=?)
"""
c = connection.execute(tblqry, [self.denormalize_name(table_name)])
return c.first() is not None
def has_sequence(self, connection, sequence_name, schema=None):
"""Return ``True`` if the given sequence (generator) exists."""
genqry = """
SELECT 1 AS has_sequence FROM rdb$database
WHERE EXISTS (SELECT rdb$generator_name
FROM rdb$generators
WHERE rdb$generator_name=?)
"""
c = connection.execute(genqry, [self.denormalize_name(sequence_name)])
return c.first() is not None
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
# there are two queries commonly mentioned for this.
# this one, using view_blr, is at the Firebird FAQ among other places:
# http://www.firebirdfaq.org/faq174/
s = """
select rdb$relation_name
from rdb$relations
where rdb$view_blr is null
and (rdb$system_flag is null or rdb$system_flag = 0);
"""
# the other query is this one. It's not clear if there's really
# any difference between these two. This link:
# http://www.alberton.info/firebird_sql_meta_info.html#.Ur3vXfZGni8
# states them as interchangeable. Some discussion at [ticket:2898]
# SELECT DISTINCT rdb$relation_name
# FROM rdb$relation_fields
# WHERE rdb$system_flag=0 AND rdb$view_context IS NULL
return [self.normalize_name(row[0]) for row in connection.execute(s)]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
# see http://www.firebirdfaq.org/faq174/
s = """
select rdb$relation_name
from rdb$relations
where rdb$view_blr is not null
and (rdb$system_flag is null or rdb$system_flag = 0);
"""
return [self.normalize_name(row[0]) for row in connection.execute(s)]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
qry = """
SELECT rdb$view_source AS view_source
FROM rdb$relations
WHERE rdb$relation_name=?
"""
rp = connection.execute(qry, [self.denormalize_name(view_name)])
row = rp.first()
if row:
return row["view_source"]
else:
return None
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
# Query to extract the PK/FK constrained fields of the given table
keyqry = """
SELECT se.rdb$field_name AS fname
FROM rdb$relation_constraints rc
JOIN rdb$index_segments se ON rc.rdb$index_name=se.rdb$index_name
WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
"""
tablename = self.denormalize_name(table_name)
# get primary key fields
c = connection.execute(keyqry, ["PRIMARY KEY", tablename])
pkfields = [self.normalize_name(r["fname"]) for r in c.fetchall()]
return {"constrained_columns": pkfields, "name": None}
@reflection.cache
def get_column_sequence(
self, connection, table_name, column_name, schema=None, **kw
):
tablename = self.denormalize_name(table_name)
colname = self.denormalize_name(column_name)
# Heuristic-query to determine the generator associated to a PK field
genqry = """
SELECT trigdep.rdb$depended_on_name AS fgenerator
FROM rdb$dependencies tabdep
JOIN rdb$dependencies trigdep
ON tabdep.rdb$dependent_name=trigdep.rdb$dependent_name
AND trigdep.rdb$depended_on_type=14
AND trigdep.rdb$dependent_type=2
JOIN rdb$triggers trig ON
trig.rdb$trigger_name=tabdep.rdb$dependent_name
WHERE tabdep.rdb$depended_on_name=?
AND tabdep.rdb$depended_on_type=0
AND trig.rdb$trigger_type=1
AND tabdep.rdb$field_name=?
AND (SELECT count(*)
FROM rdb$dependencies trigdep2
WHERE trigdep2.rdb$dependent_name = trigdep.rdb$dependent_name) = 2
"""
genr = connection.execute(genqry, [tablename, colname]).first()
if genr is not None:
return dict(name=self.normalize_name(genr["fgenerator"]))
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
# Query to extract the details of all the fields of the given table
tblqry = """
SELECT r.rdb$field_name AS fname,
r.rdb$null_flag AS null_flag,
t.rdb$type_name AS ftype,
f.rdb$field_sub_type AS stype,
f.rdb$field_length/
COALESCE(cs.rdb$bytes_per_character,1) AS flen,
f.rdb$field_precision AS fprec,
f.rdb$field_scale AS fscale,
COALESCE(r.rdb$default_source,
f.rdb$default_source) AS fdefault
FROM rdb$relation_fields r
JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
JOIN rdb$types t
ON t.rdb$type=f.rdb$field_type AND
t.rdb$field_name='RDB$FIELD_TYPE'
LEFT JOIN rdb$character_sets cs ON
f.rdb$character_set_id=cs.rdb$character_set_id
WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
ORDER BY r.rdb$field_position
"""
# get the PK, used to determine the eventual associated sequence
pk_constraint = self.get_pk_constraint(connection, table_name)
pkey_cols = pk_constraint["constrained_columns"]
tablename = self.denormalize_name(table_name)
# get all of the fields for this table
c = connection.execute(tblqry, [tablename])
cols = []
while True:
row = c.fetchone()
if row is None:
break
name = self.normalize_name(row["fname"])
orig_colname = row["fname"]
# get the data type
colspec = row["ftype"].rstrip()
coltype = self.ischema_names.get(colspec)
if coltype is None:
util.warn(
"Did not recognize type '%s' of column '%s'"
% (colspec, name)
)
coltype = sqltypes.NULLTYPE
elif issubclass(coltype, Integer) and row["fprec"] != 0:
coltype = NUMERIC(
precision=row["fprec"], scale=row["fscale"] * -1
)
elif colspec in ("VARYING", "CSTRING"):
coltype = coltype(row["flen"])
elif colspec == "TEXT":
coltype = TEXT(row["flen"])
elif colspec == "BLOB":
if row["stype"] == 1:
coltype = TEXT()
else:
coltype = BLOB()
else:
coltype = coltype()
# does it have a default value?
defvalue = None
if row["fdefault"] is not None:
# the value comes down as "DEFAULT 'value'": there may be
# more than one whitespace around the "DEFAULT" keyword
# and it may also be lower case
# (see also http://tracker.firebirdsql.org/browse/CORE-356)
defexpr = row["fdefault"].lstrip()
assert defexpr[:8].rstrip().upper() == "DEFAULT", (
"Unrecognized default value: %s" % defexpr
)
defvalue = defexpr[8:].strip()
if defvalue == "NULL":
# Redundant
defvalue = None
col_d = {
"name": name,
"type": coltype,
"nullable": not bool(row["null_flag"]),
"default": defvalue,
"autoincrement": "auto",
}
if orig_colname.lower() == orig_colname:
col_d["quote"] = True
# if the PK is a single field, try to see if it's linked to
# a sequence through a trigger
if len(pkey_cols) == 1 and name == pkey_cols[0]:
seq_d = self.get_column_sequence(connection, tablename, name)
if seq_d is not None:
col_d["sequence"] = seq_d
cols.append(col_d)
return cols
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
# Query to extract the details of each UK/FK of the given table
fkqry = """
SELECT rc.rdb$constraint_name AS cname,
cse.rdb$field_name AS fname,
ix2.rdb$relation_name AS targetrname,
se.rdb$field_name AS targetfname
FROM rdb$relation_constraints rc
JOIN rdb$indices ix1 ON ix1.rdb$index_name=rc.rdb$index_name
JOIN rdb$indices ix2 ON ix2.rdb$index_name=ix1.rdb$foreign_key
JOIN rdb$index_segments cse ON
cse.rdb$index_name=ix1.rdb$index_name
JOIN rdb$index_segments se
ON se.rdb$index_name=ix2.rdb$index_name
AND se.rdb$field_position=cse.rdb$field_position
WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
ORDER BY se.rdb$index_name, se.rdb$field_position
"""
tablename = self.denormalize_name(table_name)
c = connection.execute(fkqry, ["FOREIGN KEY", tablename])
fks = util.defaultdict(
lambda: {
"name": None,
"constrained_columns": [],
"referred_schema": None,
"referred_table": None,
"referred_columns": [],
}
)
for row in c:
cname = self.normalize_name(row["cname"])
fk = fks[cname]
if not fk["name"]:
fk["name"] = cname
fk["referred_table"] = self.normalize_name(row["targetrname"])
fk["constrained_columns"].append(self.normalize_name(row["fname"]))
fk["referred_columns"].append(
self.normalize_name(row["targetfname"])
)
return list(fks.values())
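# Illustrative sketch, not part of the dialect source: each value collected
# in "fks" above follows the Inspector foreign-key contract, e.g. (with
# hypothetical identifiers):
#
#     {"name": "fk_employee_job", "constrained_columns": ["job_code"],
#      "referred_schema": None, "referred_table": "job",
#      "referred_columns": ["job_code"]}
#
# Multi-column constraints simply accumulate additional entries in the
# "constrained_columns" / "referred_columns" lists, one row per segment.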
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
qry = """
SELECT ix.rdb$index_name AS index_name,
ix.rdb$unique_flag AS unique_flag,
ic.rdb$field_name AS field_name
FROM rdb$indices ix
JOIN rdb$index_segments ic
ON ix.rdb$index_name=ic.rdb$index_name
LEFT OUTER JOIN rdb$relation_constraints
ON rdb$relation_constraints.rdb$index_name =
ic.rdb$index_name
WHERE ix.rdb$relation_name=? AND ix.rdb$foreign_key IS NULL
AND rdb$relation_constraints.rdb$constraint_type IS NULL
ORDER BY index_name, ic.rdb$field_position
"""
c = connection.execute(qry, [self.denormalize_name(table_name)])
indexes = util.defaultdict(dict)
for row in c:
indexrec = indexes[row["index_name"]]
if "name" not in indexrec:
indexrec["name"] = self.normalize_name(row["index_name"])
indexrec["column_names"] = []
indexrec["unique"] = bool(row["unique_flag"])
indexrec["column_names"].append(
self.normalize_name(row["field_name"])
)
return list(indexes.values())
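# Illustrative usage sketch, not part of the dialect source.  These reflection
# methods are normally reached through the Inspector facade rather than being
# called directly; the connection URL below is an assumption for a local
# Firebird database:
#
#     from sqlalchemy import create_engine, inspect
#
#     engine = create_engine("firebird://sysdba:masterkey@localhost/db.fdb")
#     insp = inspect(engine)
#     insp.get_columns("job")        # list of column dicts as built above
#     insp.get_foreign_keys("job")   # list of foreign-key dicts
#     insp.get_indexes("job")        # list of index dicts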
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/sync.py
|
# orm/sync.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used for copying data
between instances based on join conditions.
"""
from . import attributes
from . import exc
from . import util as orm_util
from .. import util
def populate(
source,
source_mapper,
dest,
dest_mapper,
synchronize_pairs,
uowcommit,
flag_cascaded_pks,
):
source_dict = source.dict
dest_dict = dest.dict
for l, r in synchronize_pairs:
try:
# inline of source_mapper._get_state_attr_by_column
prop = source_mapper._columntoproperty[l]
value = source.manager[prop.key].impl.get(
source, source_dict, attributes.PASSIVE_OFF
)
except exc.UnmappedColumnError as err:
_raise_col_to_prop(False, source_mapper, l, dest_mapper, r, err)
try:
# inline of dest_mapper._set_state_attr_by_column
prop = dest_mapper._columntoproperty[r]
dest.manager[prop.key].impl.set(dest, dest_dict, value, None)
except exc.UnmappedColumnError as err:
_raise_col_to_prop(True, source_mapper, l, dest_mapper, r, err)
# technically the "r.primary_key" check isn't
# needed here, but we check for this condition to limit
# how often this logic is invoked for memory/performance
# reasons, since we only need this info for a primary key
# destination.
if (
flag_cascaded_pks
and l.primary_key
and r.primary_key
and r.references(l)
):
uowcommit.attributes[("pk_cascaded", dest, r)] = True
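# Illustrative note, not part of the module: "synchronize_pairs" is the list
# of (parent_column, child_column) tuples derived from a relationship's join
# condition.  For a conventional one-to-many join such as
# users.c.id == addresses.c.user_id, the pair is
# (users.c.id, addresses.c.user_id), and populate() copies the parent's "id"
# value into the child's "user_id" attribute during flush.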
def bulk_populate_inherit_keys(source_dict, source_mapper, synchronize_pairs):
# a simplified version of populate() used by bulk insert mode
for l, r in synchronize_pairs:
try:
prop = source_mapper._columntoproperty[l]
value = source_dict[prop.key]
except exc.UnmappedColumnError as err:
_raise_col_to_prop(False, source_mapper, l, source_mapper, r, err)
try:
prop = source_mapper._columntoproperty[r]
source_dict[prop.key] = value
except exc.UnmappedColumnError:
_raise_col_to_prop(True, source_mapper, l, source_mapper, r)
def clear(dest, dest_mapper, synchronize_pairs):
for l, r in synchronize_pairs:
if (
r.primary_key
and dest_mapper._get_state_attr_by_column(dest, dest.dict, r)
not in orm_util._none_set
):
raise AssertionError(
"Dependency rule tried to blank-out primary key "
"column '%s' on instance '%s'" % (r, orm_util.state_str(dest))
)
try:
dest_mapper._set_state_attr_by_column(dest, dest.dict, r, None)
except exc.UnmappedColumnError as err:
_raise_col_to_prop(True, None, l, dest_mapper, r, err)
def update(source, source_mapper, dest, old_prefix, synchronize_pairs):
for l, r in synchronize_pairs:
try:
oldvalue = source_mapper._get_committed_attr_by_column(
source.obj(), l
)
value = source_mapper._get_state_attr_by_column(
source, source.dict, l, passive=attributes.PASSIVE_OFF
)
except exc.UnmappedColumnError as err:
_raise_col_to_prop(False, source_mapper, l, None, r, err)
dest[r.key] = value
dest[old_prefix + r.key] = oldvalue
def populate_dict(source, source_mapper, dict_, synchronize_pairs):
for l, r in synchronize_pairs:
try:
value = source_mapper._get_state_attr_by_column(
source, source.dict, l, passive=attributes.PASSIVE_OFF
)
except exc.UnmappedColumnError as err:
_raise_col_to_prop(False, source_mapper, l, None, r, err)
dict_[r.key] = value
def source_modified(uowcommit, source, source_mapper, synchronize_pairs):
"""return true if the source object has changes from an old to a
new value on the given synchronize pairs
"""
for l, r in synchronize_pairs:
try:
prop = source_mapper._columntoproperty[l]
except exc.UnmappedColumnError as err:
_raise_col_to_prop(False, source_mapper, l, None, r, err)
history = uowcommit.get_attribute_history(
source, prop.key, attributes.PASSIVE_NO_INITIALIZE
)
if bool(history.deleted):
return True
else:
return False
def _raise_col_to_prop(
isdest, source_mapper, source_column, dest_mapper, dest_column, err
):
if isdest:
util.raise_(
exc.UnmappedColumnError(
"Can't execute sync rule for "
"destination column '%s'; mapper '%s' does not map "
"this column. Try using an explicit `foreign_keys` "
"collection which does not include this column (or use "
"a viewonly=True relation)." % (dest_column, dest_mapper)
),
replace_context=err,
)
else:
util.raise_(
exc.UnmappedColumnError(
"Can't execute sync rule for "
"source column '%s'; mapper '%s' does not map this "
"column. Try using an explicit `foreign_keys` "
"collection which does not include destination column "
"'%s' (or use a viewonly=True relation)."
% (source_column, source_mapper, dest_column)
),
replace_context=err,
)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/interfaces.py
|
# orm/interfaces.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Contains various base classes used throughout the ORM.
Defines some key base classes prominent within the internals,
as well as the now-deprecated ORM extension classes.
Other than the deprecated extensions, this module and the
classes within are mostly private, though some attributes
are exposed when inspecting mappings.
"""
from __future__ import absolute_import
import collections
from . import exc as orm_exc
from . import path_registry
from .base import _MappedAttribute # noqa
from .base import EXT_CONTINUE
from .base import EXT_SKIP
from .base import EXT_STOP
from .base import InspectionAttr # noqa
from .base import InspectionAttrInfo # noqa
from .base import MANYTOMANY
from .base import MANYTOONE
from .base import NOT_EXTENSION
from .base import ONETOMANY
from .. import inspect
from .. import util
from ..sql import operators
# imported later
MapperExtension = SessionExtension = AttributeExtension = None
__all__ = (
"AttributeExtension",
"EXT_CONTINUE",
"EXT_STOP",
"EXT_SKIP",
"ONETOMANY",
"MANYTOMANY",
"MANYTOONE",
"NOT_EXTENSION",
"LoaderStrategy",
"MapperExtension",
"MapperOption",
"MapperProperty",
"PropComparator",
"SessionExtension",
"StrategizedProperty",
)
class MapperProperty(_MappedAttribute, InspectionAttr, util.MemoizedSlots):
"""Represent a particular class attribute mapped by :class:`_orm.Mapper`.
The most common occurrences of :class:`.MapperProperty` are the
mapped :class:`_schema.Column`, which is represented in a mapping as
an instance of :class:`.ColumnProperty`,
and a reference to another class produced by :func:`_orm.relationship`,
represented in the mapping as an instance of
:class:`.RelationshipProperty`.
"""
__slots__ = (
"_configure_started",
"_configure_finished",
"parent",
"key",
"info",
)
cascade = frozenset()
"""The set of 'cascade' attribute names.
This collection is checked before the 'cascade_iterator' method is called.
The collection typically only applies to a RelationshipProperty.
"""
is_property = True
"""Part of the InspectionAttr interface; states this object is a
mapper property.
"""
def _memoized_attr_info(self):
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.InspectionAttr`.
The dictionary is generated when first accessed. Alternatively,
it can be specified as a constructor argument to the
:func:`.column_property`, :func:`_orm.relationship`, or
:func:`.composite`
functions.
.. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also
available on extension types via the
:attr:`.InspectionAttrInfo.info` attribute, so that it can apply
to a wider variety of ORM and extension constructs.
.. seealso::
:attr:`.QueryableAttribute.info`
:attr:`.SchemaItem.info`
"""
return {}
def setup(self, context, query_entity, path, adapter, **kwargs):
"""Called by Query for the purposes of constructing a SQL statement.
Each MapperProperty associated with the target mapper processes the
statement referenced by the query context, adding columns and/or
criterion as appropriate.
"""
def create_row_processor(
self, context, path, mapper, result, adapter, populators
):
"""Produce row processing functions and append to the given
set of populators lists.
"""
def cascade_iterator(
self, type_, state, visited_instances=None, halt_on=None
):
"""Iterate through instances related to the given instance for
a particular 'cascade', starting with this MapperProperty.
Return an iterator of 3-tuples (instance, mapper, state).
Note that the 'cascade' collection on this MapperProperty is
checked first for the given type before cascade_iterator is called.
This method typically only applies to RelationshipProperty.
"""
return iter(())
def set_parent(self, parent, init):
"""Set the parent mapper that references this MapperProperty.
This method is overridden by some subclasses to perform extra
setup when the mapper is first known.
"""
self.parent = parent
def instrument_class(self, mapper):
"""Hook called by the Mapper to the property to initiate
instrumentation of the class attribute managed by this
MapperProperty.
The MapperProperty here will typically call out to the
attributes module to set up an InstrumentedAttribute.
This step is the first of two steps to set up an InstrumentedAttribute,
and is called early in the mapper setup process.
The second step is typically the init_class_attribute step,
called from StrategizedProperty via the post_instrument_class()
hook. This step assigns additional state to the InstrumentedAttribute
(specifically the "impl") which has been determined after the
MapperProperty has determined what kind of persistence
management it needs to do (e.g. scalar, object, collection, etc).
"""
def __init__(self):
self._configure_started = False
self._configure_finished = False
def init(self):
"""Called after all mappers are created to assemble
relationships between mappers and perform other post-mapper-creation
initialization steps.
"""
self._configure_started = True
self.do_init()
self._configure_finished = True
@property
def class_attribute(self):
"""Return the class-bound descriptor corresponding to this
:class:`.MapperProperty`.
This is basically a ``getattr()`` call::
return getattr(self.parent.class_, self.key)
I.e. if this :class:`.MapperProperty` were named ``addresses``,
and the class to which it is mapped is ``User``, this sequence
is possible::
>>> from sqlalchemy import inspect
>>> mapper = inspect(User)
>>> addresses_property = mapper.attrs.addresses
>>> addresses_property.class_attribute is User.addresses
True
>>> User.addresses.property is addresses_property
True
"""
return getattr(self.parent.class_, self.key)
def do_init(self):
"""Perform subclass-specific initialization post-mapper-creation
steps.
This is a template method called by the ``MapperProperty``
object's init() method.
"""
def post_instrument_class(self, mapper):
"""Perform instrumentation adjustments that need to occur
after init() has completed.
The given Mapper is the Mapper invoking the operation, which
may not be the same Mapper as self.parent in an inheritance
scenario; however, Mapper will always at least be a sub-mapper of
self.parent.
This method is typically used by StrategizedProperty, which delegates
it to LoaderStrategy.init_class_attribute() to perform final setup
on the class-bound InstrumentedAttribute.
"""
def merge(
self,
session,
source_state,
source_dict,
dest_state,
dest_dict,
load,
_recursive,
_resolve_conflict_map,
):
"""Merge the attribute represented by this ``MapperProperty``
from source to destination object.
"""
def __repr__(self):
return "<%s at 0x%x; %s>" % (
self.__class__.__name__,
id(self),
getattr(self, "key", "no key"),
)
class PropComparator(operators.ColumnOperators):
r"""Defines SQL operators for :class:`.MapperProperty` objects.
SQLAlchemy allows for operators to
be redefined at both the Core and ORM level. :class:`.PropComparator`
is the base class of operator redefinition for ORM-level operations,
including those of :class:`.ColumnProperty`,
:class:`.RelationshipProperty`, and :class:`.CompositeProperty`.
.. note:: With the advent of Hybrid properties introduced in SQLAlchemy
0.7, as well as Core-level operator redefinition in
SQLAlchemy 0.8, the use case for user-defined :class:`.PropComparator`
instances is extremely rare. See :ref:`hybrids_toplevel` as well
as :ref:`types_operators`.
User-defined subclasses of :class:`.PropComparator` may be created. The
built-in Python comparison and math operator methods, such as
:meth:`.operators.ColumnOperators.__eq__`,
:meth:`.operators.ColumnOperators.__lt__`, and
:meth:`.operators.ColumnOperators.__add__`, can be overridden to provide
new operator behavior. The custom :class:`.PropComparator` is passed to
the :class:`.MapperProperty` instance via the ``comparator_factory``
argument. In each case,
the appropriate subclass of :class:`.PropComparator` should be used::
# definition of custom PropComparator subclasses
from sqlalchemy.orm.properties import \
ColumnProperty,\
CompositeProperty,\
RelationshipProperty
class MyColumnComparator(ColumnProperty.Comparator):
def __eq__(self, other):
return self.__clause_element__() == other
class MyRelationshipComparator(RelationshipProperty.Comparator):
def any(self, expression):
"define the 'any' operation"
# ...
class MyCompositeComparator(CompositeProperty.Comparator):
def __gt__(self, other):
"redefine the 'greater than' operation"
return sql.and_(*[a>b for a, b in
zip(self.__clause_element__().clauses,
other.__composite_values__())])
# application of custom PropComparator subclasses
from sqlalchemy.orm import column_property, relationship, composite
from sqlalchemy import Column, String
class SomeMappedClass(Base):
some_column = column_property(Column("some_column", String),
comparator_factory=MyColumnComparator)
some_relationship = relationship(SomeOtherClass,
comparator_factory=MyRelationshipComparator)
some_composite = composite(
Column("a", String), Column("b", String),
comparator_factory=MyCompositeComparator
)
Note that for column-level operator redefinition, it's usually
simpler to define the operators at the Core level, using the
:attr:`.TypeEngine.comparator_factory` attribute. See
:ref:`types_operators` for more detail.
.. seealso::
:class:`.ColumnProperty.Comparator`
:class:`.RelationshipProperty.Comparator`
:class:`.CompositeProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
__slots__ = "prop", "property", "_parententity", "_adapt_to_entity"
def __init__(self, prop, parentmapper, adapt_to_entity=None):
self.prop = self.property = prop
self._parententity = adapt_to_entity or parentmapper
self._adapt_to_entity = adapt_to_entity
def __clause_element__(self):
raise NotImplementedError("%r" % self)
def _query_clause_element(self):
return self.__clause_element__()
def _bulk_update_tuples(self, value):
return [(self.__clause_element__(), value)]
def adapt_to_entity(self, adapt_to_entity):
"""Return a copy of this PropComparator which will use the given
:class:`.AliasedInsp` to produce corresponding expressions.
"""
return self.__class__(self.prop, self._parententity, adapt_to_entity)
@property
def _parentmapper(self):
"""legacy; this is renamed to _parententity to be
compatible with QueryableAttribute."""
return inspect(self._parententity).mapper
@property
def adapter(self):
"""Produce a callable that adapts column expressions
to suit an aliased version of this comparator.
"""
if self._adapt_to_entity is None:
return None
else:
return self._adapt_to_entity._adapt_element
@property
def info(self):
return self.property.info
@staticmethod
def any_op(a, b, **kwargs):
return a.any(b, **kwargs)
@staticmethod
def has_op(a, b, **kwargs):
return a.has(b, **kwargs)
@staticmethod
def of_type_op(a, class_):
return a.of_type(class_)
def of_type(self, class_):
r"""Redefine this object in terms of a polymorphic subclass,
:func:`.with_polymorphic` construct, or :func:`.aliased` construct.
Returns a new PropComparator from which further criterion can be
evaluated.
e.g.::
query.join(Company.employees.of_type(Engineer)).\
filter(Engineer.name=='foo')
:param \class_: a class or mapper indicating that criterion will be
against this specific subclass.
.. seealso::
:ref:`inheritance_of_type`
"""
return self.operate(PropComparator.of_type_op, class_)
def any(self, criterion=None, **kwargs):
r"""Return true if this collection contains any member that meets the
given criterion.
The usual implementation of ``any()`` is
:meth:`.RelationshipProperty.Comparator.any`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.any_op, criterion, **kwargs)
def has(self, criterion=None, **kwargs):
r"""Return true if this element references a member which meets the
given criterion.
The usual implementation of ``has()`` is
:meth:`.RelationshipProperty.Comparator.has`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.has_op, criterion, **kwargs)
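# Illustrative usage sketch, not part of the module.  Assuming the
# tutorial-style User / Address mapped classes, any() and has() render EXISTS
# subqueries against a collection and a scalar reference, respectively:
#
#     # one-to-many collection
#     session.query(User).filter(
#         User.addresses.any(Address.email_address == "ed@example.com")
#     )
#
#     # many-to-one reference
#     session.query(Address).filter(Address.user.has(User.name == "ed"))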
class StrategizedProperty(MapperProperty):
"""A MapperProperty which uses selectable strategies to affect
loading behavior.
There is a single strategy selected by default. Alternate
strategies can be selected at Query time through the usage of
``StrategizedOption`` objects via the Query.options() method.
The mechanics of StrategizedProperty are used for every Query
invocation for every mapped attribute participating in that Query,
to determine first how the attribute will be rendered in SQL
and secondly how the attribute will retrieve a value from a result
row and apply it to a mapped object. The routines here are very
performance-critical.
"""
__slots__ = (
"_strategies",
"strategy",
"_wildcard_token",
"_default_path_loader_key",
)
strategy_wildcard_key = None
def _memoized_attr__wildcard_token(self):
return (
"%s:%s"
% (self.strategy_wildcard_key, path_registry._WILDCARD_TOKEN),
)
def _memoized_attr__default_path_loader_key(self):
return (
"loader",
(
"%s:%s"
% (self.strategy_wildcard_key, path_registry._DEFAULT_TOKEN),
),
)
def _get_context_loader(self, context, path):
load = None
search_path = path[self]
# search among: exact match, "attr.*", "default" strategy
# if any.
for path_key in (
search_path._loader_key,
search_path._wildcard_path_loader_key,
search_path._default_path_loader_key,
):
if path_key in context.attributes:
load = context.attributes[path_key]
break
return load
def _get_strategy(self, key):
try:
return self._strategies[key]
except KeyError:
pass
# run outside to prevent transfer of exception context
cls = self._strategy_lookup(self, *key)
self._strategies[key] = self._strategies[cls] = strategy = cls(
self, key
)
return strategy
def setup(self, context, query_entity, path, adapter, **kwargs):
loader = self._get_context_loader(context, path)
if loader and loader.strategy:
strat = self._get_strategy(loader.strategy)
else:
strat = self.strategy
strat.setup_query(
context, query_entity, path, loader, adapter, **kwargs
)
def create_row_processor(
self, context, path, mapper, result, adapter, populators
):
loader = self._get_context_loader(context, path)
if loader and loader.strategy:
strat = self._get_strategy(loader.strategy)
else:
strat = self.strategy
strat.create_row_processor(
context, path, loader, mapper, result, adapter, populators
)
def do_init(self):
self._strategies = {}
self.strategy = self._get_strategy(self.strategy_key)
def post_instrument_class(self, mapper):
if (
not self.parent.non_primary
and not mapper.class_manager._attr_has_impl(self.key)
):
self.strategy.init_class_attribute(mapper)
_all_strategies = collections.defaultdict(dict)
@classmethod
def strategy_for(cls, **kw):
def decorate(dec_cls):
# ensure each subclass of the strategy has its
# own _strategy_keys collection
if "_strategy_keys" not in dec_cls.__dict__:
dec_cls._strategy_keys = []
key = tuple(sorted(kw.items()))
cls._all_strategies[cls][key] = dec_cls
dec_cls._strategy_keys.append(key)
return dec_cls
return decorate
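# Illustrative sketch of the registration pattern used by the built-in loader
# strategies (the class name below is hypothetical; the real registrations
# live in orm/strategies.py):
#
#     @RelationshipProperty.strategy_for(lazy="select")
#     class HypotheticalLazyLoader(LoaderStrategy):
#         ...
#
# The keyword arguments become the sorted tuple key stored in
# _all_strategies, which _strategy_lookup() below resolves when a loader
# option such as lazyload() / joinedload() requests that strategy.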
@classmethod
def _strategy_lookup(cls, requesting_property, *key):
for prop_cls in cls.__mro__:
if prop_cls in cls._all_strategies:
strategies = cls._all_strategies[prop_cls]
try:
return strategies[key]
except KeyError:
pass
for property_type, strats in cls._all_strategies.items():
if key in strats:
intended_property_type = property_type
actual_strategy = strats[key]
break
else:
intended_property_type = None
actual_strategy = None
raise orm_exc.LoaderStrategyException(
cls,
requesting_property,
intended_property_type,
actual_strategy,
key,
)
class MapperOption(object):
"""Describe a modification to a Query."""
propagate_to_loaders = False
"""if True, indicate this option should be carried along
to "secondary" Query objects produced during lazy loads
or refresh operations.
"""
def process_query(self, query):
"""Apply a modification to the given :class:`_query.Query`."""
def process_query_conditionally(self, query):
"""same as process_query(), except that this option may not
apply to the given query.
This is typically used during a lazy load or scalar refresh
operation to propagate options stated in the original Query to the
new Query being used for the load. It occurs for those options that
specify propagate_to_loaders=True.
"""
self.process_query(query)
def _generate_cache_key(self, path):
"""Used by the "baked lazy loader" to see if this option can be cached.
The "baked lazy loader" refers to the :class:`_query.Query` that is
produced during a lazy load operation for a mapped relationship.
It does not yet apply to the "lazy" load operation for deferred
or expired column attributes, however this may change in the future.
This loader generates SQL for a query only once and attempts to cache
it; from that point on, if the SQL has been cached it will no longer
run the :meth:`_query.Query.options` method of the
:class:`_query.Query`. The
:class:`.MapperOption` object that wishes to participate within a lazy
load operation therefore needs to tell the baked loader that it either
needs to forego this caching, or that it needs to include the state of
the :class:`.MapperOption` itself as part of its cache key, otherwise
SQL or other query state that has been affected by the
:class:`.MapperOption` may be cached in place of a query that does not
include these modifications, or the option may not be invoked at all.
By default, this method returns the value ``False``, which means
the :class:`.BakedQuery` generated by the lazy loader will
not cache the SQL when this :class:`.MapperOption` is present.
This is the safest option and ensures both that the option is
invoked every time, and also that the cache isn't filled up with
an unlimited number of :class:`_query.Query` objects for an unlimited
number of :class:`.MapperOption` objects.
.. versionchanged:: 1.2.8 the default return value of
:meth:`.MapperOption._generate_cache_key` is False; previously it
was ``None`` indicating "safe to cache, don't include as part of
the cache key"
To enable caching of :class:`_query.Query` objects within lazy loaders, a
given :class:`.MapperOption` that returns a cache key must return a key
that uniquely identifies the complete state of this option, which will
match any other :class:`.MapperOption` that itself retains the
identical state. This includes path options, flags, etc. It should
be a state that is repeatable and part of a limited set of possible
options.
If the :class:`.MapperOption` does not apply to the given path and
would not affect query results on such a path, it should return None,
indicating the :class:`_query.Query` is safe to cache for this given
loader path and that this :class:`.MapperOption` need not be
part of the cache key.
"""
return False
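# Illustrative sketch, not part of the module: a hypothetical option whose
# entire state is a single flag could opt into baked lazy-load caching by
# returning a repeatable, hashable key:
#
#     class HypotheticalFlagOption(MapperOption):
#         propagate_to_loaders = True
#
#         def __init__(self, flag):
#             self.flag = flag
#
#         def _generate_cache_key(self, path):
#             # the flag fully describes this option's state
#             return ("HypotheticalFlagOption", self.flag)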
class LoaderStrategy(object):
"""Describe the loading behavior of a StrategizedProperty object.
The ``LoaderStrategy`` interacts with the querying process in three
ways:
* it controls the configuration of the ``InstrumentedAttribute``
placed on a class to handle the behavior of the attribute. this
may involve setting up class-level callable functions to fire
off a select operation when the attribute is first accessed
(i.e. a lazy load)
* it processes the ``QueryContext`` at statement construction time,
where it can modify the SQL statement that is being produced.
For example, simple column attributes will add their represented
column to the list of selected columns, a joined eager loader
may establish join clauses to add to the statement.
* It produces "row processor" functions at result fetching time.
These "row processor" functions populate a particular attribute
on a particular mapped instance.
"""
__slots__ = (
"parent_property",
"is_class_level",
"parent",
"key",
"strategy_key",
"strategy_opts",
)
def __init__(self, parent, strategy_key):
self.parent_property = parent
self.is_class_level = False
self.parent = self.parent_property.parent
self.key = self.parent_property.key
self.strategy_key = strategy_key
self.strategy_opts = dict(strategy_key)
def init_class_attribute(self, mapper):
pass
def setup_query(
self, context, query_entity, path, loadopt, adapter, **kwargs
):
"""Establish column and other state for a given QueryContext.
This method fulfills the contract specified by MapperProperty.setup().
StrategizedProperty delegates its setup() method
directly to this method.
"""
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
"""Establish row processing functions for a given QueryContext.
This method fulfills the contract specified by
MapperProperty.create_row_processor().
StrategizedProperty delegates its create_row_processor() method
directly to this method.
"""
def __str__(self):
return str(self.parent_property)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/instrumentation.py
|
# orm/instrumentation.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines SQLAlchemy's system of class instrumentation.
This module is usually not directly visible to user applications, but
defines a large part of the ORM's interactivity.
instrumentation.py deals with registration of end-user classes
for state tracking. It interacts closely with state.py
and attributes.py which establish per-instance and per-class-attribute
instrumentation, respectively.
The class instrumentation system can be customized on a per-class
or global basis using the :mod:`sqlalchemy.ext.instrumentation`
module, which provides the means to build and specify
alternate instrumentation forms.
.. versionchanged:: 0.8
The instrumentation extension system was moved out of the
ORM and into the external :mod:`sqlalchemy.ext.instrumentation`
package. When that package is imported, it installs
itself within sqlalchemy.orm so that its more comprehensive
resolution mechanics take effect.
"""
from . import base
from . import collections
from . import exc
from . import interfaces
from . import state
from .. import util
_memoized_key_collection = util.group_expirable_memoized_property()
class ClassManager(dict):
"""tracks state information at the class level."""
MANAGER_ATTR = base.DEFAULT_MANAGER_ATTR
STATE_ATTR = base.DEFAULT_STATE_ATTR
_state_setter = staticmethod(util.attrsetter(STATE_ATTR))
deferred_scalar_loader = None
original_init = object.__init__
factory = None
def __init__(self, class_):
self.class_ = class_
self.info = {}
self.new_init = None
self.local_attrs = {}
self.originals = {}
self._bases = [
mgr
for mgr in [
manager_of_class(base)
for base in self.class_.__bases__
if isinstance(base, type)
]
if mgr is not None
]
for base_ in self._bases:
self.update(base_)
self.dispatch._events._new_classmanager_instance(class_, self)
# events._InstanceEventsHold.populate(class_, self)
for basecls in class_.__mro__:
mgr = manager_of_class(basecls)
if mgr is not None:
self.dispatch._update(mgr.dispatch)
self.manage()
self._instrument_init()
if "__del__" in class_.__dict__:
util.warn(
"__del__() method on class %s will "
"cause unreachable cycles and memory leaks, "
"as SQLAlchemy instrumentation often creates "
"reference cycles. Please remove this method." % class_
)
def __hash__(self):
return id(self)
def __eq__(self, other):
return other is self
@property
def is_mapped(self):
return "mapper" in self.__dict__
@_memoized_key_collection
def _all_key_set(self):
return frozenset(self)
@_memoized_key_collection
def _collection_impl_keys(self):
return frozenset(
[attr.key for attr in self.values() if attr.impl.collection]
)
@_memoized_key_collection
def _scalar_loader_impls(self):
return frozenset(
[
attr.impl
for attr in self.values()
if attr.impl.accepts_scalar_loader
]
)
@util.memoized_property
def mapper(self):
# raises unless self.mapper has been assigned
raise exc.UnmappedClassError(self.class_)
def _all_sqla_attributes(self, exclude=None):
"""return an iterator of all classbound attributes that are
implement :class:`.InspectionAttr`.
This includes :class:`.QueryableAttribute` as well as extension
types such as :class:`.hybrid_property` and
:class:`.AssociationProxy`.
"""
if exclude is None:
exclude = set()
for supercls in self.class_.__mro__:
for key in set(supercls.__dict__).difference(exclude):
exclude.add(key)
val = supercls.__dict__[key]
if (
isinstance(val, interfaces.InspectionAttr)
and val.is_attribute
):
yield key, val
def _get_class_attr_mro(self, key, default=None):
"""return an attribute on the class without tripping it."""
for supercls in self.class_.__mro__:
if key in supercls.__dict__:
return supercls.__dict__[key]
else:
return default
def _attr_has_impl(self, key):
"""Return True if the given attribute is fully initialized.
i.e. has an impl.
"""
return key in self and self[key].impl is not None
def _subclass_manager(self, cls):
"""Create a new ClassManager for a subclass of this ClassManager's
class.
This is called automatically when attributes are instrumented so that
the attributes can be propagated to subclasses against their own
class-local manager, without the need for mappers etc. to have already
pre-configured managers for the full class hierarchy. Mappers
can post-configure the auto-generated ClassManager when needed.
"""
manager = manager_of_class(cls)
if manager is None:
manager = _instrumentation_factory.create_manager_for_cls(cls)
return manager
def _instrument_init(self):
# TODO: self.class_.__init__ is often the already-instrumented
# __init__ from an instrumented superclass. We still need to make
# our own wrapper, but it would
# be nice to wrap the original __init__ and not our existing wrapper
# of such, since this adds method overhead.
self.original_init = self.class_.__init__
self.new_init = _generate_init(self.class_, self)
self.install_member("__init__", self.new_init)
def _uninstrument_init(self):
if self.new_init:
self.uninstall_member("__init__")
self.new_init = None
@util.memoized_property
def _state_constructor(self):
self.dispatch.first_init(self, self.class_)
return state.InstanceState
def manage(self):
"""Mark this instance as the manager for its class."""
setattr(self.class_, self.MANAGER_ATTR, self)
def dispose(self):
"""Dissasociate this manager from its class."""
delattr(self.class_, self.MANAGER_ATTR)
@util.hybridmethod
def manager_getter(self):
return _default_manager_getter
@util.hybridmethod
def state_getter(self):
"""Return a (instance) -> InstanceState callable.
"state getter" callables should raise either KeyError or
AttributeError if no InstanceState could be found for the
instance.
"""
return _default_state_getter
@util.hybridmethod
def dict_getter(self):
return _default_dict_getter
def instrument_attribute(self, key, inst, propagated=False):
if propagated:
if key in self.local_attrs:
return # don't override local attr with inherited attr
else:
self.local_attrs[key] = inst
self.install_descriptor(key, inst)
_memoized_key_collection.expire_instance(self)
self[key] = inst
for cls in self.class_.__subclasses__():
manager = self._subclass_manager(cls)
manager.instrument_attribute(key, inst, True)
def subclass_managers(self, recursive):
for cls in self.class_.__subclasses__():
mgr = manager_of_class(cls)
if mgr is not None and mgr is not self:
yield mgr
if recursive:
for m in mgr.subclass_managers(True):
yield m
def post_configure_attribute(self, key):
_instrumentation_factory.dispatch.attribute_instrument(
self.class_, key, self[key]
)
def uninstrument_attribute(self, key, propagated=False):
if key not in self:
return
if propagated:
if key in self.local_attrs:
return # don't get rid of local attr
else:
del self.local_attrs[key]
self.uninstall_descriptor(key)
_memoized_key_collection.expire_instance(self)
del self[key]
for cls in self.class_.__subclasses__():
manager = manager_of_class(cls)
if manager:
manager.uninstrument_attribute(key, True)
def unregister(self):
"""remove all instrumentation established by this ClassManager."""
self._uninstrument_init()
self.mapper = self.dispatch = None
self.info.clear()
for key in list(self):
if key in self.local_attrs:
self.uninstrument_attribute(key)
def install_descriptor(self, key, inst):
if key in (self.STATE_ATTR, self.MANAGER_ATTR):
raise KeyError(
"%r: requested attribute name conflicts with "
"instrumentation attribute of the same name." % key
)
setattr(self.class_, key, inst)
def uninstall_descriptor(self, key):
delattr(self.class_, key)
def install_member(self, key, implementation):
if key in (self.STATE_ATTR, self.MANAGER_ATTR):
raise KeyError(
"%r: requested attribute name conflicts with "
"instrumentation attribute of the same name." % key
)
self.originals.setdefault(key, getattr(self.class_, key, None))
setattr(self.class_, key, implementation)
def uninstall_member(self, key):
original = self.originals.pop(key, None)
if original is not None:
setattr(self.class_, key, original)
def instrument_collection_class(self, key, collection_class):
return collections.prepare_instrumentation(collection_class)
def initialize_collection(self, key, state, factory):
user_data = factory()
adapter = collections.CollectionAdapter(
self.get_impl(key), state, user_data
)
return adapter, user_data
def is_instrumented(self, key, search=False):
if search:
return key in self
else:
return key in self.local_attrs
def get_impl(self, key):
return self[key].impl
@property
def attributes(self):
return iter(self.values())
# InstanceState management
def new_instance(self, state=None):
instance = self.class_.__new__(self.class_)
if state is None:
state = self._state_constructor(instance, self)
self._state_setter(instance, state)
return instance
def setup_instance(self, instance, state=None):
if state is None:
state = self._state_constructor(instance, self)
self._state_setter(instance, state)
def teardown_instance(self, instance):
delattr(instance, self.STATE_ATTR)
def _serialize(self, state, state_dict):
return _SerializeManager(state, state_dict)
def _new_state_if_none(self, instance):
"""Install a default InstanceState if none is present.
A private convenience method used by the __init__ decorator.
"""
if hasattr(instance, self.STATE_ATTR):
return False
elif self.class_ is not instance.__class__ and self.is_mapped:
# this will create a new ClassManager for the
# subclass, without a mapper. This is likely a
# user error situation but allow the object
# to be constructed, so that it is usable
# in a non-ORM context at least.
return self._subclass_manager(
instance.__class__
)._new_state_if_none(instance)
else:
state = self._state_constructor(instance, self)
self._state_setter(instance, state)
return state
def has_state(self, instance):
return hasattr(instance, self.STATE_ATTR)
def has_parent(self, state, key, optimistic=False):
"""TODO"""
return self.get_impl(key).hasparent(state, optimistic=optimistic)
def __bool__(self):
"""All ClassManagers are non-zero regardless of attribute state."""
return True
__nonzero__ = __bool__
def __repr__(self):
return "<%s of %r at %x>" % (
self.__class__.__name__,
self.class_,
id(self),
)
class _SerializeManager(object):
"""Provide serialization of a :class:`.ClassManager`.
The :class:`.InstanceState` uses ``__init__()`` on serialize
and ``__call__()`` on deserialize.
"""
def __init__(self, state, d):
self.class_ = state.class_
manager = state.manager
manager.dispatch.pickle(state, d)
def __call__(self, state, inst, state_dict):
state.manager = manager = manager_of_class(self.class_)
if manager is None:
raise exc.UnmappedInstanceError(
inst,
"Cannot deserialize object of type %r - "
"no mapper() has "
"been configured for this class within the current "
"Python process!" % self.class_,
)
elif manager.is_mapped and not manager.mapper.configured:
manager.mapper._configure_all()
# setup _sa_instance_state ahead of time so that
# unpickle events can access the object normally.
# see [ticket:2362]
if inst is not None:
manager.setup_instance(inst, state)
manager.dispatch.unpickle(state, state_dict)
class InstrumentationFactory(object):
"""Factory for new ClassManager instances."""
def create_manager_for_cls(self, class_):
assert class_ is not None
assert manager_of_class(class_) is None
# give a more complicated subclass
# a chance to do what it wants here
manager, factory = self._locate_extended_factory(class_)
if factory is None:
factory = ClassManager
manager = factory(class_)
self._check_conflicts(class_, factory)
manager.factory = factory
self.dispatch.class_instrument(class_)
return manager
def _locate_extended_factory(self, class_):
"""Overridden by a subclass to do an extended lookup."""
return None, None
def _check_conflicts(self, class_, factory):
"""Overridden by a subclass to test for conflicting factories."""
return
def unregister(self, class_):
manager = manager_of_class(class_)
manager.unregister()
manager.dispose()
self.dispatch.class_uninstrument(class_)
if ClassManager.MANAGER_ATTR in class_.__dict__:
delattr(class_, ClassManager.MANAGER_ATTR)
# this attribute is replaced by sqlalchemy.ext.instrumentation
# when imported.
_instrumentation_factory = InstrumentationFactory()
# these attributes are replaced by sqlalchemy.ext.instrumentation
# when a non-standard InstrumentationManager class is first
# used to instrument a class.
instance_state = _default_state_getter = base.instance_state
instance_dict = _default_dict_getter = base.instance_dict
manager_of_class = _default_manager_getter = base.manager_of_class
def register_class(class_):
"""Register class instrumentation.
Returns the existing or newly created class manager.
"""
manager = manager_of_class(class_)
if manager is None:
manager = _instrumentation_factory.create_manager_for_cls(class_)
return manager
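# Illustrative usage sketch, not part of the module: register_class() is what
# mapper() ultimately goes through to instrument a class; calling it directly
# is rare but possible:
#
#     from sqlalchemy.orm.instrumentation import register_class
#
#     class Plain(object):
#         pass
#
#     manager = register_class(Plain)
#     # Plain now carries the class manager under base.DEFAULT_MANAGER_ATTR
#     # (normally "_sa_class_manager") and its __init__ has been wrapped.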
def unregister_class(class_):
"""Unregister class instrumentation."""
_instrumentation_factory.unregister(class_)
def is_instrumented(instance, key):
"""Return True if the given attribute on the given instance is
instrumented by the attributes package.
This function may be used regardless of instrumentation
applied directly to the class, i.e. no descriptors are required.
"""
return manager_of_class(instance.__class__).is_instrumented(
key, search=True
)
def _generate_init(class_, class_manager):
"""Build an __init__ decorator that triggers ClassManager events."""
# TODO: we should use the ClassManager's notion of the
# original '__init__' method, once ClassManager is fixed
# to always reference that.
original__init__ = class_.__init__
assert original__init__
# Go through some effort here and don't change the user's __init__
# calling signature, including the unlikely case that it has
# a return value.
# FIXME: need to juggle local names to avoid constructor argument
# clashes.
func_body = """\
def __init__(%(apply_pos)s):
new_state = class_manager._new_state_if_none(%(self_arg)s)
if new_state:
return new_state._initialize_instance(%(apply_kw)s)
else:
return original__init__(%(apply_kw)s)
"""
func_vars = util.format_argspec_init(original__init__, grouped=False)
func_text = func_body % func_vars
if util.py2k:
func = getattr(original__init__, "im_func", original__init__)
func_defaults = getattr(func, "func_defaults", None)
else:
func_defaults = getattr(original__init__, "__defaults__", None)
func_kw_defaults = getattr(original__init__, "__kwdefaults__", None)
env = locals().copy()
exec(func_text, env)
__init__ = env["__init__"]
__init__.__doc__ = original__init__.__doc__
__init__._sa_original_init = original__init__
if func_defaults:
__init__.__defaults__ = func_defaults
if not util.py2k and func_kw_defaults:
__init__.__kwdefaults__ = func_kw_defaults
return __init__
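# Illustrative sketch, not part of the module: for a user class defined as
#
#     class User(object):
#         def __init__(self, name, fullname=None):
#             ...
#
# the template above expands roughly to
#
#     def __init__(self, name, fullname):
#         new_state = class_manager._new_state_if_none(self)
#         if new_state:
#             return new_state._initialize_instance(self, name, fullname=fullname)
#         else:
#             return original__init__(self, name, fullname=fullname)
#
# with the original __defaults__ / __kwdefaults__ re-attached to the wrapper
# afterwards, so the user-visible calling signature is preserved while an
# InstanceState is guaranteed to exist before the original __init__ runs.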
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/loading.py
|
# orm/loading.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to convert database
rows into object instances and associated state.
the functions here are called primarily by Query, Mapper,
as well as some of the attribute loading strategies.
"""
from __future__ import absolute_import
import collections
from . import attributes
from . import exc as orm_exc
from . import path_registry
from . import strategy_options
from .base import _DEFER_FOR_STATE
from .base import _SET_DEFERRED_EXPIRED
from .util import _none_set
from .util import state_str
from .. import exc as sa_exc
from .. import util
from ..sql import util as sql_util
_new_runid = util.counter()
def instances(query, cursor, context):
"""Return an ORM result as an iterator."""
context.runid = _new_runid()
context.post_load_paths = {}
filtered = query._has_mapper_entities
single_entity = query.is_single_entity
if filtered:
if single_entity:
filter_fn = id
else:
def filter_fn(row):
return tuple(
id(item) if ent.use_id_for_hash else item
for ent, item in zip(query._entities, row)
)
try:
(process, labels) = list(
zip(
*[
query_entity.row_processor(query, context, cursor)
for query_entity in query._entities
]
)
)
if not single_entity:
keyed_tuple = util.lightweight_named_tuple("result", labels)
while True:
context.partials = {}
if query._yield_per:
fetch = cursor.fetchmany(query._yield_per)
if not fetch:
break
else:
fetch = cursor.fetchall()
if single_entity:
proc = process[0]
rows = [proc(row) for row in fetch]
else:
rows = [
keyed_tuple([proc(row) for proc in process])
for row in fetch
]
for path, post_load in context.post_load_paths.items():
post_load.invoke(context, path)
if filtered:
rows = util.unique_list(rows, filter_fn)
for row in rows:
yield row
if not query._yield_per:
break
except Exception:
with util.safe_reraise():
cursor.close()
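# Illustrative note, not part of the module: Query.yield_per(N) sets
# query._yield_per, which makes the loop above call cursor.fetchmany(N)
# repeatedly instead of a single fetchall(), so rows are converted into ORM
# objects in batches, e.g.:
#
#     for user in session.query(User).yield_per(100):
#         process(user)   # "process" and "User" are placeholder names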
@util.dependencies("sqlalchemy.orm.query")
def merge_result(querylib, query, iterator, load=True):
"""Merge a result into this :class:`_query.Query` object's Session."""
session = query.session
if load:
# flush current contents if we expect to load data
session._autoflush()
autoflush = session.autoflush
try:
session.autoflush = False
single_entity = len(query._entities) == 1
if single_entity:
if isinstance(query._entities[0], querylib._MapperEntity):
result = [
session._merge(
attributes.instance_state(instance),
attributes.instance_dict(instance),
load=load,
_recursive={},
_resolve_conflict_map={},
)
for instance in iterator
]
else:
result = list(iterator)
else:
mapped_entities = [
i
for i, e in enumerate(query._entities)
if isinstance(e, querylib._MapperEntity)
]
result = []
keys = [ent._label_name for ent in query._entities]
keyed_tuple = util.lightweight_named_tuple("result", keys)
for row in iterator:
newrow = list(row)
for i in mapped_entities:
if newrow[i] is not None:
newrow[i] = session._merge(
attributes.instance_state(newrow[i]),
attributes.instance_dict(newrow[i]),
load=load,
_recursive={},
_resolve_conflict_map={},
)
result.append(keyed_tuple(newrow))
return iter(result)
finally:
session.autoflush = autoflush
def get_from_identity(session, mapper, key, passive):
"""Look up the given key in the given session's identity map,
check the object for expired state if found.
"""
instance = session.identity_map.get(key)
if instance is not None:
state = attributes.instance_state(instance)
if mapper.inherits and not state.mapper.isa(mapper):
return attributes.PASSIVE_CLASS_MISMATCH
# expired - ensure it still exists
if state.expired:
if not passive & attributes.SQL_OK:
# TODO: no coverage here
return attributes.PASSIVE_NO_RESULT
elif not passive & attributes.RELATED_OBJECT_OK:
# this mode is used within a flush and the instance's
# expired state will be checked soon enough, if necessary
return instance
try:
state._load_expired(state, passive)
except orm_exc.ObjectDeletedError:
session._remove_newly_deleted([state])
return None
return instance
else:
return None
def load_on_ident(
query, key, refresh_state=None, with_for_update=None, only_load_props=None
):
"""Load the given identity key from the database."""
if key is not None:
ident = key[1]
identity_token = key[2]
else:
ident = identity_token = None
return load_on_pk_identity(
query,
ident,
refresh_state=refresh_state,
with_for_update=with_for_update,
only_load_props=only_load_props,
identity_token=identity_token,
)
def load_on_pk_identity(
query,
primary_key_identity,
refresh_state=None,
with_for_update=None,
only_load_props=None,
identity_token=None,
):
"""Load the given primary key identity from the database."""
if refresh_state is None:
q = query._clone()
q._get_condition()
else:
q = query._clone()
if primary_key_identity is not None:
mapper = query._mapper_zero()
(_get_clause, _get_params) = mapper._get_clause
# None present in ident - turn those comparisons
# into "IS NULL"
if None in primary_key_identity:
nones = set(
[
_get_params[col].key
for col, value in zip(
mapper.primary_key, primary_key_identity
)
if value is None
]
)
_get_clause = sql_util.adapt_criterion_to_null(_get_clause, nones)
if len(nones) == len(primary_key_identity):
util.warn(
"fully NULL primary key identity cannot load any "
"object. This condition may raise an error in a future "
"release."
)
_get_clause = q._adapt_clause(_get_clause, True, False)
q._criterion = _get_clause
params = dict(
[
(_get_params[primary_key].key, id_val)
for id_val, primary_key in zip(
primary_key_identity, mapper.primary_key
)
]
)
q._params = params
# with_for_update needs to be query.LockmodeArg()
if with_for_update is not None:
version_check = True
q._for_update_arg = with_for_update
elif query._for_update_arg is not None:
version_check = True
q._for_update_arg = query._for_update_arg
else:
version_check = False
q._get_options(
populate_existing=bool(refresh_state),
version_check=version_check,
only_load_props=only_load_props,
refresh_state=refresh_state,
identity_token=identity_token,
)
q._order_by = None
try:
return q.one()
except orm_exc.NoResultFound:
return None
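# Illustrative note, not part of the module: load_on_pk_identity() is the
# routine behind Query.get().  A call such as
#
#     session.query(User).get(5)
#
# arrives here with primary_key_identity=(5,); the mapper's cached
# "_get_clause" criterion (roughly users.id = :pk_1 for a single-column key)
# is parameterized with that value and executed via q.one() above.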
def _setup_entity_query(
context,
mapper,
query_entity,
path,
adapter,
column_collection,
with_polymorphic=None,
only_load_props=None,
polymorphic_discriminator=None,
**kw
):
if with_polymorphic:
poly_properties = mapper._iterate_polymorphic_properties(
with_polymorphic
)
else:
poly_properties = mapper._polymorphic_properties
quick_populators = {}
path.set(context.attributes, "memoized_setups", quick_populators)
for value in poly_properties:
if only_load_props and value.key not in only_load_props:
continue
value.setup(
context,
query_entity,
path,
adapter,
only_load_props=only_load_props,
column_collection=column_collection,
memoized_populators=quick_populators,
**kw
)
if (
polymorphic_discriminator is not None
and polymorphic_discriminator is not mapper.polymorphic_on
):
if adapter:
pd = adapter.columns[polymorphic_discriminator]
else:
pd = polymorphic_discriminator
column_collection.append(pd)
def _warn_for_runid_changed(state):
util.warn(
"Loading context for %s has changed within a load/refresh "
"handler, suggesting a row refresh operation took place. If this "
"event handler is expected to be "
"emitting row refresh operations within an existing load or refresh "
"operation, set restore_load_context=True when establishing the "
"listener to ensure the context remains unchanged when the event "
"handler completes." % (state_str(state),)
)
def _instance_processor(
mapper,
context,
result,
path,
adapter,
only_load_props=None,
refresh_state=None,
polymorphic_discriminator=None,
_polymorphic_from=None,
):
"""Produce a mapper level row processor callable
which processes rows into mapped instances."""
# note that this method, most of which exists in a closure
# called _instance(), resists being broken out, as
# attempts to do so tend to add significant function
# call overhead. _instance() is the most
# performance-critical section in the whole ORM.
pk_cols = mapper.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
identity_class = mapper._identity_class
populators = collections.defaultdict(list)
props = mapper._prop_set
if only_load_props is not None:
props = props.intersection(mapper._props[k] for k in only_load_props)
quick_populators = path.get(
context.attributes, "memoized_setups", _none_set
)
for prop in props:
if prop in quick_populators:
# this is an inlined path just for column-based attributes.
col = quick_populators[prop]
if col is _DEFER_FOR_STATE:
populators["new"].append(
(prop.key, prop._deferred_column_loader)
)
elif col is _SET_DEFERRED_EXPIRED:
# note that in this path, we are no longer
# searching in the result to see if the column might
# be present in some unexpected way.
populators["expire"].append((prop.key, False))
else:
getter = None
# the "adapter" can be here via different paths,
# e.g. via adapter present at setup_query or adapter
# applied to the query afterwards via eager load subquery.
# If the column here
# were already a product of this adapter, sending it through
# the adapter again can return a totally new expression that
# won't be recognized in the result, and the ColumnAdapter
# currently does not accommodate for this. OTOH, if the
# column were never applied through this adapter, we may get
# None back, in which case we still won't get our "getter".
# so try both against result._getter(). See issue #4048
if adapter:
adapted_col = adapter.columns[col]
if adapted_col is not None:
getter = result._getter(adapted_col, False)
if not getter:
getter = result._getter(col, False)
if getter:
populators["quick"].append((prop.key, getter))
else:
# fall back to the ColumnProperty itself, which
# will iterate through all of its columns
# to see if one fits
prop.create_row_processor(
context, path, mapper, result, adapter, populators
)
else:
prop.create_row_processor(
context, path, mapper, result, adapter, populators
)
propagate_options = context.propagate_options
load_path = (
context.query._current_path + path
if context.query._current_path.path
else path
)
session_identity_map = context.session.identity_map
populate_existing = context.populate_existing or mapper.always_refresh
load_evt = bool(mapper.class_manager.dispatch.load)
refresh_evt = bool(mapper.class_manager.dispatch.refresh)
persistent_evt = bool(context.session.dispatch.loaded_as_persistent)
if persistent_evt:
loaded_as_persistent = context.session.dispatch.loaded_as_persistent
instance_state = attributes.instance_state
instance_dict = attributes.instance_dict
session_id = context.session.hash_key
version_check = context.version_check
runid = context.runid
identity_token = context.identity_token
if not refresh_state and _polymorphic_from is not None:
key = ("loader", path.path)
if key in context.attributes and context.attributes[key].strategy == (
("selectinload_polymorphic", True),
):
selectin_load_via = mapper._should_selectin_load(
context.attributes[key].local_opts["entities"],
_polymorphic_from,
)
else:
selectin_load_via = mapper._should_selectin_load(
None, _polymorphic_from
)
if selectin_load_via and selectin_load_via is not _polymorphic_from:
# only_load_props goes w/ refresh_state only, and in a refresh
# we are a single row query for the exact entity; polymorphic
# loading does not apply
assert only_load_props is None
callable_ = _load_subclass_via_in(context, path, selectin_load_via)
PostLoad.callable_for_path(
context,
load_path,
selectin_load_via.mapper,
selectin_load_via,
callable_,
selectin_load_via,
)
post_load = PostLoad.for_context(context, load_path, only_load_props)
if refresh_state:
refresh_identity_key = refresh_state.key
if refresh_identity_key is None:
# super-rare condition; a refresh is being called
# on a non-instance-key instance; this is meant to only
# occur within a flush()
refresh_identity_key = mapper._identity_key_from_state(
refresh_state
)
else:
refresh_identity_key = None
if mapper.allow_partial_pks:
is_not_primary_key = _none_set.issuperset
else:
is_not_primary_key = _none_set.intersection
def _instance(row):
# determine the state that we'll be populating
if refresh_identity_key:
# fixed state that we're refreshing
state = refresh_state
instance = state.obj()
dict_ = instance_dict(instance)
isnew = state.runid != runid
currentload = True
loaded_instance = False
else:
# look at the row, see if that identity is in the
# session, or we have to create a new one
identitykey = (
identity_class,
tuple([row[column] for column in pk_cols]),
identity_token,
)
instance = session_identity_map.get(identitykey)
if instance is not None:
# existing instance
state = instance_state(instance)
dict_ = instance_dict(instance)
isnew = state.runid != runid
currentload = not isnew
loaded_instance = False
if version_check and not currentload:
_validate_version_id(mapper, state, dict_, row, adapter)
else:
# create a new instance
# check for non-NULL values in the primary key columns,
# else no entity is returned for the row
if is_not_primary_key(identitykey[1]):
return None
isnew = True
currentload = True
loaded_instance = True
instance = mapper.class_manager.new_instance()
dict_ = instance_dict(instance)
state = instance_state(instance)
state.key = identitykey
state.identity_token = identity_token
# attach instance to session.
state.session_id = session_id
session_identity_map._add_unpresent(state, identitykey)
# populate. this looks at whether this state is new
# for this load or was existing, and whether or not this
# row is the first row with this identity.
if currentload or populate_existing:
# full population routines. Objects here are either
# just created, or we are doing a populate_existing
# be conservative about setting load_path when populate_existing
# is in effect; want to maintain options from the original
# load. see test_expire->test_refresh_maintains_deferred_options
if isnew and (propagate_options or not populate_existing):
state.load_options = propagate_options
state.load_path = load_path
_populate_full(
context,
row,
state,
dict_,
isnew,
load_path,
loaded_instance,
populate_existing,
populators,
)
if isnew:
# state.runid should be equal to context.runid / runid
# here, however for event checks we are being more conservative
# and checking against existing run id
# assert state.runid == runid
existing_runid = state.runid
if loaded_instance:
if load_evt:
state.manager.dispatch.load(state, context)
if state.runid != existing_runid:
_warn_for_runid_changed(state)
if persistent_evt:
loaded_as_persistent(context.session, state)
if state.runid != existing_runid:
_warn_for_runid_changed(state)
elif refresh_evt:
state.manager.dispatch.refresh(
state, context, only_load_props
)
if state.runid != runid:
_warn_for_runid_changed(state)
if populate_existing or state.modified:
if refresh_state and only_load_props:
state._commit(dict_, only_load_props)
else:
state._commit_all(dict_, session_identity_map)
if post_load:
post_load.add_state(state, True)
else:
# partial population routines, for objects that were already
# in the Session, but a row matches them; apply eager loaders
# on existing objects, etc.
unloaded = state.unloaded
isnew = state not in context.partials
if not isnew or unloaded or populators["eager"]:
# state is having a partial set of its attributes
# refreshed. Populate those attributes,
# and add to the "context.partials" collection.
to_load = _populate_partial(
context,
row,
state,
dict_,
isnew,
load_path,
unloaded,
populators,
)
if isnew:
if refresh_evt:
existing_runid = state.runid
state.manager.dispatch.refresh(state, context, to_load)
if state.runid != existing_runid:
_warn_for_runid_changed(state)
state._commit(dict_, to_load)
if post_load and context.invoke_all_eagers:
post_load.add_state(state, False)
return instance
if mapper.polymorphic_map and not _polymorphic_from and not refresh_state:
# if we are doing polymorphic, dispatch to a different _instance()
# method specific to the subclass mapper
_instance = _decorate_polymorphic_switch(
_instance,
context,
mapper,
result,
path,
polymorphic_discriminator,
adapter,
)
return _instance
def _load_subclass_via_in(context, path, entity):
mapper = entity.mapper
zero_idx = len(mapper.base_mapper.primary_key) == 1
if entity.is_aliased_class:
q, enable_opt, disable_opt = mapper._subclass_load_via_in(entity)
else:
q, enable_opt, disable_opt = mapper._subclass_load_via_in_mapper
def do_load(context, path, states, load_only, effective_entity):
orig_query = context.query
q2 = q._with_lazyload_options(
(enable_opt,) + orig_query._with_options + (disable_opt,),
path.parent,
cache_path=path,
)
if orig_query._populate_existing:
q2.add_criteria(lambda q: q.populate_existing())
q2(context.session).params(
primary_keys=[
state.key[1][0] if zero_idx else state.key[1]
for state, load_attrs in states
]
).all()
return do_load
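# _populate_full applies the complete set of populators to a state: on the
# first row for an identity it runs the "quick", "expire", "new" and "delayed"
# populators; later rows with the same identity go through the "existing"
# populators, with a new load path additionally filling in any missing
# "quick" column values.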
def _populate_full(
context,
row,
state,
dict_,
isnew,
load_path,
loaded_instance,
populate_existing,
populators,
):
if isnew:
# first time we are seeing a row with this identity.
state.runid = context.runid
for key, getter in populators["quick"]:
dict_[key] = getter(row)
if populate_existing:
for key, set_callable in populators["expire"]:
dict_.pop(key, None)
if set_callable:
state.expired_attributes.add(key)
else:
for key, set_callable in populators["expire"]:
if set_callable:
state.expired_attributes.add(key)
for key, populator in populators["new"]:
populator(state, dict_, row)
for key, populator in populators["delayed"]:
populator(state, dict_, row)
elif load_path != state.load_path:
# new load path, e.g. object is present in more than one
# column position in a series of rows
state.load_path = load_path
# if we have data, and the data isn't in the dict, OK, let's put
# it in.
for key, getter in populators["quick"]:
if key not in dict_:
dict_[key] = getter(row)
# otherwise treat like an "already seen" row
for key, populator in populators["existing"]:
populator(state, dict_, row)
# TODO: allow "existing" populator to know this is
# a new path for the state:
# populator(state, dict_, row, new_path=True)
else:
# have already seen rows with this identity in this same path.
for key, populator in populators["existing"]:
populator(state, dict_, row)
# TODO: same path
# populator(state, dict_, row, new_path=False)
def _populate_partial(
context, row, state, dict_, isnew, load_path, unloaded, populators
):
if not isnew:
to_load = context.partials[state]
for key, populator in populators["existing"]:
if key in to_load:
populator(state, dict_, row)
else:
to_load = unloaded
context.partials[state] = to_load
for key, getter in populators["quick"]:
if key in to_load:
dict_[key] = getter(row)
for key, set_callable in populators["expire"]:
if key in to_load:
dict_.pop(key, None)
if set_callable:
state.expired_attributes.add(key)
for key, populator in populators["new"]:
if key in to_load:
populator(state, dict_, row)
for key, populator in populators["delayed"]:
if key in to_load:
populator(state, dict_, row)
for key, populator in populators["eager"]:
if key not in unloaded:
populator(state, dict_, row)
return to_load
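# _validate_version_id compares the version id currently recorded on an
# already-loaded object against the value in the incoming row and raises
# StaleDataError when they differ.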
def _validate_version_id(mapper, state, dict_, row, adapter):
version_id_col = mapper.version_id_col
if version_id_col is None:
return
if adapter:
version_id_col = adapter.columns[version_id_col]
if (
mapper._get_state_attr_by_column(state, dict_, mapper.version_id_col)
!= row[version_id_col]
):
raise orm_exc.StaleDataError(
"Instance '%s' has version id '%s' which "
"does not match database-loaded version id '%s'."
% (
state_str(state),
mapper._get_state_attr_by_column(
state, dict_, mapper.version_id_col
),
row[version_id_col],
)
)
def _decorate_polymorphic_switch(
instance_fn,
context,
mapper,
result,
path,
polymorphic_discriminator,
adapter,
):
if polymorphic_discriminator is not None:
polymorphic_on = polymorphic_discriminator
else:
polymorphic_on = mapper.polymorphic_on
if polymorphic_on is None:
return instance_fn
if adapter:
polymorphic_on = adapter.columns[polymorphic_on]
def configure_subclass_mapper(discriminator):
try:
sub_mapper = mapper.polymorphic_map[discriminator]
except KeyError:
raise AssertionError(
"No such polymorphic_identity %r is defined" % discriminator
)
else:
if sub_mapper is mapper:
return None
return _instance_processor(
sub_mapper,
context,
result,
path,
adapter,
_polymorphic_from=mapper,
)
polymorphic_instances = util.PopulateDict(configure_subclass_mapper)
def polymorphic_instance(row):
discriminator = row[polymorphic_on]
if discriminator is not None:
_instance = polymorphic_instances[discriminator]
if _instance:
return _instance(row)
return instance_fn(row)
return polymorphic_instance
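# The polymorphic switch dispatches each row to an _instance() callable built
# for the subclass mapper named by the row's discriminator value; rows whose
# discriminator is NULL, or that map back to the base mapper itself, fall
# through to the default instance_fn.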
class PostLoad(object):
"""Track loaders and states for "post load" operations.
"""
__slots__ = "loaders", "states", "load_keys"
def __init__(self):
self.loaders = {}
self.states = util.OrderedDict()
self.load_keys = None
def add_state(self, state, overwrite):
# the states for a polymorphic load here are all shared
# within a single PostLoad object among multiple subtypes.
# Filtering of callables on a per-subclass basis needs to be done at
# the invocation level
self.states[state] = overwrite
def invoke(self, context, path):
if not self.states:
return
path = path_registry.PathRegistry.coerce(path)
for token, limit_to_mapper, loader, arg, kw in self.loaders.values():
states = [
(state, overwrite)
for state, overwrite in self.states.items()
if state.manager.mapper.isa(limit_to_mapper)
]
if states:
loader(context, path, states, self.load_keys, *arg, **kw)
self.states.clear()
@classmethod
def for_context(cls, context, path, only_load_props):
pl = context.post_load_paths.get(path.path)
if pl is not None and only_load_props:
pl.load_keys = only_load_props
return pl
@classmethod
    def path_exists(cls, context, path, key):
return (
path.path in context.post_load_paths
and key in context.post_load_paths[path.path].loaders
)
@classmethod
def callable_for_path(
cls, context, path, limit_to_mapper, token, loader_callable, *arg, **kw
):
if path.path in context.post_load_paths:
pl = context.post_load_paths[path.path]
else:
pl = context.post_load_paths[path.path] = PostLoad()
pl.loaders[token] = (token, limit_to_mapper, loader_callable, arg, kw)
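# PostLoad objects are keyed by load path within context.post_load_paths;
# once the primary rows have been processed, invoke() calls each registered
# loader with the accumulated states, filtered to the mapper that the loader
# was registered against.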
def load_scalar_attributes(mapper, state, attribute_names):
"""initiate a column-based attribute refresh operation."""
# assert mapper is _state_mapper(state)
session = state.session
if not session:
raise orm_exc.DetachedInstanceError(
"Instance %s is not bound to a Session; "
"attribute refresh operation cannot proceed" % (state_str(state))
)
has_key = bool(state.key)
result = False
# in the case of inheritance, particularly concrete and abstract
# concrete inheritance, the class manager might have some keys
# of attributes on the superclass that we didn't actually map.
# These could be mapped as "concrete, dont load" or could be completely
# exluded from the mapping and we know nothing about them. Filter them
# here to prevent them from coming through.
if attribute_names:
attribute_names = attribute_names.intersection(mapper.attrs.keys())
if mapper.inherits and not mapper.concrete:
# because we are using Core to produce a select() that we
# pass to the Query, we aren't calling setup() for mapped
# attributes; in 1.0 this means deferred attrs won't get loaded
# by default
statement = mapper._optimized_get_statement(state, attribute_names)
if statement is not None:
result = load_on_ident(
session.query(mapper)
.options(strategy_options.Load(mapper).undefer("*"))
.from_statement(statement),
None,
only_load_props=attribute_names,
refresh_state=state,
)
if result is False:
if has_key:
identity_key = state.key
else:
# this codepath is rare - only valid when inside a flush, and the
# object is becoming persistent but hasn't yet been assigned
# an identity_key.
# check here to ensure we have the attrs we need.
pk_attrs = [
mapper._columntoproperty[col].key for col in mapper.primary_key
]
if state.expired_attributes.intersection(pk_attrs):
raise sa_exc.InvalidRequestError(
"Instance %s cannot be refreshed - it's not "
" persistent and does not "
"contain a full primary key." % state_str(state)
)
identity_key = mapper._identity_key_from_state(state)
if (
_none_set.issubset(identity_key) and not mapper.allow_partial_pks
) or _none_set.issuperset(identity_key):
util.warn_limited(
"Instance %s to be refreshed doesn't "
"contain a full primary key - can't be refreshed "
"(and shouldn't be expired, either).",
state_str(state),
)
return
result = load_on_ident(
session.query(mapper),
identity_key,
refresh_state=state,
only_load_props=attribute_names,
)
# if instance is pending, a refresh operation
# may not complete (even if PK attributes are assigned)
if has_key and result is None:
raise orm_exc.ObjectDeletedError(state)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/persistence.py
|
# orm/persistence.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to emit INSERT, UPDATE
and DELETE statements on behalf of a :class:`_orm.Mapper` and its descending
mappers.
The functions here are called only by the unit of work functions
in unitofwork.py.
"""
from itertools import chain
from itertools import groupby
import operator
from . import attributes
from . import evaluator
from . import exc as orm_exc
from . import loading
from . import sync
from .base import _entity_descriptor
from .base import state_str
from .. import exc as sa_exc
from .. import sql
from .. import util
from ..sql import expression
from ..sql.base import _from_objects
def _bulk_insert(
mapper,
mappings,
session_transaction,
isstates,
return_defaults,
render_nulls,
):
base_mapper = mapper.base_mapper
cached_connections = _cached_connection_dict(base_mapper)
if session_transaction.session.connection_callable:
raise NotImplementedError(
"connection_callable / per-instance sharding "
"not supported in bulk_insert()"
)
if isstates:
if return_defaults:
states = [(state, state.dict) for state in mappings]
mappings = [dict_ for (state, dict_) in states]
else:
mappings = [state.dict for state in mappings]
else:
mappings = list(mappings)
connection = session_transaction.connection(base_mapper)
for table, super_mapper in base_mapper._sorted_tables.items():
if not mapper.isa(super_mapper):
continue
records = (
(
None,
state_dict,
params,
mapper,
connection,
value_params,
has_all_pks,
has_all_defaults,
)
for (
state,
state_dict,
params,
mp,
conn,
value_params,
has_all_pks,
has_all_defaults,
) in _collect_insert_commands(
table,
((None, mapping, mapper, connection) for mapping in mappings),
bulk=True,
return_defaults=return_defaults,
render_nulls=render_nulls,
)
)
_emit_insert_statements(
base_mapper,
None,
cached_connections,
super_mapper,
table,
records,
bookkeeping=return_defaults,
)
if return_defaults and isstates:
identity_cls = mapper._identity_class
identity_props = [p.key for p in mapper._identity_key_props]
for state, dict_ in states:
state.key = (
identity_cls,
tuple([dict_[key] for key in identity_props]),
)
def _bulk_update(
mapper, mappings, session_transaction, isstates, update_changed_only
):
base_mapper = mapper.base_mapper
cached_connections = _cached_connection_dict(base_mapper)
search_keys = mapper._primary_key_propkeys
if mapper._version_id_prop:
search_keys = {mapper._version_id_prop.key}.union(search_keys)
def _changed_dict(mapper, state):
return dict(
(k, v)
for k, v in state.dict.items()
if k in state.committed_state or k in search_keys
)
if isstates:
if update_changed_only:
mappings = [_changed_dict(mapper, state) for state in mappings]
else:
mappings = [state.dict for state in mappings]
else:
mappings = list(mappings)
if session_transaction.session.connection_callable:
raise NotImplementedError(
"connection_callable / per-instance sharding "
"not supported in bulk_update()"
)
connection = session_transaction.connection(base_mapper)
for table, super_mapper in base_mapper._sorted_tables.items():
if not mapper.isa(super_mapper):
continue
records = _collect_update_commands(
None,
table,
(
(
None,
mapping,
mapper,
connection,
(
mapping[mapper._version_id_prop.key]
if mapper._version_id_prop
else None
),
)
for mapping in mappings
),
bulk=True,
)
_emit_update_statements(
base_mapper,
None,
cached_connections,
super_mapper,
table,
records,
bookkeeping=False,
)
def save_obj(base_mapper, states, uowtransaction, single=False):
"""Issue ``INSERT`` and/or ``UPDATE`` statements for a list
of objects.
This is called within the context of a UOWTransaction during a
flush operation, given a list of states to be flushed. The
base mapper in an inheritance hierarchy handles the inserts/
updates for all descendant mappers.
"""
# if batch=false, call _save_obj separately for each object
if not single and not base_mapper.batch:
for state in _sort_states(base_mapper, states):
save_obj(base_mapper, [state], uowtransaction, single=True)
return
states_to_update = []
states_to_insert = []
cached_connections = _cached_connection_dict(base_mapper)
for (
state,
dict_,
mapper,
connection,
has_identity,
row_switch,
update_version_id,
) in _organize_states_for_save(base_mapper, states, uowtransaction):
if has_identity or row_switch:
states_to_update.append(
(state, dict_, mapper, connection, update_version_id)
)
else:
states_to_insert.append((state, dict_, mapper, connection))
for table, mapper in base_mapper._sorted_tables.items():
if table not in mapper._pks_by_table:
continue
insert = _collect_insert_commands(table, states_to_insert)
update = _collect_update_commands(
uowtransaction, table, states_to_update
)
_emit_update_statements(
base_mapper,
uowtransaction,
cached_connections,
mapper,
table,
update,
)
_emit_insert_statements(
base_mapper,
uowtransaction,
cached_connections,
mapper,
table,
insert,
)
_finalize_insert_update_commands(
base_mapper,
uowtransaction,
chain(
(
(state, state_dict, mapper, connection, False)
for (state, state_dict, mapper, connection) in states_to_insert
),
(
(state, state_dict, mapper, connection, True)
for (
state,
state_dict,
mapper,
connection,
update_version_id,
) in states_to_update
),
),
)
def post_update(base_mapper, states, uowtransaction, post_update_cols):
"""Issue UPDATE statements on behalf of a relationship() which
specifies post_update.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_update = list(
_organize_states_for_post_update(base_mapper, states, uowtransaction)
)
for table, mapper in base_mapper._sorted_tables.items():
if table not in mapper._pks_by_table:
continue
update = (
(
state,
state_dict,
sub_mapper,
connection,
mapper._get_committed_state_attr_by_column(
state, state_dict, mapper.version_id_col
)
if mapper.version_id_col is not None
else None,
)
for state, state_dict, sub_mapper, connection in states_to_update
if table in sub_mapper._pks_by_table
)
update = _collect_post_update_commands(
base_mapper, uowtransaction, table, update, post_update_cols
)
_emit_post_update_statements(
base_mapper,
uowtransaction,
cached_connections,
mapper,
table,
update,
)
def delete_obj(base_mapper, states, uowtransaction):
"""Issue ``DELETE`` statements for a list of objects.
This is called within the context of a UOWTransaction during a
flush operation.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_delete = list(
_organize_states_for_delete(base_mapper, states, uowtransaction)
)
table_to_mapper = base_mapper._sorted_tables
for table in reversed(list(table_to_mapper.keys())):
mapper = table_to_mapper[table]
if table not in mapper._pks_by_table:
continue
elif mapper.inherits and mapper.passive_deletes:
continue
delete = _collect_delete_commands(
base_mapper, uowtransaction, table, states_to_delete
)
_emit_delete_statements(
base_mapper,
uowtransaction,
cached_connections,
mapper,
table,
delete,
)
for (
state,
state_dict,
mapper,
connection,
update_version_id,
) in states_to_delete:
mapper.dispatch.after_delete(mapper, connection, state)
def _organize_states_for_save(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for INSERT or
UPDATE.
This includes splitting out into distinct lists for
each, calling before_insert/before_update, obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state,
and the identity flag.
"""
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction, states
):
has_identity = bool(state.key)
instance_key = state.key or mapper._identity_key_from_state(state)
row_switch = update_version_id = None
# call before_XXX extensions
if not has_identity:
mapper.dispatch.before_insert(mapper, connection, state)
else:
mapper.dispatch.before_update(mapper, connection, state)
if mapper._validate_polymorphic_identity:
mapper._validate_polymorphic_identity(mapper, state, dict_)
# detect if we have a "pending" instance (i.e. has
# no instance_key attached to it), and another instance
# with the same identity key already exists as persistent.
# convert to an UPDATE if so.
if (
not has_identity
and instance_key in uowtransaction.session.identity_map
):
instance = uowtransaction.session.identity_map[instance_key]
existing = attributes.instance_state(instance)
if not uowtransaction.was_already_deleted(existing):
if not uowtransaction.is_deleted(existing):
raise orm_exc.FlushError(
"New instance %s with identity key %s conflicts "
"with persistent instance %s"
% (state_str(state), instance_key, state_str(existing))
)
base_mapper._log_debug(
"detected row switch for identity %s. "
"will update %s, remove %s from "
"transaction",
instance_key,
state_str(state),
state_str(existing),
)
# remove the "delete" flag from the existing element
uowtransaction.remove_state_actions(existing)
row_switch = existing
if (has_identity or row_switch) and mapper.version_id_col is not None:
update_version_id = mapper._get_committed_state_attr_by_column(
row_switch if row_switch else state,
row_switch.dict if row_switch else dict_,
mapper.version_id_col,
)
yield (
state,
dict_,
mapper,
connection,
has_identity,
row_switch,
update_version_id,
)
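# A "row switch" occurs when a pending object shares an identity key with a
# persistent object that is being deleted in the same flush; rather than
# emitting a DELETE followed by an INSERT, the pending object's INSERT is
# converted into an UPDATE of the existing row.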
def _organize_states_for_post_update(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for UPDATE
corresponding to post_update.
This includes obtaining key information for each state
    including its dictionary, mapper, and the connection to use for
    the execution per state.
"""
return _connections_for_states(base_mapper, uowtransaction, states)
def _organize_states_for_delete(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for DELETE.
This includes calling out before_delete and obtaining
    key information for each state including its dictionary,
    mapper, and the connection to use for the execution per state.
"""
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction, states
):
mapper.dispatch.before_delete(mapper, connection, state)
if mapper.version_id_col is not None:
update_version_id = mapper._get_committed_state_attr_by_column(
state, dict_, mapper.version_id_col
)
else:
update_version_id = None
yield (state, dict_, mapper, connection, update_version_id)
def _collect_insert_commands(
table,
states_to_insert,
bulk=False,
return_defaults=False,
render_nulls=False,
):
"""Identify sets of values to use in INSERT statements for a
list of states.
"""
for state, state_dict, mapper, connection in states_to_insert:
if table not in mapper._pks_by_table:
continue
params = {}
value_params = {}
propkey_to_col = mapper._propkey_to_col[table]
eval_none = mapper._insert_cols_evaluating_none[table]
for propkey in set(propkey_to_col).intersection(state_dict):
value = state_dict[propkey]
col = propkey_to_col[propkey]
if value is None and col not in eval_none and not render_nulls:
continue
elif not bulk and (
hasattr(value, "__clause_element__")
or isinstance(value, sql.ClauseElement)
):
value_params[col] = (
value.__clause_element__()
if hasattr(value, "__clause_element__")
else value
)
else:
params[col.key] = value
if not bulk:
# for all the columns that have no default and we don't have
# a value and where "None" is not a special value, add
# explicit None to the INSERT. This is a legacy behavior
# which might be worth removing, as it should not be necessary
# and also produces confusion, given that "missing" and None
# now have distinct meanings
for colkey in (
mapper._insert_cols_as_none[table]
.difference(params)
.difference([c.key for c in value_params])
):
params[colkey] = None
if not bulk or return_defaults:
# params are in terms of Column key objects, so
# compare to pk_keys_by_table
has_all_pks = mapper._pk_keys_by_table[table].issubset(params)
if mapper.base_mapper.eager_defaults:
has_all_defaults = mapper._server_default_cols[table].issubset(
params
)
else:
has_all_defaults = True
else:
has_all_defaults = has_all_pks = True
if (
mapper.version_id_generator is not False
and mapper.version_id_col is not None
and mapper.version_id_col in mapper._cols_by_table[table]
):
params[mapper.version_id_col.key] = mapper.version_id_generator(
None
)
yield (
state,
state_dict,
params,
mapper,
connection,
value_params,
has_all_pks,
has_all_defaults,
)
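# The positional layout of these record tuples is relied upon downstream:
# _emit_insert_statements and _emit_update_statements group records with
# itertools.groupby() using rec[4] (connection), rec[2] (parameter keys) and
# rec[5] (value_params), plus the has_all_* flags in rec[6] and rec[7].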
def _collect_update_commands(
uowtransaction, table, states_to_update, bulk=False
):
"""Identify sets of values to use in UPDATE statements for a
list of states.
This function works intricately with the history system
to determine exactly what values should be updated
as well as how the row should be matched within an UPDATE
statement. Includes some tricky scenarios where the primary
key of an object might have been changed.
"""
for (
state,
state_dict,
mapper,
connection,
update_version_id,
) in states_to_update:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
value_params = {}
propkey_to_col = mapper._propkey_to_col[table]
if bulk:
# keys here are mapped attribute keys, so
# look at mapper attribute keys for pk
params = dict(
(propkey_to_col[propkey].key, state_dict[propkey])
for propkey in set(propkey_to_col)
.intersection(state_dict)
.difference(mapper._pk_attr_keys_by_table[table])
)
has_all_defaults = True
else:
params = {}
for propkey in set(propkey_to_col).intersection(
state.committed_state
):
value = state_dict[propkey]
col = propkey_to_col[propkey]
if hasattr(value, "__clause_element__") or isinstance(
value, sql.ClauseElement
):
value_params[col] = (
value.__clause_element__()
if hasattr(value, "__clause_element__")
else value
)
# guard against values that generate non-__nonzero__
# objects for __eq__()
elif (
state.manager[propkey].impl.is_equal(
value, state.committed_state[propkey]
)
is not True
):
params[col.key] = value
if mapper.base_mapper.eager_defaults:
has_all_defaults = (
mapper._server_onupdate_default_cols[table]
).issubset(params)
else:
has_all_defaults = True
if (
update_version_id is not None
and mapper.version_id_col in mapper._cols_by_table[table]
):
if not bulk and not (params or value_params):
# HACK: check for history in other tables, in case the
# history is only in a different table than the one
# where the version_id_col is. This logic was lost
# from 0.9 -> 1.0.0 and restored in 1.0.6.
for prop in mapper._columntoproperty.values():
history = state.manager[prop.key].impl.get_history(
state, state_dict, attributes.PASSIVE_NO_INITIALIZE
)
if history.added:
break
else:
                    # no net change for this state; skip to the next one
continue
col = mapper.version_id_col
no_params = not params and not value_params
params[col._label] = update_version_id
if (
bulk or col.key not in params
) and mapper.version_id_generator is not False:
val = mapper.version_id_generator(update_version_id)
params[col.key] = val
elif mapper.version_id_generator is False and no_params:
# no version id generator, no values set on the table,
# and version id wasn't manually incremented.
# set version id to itself so we get an UPDATE
# statement
params[col.key] = update_version_id
elif not (params or value_params):
continue
has_all_pks = True
expect_pk_cascaded = False
if bulk:
# keys here are mapped attribute keys, so
# look at mapper attribute keys for pk
pk_params = dict(
(propkey_to_col[propkey]._label, state_dict.get(propkey))
for propkey in set(propkey_to_col).intersection(
mapper._pk_attr_keys_by_table[table]
)
)
else:
pk_params = {}
for col in pks:
propkey = mapper._columntoproperty[col].key
history = state.manager[propkey].impl.get_history(
state, state_dict, attributes.PASSIVE_OFF
)
if history.added:
if (
not history.deleted
or ("pk_cascaded", state, col)
in uowtransaction.attributes
):
expect_pk_cascaded = True
pk_params[col._label] = history.added[0]
params.pop(col.key, None)
else:
# else, use the old value to locate the row
pk_params[col._label] = history.deleted[0]
if col in value_params:
has_all_pks = False
else:
pk_params[col._label] = history.unchanged[0]
if pk_params[col._label] is None:
raise orm_exc.FlushError(
"Can't update table %s using NULL for primary "
"key value on column %s" % (table, col)
)
if params or value_params:
params.update(pk_params)
yield (
state,
state_dict,
params,
mapper,
connection,
value_params,
has_all_defaults,
has_all_pks,
)
elif expect_pk_cascaded:
# no UPDATE occurs on this table, but we expect that CASCADE rules
# have changed the primary key of the row; propagate this event to
# other columns that expect to have been modified. this normally
# occurs after the UPDATE is emitted however we invoke it here
# explicitly in the absence of our invoking an UPDATE
for m, equated_pairs in mapper._table_to_equated[table]:
sync.populate(
state,
m,
state,
m,
equated_pairs,
uowtransaction,
mapper.passive_updates,
)
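# In the parameter dictionaries assembled above, entries keyed by a column's
# .key feed the SET clause of the UPDATE, while entries keyed by col._label
# supply the bound values for the WHERE criteria (primary key and version id)
# built in _emit_update_statements.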
def _collect_post_update_commands(
base_mapper, uowtransaction, table, states_to_update, post_update_cols
):
"""Identify sets of values to use in UPDATE statements for a
list of states within a post_update operation.
"""
for (
state,
state_dict,
mapper,
connection,
update_version_id,
) in states_to_update:
# assert table in mapper._pks_by_table
pks = mapper._pks_by_table[table]
params = {}
hasdata = False
for col in mapper._cols_by_table[table]:
if col in pks:
params[col._label] = mapper._get_state_attr_by_column(
state, state_dict, col, passive=attributes.PASSIVE_OFF
)
elif col in post_update_cols or col.onupdate is not None:
prop = mapper._columntoproperty[col]
history = state.manager[prop.key].impl.get_history(
state, state_dict, attributes.PASSIVE_NO_INITIALIZE
)
if history.added:
value = history.added[0]
params[col.key] = value
hasdata = True
if hasdata:
if (
update_version_id is not None
and mapper.version_id_col in mapper._cols_by_table[table]
):
col = mapper.version_id_col
params[col._label] = update_version_id
if (
bool(state.key)
and col.key not in params
and mapper.version_id_generator is not False
):
val = mapper.version_id_generator(update_version_id)
params[col.key] = val
yield state, state_dict, mapper, connection, params
def _collect_delete_commands(
base_mapper, uowtransaction, table, states_to_delete
):
"""Identify values to use in DELETE statements for a list of
states to be deleted."""
for (
state,
state_dict,
mapper,
connection,
update_version_id,
) in states_to_delete:
if table not in mapper._pks_by_table:
continue
params = {}
for col in mapper._pks_by_table[table]:
params[
col.key
] = value = mapper._get_committed_state_attr_by_column(
state, state_dict, col
)
if value is None:
raise orm_exc.FlushError(
"Can't delete from table %s "
"using NULL for primary "
"key value on column %s" % (table, col)
)
if (
update_version_id is not None
and mapper.version_id_col in mapper._cols_by_table[table]
):
params[mapper.version_id_col.key] = update_version_id
yield params, connection
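# _emit_update_statements executes records that carry SQL expression values
# individually via statement.values(); the remaining records run through the
# cached connection either one at a time or as an executemany() batch,
# depending on whether multi-row execution is allowed for this grouping.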
def _emit_update_statements(
base_mapper,
uowtransaction,
cached_connections,
mapper,
table,
update,
bookkeeping=True,
):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_update_commands()."""
needs_version_id = (
mapper.version_id_col is not None
and mapper.version_id_col in mapper._cols_by_table[table]
)
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(
col == sql.bindparam(col._label, type_=col.type)
)
if needs_version_id:
clause.clauses.append(
mapper.version_id_col
== sql.bindparam(
mapper.version_id_col._label,
type_=mapper.version_id_col.type,
)
)
stmt = table.update(clause)
return stmt
cached_stmt = base_mapper._memo(("update", table), update_stmt)
for (
(connection, paramkeys, hasvalue, has_all_defaults, has_all_pks),
records,
) in groupby(
update,
lambda rec: (
rec[4], # connection
set(rec[2]), # set of parameter keys
bool(rec[5]), # whether or not we have "value" parameters
rec[6], # has_all_defaults
rec[7], # has all pks
),
):
rows = 0
records = list(records)
statement = cached_stmt
return_defaults = False
if not has_all_pks:
statement = statement.return_defaults()
return_defaults = True
elif (
bookkeeping
and not has_all_defaults
and mapper.base_mapper.eager_defaults
):
statement = statement.return_defaults()
return_defaults = True
elif mapper.version_id_col is not None:
statement = statement.return_defaults(mapper.version_id_col)
return_defaults = True
assert_singlerow = (
connection.dialect.supports_sane_rowcount
if not return_defaults
else connection.dialect.supports_sane_rowcount_returning
)
assert_multirow = (
assert_singlerow
and connection.dialect.supports_sane_multi_rowcount
)
allow_multirow = has_all_defaults and not needs_version_id
if hasvalue:
for (
state,
state_dict,
params,
mapper,
connection,
value_params,
has_all_defaults,
has_all_pks,
) in records:
c = connection.execute(statement.values(value_params), params)
if bookkeeping:
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
value_params,
True,
)
rows += c.rowcount
check_rowcount = assert_singlerow
else:
if not allow_multirow:
check_rowcount = assert_singlerow
for (
state,
state_dict,
params,
mapper,
connection,
value_params,
has_all_defaults,
has_all_pks,
) in records:
c = cached_connections[connection].execute(
statement, params
)
# TODO: why with bookkeeping=False?
if bookkeeping:
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
value_params,
True,
)
rows += c.rowcount
else:
multiparams = [rec[2] for rec in records]
check_rowcount = assert_multirow or (
assert_singlerow and len(multiparams) == 1
)
c = cached_connections[connection].execute(
statement, multiparams
)
rows += c.rowcount
for (
state,
state_dict,
params,
mapper,
connection,
value_params,
has_all_defaults,
has_all_pks,
) in records:
if bookkeeping:
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
value_params,
True,
)
if check_rowcount:
if rows != len(records):
raise orm_exc.StaleDataError(
"UPDATE statement on table '%s' expected to "
"update %d row(s); %d were matched."
% (table.description, len(records), rows)
)
elif needs_version_id:
util.warn(
"Dialect %s does not support updated rowcount "
"- versioning cannot be verified."
% c.dialect.dialect_description
)
def _emit_insert_statements(
base_mapper,
uowtransaction,
cached_connections,
mapper,
table,
insert,
bookkeeping=True,
):
"""Emit INSERT statements corresponding to value lists collected
by _collect_insert_commands()."""
cached_stmt = base_mapper._memo(("insert", table), table.insert)
for (
(connection, pkeys, hasvalue, has_all_pks, has_all_defaults),
records,
) in groupby(
insert,
lambda rec: (
rec[4], # connection
set(rec[2]), # parameter keys
bool(rec[5]), # whether we have "value" parameters
rec[6],
rec[7],
),
):
statement = cached_stmt
if (
not bookkeeping
or (
has_all_defaults
or not base_mapper.eager_defaults
or not connection.dialect.implicit_returning
)
and has_all_pks
and not hasvalue
):
records = list(records)
multiparams = [rec[2] for rec in records]
c = cached_connections[connection].execute(statement, multiparams)
if bookkeeping:
for (
(
state,
state_dict,
params,
mapper_rec,
conn,
value_params,
has_all_pks,
has_all_defaults,
),
last_inserted_params,
) in zip(records, c.context.compiled_parameters):
if state:
_postfetch(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
c,
last_inserted_params,
value_params,
False,
)
else:
_postfetch_bulk_save(mapper_rec, state_dict, table)
else:
if not has_all_defaults and base_mapper.eager_defaults:
statement = statement.return_defaults()
elif mapper.version_id_col is not None:
statement = statement.return_defaults(mapper.version_id_col)
for (
state,
state_dict,
params,
mapper_rec,
connection,
value_params,
has_all_pks,
has_all_defaults,
) in records:
if value_params:
result = connection.execute(
statement.values(value_params), params
)
else:
result = cached_connections[connection].execute(
statement, params
)
primary_key = result.context.inserted_primary_key
if primary_key is not None:
# set primary key attributes
for pk, col in zip(
primary_key, mapper._pks_by_table[table]
):
prop = mapper_rec._columntoproperty[col]
if pk is not None and (
col in value_params
or state_dict.get(prop.key) is None
):
state_dict[prop.key] = pk
if bookkeeping:
if state:
_postfetch(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
result,
result.context.compiled_parameters[0],
value_params,
False,
)
else:
_postfetch_bulk_save(mapper_rec, state_dict, table)
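# In the per-record branch above, newly generated primary key values exposed
# by result.context.inserted_primary_key are copied back into the object's
# dictionary before _postfetch handles server defaults and synchronization.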
def _emit_post_update_statements(
base_mapper, uowtransaction, cached_connections, mapper, table, update
):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_post_update_commands()."""
needs_version_id = (
mapper.version_id_col is not None
and mapper.version_id_col in mapper._cols_by_table[table]
)
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(
col == sql.bindparam(col._label, type_=col.type)
)
if needs_version_id:
clause.clauses.append(
mapper.version_id_col
== sql.bindparam(
mapper.version_id_col._label,
type_=mapper.version_id_col.type,
)
)
stmt = table.update(clause)
if mapper.version_id_col is not None:
stmt = stmt.return_defaults(mapper.version_id_col)
return stmt
statement = base_mapper._memo(("post_update", table), update_stmt)
# execute each UPDATE in the order according to the original
# list of states to guarantee row access order, but
# also group them into common (connection, cols) sets
# to support executemany().
for key, records in groupby(
update,
lambda rec: (rec[3], set(rec[4])), # connection # parameter keys
):
rows = 0
records = list(records)
connection = key[0]
assert_singlerow = (
connection.dialect.supports_sane_rowcount
if mapper.version_id_col is None
else connection.dialect.supports_sane_rowcount_returning
)
assert_multirow = (
assert_singlerow
and connection.dialect.supports_sane_multi_rowcount
)
allow_multirow = not needs_version_id or assert_multirow
if not allow_multirow:
check_rowcount = assert_singlerow
for state, state_dict, mapper_rec, connection, params in records:
c = cached_connections[connection].execute(statement, params)
_postfetch_post_update(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
)
rows += c.rowcount
else:
multiparams = [
params
for state, state_dict, mapper_rec, conn, params in records
]
check_rowcount = assert_multirow or (
assert_singlerow and len(multiparams) == 1
)
c = cached_connections[connection].execute(statement, multiparams)
rows += c.rowcount
for state, state_dict, mapper_rec, connection, params in records:
_postfetch_post_update(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
)
if check_rowcount:
if rows != len(records):
raise orm_exc.StaleDataError(
"UPDATE statement on table '%s' expected to "
"update %d row(s); %d were matched."
% (table.description, len(records), rows)
)
elif needs_version_id:
util.warn(
"Dialect %s does not support updated rowcount "
"- versioning cannot be verified."
% c.dialect.dialect_description
)
def _emit_delete_statements(
base_mapper, uowtransaction, cached_connections, mapper, table, delete
):
"""Emit DELETE statements corresponding to value lists collected
by _collect_delete_commands()."""
need_version_id = (
mapper.version_id_col is not None
and mapper.version_id_col in mapper._cols_by_table[table]
)
def delete_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(
col == sql.bindparam(col.key, type_=col.type)
)
if need_version_id:
clause.clauses.append(
mapper.version_id_col
== sql.bindparam(
mapper.version_id_col.key, type_=mapper.version_id_col.type
)
)
return table.delete(clause)
statement = base_mapper._memo(("delete", table), delete_stmt)
for connection, recs in groupby(delete, lambda rec: rec[1]): # connection
del_objects = [params for params, connection in recs]
connection = cached_connections[connection]
expected = len(del_objects)
rows_matched = -1
only_warn = False
if (
need_version_id
and not connection.dialect.supports_sane_multi_rowcount
):
if connection.dialect.supports_sane_rowcount:
rows_matched = 0
# execute deletes individually so that versioned
# rows can be verified
for params in del_objects:
c = connection.execute(statement, params)
rows_matched += c.rowcount
else:
util.warn(
"Dialect %s does not support deleted rowcount "
"- versioning cannot be verified."
% connection.dialect.dialect_description
)
connection.execute(statement, del_objects)
else:
c = connection.execute(statement, del_objects)
if not need_version_id:
only_warn = True
rows_matched = c.rowcount
if (
base_mapper.confirm_deleted_rows
and rows_matched > -1
and expected != rows_matched
and (
connection.dialect.supports_sane_multi_rowcount
or len(del_objects) == 1
)
):
# TODO: why does this "only warn" if versioning is turned off,
# whereas the UPDATE raises?
if only_warn:
util.warn(
"DELETE statement on table '%s' expected to "
"delete %d row(s); %d were matched. Please set "
"confirm_deleted_rows=False within the mapper "
"configuration to prevent this warning."
% (table.description, expected, rows_matched)
)
else:
raise orm_exc.StaleDataError(
"DELETE statement on table '%s' expected to "
"delete %d row(s); %d were matched. Please set "
"confirm_deleted_rows=False within the mapper "
"configuration to prevent this warning."
% (table.description, expected, rows_matched)
)
def _finalize_insert_update_commands(base_mapper, uowtransaction, states):
"""finalize state on states that have been inserted or updated,
including calling after_insert/after_update events.
"""
for state, state_dict, mapper, connection, has_identity in states:
if mapper._readonly_props:
readonly = state.unmodified_intersection(
[
p.key
for p in mapper._readonly_props
if (
p.expire_on_flush
and (not p.deferred or p.key in state.dict)
)
or (
not p.expire_on_flush
and not p.deferred
and p.key not in state.dict
)
]
)
if readonly:
state._expire_attributes(state.dict, readonly)
# if eager_defaults option is enabled, load
# all expired cols. Else if we have a version_id_col, make sure
# it isn't expired.
toload_now = []
if base_mapper.eager_defaults:
toload_now.extend(
state._unloaded_non_object.intersection(
mapper._server_default_plus_onupdate_propkeys
)
)
if (
mapper.version_id_col is not None
and mapper.version_id_generator is False
):
if mapper._version_id_prop.key in state.unloaded:
toload_now.extend([mapper._version_id_prop.key])
if toload_now:
state.key = base_mapper._identity_key_from_state(state)
loading.load_on_ident(
uowtransaction.session.query(mapper),
state.key,
refresh_state=state,
only_load_props=toload_now,
)
# call after_XXX extensions
if not has_identity:
mapper.dispatch.after_insert(mapper, connection, state)
else:
mapper.dispatch.after_update(mapper, connection, state)
if (
mapper.version_id_generator is False
and mapper.version_id_col is not None
):
if state_dict[mapper._version_id_prop.key] is None:
raise orm_exc.FlushError(
"Instance does not contain a non-NULL version value"
)
def _postfetch_post_update(
mapper, uowtransaction, table, state, dict_, result, params
):
if uowtransaction.is_deleted(state):
return
prefetch_cols = result.context.compiled.prefetch
postfetch_cols = result.context.compiled.postfetch
if (
mapper.version_id_col is not None
and mapper.version_id_col in mapper._cols_by_table[table]
):
prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]
refresh_flush = bool(mapper.class_manager.dispatch.refresh_flush)
if refresh_flush:
load_evt_attrs = []
for c in prefetch_cols:
if c.key in params and c in mapper._columntoproperty:
dict_[mapper._columntoproperty[c].key] = params[c.key]
if refresh_flush:
load_evt_attrs.append(mapper._columntoproperty[c].key)
if refresh_flush and load_evt_attrs:
mapper.class_manager.dispatch.refresh_flush(
state, uowtransaction, load_evt_attrs
)
if postfetch_cols:
state._expire_attributes(
state.dict,
[
mapper._columntoproperty[c].key
for c in postfetch_cols
if c in mapper._columntoproperty
],
)
def _postfetch(
mapper,
uowtransaction,
table,
state,
dict_,
result,
params,
value_params,
isupdate,
):
"""Expire attributes in need of newly persisted database state,
after an INSERT or UPDATE statement has proceeded for that
state."""
prefetch_cols = result.context.compiled.prefetch
postfetch_cols = result.context.compiled.postfetch
returning_cols = result.context.compiled.returning
if (
mapper.version_id_col is not None
and mapper.version_id_col in mapper._cols_by_table[table]
):
prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]
refresh_flush = bool(mapper.class_manager.dispatch.refresh_flush)
if refresh_flush:
load_evt_attrs = []
if returning_cols:
row = result.context.returned_defaults
if row is not None:
for col in returning_cols:
# pk cols returned from insert are handled
# distinctly, don't step on the values here
if col.primary_key and result.context.isinsert:
continue
# note that columns can be in the "return defaults" that are
# not mapped to this mapper, typically because they are
# "excluded", which can be specified directly or also occurs
# when using declarative w/ single table inheritance
prop = mapper._columntoproperty.get(col)
if prop:
dict_[prop.key] = row[col]
if refresh_flush:
load_evt_attrs.append(prop.key)
for c in prefetch_cols:
if c.key in params and c in mapper._columntoproperty:
dict_[mapper._columntoproperty[c].key] = params[c.key]
if refresh_flush:
load_evt_attrs.append(mapper._columntoproperty[c].key)
if refresh_flush and load_evt_attrs:
mapper.class_manager.dispatch.refresh_flush(
state, uowtransaction, load_evt_attrs
)
if isupdate and value_params:
# explicitly suit the use case specified by
# [ticket:3801], PK SQL expressions for UPDATE on non-RETURNING
# database which are set to themselves in order to do a version bump.
postfetch_cols.extend(
[
col
for col in value_params
if col.primary_key and col not in returning_cols
]
)
if postfetch_cols:
state._expire_attributes(
state.dict,
[
mapper._columntoproperty[c].key
for c in postfetch_cols
if c in mapper._columntoproperty
],
)
# synchronize newly inserted ids from one table to the next
# TODO: this still goes a little too often. would be nice to
# have definitive list of "columns that changed" here
for m, equated_pairs in mapper._table_to_equated[table]:
sync.populate(
state,
m,
state,
m,
equated_pairs,
uowtransaction,
mapper.passive_updates,
)
def _postfetch_bulk_save(mapper, dict_, table):
for m, equated_pairs in mapper._table_to_equated[table]:
sync.bulk_populate_inherit_keys(dict_, m, equated_pairs)
def _connections_for_states(base_mapper, uowtransaction, states):
"""Return an iterator of (state, state.dict, mapper, connection).
The states are sorted according to _sort_states, then paired
with the connection they should be using for the given
unit of work transaction.
"""
# if session has a connection callable,
# organize individual states with the connection
# to use for update
if uowtransaction.session.connection_callable:
connection_callable = uowtransaction.session.connection_callable
else:
connection = uowtransaction.transaction.connection(base_mapper)
connection_callable = None
for state in _sort_states(base_mapper, states):
if connection_callable:
connection = connection_callable(base_mapper, state.obj())
mapper = state.manager.mapper
yield state, state.dict, mapper, connection
def _cached_connection_dict(base_mapper):
# dictionary of connection->connection_with_cache_options.
return util.PopulateDict(
lambda conn: conn.execution_options(
compiled_cache=base_mapper._compiled_cache
)
)
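# _sort_states places pending objects first, ordered by their insert_order,
# followed by persistent objects sorted by primary key, keeping row access
# order deterministic across flushes.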
def _sort_states(mapper, states):
pending = set(states)
persistent = set(s for s in pending if s.key is not None)
pending.difference_update(persistent)
try:
persistent_sorted = sorted(
persistent, key=mapper._persistent_sortkey_fn
)
except TypeError as err:
util.raise_(
sa_exc.InvalidRequestError(
"Could not sort objects by primary key; primary key "
"values must be sortable in Python (was: %s)" % err
),
replace_context=err,
)
return (
sorted(pending, key=operator.attrgetter("insert_order"))
+ persistent_sorted
)
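# Illustrative sketch (not part of this module): Query.update() and
# Query.delete() are the public entry points into these classes. Assuming a
# hypothetical mapped class ``User``, a call such as
#
#     session.query(User).filter(User.name == "old").update(
#         {"name": "new"}, synchronize_session="evaluate"
#     )
#
# is carried out roughly as
# BulkUpdate.factory(query, "evaluate", values, update_kwargs).exec_().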
class BulkUD(object):
"""Handle bulk update and deletes via a :class:`_query.Query`."""
def __init__(self, query):
self.query = query.enable_eagerloads(False)
self.mapper = self.query._bind_mapper()
self._validate_query_state()
def _validate_query_state(self):
for attr, methname, notset, op in (
("_limit", "limit()", None, operator.is_),
("_offset", "offset()", None, operator.is_),
("_order_by", "order_by()", False, operator.is_),
("_group_by", "group_by()", False, operator.is_),
("_distinct", "distinct()", False, operator.is_),
(
"_from_obj",
"join(), outerjoin(), select_from(), or from_self()",
(),
operator.eq,
),
):
if not op(getattr(self.query, attr), notset):
raise sa_exc.InvalidRequestError(
"Can't call Query.update() or Query.delete() "
"when %s has been called" % (methname,)
)
@property
def session(self):
return self.query.session
@classmethod
def _factory(cls, lookup, synchronize_session, *arg):
try:
klass = lookup[synchronize_session]
except KeyError as err:
util.raise_(
sa_exc.ArgumentError(
"Valid strategies for session synchronization "
"are %s" % (", ".join(sorted(repr(x) for x in lookup)))
),
replace_context=err,
)
else:
return klass(*arg)
def exec_(self):
self._do_before_compile()
self._do_pre()
self._do_pre_synchronize()
self._do_exec()
self._do_post_synchronize()
self._do_post()
def _execute_stmt(self, stmt):
self.result = self.query._execute_crud(stmt, self.mapper)
self.rowcount = self.result.rowcount
def _do_before_compile(self):
raise NotImplementedError()
@util.dependencies("sqlalchemy.orm.query")
def _do_pre(self, querylib):
query = self.query
self.context = querylib.QueryContext(query)
if isinstance(query._entities[0], querylib._ColumnEntity):
# check for special case of query(table)
tables = set()
for ent in query._entities:
if not isinstance(ent, querylib._ColumnEntity):
tables.clear()
break
else:
tables.update(_from_objects(ent.column))
if len(tables) != 1:
raise sa_exc.InvalidRequestError(
"This operation requires only one Table or "
"entity be specified as the target."
)
else:
self.primary_table = tables.pop()
else:
self.primary_table = query._only_entity_zero(
"This operation requires only one Table or "
"entity be specified as the target."
).mapper.local_table
session = query.session
if query._autoflush:
session._autoflush()
def _do_pre_synchronize(self):
pass
def _do_post_synchronize(self):
pass
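# Two synchronization strategies are provided: "evaluate" resolves the
# affected objects by evaluating the query criteria in Python against the
# session's identity map, while "fetch" issues a SELECT of the matched primary
# keys up front; passing synchronize_session=False skips synchronization
# entirely.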
class BulkEvaluate(BulkUD):
"""BulkUD which does the 'evaluate' method of session state resolution."""
def _additional_evaluators(self, evaluator_compiler):
pass
def _do_pre_synchronize(self):
query = self.query
target_cls = query._mapper_zero().class_
try:
evaluator_compiler = evaluator.EvaluatorCompiler(target_cls)
if query.whereclause is not None:
eval_condition = evaluator_compiler.process(query.whereclause)
else:
def eval_condition(obj):
return True
self._additional_evaluators(evaluator_compiler)
except evaluator.UnevaluatableError as err:
util.raise_(
sa_exc.InvalidRequestError(
'Could not evaluate current criteria in Python: "%s". '
"Specify 'fetch' or False for the "
"synchronize_session parameter." % err
),
from_=err,
)
# TODO: detect when the where clause is a trivial primary key match
self.matched_objects = [
obj
for (
cls,
pk,
identity_token,
), obj in query.session.identity_map.items()
if issubclass(cls, target_cls) and eval_condition(obj)
]
class BulkFetch(BulkUD):
"""BulkUD which does the 'fetch' method of session state resolution."""
def _do_pre_synchronize(self):
query = self.query
session = query.session
context = query._compile_context()
select_stmt = context.statement.with_only_columns(
self.primary_table.primary_key
)
self.matched_rows = session.execute(
select_stmt, mapper=self.mapper, params=query._params
).fetchall()
class BulkUpdate(BulkUD):
"""BulkUD which handles UPDATEs."""
def __init__(self, query, values, update_kwargs):
super(BulkUpdate, self).__init__(query)
self.values = values
self.update_kwargs = update_kwargs
@classmethod
def factory(cls, query, synchronize_session, values, update_kwargs):
return BulkUD._factory(
{
"evaluate": BulkUpdateEvaluate,
"fetch": BulkUpdateFetch,
False: BulkUpdate,
},
synchronize_session,
query,
values,
update_kwargs,
)
def _do_before_compile(self):
if self.query.dispatch.before_compile_update:
for fn in self.query.dispatch.before_compile_update:
new_query = fn(self.query, self)
if new_query is not None:
self.query = new_query
@property
def _resolved_values(self):
values = []
for k, v in (
self.values.items()
if hasattr(self.values, "items")
else self.values
):
if self.mapper:
if isinstance(k, util.string_types):
desc = _entity_descriptor(self.mapper, k)
values.extend(desc._bulk_update_tuples(v))
elif isinstance(k, attributes.QueryableAttribute):
values.extend(k._bulk_update_tuples(v))
else:
values.append((k, v))
else:
values.append((k, v))
return values
@property
def _resolved_values_keys_as_propnames(self):
values = []
for k, v in self._resolved_values:
if isinstance(k, attributes.QueryableAttribute):
values.append((k.key, v))
continue
elif hasattr(k, "__clause_element__"):
k = k.__clause_element__()
if self.mapper and isinstance(k, expression.ColumnElement):
try:
attr = self.mapper._columntoproperty[k]
except orm_exc.UnmappedColumnError:
pass
else:
values.append((attr.key, v))
else:
raise sa_exc.InvalidRequestError(
"Invalid expression type: %r" % k
)
return values
def _do_exec(self):
values = self._resolved_values
if not self.update_kwargs.get("preserve_parameter_order", False):
values = dict(values)
update_stmt = sql.update(
self.primary_table,
self.context.whereclause,
values,
**self.update_kwargs
)
self._execute_stmt(update_stmt)
def _do_post(self):
session = self.query.session
session.dispatch.after_bulk_update(self)
class BulkDelete(BulkUD):
"""BulkUD which handles DELETEs."""
def __init__(self, query):
super(BulkDelete, self).__init__(query)
@classmethod
def factory(cls, query, synchronize_session):
return BulkUD._factory(
{
"evaluate": BulkDeleteEvaluate,
"fetch": BulkDeleteFetch,
False: BulkDelete,
},
synchronize_session,
query,
)
def _do_before_compile(self):
if self.query.dispatch.before_compile_delete:
for fn in self.query.dispatch.before_compile_delete:
new_query = fn(self.query, self)
if new_query is not None:
self.query = new_query
def _do_exec(self):
delete_stmt = sql.delete(self.primary_table, self.context.whereclause)
self._execute_stmt(delete_stmt)
def _do_post(self):
session = self.query.session
session.dispatch.after_bulk_delete(self)
class BulkUpdateEvaluate(BulkEvaluate, BulkUpdate):
"""BulkUD which handles UPDATEs using the "evaluate"
method of session resolution."""
def _additional_evaluators(self, evaluator_compiler):
self.value_evaluators = {}
values = self._resolved_values_keys_as_propnames
for key, value in values:
self.value_evaluators[key] = evaluator_compiler.process(
expression._literal_as_binds(value)
)
def _do_post_synchronize(self):
session = self.query.session
states = set()
evaluated_keys = list(self.value_evaluators.keys())
for obj in self.matched_objects:
state, dict_ = (
attributes.instance_state(obj),
attributes.instance_dict(obj),
)
# only evaluate unmodified attributes
to_evaluate = state.unmodified.intersection(evaluated_keys)
for key in to_evaluate:
dict_[key] = self.value_evaluators[key](obj)
state.manager.dispatch.refresh(state, None, to_evaluate)
state._commit(dict_, list(to_evaluate))
# expire attributes with pending changes
# (there was no autoflush, so they are overwritten)
state._expire_attributes(
dict_, set(evaluated_keys).difference(to_evaluate)
)
states.add(state)
session._register_altered(states)
class BulkDeleteEvaluate(BulkEvaluate, BulkDelete):
"""BulkUD which handles DELETEs using the "evaluate"
method of session resolution."""
def _do_post_synchronize(self):
self.query.session._remove_newly_deleted(
[attributes.instance_state(obj) for obj in self.matched_objects]
)
class BulkUpdateFetch(BulkFetch, BulkUpdate):
"""BulkUD which handles UPDATEs using the "fetch"
method of session resolution."""
def _do_post_synchronize(self):
session = self.query.session
target_mapper = self.query._mapper_zero()
states = set(
[
attributes.instance_state(session.identity_map[identity_key])
for identity_key in [
target_mapper.identity_key_from_primary_key(
list(primary_key)
)
for primary_key in self.matched_rows
]
if identity_key in session.identity_map
]
)
values = self._resolved_values_keys_as_propnames
attrib = set(k for k, v in values)
for state in states:
to_expire = attrib.intersection(state.dict)
if to_expire:
session._expire_state(state, to_expire)
session._register_altered(states)
class BulkDeleteFetch(BulkFetch, BulkDelete):
"""BulkUD which handles DELETEs using the "fetch"
method of session resolution."""
def _do_post_synchronize(self):
session = self.query.session
target_mapper = self.query._mapper_zero()
for primary_key in self.matched_rows:
# TODO: inline this and call remove_newly_deleted
# once
identity_key = target_mapper.identity_key_from_primary_key(
list(primary_key)
)
if identity_key in session.identity_map:
session._remove_newly_deleted(
[
attributes.instance_state(
session.identity_map[identity_key]
)
]
)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/path_registry.py
|
# orm/path_registry.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Path tracking utilities, representing mapper graph traversals.
"""
from itertools import chain
import logging
from .base import class_mapper
from .. import exc
from .. import inspection
from .. import util
log = logging.getLogger(__name__)
def _unreduce_path(path):
return PathRegistry.deserialize(path)
_WILDCARD_TOKEN = "*"
_DEFAULT_TOKEN = "_sa_default"
class PathRegistry(object):
"""Represent query load paths and registry functions.
Basically represents structures like:
(<User mapper>, "orders", <Order mapper>, "items", <Item mapper>)
These structures are generated by things like
query options (joinedload(), subqueryload(), etc.) and are
used to compose keys stored in the query._attributes dictionary
for various options.
They are then re-composed at query compile/result row time as
the query is formed and as rows are fetched, where they again
serve to compose keys to look up options in the context.attributes
dictionary, which is copied from query._attributes.
The path structure has a limited amount of caching, where each
"root" ultimately pulls from a fixed registry associated with
the first mapper, that also contains elements for each of its
property keys. However paths longer than two elements, which
are the exception rather than the rule, are generated on an
as-needed basis.
"""
__slots__ = ()
is_token = False
is_root = False
def __eq__(self, other):
try:
return other is not None and self.path == other.path
except AttributeError:
util.warn(
"Comparison of PathRegistry to %r is not supported"
% (type(other))
)
return False
def __ne__(self, other):
try:
return other is None or self.path != other.path
except AttributeError:
util.warn(
"Comparison of PathRegistry to %r is not supported"
% (type(other))
)
return True
def set(self, attributes, key, value):
log.debug("set '%s' on path '%s' to '%s'", key, self, value)
attributes[(key, self.natural_path)] = value
def setdefault(self, attributes, key, value):
log.debug("setdefault '%s' on path '%s' to '%s'", key, self, value)
attributes.setdefault((key, self.natural_path), value)
def get(self, attributes, key, value=None):
key = (key, self.natural_path)
if key in attributes:
return attributes[key]
else:
return value
def __len__(self):
return len(self.path)
@property
def length(self):
return len(self.path)
def pairs(self):
path = self.path
for i in range(0, len(path), 2):
yield path[i], path[i + 1]
def contains_mapper(self, mapper):
for path_mapper in [self.path[i] for i in range(0, len(self.path), 2)]:
if path_mapper.is_mapper and path_mapper.isa(mapper):
return True
else:
return False
def contains(self, attributes, key):
return (key, self.path) in attributes
def __reduce__(self):
return _unreduce_path, (self.serialize(),)
@classmethod
def _serialize_path(cls, path):
return list(
zip(
[m.class_ for m in [path[i] for i in range(0, len(path), 2)]],
[path[i].key for i in range(1, len(path), 2)] + [None],
)
)
@classmethod
def _deserialize_path(cls, path):
p = tuple(
chain(
*[
(
class_mapper(mcls),
class_mapper(mcls).attrs[key]
if key is not None
else None,
)
for mcls, key in path
]
)
)
if p and p[-1] is None:
p = p[0:-1]
return p
@classmethod
def serialize_context_dict(cls, dict_, tokens):
return [
((key, cls._serialize_path(path)), value)
for (key, path), value in [
(k, v)
for k, v in dict_.items()
if isinstance(k, tuple) and k[0] in tokens
]
]
@classmethod
def deserialize_context_dict(cls, serialized):
return util.OrderedDict(
((key, tuple(cls._deserialize_path(path))), value)
for (key, path), value in serialized
)
def serialize(self):
path = self.path
return self._serialize_path(path)
@classmethod
def deserialize(cls, path):
if path is None:
return None
p = cls._deserialize_path(path)
return cls.coerce(p)
@classmethod
def per_mapper(cls, mapper):
if mapper.is_mapper:
return CachingEntityRegistry(cls.root, mapper)
else:
return SlotsEntityRegistry(cls.root, mapper)
@classmethod
def coerce(cls, raw):
return util.reduce(lambda prev, next: prev[next], raw, cls.root)
def token(self, token):
if token.endswith(":" + _WILDCARD_TOKEN):
return TokenRegistry(self, token)
elif token.endswith(":" + _DEFAULT_TOKEN):
return TokenRegistry(self.root, token)
else:
raise exc.ArgumentError("invalid token: %s" % token)
def __add__(self, other):
return util.reduce(lambda prev, next: prev[next], other.path, self)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.path)
class RootRegistry(PathRegistry):
"""Root registry, defers to mappers so that
paths are maintained per-root-mapper.
"""
path = natural_path = ()
has_entity = False
is_aliased_class = False
is_root = True
def __getitem__(self, entity):
return entity._path_registry
PathRegistry.root = RootRegistry()
class TokenRegistry(PathRegistry):
__slots__ = ("token", "parent", "path", "natural_path")
def __init__(self, parent, token):
self.token = token
self.parent = parent
self.path = parent.path + (token,)
self.natural_path = parent.natural_path + (token,)
has_entity = False
is_token = True
def generate_for_superclasses(self):
if not self.parent.is_aliased_class and not self.parent.is_root:
for ent in self.parent.mapper.iterate_to_root():
yield TokenRegistry(self.parent.parent[ent], self.token)
elif (
self.parent.is_aliased_class
and self.parent.entity._is_with_polymorphic
):
yield self
for ent in self.parent.entity._with_polymorphic_entities:
yield TokenRegistry(self.parent.parent[ent], self.token)
else:
yield self
def __getitem__(self, entity):
raise NotImplementedError()
class PropRegistry(PathRegistry):
is_unnatural = False
def __init__(self, parent, prop):
# restate this path in terms of the
# given MapperProperty's parent.
insp = inspection.inspect(parent[-1])
natural_parent = parent
if not insp.is_aliased_class or insp._use_mapper_path:
parent = natural_parent = parent.parent[prop.parent]
elif (
insp.is_aliased_class
and insp.with_polymorphic_mappers
and prop.parent in insp.with_polymorphic_mappers
):
subclass_entity = parent[-1]._entity_for_mapper(prop.parent)
parent = parent.parent[subclass_entity]
# when building a path where with_polymorphic() is in use,
# special logic to determine the "natural path" when subclass
# entities are used.
#
# here we are trying to distinguish between a path that starts
# on the with_polymorphic entity vs. one that starts on a
# normal entity that introduces a with_polymorphic() in the
# middle using of_type():
#
# # as in test_polymorphic_rel->
# # test_subqueryload_on_subclass_uses_path_correctly
# wp = with_polymorphic(RegularEntity, "*")
# sess.query(wp).options(someload(wp.SomeSubEntity.foos))
#
# vs
#
# # as in test_relationship->JoinedloadWPolyOfTypeContinued
# wp = with_polymorphic(SomeFoo, "*")
# sess.query(RegularEntity).options(
# someload(RegularEntity.foos.of_type(wp))
# .someload(wp.SubFoo.bar)
# )
#
# in the former case, the Query as it generates a path that we
# want to match will be in terms of the with_polymorphic at the
# beginning. in the latter case, Query will generate simple
# paths that don't know about this with_polymorphic, so we must
# use a separate natural path.
#
#
if parent.parent:
natural_parent = parent.parent[subclass_entity.mapper]
self.is_unnatural = True
else:
natural_parent = parent
elif (
natural_parent.parent
and insp.is_aliased_class
and prop.parent # this should always be the case here
is not insp.mapper
and insp.mapper.isa(prop.parent)
):
natural_parent = parent.parent[prop.parent]
self.prop = prop
self.parent = parent
self.path = parent.path + (prop,)
self.natural_path = natural_parent.natural_path + (prop,)
self._wildcard_path_loader_key = (
"loader",
parent.path + self.prop._wildcard_token,
)
self._default_path_loader_key = self.prop._default_path_loader_key
self._loader_key = ("loader", self.path)
def __str__(self):
return " -> ".join(str(elem) for elem in self.path)
@util.memoized_property
def has_entity(self):
return hasattr(self.prop, "mapper")
@util.memoized_property
def entity(self):
return self.prop.mapper
@property
def mapper(self):
return self.entity
@property
def entity_path(self):
return self[self.entity]
def __getitem__(self, entity):
if isinstance(entity, (int, slice)):
return self.path[entity]
else:
return SlotsEntityRegistry(self, entity)
class AbstractEntityRegistry(PathRegistry):
__slots__ = ()
has_entity = True
def __init__(self, parent, entity):
self.key = entity
self.parent = parent
self.is_aliased_class = entity.is_aliased_class
self.entity = entity
self.path = parent.path + (entity,)
# the "natural path" is the path that we get when Query is traversing
# from the lead entities into the various relationships; it corresponds
# to the structure of mappers and relationships. when we are given a
# path that comes from loader options, as of 1.3 it can have ad-hoc
# with_polymorphic() and other AliasedInsp objects inside of it, which
# are usually not present in mappings. So here we track both the
# "enhanced" path in self.path and the "natural" path that doesn't
# include those objects so these two traversals can be matched up.
# the test here for "(self.is_aliased_class or parent.is_unnatural)"
# is to avoid the more expensive conditional logic that follows if we
# know we don't have to do it. This conditional can just as well be
# "if parent.path:", it just is more function calls.
if parent.path and (self.is_aliased_class or parent.is_unnatural):
# this is an infrequent code path used only for loader strategies
# that also make use of of_type().
if entity.mapper.isa(parent.natural_path[-1].entity):
self.natural_path = parent.natural_path + (entity.mapper,)
else:
self.natural_path = parent.natural_path + (
parent.natural_path[-1].entity,
)
else:
self.natural_path = self.path
@property
def entity_path(self):
return self
@property
def mapper(self):
return inspection.inspect(self.entity).mapper
def __bool__(self):
return True
__nonzero__ = __bool__
def __getitem__(self, entity):
if isinstance(entity, (int, slice)):
return self.path[entity]
else:
return PropRegistry(self, entity)
class SlotsEntityRegistry(AbstractEntityRegistry):
# for aliased class, return lightweight, no-cycles created
# version
__slots__ = (
"key",
"parent",
"is_aliased_class",
"entity",
"path",
"natural_path",
)
class CachingEntityRegistry(AbstractEntityRegistry, dict):
# for long lived mapper, return dict based caching
# version that creates reference cycles
def __getitem__(self, entity):
if isinstance(entity, (int, slice)):
return self.path[entity]
else:
return dict.__getitem__(self, entity)
def __missing__(self, key):
self[key] = item = PropRegistry(self, key)
return item
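# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): how the registries in
# this module compose a load path of alternating mapper / property elements,
# and how such a path round-trips through coerce()/serialize()/deserialize().
# PathRegistry is internal API; the ``User``/``Address`` mapping below is an
# assumption made only for this example.
from sqlalchemy import Column, ForeignKey, Integer, inspect
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import configure_mappers, relationship
from sqlalchemy.orm.path_registry import PathRegistry

Base = declarative_base()

class User(Base):
    __tablename__ = "user"
    id = Column(Integer, primary_key=True)
    addresses = relationship("Address")

class Address(Base):
    __tablename__ = "address"
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey("user.id"))

configure_mappers()
user_mapper = inspect(User)        # Mapper for User
address_mapper = inspect(Address)  # Mapper for Address

# RootRegistry -> per-mapper entity registry -> PropRegistry -> entity registry
path = PathRegistry.root[user_mapper][user_mapper.attrs["addresses"]][
    address_mapper
]
print(path.path)  # (User mapper, 'addresses' relationship, Address mapper)

# a raw path tuple can be re-coerced, and serialize()/deserialize() produce a
# picklable form that resolves back to an equal path
assert PathRegistry.coerce(path.path) == path
assert PathRegistry.deserialize(path.serialize()) == path
# ---------------------------------------------------------------------------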
| 0 | qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/query.py |
# orm/query.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The Query class and support.
Defines the :class:`_query.Query` class, the central
construct used by the ORM to construct database queries.
The :class:`_query.Query` class should not be confused with the
:class:`_expression.Select` class, which defines database
SELECT operations at the SQL (non-ORM) level. ``Query`` differs from
``Select`` in that it returns ORM-mapped objects and interacts with an
ORM session, whereas the ``Select`` construct interacts directly with the
database to return iterable result sets.
"""
from itertools import chain
from . import attributes
from . import exc as orm_exc
from . import interfaces
from . import loading
from . import persistence
from . import properties
from .base import _entity_descriptor
from .base import _generative
from .base import _is_aliased_class
from .base import _is_mapped_class
from .base import _orm_columns
from .base import InspectionAttr
from .path_registry import PathRegistry
from .util import _entity_corresponds_to
from .util import aliased
from .util import AliasedClass
from .util import join as orm_join
from .util import object_mapper
from .util import ORMAdapter
from .util import with_parent
from .. import exc as sa_exc
from .. import inspect
from .. import inspection
from .. import log
from .. import sql
from .. import util
from ..sql import expression
from ..sql import util as sql_util
from ..sql import visitors
from ..sql.base import ColumnCollection
from ..sql.expression import _interpret_as_from
from ..sql.selectable import ForUpdateArg
__all__ = ["Query", "QueryContext", "aliased"]
_path_registry = PathRegistry.root
@inspection._self_inspects
@log.class_logger
class Query(object):
"""ORM-level SQL construction object.
:class:`_query.Query`
is the source of all SELECT statements generated by the
ORM, both those formulated by end-user query operations as well as by
high level internal operations such as related collection loading. It
features a generative interface whereby successive calls return a new
:class:`_query.Query` object, a copy of the former with additional
criteria and options associated with it.
:class:`_query.Query` objects are normally initially generated using the
:meth:`~.Session.query` method of :class:`.Session`, and in
less common cases by instantiating the :class:`_query.Query` directly and
associating with a :class:`.Session` using the
:meth:`_query.Query.with_session`
method.
For a full walkthrough of :class:`_query.Query` usage, see the
:ref:`ormtutorial_toplevel`.
"""
_only_return_tuples = False
_enable_eagerloads = True
_enable_assertions = True
_with_labels = False
_criterion = None
_yield_per = None
_order_by = False
_group_by = False
_having = None
_distinct = False
_prefixes = None
_suffixes = None
_offset = None
_limit = None
_for_update_arg = None
_statement = None
_correlate = frozenset()
_populate_existing = False
_invoke_all_eagers = True
_version_check = False
_autoflush = True
_only_load_props = None
_refresh_state = None
_refresh_identity_token = None
_from_obj = ()
_join_entities = ()
_select_from_entity = None
_mapper_adapter_map = {}
_filter_aliases = ()
_from_obj_alias = None
_joinpath = _joinpoint = util.immutabledict()
_execution_options = util.immutabledict()
_params = util.immutabledict()
_attributes = util.immutabledict()
_with_options = ()
_with_hints = ()
_enable_single_crit = True
_orm_only_adapt = True
_orm_only_from_obj_alias = True
_current_path = _path_registry
_has_mapper_entities = False
_bake_ok = True
lazy_loaded_from = None
"""An :class:`.InstanceState` that is using this :class:`_query.Query`
for a
lazy load operation.
The primary rationale for this attribute is to support the horizontal
sharding extension, where it is available within specific query
execution time hooks created by this extension. To that end, the
attribute is only intended to be meaningful at **query execution time**,
and importantly not any time prior to that, including query compilation
time.
.. note::
Within the realm of regular :class:`_query.Query` usage,
this attribute is
set by the lazy loader strategy before the query is invoked. However
there is no established hook that is available to reliably intercept
this value programmatically. It is set by the lazy loading strategy
after any mapper option objects would have been applied, and now that
the lazy loading strategy in the ORM makes use of "baked" queries to
cache SQL compilation, the :meth:`.QueryEvents.before_compile` hook is
also not reliable.
Currently, setting the :paramref:`_orm.relationship.bake_queries` to
``False`` on the target :func:`_orm.relationship`,
and then making use of
the :meth:`.QueryEvents.before_compile` event hook, is the only
available programmatic path to intercepting this attribute. In future
releases, there will be new hooks available that allow interception of
the :class:`_query.Query` before it is executed,
rather than before it is
compiled.
.. versionadded:: 1.2.9
"""
def __init__(self, entities, session=None):
"""Construct a :class:`_query.Query` directly.
E.g.::
q = Query([User, Address], session=some_session)
The above is equivalent to::
q = some_session.query(User, Address)
:param entities: a sequence of entities and/or SQL expressions.
:param session: a :class:`.Session` with which the
:class:`_query.Query`
will be associated. Optional; a :class:`_query.Query`
can be associated
with a :class:`.Session` generatively via the
:meth:`_query.Query.with_session` method as well.
.. seealso::
:meth:`.Session.query`
:meth:`_query.Query.with_session`
"""
self.session = session
self._polymorphic_adapters = {}
self._set_entities(entities)
def _set_entities(self, entities, entity_wrapper=None):
if entity_wrapper is None:
entity_wrapper = _QueryEntity
self._entities = []
self._primary_entity = None
self._has_mapper_entities = False
# 1. don't run util.to_list() or _set_entity_selectables
# if no entities were passed - major performance bottleneck
# from lazy loader implementation when it seeks to use Query
# class for an identity lookup, causes test_orm.py to fail
# with thousands of extra function calls, see issue #4228
# for why this use had to be added
# 2. can't use classmethod on Query because session.query_cls
# is an arbitrary callable in some user recipes, not
# necessarily a class, so we don't have the class available.
# see issue #4256
# 3. can't do "if entities is not None" because we usually get here
# from session.query() which takes in *entities.
# 4. can't do "if entities" because users make use of undocumented
# to_list() behavior here and they pass clause expressions that
# can't be evaluated as boolean. See issue #4269.
if entities != ():
for ent in util.to_list(entities):
entity_wrapper(self, ent)
self._set_entity_selectables(self._entities)
def _set_entity_selectables(self, entities):
self._mapper_adapter_map = d = self._mapper_adapter_map.copy()
for ent in entities:
for entity in ent.entities:
if entity not in d:
ext_info = inspect(entity)
if (
not ext_info.is_aliased_class
and ext_info.mapper.with_polymorphic
):
if (
ext_info.mapper.persist_selectable
not in self._polymorphic_adapters
):
self._mapper_loads_polymorphically_with(
ext_info.mapper,
sql_util.ColumnAdapter(
ext_info.selectable,
ext_info.mapper._equivalent_columns,
),
)
aliased_adapter = None
elif ext_info.is_aliased_class:
aliased_adapter = ext_info._adapter
else:
aliased_adapter = None
d[entity] = (ext_info, aliased_adapter)
ent.setup_entity(*d[entity])
def _mapper_loads_polymorphically_with(self, mapper, adapter):
for m2 in mapper._with_polymorphic_mappers or [mapper]:
self._polymorphic_adapters[m2] = adapter
for m in m2.iterate_to_root():
self._polymorphic_adapters[m.local_table] = adapter
def _set_select_from(self, obj, set_base_alias):
fa = []
select_from_alias = None
for from_obj in obj:
info = inspect(from_obj)
if hasattr(info, "mapper") and (
info.is_mapper or info.is_aliased_class
):
self._select_from_entity = info
if set_base_alias and not info.is_aliased_class:
raise sa_exc.ArgumentError(
"A selectable (FromClause) instance is "
"expected when the base alias is being set."
)
fa.append(info.selectable)
elif not info.is_selectable:
raise sa_exc.ArgumentError(
"argument is not a mapped class, mapper, "
"aliased(), or FromClause instance."
)
else:
if isinstance(from_obj, expression.SelectBase):
from_obj = from_obj.alias()
if set_base_alias:
select_from_alias = from_obj
fa.append(from_obj)
self._from_obj = tuple(fa)
if (
set_base_alias
and len(self._from_obj) == 1
and isinstance(select_from_alias, expression.Alias)
):
equivs = self.__all_equivs()
self._from_obj_alias = sql_util.ColumnAdapter(
self._from_obj[0], equivs
)
elif (
set_base_alias
and len(self._from_obj) == 1
and hasattr(info, "mapper")
and info.is_aliased_class
):
self._from_obj_alias = info._adapter
def _reset_polymorphic_adapter(self, mapper):
for m2 in mapper._with_polymorphic_mappers:
self._polymorphic_adapters.pop(m2, None)
for m in m2.iterate_to_root():
self._polymorphic_adapters.pop(m.local_table, None)
def _adapt_polymorphic_element(self, element):
if "parententity" in element._annotations:
search = element._annotations["parententity"]
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
if isinstance(element, expression.FromClause):
search = element
elif hasattr(element, "table"):
search = element.table
else:
return None
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
def _adapt_col_list(self, cols):
return [
self._adapt_clause(
expression._literal_as_label_reference(o), True, True
)
for o in cols
]
@_generative()
def _set_lazyload_from(self, state):
self.lazy_loaded_from = state
@_generative()
def _adapt_all_clauses(self):
self._orm_only_adapt = False
def _adapt_clause(self, clause, as_filter, orm_only):
"""Adapt incoming clauses to transformations which
have been applied within this query."""
adapters = []
# do we adapt all expression elements or only those
# tagged as 'ORM' constructs ?
if not self._orm_only_adapt:
orm_only = False
if as_filter and self._filter_aliases:
for fa in self._filter_aliases:
adapters.append((orm_only, fa.replace))
if self._from_obj_alias:
# for the "from obj" alias, apply extra rule to the
# 'ORM only' check, if this query were generated from a
# subquery of itself, i.e. _from_selectable(), apply adaptation
# to all SQL constructs.
adapters.append(
(
orm_only if self._orm_only_from_obj_alias else False,
self._from_obj_alias.replace,
)
)
if self._polymorphic_adapters:
adapters.append((orm_only, self._adapt_polymorphic_element))
if not adapters:
return clause
def replace(elem):
is_orm_adapt = (
"_orm_adapt" in elem._annotations
or "parententity" in elem._annotations
)
for _orm_only, adapter in adapters:
if not _orm_only or is_orm_adapt:
e = adapter(elem)
if e is not None:
return e
return visitors.replacement_traverse(clause, {}, replace)
def _query_entity_zero(self):
"""Return the first QueryEntity."""
return self._entities[0]
def _mapper_zero(self):
"""return the Mapper associated with the first QueryEntity."""
return self._entities[0].mapper
def _entity_zero(self):
"""Return the 'entity' (mapper or AliasedClass) associated
with the first QueryEntity, or alternatively the 'select from'
entity if specified."""
return (
self._select_from_entity
if self._select_from_entity is not None
else self._query_entity_zero().entity_zero
)
@property
def _mapper_entities(self):
for ent in self._entities:
if isinstance(ent, _MapperEntity):
yield ent
def _joinpoint_zero(self):
return self._joinpoint.get("_joinpoint_entity", self._entity_zero())
def _bind_mapper(self):
ezero = self._entity_zero()
if ezero is not None:
insp = inspect(ezero)
if not insp.is_clause_element:
return insp.mapper
return None
def _only_full_mapper_zero(self, methname):
if self._entities != [self._primary_entity]:
raise sa_exc.InvalidRequestError(
"%s() can only be used against "
"a single mapped class." % methname
)
return self._primary_entity.entity_zero
def _only_entity_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale
or "This operation requires a Query "
"against a single mapper."
)
return self._entity_zero()
def __all_equivs(self):
equivs = {}
for ent in self._mapper_entities:
equivs.update(ent.mapper._equivalent_columns)
return equivs
def _get_condition(self):
return self._no_criterion_condition(
"get", order_by=False, distinct=False
)
def _get_existing_condition(self):
self._no_criterion_assertion("get", order_by=False, distinct=False)
def _no_criterion_assertion(self, meth, order_by=True, distinct=True):
if not self._enable_assertions:
return
if (
self._criterion is not None
or self._statement is not None
or self._from_obj
or self._limit is not None
or self._offset is not None
or self._group_by
or (order_by and self._order_by)
or (distinct and self._distinct)
):
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth
)
def _no_criterion_condition(self, meth, order_by=True, distinct=True):
self._no_criterion_assertion(meth, order_by, distinct)
self._from_obj = ()
self._statement = self._criterion = None
self._order_by = self._group_by = self._distinct = False
def _no_clauseelement_condition(self, meth):
if not self._enable_assertions:
return
if self._order_by:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth
)
self._no_criterion_condition(meth)
def _no_statement_condition(self, meth):
if not self._enable_assertions:
return
if self._statement is not None:
raise sa_exc.InvalidRequestError(
(
"Query.%s() being called on a Query with an existing full "
"statement - can't apply criterion."
)
% meth
)
def _no_limit_offset(self, meth):
if not self._enable_assertions:
return
if self._limit is not None or self._offset is not None:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a Query which already has LIMIT "
"or OFFSET applied. To modify the row-limited results of a "
" Query, call from_self() first. "
"Otherwise, call %s() before limit() or offset() "
"are applied." % (meth, meth)
)
def _get_options(
self,
populate_existing=None,
version_check=None,
only_load_props=None,
refresh_state=None,
identity_token=None,
):
if populate_existing:
self._populate_existing = populate_existing
if version_check:
self._version_check = version_check
if refresh_state:
self._refresh_state = refresh_state
if only_load_props:
self._only_load_props = set(only_load_props)
if identity_token:
self._refresh_identity_token = identity_token
return self
def _clone(self):
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
return q
@property
def statement(self):
"""The full SELECT statement represented by this Query.
The statement by default will not have disambiguating labels
applied to the construct unless with_labels(True) is called
first.
"""
stmt = self._compile_context(labels=self._with_labels).statement
if self._params:
stmt = stmt.params(self._params)
return stmt
def subquery(self, name=None, with_labels=False, reduce_columns=False):
"""return the full SELECT statement represented by
this :class:`_query.Query`, embedded within an
:class:`_expression.Alias`.
Eager JOIN generation within the query is disabled.
:param name: string name to be assigned as the alias;
this is passed through to :meth:`_expression.FromClause.alias`.
If ``None``, a name will be deterministically generated
at compile time.
:param with_labels: if True, :meth:`.with_labels` will be called
on the :class:`_query.Query` first to apply table-qualified labels
to all columns.
:param reduce_columns: if True,
:meth:`_expression.Select.reduce_columns` will
be called on the resulting :func:`_expression.select` construct,
to remove same-named columns where one also refers to the other
via foreign key or WHERE clause equivalence.
"""
q = self.enable_eagerloads(False)
if with_labels:
q = q.with_labels()
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name)
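# Usage sketch (illustrative only; assumes declaratively mapped ``User`` and
# ``Address`` classes with ``Address.user_id``, a ``Session`` named
# ``session``, and ``func`` imported from ``sqlalchemy``):
#
#     subq = (
#         session.query(Address.user_id, func.count(Address.id).label("count"))
#         .group_by(Address.user_id)
#         .subquery()
#     )
#     rows = (
#         session.query(User, subq.c.count)
#         .outerjoin(subq, User.id == subq.c.user_id)
#         .all()
#     )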
def cte(self, name=None, recursive=False):
r"""Return the full SELECT statement represented by this
:class:`_query.Query` represented as a common table expression (CTE).
Parameters and usage are the same as those of the
:meth:`_expression.SelectBase.cte` method; see that method for
further details.
Here is the `PostgreSQL WITH
RECURSIVE example
<http://www.postgresql.org/docs/8.4/static/queries-with.html>`_.
Note that, in this example, the ``included_parts`` cte and the
``incl_alias`` alias of it are Core selectables, which
means the columns are accessed via the ``.c.`` attribute. The
``parts_alias`` object is an :func:`_orm.aliased` instance of the
``Part`` entity, so column-mapped attributes are available
directly::
from sqlalchemy.orm import aliased
class Part(Base):
__tablename__ = 'part'
part = Column(String, primary_key=True)
sub_part = Column(String, primary_key=True)
quantity = Column(Integer)
included_parts = session.query(
Part.sub_part,
Part.part,
Part.quantity).\
filter(Part.part=="our part").\
cte(name="included_parts", recursive=True)
incl_alias = aliased(included_parts, name="pr")
parts_alias = aliased(Part, name="p")
included_parts = included_parts.union_all(
session.query(
parts_alias.sub_part,
parts_alias.part,
parts_alias.quantity).\
filter(parts_alias.part==incl_alias.c.sub_part)
)
q = session.query(
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).
label('total_quantity')
).\
group_by(included_parts.c.sub_part)
.. seealso::
:meth:`_expression.HasCTE.cte`
"""
return self.enable_eagerloads(False).statement.cte(
name=name, recursive=recursive
)
def label(self, name):
"""Return the full SELECT statement represented by this
:class:`_query.Query`, converted
to a scalar subquery with a label of the given name.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`.
"""
return self.enable_eagerloads(False).statement.label(name)
def as_scalar(self):
"""Return the full SELECT statement represented by this
:class:`_query.Query`, converted to a scalar subquery.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`.
"""
return self.enable_eagerloads(False).statement.as_scalar()
@property
def selectable(self):
"""Return the :class:`_expression.Select` object emitted by this
:class:`_query.Query`.
Used for :func:`_sa.inspect` compatibility, this is equivalent to::
query.enable_eagerloads(False).with_labels().statement
"""
return self.__clause_element__()
def __clause_element__(self):
return self.enable_eagerloads(False).with_labels().statement
@_generative()
def only_return_tuples(self, value):
"""When set to True, the query results will always be a tuple.
This is specifically for single element queries. The default is False.
.. versionadded:: 1.2.5
.. seealso::
:meth:`_query.Query.is_single_entity`
"""
self._only_return_tuples = value
@property
def is_single_entity(self):
"""Indicates if this :class:`_query.Query`
returns tuples or single entities.
Returns True if this query returns a single entity for each instance
in its result list, and False if this query returns a tuple of entities
for each result.
.. versionadded:: 1.3.11
.. seealso::
:meth:`_query.Query.only_return_tuples`
"""
return (
not self._only_return_tuples
and len(self._entities) == 1
and self._entities[0].supports_single_entity
)
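# Usage sketch (illustrative only; assumes a mapped ``User`` class and a
# ``Session`` named ``session``):
#
#     session.query(User).is_single_entity                           # True
#     session.query(User.id, User.name).is_single_entity             # False
#     session.query(User).only_return_tuples(True).is_single_entity  # False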
@_generative()
def enable_eagerloads(self, value):
"""Control whether or not eager joins and subqueries are
rendered.
When set to False, the returned Query will not render
eager joins regardless of :func:`~sqlalchemy.orm.joinedload`,
:func:`~sqlalchemy.orm.subqueryload` options
or mapper-level ``lazy='joined'``/``lazy='subquery'``
configurations.
This is used primarily when nesting the Query's
statement into a subquery or other
selectable, or when using :meth:`_query.Query.yield_per`.
"""
self._enable_eagerloads = value
def _no_yield_per(self, message):
raise sa_exc.InvalidRequestError(
"The yield_per Query option is currently not "
"compatible with %s eager loading. Please "
"specify lazyload('*') or query.enable_eagerloads(False) in "
"order to "
"proceed with query.yield_per()." % message
)
@_generative()
def with_labels(self):
"""Apply column labels to the return value of Query.statement.
Indicates that this Query's `statement` accessor should return
a SELECT statement that applies labels to all columns in the
form <tablename>_<columnname>; this is commonly used to
disambiguate columns from multiple tables which have the same
name.
When the `Query` actually issues SQL to load rows, it always
uses column labeling.
.. note:: The :meth:`_query.Query.with_labels` method *only* applies to
the output of :attr:`_query.Query.statement`, and *not* to any of
the result-row invoking systems of :class:`_query.Query` itself, e.g.
:meth:`_query.Query.first`, :meth:`_query.Query.all`, etc.
To execute
a query using :meth:`_query.Query.with_labels`, invoke the
:attr:`_query.Query.statement` using :meth:`.Session.execute`::
result = session.execute(query.with_labels().statement)
"""
self._with_labels = True
@_generative()
def enable_assertions(self, value):
"""Control whether assertions are generated.
When set to False, the returned Query will
not assert its state before certain operations,
including that LIMIT/OFFSET has not been applied
when filter() is called, no criterion exists
when get() is called, and no "from_statement()"
exists when filter()/order_by()/group_by() etc.
is called. This more permissive mode is used by
custom Query subclasses to specify criterion or
other modifiers outside of the usual usage patterns.
Care should be taken to ensure that the usage
pattern is even possible. A statement applied
by from_statement() will override any criterion
set by filter() or order_by(), for example.
"""
self._enable_assertions = value
@property
def whereclause(self):
"""A readonly attribute which returns the current WHERE criterion for
this Query.
This returned value is a SQL expression construct, or ``None`` if no
criterion has been established.
"""
return self._criterion
@_generative()
def _with_current_path(self, path):
"""indicate that this query applies to objects loaded
within a certain path.
Used by deferred loaders (see strategies.py) which transfer
query options from an originating query to a newly generated
query intended for the deferred load.
"""
self._current_path = path
@_generative(_no_clauseelement_condition)
def with_polymorphic(
self, cls_or_mappers, selectable=None, polymorphic_on=None
):
"""Load columns for inheriting classes.
:meth:`_query.Query.with_polymorphic` applies transformations
to the "main" mapped class represented by this :class:`_query.Query`.
The "main" mapped class here means the :class:`_query.Query`
object's first argument is a full class, i.e.
``session.query(SomeClass)``. These transformations allow additional
tables to be present in the FROM clause so that columns for a
joined-inheritance subclass are available in the query, both for the
purposes of load-time efficiency as well as the ability to use
these columns at query time.
See the documentation section :ref:`with_polymorphic` for
details on how this method is used.
"""
if not self._primary_entity:
raise sa_exc.InvalidRequestError(
"No primary mapper set up for this Query."
)
entity = self._entities[0]._clone()
self._entities = [entity] + self._entities[1:]
entity.set_with_polymorphic(
self,
cls_or_mappers,
selectable=selectable,
polymorphic_on=polymorphic_on,
)
@_generative()
def yield_per(self, count):
r"""Yield only ``count`` rows at a time.
The purpose of this method is, when fetching very large result sets
(> 10K rows), to batch results in sub-collections and yield them
out partially, so that the Python interpreter doesn't need to declare
very large areas of memory which is both time-consuming and leads
to excessive memory use. The performance from fetching hundreds of
thousands of rows can often double when a suitable yield-per setting
(e.g. approximately 1000) is used, even with DBAPIs that buffer
rows (which are most).
The :meth:`_query.Query.yield_per` method **is not compatible with
subqueryload eager loading or joinedload eager loading when
using collections**. It is potentially compatible with "select in"
eager loading, **provided the database driver supports multiple,
independent cursors** (pysqlite and psycopg2 are known to work,
MySQL and SQL Server ODBC drivers do not).
Therefore in some cases, it may be helpful to disable
eager loads, either unconditionally with
:meth:`_query.Query.enable_eagerloads`::
q = sess.query(Object).yield_per(100).enable_eagerloads(False)
Or more selectively using :func:`.lazyload`; such as with
an asterisk to specify the default loader scheme::
q = sess.query(Object).yield_per(100).\
options(lazyload('*'), joinedload(Object.some_related))
.. warning::
Use this method with caution; if the same instance is
present in more than one batch of rows, end-user changes
to attributes will be overwritten.
In particular, it's usually impossible to use this setting
with eagerly loaded collections (i.e. any lazy='joined' or
'subquery') since those collections will be cleared for a
new load when encountered in a subsequent result batch.
In the case of 'subquery' loading, the full result for all
rows is fetched which generally defeats the purpose of
:meth:`~sqlalchemy.orm.query.Query.yield_per`.
Also note that while
:meth:`~sqlalchemy.orm.query.Query.yield_per` will set the
``stream_results`` execution option to True, currently
this is only understood by
:mod:`~sqlalchemy.dialects.postgresql.psycopg2`,
:mod:`~sqlalchemy.dialects.mysql.mysqldb` and
:mod:`~sqlalchemy.dialects.mysql.pymysql` dialects
which will stream results using server side cursors
instead of pre-buffering all rows for this query. Other
DBAPIs **pre-buffer all rows** before making them
available. The memory use of raw database rows is much less
than that of an ORM-mapped object, but should still be taken into
consideration when benchmarking.
.. seealso::
:meth:`_query.Query.enable_eagerloads`
"""
self._yield_per = count
self._execution_options = self._execution_options.union(
{"stream_results": True, "max_row_buffer": count}
)
def get(self, ident):
"""Return an instance based on the given primary key identifier,
or ``None`` if not found.
E.g.::
my_user = session.query(User).get(5)
some_object = session.query(VersionedFoo).get((5, 10))
some_object = session.query(VersionedFoo).get(
{"id": 5, "version_id": 10})
:meth:`_query.Query.get` is special in that it provides direct
access to the identity map of the owning :class:`.Session`.
If the given primary key identifier is present
in the local identity map, the object is returned
directly from this collection and no SQL is emitted,
unless the object has been marked fully expired.
If not present,
a SELECT is performed in order to locate the object.
:meth:`_query.Query.get` also will perform a check if
the object is present in the identity map and
marked as expired - a SELECT
is emitted to refresh the object as well as to
ensure that the row is still present.
If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
:meth:`_query.Query.get` is only used to return a single
mapped instance, not multiple instances or
individual column constructs, and strictly
on a single primary key value. The originating
:class:`_query.Query` must be constructed in this way,
i.e. against a single mapped entity,
with no additional filtering criterion. Loading
options via :meth:`_query.Query.options` may be applied
however, and will be used if the object is not
yet locally present.
A lazy-loading, many-to-one attribute configured
by :func:`_orm.relationship`, using a simple
foreign-key-to-primary-key criterion, will also use an
operation equivalent to :meth:`_query.Query.get` in order to retrieve
the target value from the local identity map
before querying the database. See :doc:`/orm/loading_relationships`
for further details on relationship loading.
:param ident: A scalar, tuple, or dictionary representing the
primary key. For a composite (e.g. multiple column) primary key,
a tuple or dictionary should be passed.
For a single-column primary key, the scalar calling form is typically
the most expedient. If the primary key of a row is the value "5",
the call looks like::
my_object = query.get(5)
The tuple form contains primary key values typically in
the order in which they correspond to the mapped
:class:`_schema.Table`
object's primary key columns, or if the
:paramref:`_orm.Mapper.primary_key` configuration parameter were used,
in the order used for that parameter. For example, if the primary key
of a row is represented by the integer
digits "5, 10" the call would look like::
my_object = query.get((5, 10))
The dictionary form should include as keys the mapped attribute names
corresponding to each element of the primary key. If the mapped class
has the attributes ``id``, ``version_id`` as the attributes which
store the object's primary key value, the call would look like::
my_object = query.get({"id": 5, "version_id": 10})
.. versionadded:: 1.3 the :meth:`_query.Query.get`
method now optionally
accepts a dictionary of attribute names to values in order to
indicate a primary key identifier.
:return: The object instance, or ``None``.
"""
return self._get_impl(ident, loading.load_on_pk_identity)
def _identity_lookup(
self,
mapper,
primary_key_identity,
identity_token=None,
passive=attributes.PASSIVE_OFF,
lazy_loaded_from=None,
):
"""Locate an object in the identity map.
Given a primary key identity, constructs an identity key and then
looks in the session's identity map. If present, the object may
be run through unexpiration rules (e.g. load unloaded attributes,
check if was deleted).
For performance reasons, while the :class:`_query.Query` must be
instantiated, it may be instantiated with no entities, and the
mapper is passed::
obj = session.query()._identity_lookup(inspect(SomeClass), (1, ))
:param mapper: mapper in use
:param primary_key_identity: the primary key we are searching for, as
a tuple.
:param identity_token: identity token that should be used to create
the identity key. Used as is, however overriding subclasses can
repurpose this in order to interpret the value in a special way,
such as if None then look among multiple target tokens.
:param passive: passive load flag passed to
:func:`.loading.get_from_identity`, which impacts the behavior if
the object is found; the object may be validated and/or unexpired
if the flag allows for SQL to be emitted.
:param lazy_loaded_from: an :class:`.InstanceState` that is
specifically asking for this identity as a related identity. Used
for sharding schemes where there is a correspondence between an object
and a related object being lazy-loaded (or otherwise
relationship-loaded).
.. versionadded:: 1.2.9
:return: None if the object is not found in the identity map, *or*
if the object was unexpired and found to have been deleted.
if passive flags disallow SQL and the object is expired, returns
PASSIVE_NO_RESULT. In all other cases the instance is returned.
.. versionadded:: 1.2.7
"""
key = mapper.identity_key_from_primary_key(
primary_key_identity, identity_token=identity_token
)
return loading.get_from_identity(self.session, mapper, key, passive)
def _get_impl(self, primary_key_identity, db_load_fn, identity_token=None):
# convert composite types to individual args
if hasattr(primary_key_identity, "__composite_values__"):
primary_key_identity = primary_key_identity.__composite_values__()
mapper = self._only_full_mapper_zero("get")
is_dict = isinstance(primary_key_identity, dict)
if not is_dict:
primary_key_identity = util.to_list(
primary_key_identity, default=(None,)
)
if len(primary_key_identity) != len(mapper.primary_key):
raise sa_exc.InvalidRequestError(
"Incorrect number of values in identifier to formulate "
"primary key for query.get(); primary key columns are %s"
% ",".join("'%s'" % c for c in mapper.primary_key)
)
if is_dict:
try:
primary_key_identity = list(
primary_key_identity[prop.key]
for prop in mapper._identity_key_props
)
except KeyError as err:
util.raise_(
sa_exc.InvalidRequestError(
"Incorrect names of values in identifier to formulate "
"primary key for query.get(); primary key attribute "
"names are %s"
% ",".join(
"'%s'" % prop.key
for prop in mapper._identity_key_props
)
),
replace_context=err,
)
if (
not self._populate_existing
and not mapper.always_refresh
and self._for_update_arg is None
):
instance = self._identity_lookup(
mapper, primary_key_identity, identity_token=identity_token
)
if instance is not None:
self._get_existing_condition()
# reject calls for id in identity map but class
# mismatch.
if not issubclass(instance.__class__, mapper.class_):
return None
return instance
elif instance is attributes.PASSIVE_CLASS_MISMATCH:
return None
return db_load_fn(self, primary_key_identity)
@_generative()
def correlate(self, *args):
"""Return a :class:`_query.Query`
construct which will correlate the given
FROM clauses to that of an enclosing :class:`_query.Query` or
:func:`_expression.select`.
The method here accepts mapped classes, :func:`.aliased` constructs,
and :func:`.mapper` constructs as arguments, which are resolved into
expression constructs, in addition to appropriate expression
constructs.
The correlation arguments are ultimately passed to
:meth:`_expression.Select.correlate`
after coercion to expression constructs.
The correlation arguments take effect in such cases
as when :meth:`_query.Query.from_self` is used, or when
a subquery as returned by :meth:`_query.Query.subquery` is
embedded in another :func:`_expression.select` construct.
"""
for s in args:
if s is None:
self._correlate = self._correlate.union([None])
else:
self._correlate = self._correlate.union(
sql_util.surface_selectables(_interpret_as_from(s))
)
@_generative()
def autoflush(self, setting):
"""Return a Query with a specific 'autoflush' setting.
Note that a Session with autoflush=False will
not autoflush, even if this flag is set to True at the
Query level. Therefore this flag is usually used only
to disable autoflush for a specific Query.
"""
self._autoflush = setting
@_generative()
def populate_existing(self):
"""Return a :class:`_query.Query`
that will expire and refresh all instances
as they are loaded, or reused from the current :class:`.Session`.
:meth:`.populate_existing` does not improve behavior when
the ORM is used normally - the :class:`.Session` object's usual
behavior of maintaining a transaction and expiring all attributes
after rollback or commit handles object state automatically.
This method is not intended for general use.
"""
self._populate_existing = True
@_generative()
def _with_invoke_all_eagers(self, value):
"""Set the 'invoke all eagers' flag which causes joined- and
subquery loaders to traverse into already-loaded related objects
and collections.
Default is that of :attr:`_query.Query._invoke_all_eagers`.
"""
self._invoke_all_eagers = value
def with_parent(self, instance, property=None, from_entity=None): # noqa
"""Add filtering criterion that relates the given instance
to a child object or collection, using its attribute state
as well as an established :func:`_orm.relationship()`
configuration.
The method uses the :func:`.with_parent` function to generate
the clause, the result of which is passed to
:meth:`_query.Query.filter`.
Parameters are the same as :func:`.with_parent`, with the exception
that the given property can be None, in which case a search is
performed against this :class:`_query.Query` object's target mapper.
:param instance:
An instance which has some :func:`_orm.relationship`.
:param property:
String property name, or class-bound attribute, which indicates
what relationship from the instance should be used to reconcile the
parent/child relationship.
:param from_entity:
Entity in which to consider as the left side. This defaults to the
"zero" entity of the :class:`_query.Query` itself.
"""
if from_entity:
entity_zero = inspect(from_entity)
else:
entity_zero = self._entity_zero()
if property is None:
mapper = object_mapper(instance)
for prop in mapper.iterate_properties:
if (
isinstance(prop, properties.RelationshipProperty)
and prop.mapper is entity_zero.mapper
):
property = prop # noqa
break
else:
raise sa_exc.InvalidRequestError(
"Could not locate a property which relates instances "
"of class '%s' to instances of class '%s'"
% (
entity_zero.mapper.class_.__name__,
instance.__class__.__name__,
)
)
return self.filter(with_parent(instance, property, entity_zero.entity))
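# Usage sketch (illustrative only; assumes ``User`` has an ``addresses``
# relationship to ``Address`` and that ``some_user`` is a persistent ``User``
# instance):
#
#     session.query(Address).with_parent(some_user, "addresses").all()
#
#     # equivalently, using the class-bound attribute:
#     session.query(Address).with_parent(some_user, User.addresses).all()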
@_generative()
def add_entity(self, entity, alias=None):
"""add a mapped entity to the list of result columns
to be returned."""
if alias is not None:
entity = aliased(entity, alias)
self._entities = list(self._entities)
m = _MapperEntity(self, entity)
self._set_entity_selectables([m])
@_generative()
def with_session(self, session):
"""Return a :class:`_query.Query` that will use the given
:class:`.Session`.
While the :class:`_query.Query`
object is normally instantiated using the
:meth:`.Session.query` method, it is legal to build the
:class:`_query.Query`
directly without necessarily using a :class:`.Session`. Such a
:class:`_query.Query` object, or any :class:`_query.Query`
already associated
with a different :class:`.Session`, can produce a new
:class:`_query.Query`
object associated with a target session using this method::
from sqlalchemy.orm import Query
query = Query([MyClass]).filter(MyClass.id == 5)
result = query.with_session(my_session).one()
"""
self.session = session
def from_self(self, *entities):
r"""return a Query that selects from this Query's
SELECT statement.
:meth:`_query.Query.from_self` essentially turns the SELECT statement
into a SELECT of itself. Given a query such as::
q = session.query(User).filter(User.name.like('e%'))
Given the :meth:`_query.Query.from_self` version::
q = session.query(User).filter(User.name.like('e%')).from_self()
This query renders as:
.. sourcecode:: sql
SELECT anon_1.user_id AS anon_1_user_id,
anon_1.user_name AS anon_1_user_name
FROM (SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE "user".name LIKE :name_1) AS anon_1
There are lots of cases where :meth:`_query.Query.from_self`
may be useful.
A simple one is where above, we may want to apply a row LIMIT to
the set of user objects we query against, and then apply additional
joins against that row-limited set::
q = session.query(User).filter(User.name.like('e%')).\
limit(5).from_self().\
join(User.addresses).filter(Address.email.like('q%'))
The above query joins to the ``Address`` entity but only against the
first five results of the ``User`` query:
.. sourcecode:: sql
SELECT anon_1.user_id AS anon_1_user_id,
anon_1.user_name AS anon_1_user_name
FROM (SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE "user".name LIKE :name_1
LIMIT :param_1) AS anon_1
JOIN address ON anon_1.user_id = address.user_id
WHERE address.email LIKE :email_1
**Automatic Aliasing**
Another key behavior of :meth:`_query.Query.from_self`
is that it applies
**automatic aliasing** to the entities inside the subquery, when
they are referenced on the outside. Above, if we continue to
refer to the ``User`` entity without any additional aliasing applied
to it, those references will be in terms of the subquery::
q = session.query(User).filter(User.name.like('e%')).\
limit(5).from_self().\
join(User.addresses).filter(Address.email.like('q%')).\
order_by(User.name)
The ORDER BY against ``User.name`` is aliased to be in terms of the
inner subquery:
.. sourcecode:: sql
SELECT anon_1.user_id AS anon_1_user_id,
anon_1.user_name AS anon_1_user_name
FROM (SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE "user".name LIKE :name_1
LIMIT :param_1) AS anon_1
JOIN address ON anon_1.user_id = address.user_id
WHERE address.email LIKE :email_1 ORDER BY anon_1.user_name
The automatic aliasing feature only works in a **limited** way,
for simple filters and orderings. More ambitious constructions
such as referring to the entity in joins should prefer to use
explicit subquery objects, typically making use of the
:meth:`_query.Query.subquery`
method to produce an explicit subquery object.
Always test the structure of queries by viewing the SQL to ensure
a particular structure does what's expected!
**Changing the Entities**
:meth:`_query.Query.from_self`
also includes the ability to modify what
columns are being queried. In our example, we want ``User.id``
to be queried by the inner query, so that we can join to the
``Address`` entity on the outside, but we only wanted the outer
query to return the ``Address.email`` column::
q = session.query(User).filter(User.name.like('e%')).\
limit(5).from_self(Address.email).\
join(User.addresses).filter(Address.email.like('q%'))
yielding:
.. sourcecode:: sql
SELECT address.email AS address_email
FROM (SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE "user".name LIKE :name_1
LIMIT :param_1) AS anon_1
JOIN address ON anon_1.user_id = address.user_id
WHERE address.email LIKE :email_1
**Looking out for Inner / Outer Columns**
Keep in mind that when referring to columns that originate from
inside the subquery, we need to ensure they are present in the
columns clause of the subquery itself; this is an ordinary aspect of
SQL. For example, if we wanted to load from a joined entity inside
the subquery using :func:`.contains_eager`, we need to add those
columns. Below illustrates a join of ``Address`` to ``User``,
then a subquery, and then we'd like :func:`.contains_eager` to access
the ``User`` columns::
q = session.query(Address).join(Address.user).\
filter(User.name.like('e%'))
q = q.add_entity(User).from_self().\
options(contains_eager(Address.user))
We use :meth:`_query.Query.add_entity` above **before** we call
:meth:`_query.Query.from_self`
so that the ``User`` columns are present
in the inner subquery, so that they are available to the
:func:`.contains_eager` modifier we are using on the outside,
producing:
.. sourcecode:: sql
SELECT anon_1.address_id AS anon_1_address_id,
anon_1.address_email AS anon_1_address_email,
anon_1.address_user_id AS anon_1_address_user_id,
anon_1.user_id AS anon_1_user_id,
anon_1.user_name AS anon_1_user_name
FROM (
SELECT address.id AS address_id,
address.email AS address_email,
address.user_id AS address_user_id,
"user".id AS user_id,
"user".name AS user_name
FROM address JOIN "user" ON "user".id = address.user_id
WHERE "user".name LIKE :name_1) AS anon_1
If we didn't call ``add_entity(User)``, but still asked
:func:`.contains_eager` to load the ``User`` entity, it would be
forced to add the table on the outside without the correct
join criteria - note the ``anon_1, "user"`` phrase at
the end:
.. sourcecode:: sql
-- incorrect query
SELECT anon_1.address_id AS anon_1_address_id,
anon_1.address_email AS anon_1_address_email,
anon_1.address_user_id AS anon_1_address_user_id,
"user".id AS user_id,
"user".name AS user_name
FROM (
SELECT address.id AS address_id,
address.email AS address_email,
address.user_id AS address_user_id
FROM address JOIN "user" ON "user".id = address.user_id
WHERE "user".name LIKE :name_1) AS anon_1, "user"
:param \*entities: optional list of entities which will replace
those being selected.
"""
fromclause = (
self.with_labels()
.enable_eagerloads(False)
.statement.correlate(None)
)
q = self._from_selectable(fromclause)
q._enable_single_crit = False
q._select_from_entity = self._entity_zero()
if entities:
q._set_entities(entities)
return q
@_generative()
def _set_enable_single_crit(self, val):
self._enable_single_crit = val
@_generative()
def _from_selectable(self, fromclause):
for attr in (
"_statement",
"_criterion",
"_order_by",
"_group_by",
"_limit",
"_offset",
"_joinpath",
"_joinpoint",
"_distinct",
"_having",
"_prefixes",
"_suffixes",
):
self.__dict__.pop(attr, None)
self._set_select_from([fromclause], True)
# this enables clause adaptation for non-ORM
# expressions.
self._orm_only_from_obj_alias = False
old_entities = self._entities
self._entities = []
for e in old_entities:
e.adapt_to_selectable(self, self._from_obj[0])
def values(self, *columns):
"""Return an iterator yielding result tuples corresponding
to the given list of columns"""
if not columns:
return iter(())
q = self._clone()
q._set_entities(columns, entity_wrapper=_ColumnEntity)
if not q._yield_per:
q._yield_per = 10
return iter(q)
_values = values
def value(self, column):
"""Return a scalar result corresponding to the given
column expression."""
try:
return next(self.values(column))[0]
except StopIteration:
return None
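# Usage sketch (illustrative only; assumes a mapped ``User`` class, a
# ``Session`` named ``session`` and ``func`` imported from ``sqlalchemy``):
#
#     # iterate selected columns without constructing full User objects
#     for user_id, name in session.query(User).values(User.id, User.name):
#         print(user_id, name)
#
#     # single scalar result
#     highest_id = session.query(User).value(func.max(User.id))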
@_generative()
def with_entities(self, *entities):
r"""Return a new :class:`_query.Query`
replacing the SELECT list with the
given entities.
e.g.::
# Users, filtered on some arbitrary criterion
# and then ordered by related email address
q = session.query(User).\
join(User.address).\
filter(User.name.like('%ed%')).\
order_by(Address.email)
# given *only* User.id==5, Address.email, and 'q', what
# would the *next* User in the result be ?
subq = q.with_entities(Address.email).\
order_by(None).\
filter(User.id==5).\
subquery()
q = q.join((subq, subq.c.email < Address.email)).\
limit(1)
"""
self._set_entities(entities)
@_generative()
def add_columns(self, *column):
"""Add one or more column expressions to the list
of result columns to be returned."""
self._entities = list(self._entities)
l = len(self._entities)
for c in column:
_ColumnEntity(self, c)
# _ColumnEntity may add many entities if the
# given arg is a FROM clause
self._set_entity_selectables(self._entities[l:])
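# Usage sketch (illustrative only; assumes mapped ``User`` / ``Address``
# classes with a ``User.addresses`` relationship):
#
#     # each result row is a (User, email) tuple
#     q = (
#         session.query(User)
#         .add_columns(Address.email)
#         .join(User.addresses)
#     )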
@util.pending_deprecation(
"0.7",
":meth:`.add_column` is superseded " "by :meth:`.add_columns`",
False,
)
def add_column(self, column):
"""Add a column expression to the list of result columns to be
returned.
Pending deprecation: :meth:`.add_column` will be superseded by
:meth:`.add_columns`.
"""
return self.add_columns(column)
def options(self, *args):
"""Return a new :class:`_query.Query` object,
applying the given list of
mapper options.
Most supplied options regard changing how column- and
relationship-mapped attributes are loaded.
.. seealso::
:ref:`deferred_options`
:ref:`relationship_loader_options`
"""
return self._options(False, *args)
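# Illustrative usage sketch (not part of the Query implementation): applying
# a loader option; assumes ``User.addresses`` is a mapped relationship.
#
#     from sqlalchemy.orm import joinedload
#
#     q = session.query(User).options(joinedload(User.addresses))
#     users = q.all()   # each User.addresses collection is eagerly loaded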
def _conditional_options(self, *args):
return self._options(True, *args)
@_generative()
def _options(self, conditional, *args):
# most MapperOptions write to the '_attributes' dictionary,
# so copy that as well
self._attributes = self._attributes.copy()
if "_unbound_load_dedupes" not in self._attributes:
self._attributes["_unbound_load_dedupes"] = set()
opts = tuple(util.flatten_iterator(args))
self._with_options = self._with_options + opts
if conditional:
for opt in opts:
opt.process_query_conditionally(self)
else:
for opt in opts:
opt.process_query(self)
def with_transformation(self, fn):
"""Return a new :class:`_query.Query` object transformed by
the given function.
E.g.::
def filter_something(criterion):
def transform(q):
return q.filter(criterion)
return transform
q = q.with_transformation(filter_something(x==5))
This allows ad-hoc recipes to be created for :class:`_query.Query`
objects. See the example at :ref:`hybrid_transformers`.
"""
return fn(self)
@_generative()
def with_hint(self, selectable, text, dialect_name="*"):
"""Add an indexing or other executional context
hint for the given entity or selectable to
this :class:`_query.Query`.
Functionality is passed straight through to
:meth:`~sqlalchemy.sql.expression.Select.with_hint`,
with the addition that ``selectable`` can be a
:class:`_schema.Table`, :class:`_expression.Alias`,
or ORM entity / mapped class, etc.
.. seealso::
:meth:`_query.Query.with_statement_hint`
:meth:`.Query.prefix_with` - generic SELECT prefixing which also
can suit some database-specific HINT syntaxes such as MySQL
optimizer hints
"""
if selectable is not None:
selectable = inspect(selectable).selectable
self._with_hints += ((selectable, text, dialect_name),)
def with_statement_hint(self, text, dialect_name="*"):
"""add a statement hint to this :class:`_expression.Select`.
This method is similar to :meth:`_expression.Select.with_hint`
except that
it does not require an individual table, and instead applies to the
statement as a whole.
This feature calls down into
:meth:`_expression.Select.with_statement_hint`.
.. versionadded:: 1.0.0
.. seealso::
:meth:`_query.Query.with_hint`
"""
return self.with_hint(None, text, dialect_name)
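# Illustrative usage sketch (not part of the Query implementation): the hint
# text and index name below are hypothetical and backend-specific; consult
# the target dialect's documentation for the exact syntax it accepts.
#
#     # per-table hint, rendered only on the named dialect
#     q = session.query(User).with_hint(
#         User, "USE INDEX (ix_user_name)", dialect_name="mysql"
#     )
#
#     # per-statement hint, appended to the rendered statement as a whole
#     q = session.query(User).with_statement_hint(
#         "OPTION (RECOMPILE)", dialect_name="mssql"
#     )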
def get_execution_options(self):
""" Get the non-SQL options which will take effect during execution.
.. versionadded:: 1.3
.. seealso::
:meth:`_query.Query.execution_options`
"""
return self._execution_options
@_generative()
def execution_options(self, **kwargs):
""" Set non-SQL options which take effect during execution.
The options are the same as those accepted by
:meth:`_engine.Connection.execution_options`.
Note that the ``stream_results`` execution option is enabled
automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()`
method is used.
.. seealso::
:meth:`_query.Query.get_execution_options`
"""
self._execution_options = self._execution_options.union(kwargs)
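# Illustrative usage sketch (not part of the Query implementation):
#
#     q = session.query(User).execution_options(stream_results=True)
#     opts = q.get_execution_options()   # {'stream_results': True}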
@_generative()
@util.deprecated(
"0.9",
"The :meth:`_query.Query.with_lockmode` "
"method is deprecated and will "
"be removed in a future release. Please refer to "
":meth:`_query.Query.with_for_update`. ",
)
def with_lockmode(self, mode):
"""Return a new :class:`_query.Query`
object with the specified "locking mode",
which essentially refers to the ``FOR UPDATE`` clause.
:param mode: a string representing the desired locking mode.
Valid values are:
* ``None`` - translates to no lockmode
* ``'update'`` - translates to ``FOR UPDATE``
(standard SQL, supported by most dialects)
* ``'update_nowait'`` - translates to ``FOR UPDATE NOWAIT``
(supported by Oracle, PostgreSQL 8.1 upwards)
* ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL),
and ``FOR SHARE`` (for PostgreSQL)
.. seealso::
:meth:`_query.Query.with_for_update` - improved API for
specifying the ``FOR UPDATE`` clause.
"""
self._for_update_arg = LockmodeArg.parse_legacy_query(mode)
@_generative()
def with_for_update(
self,
read=False,
nowait=False,
of=None,
skip_locked=False,
key_share=False,
):
"""return a new :class:`_query.Query`
with the specified options for the
``FOR UPDATE`` clause.
The behavior of this method is identical to that of
:meth:`_expression.SelectBase.with_for_update`.
When called with no arguments,
the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause
appended. When additional arguments are specified, backend-specific
options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE``
can take effect.
E.g.::
q = sess.query(User).with_for_update(nowait=True, of=User)
The above query on a PostgreSQL backend will render like::
SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT
.. versionadded:: 0.9.0 :meth:`_query.Query.with_for_update`
supersedes
the :meth:`_query.Query.with_lockmode` method.
.. seealso::
:meth:`_expression.GenerativeSelect.with_for_update`
- Core level method with
full argument and behavioral description.
"""
self._for_update_arg = LockmodeArg(
read=read,
nowait=nowait,
of=of,
skip_locked=skip_locked,
key_share=key_share,
)
@_generative()
def params(self, *args, **kwargs):
r"""add values for bind parameters which may have been
specified in filter().
parameters may be specified using \**kwargs, or optionally a single
dictionary as the first positional argument. The reason for both is
that \**kwargs is convenient; however, some parameter dictionaries
contain unicode keys, in which case \**kwargs cannot be used.
"""
if len(args) == 1:
kwargs.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary."
)
self._params = self._params.copy()
self._params.update(kwargs)
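# Illustrative usage sketch (not part of the Query implementation): binding
# parameters for a textual filter; assumes ``from sqlalchemy import text``
# and a mapped ``User`` class.
#
#     q = session.query(User).filter(text("name = :uname")).params(uname="ed")
#
#     # equivalently, a single dictionary may be passed positionally:
#     q = session.query(User).filter(text("name = :uname")).params(
#         {"uname": "ed"})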
@_generative(_no_statement_condition, _no_limit_offset)
def filter(self, *criterion):
r"""apply the given filtering criterion to a copy
of this :class:`_query.Query`, using SQL expressions.
e.g.::
session.query(MyClass).filter(MyClass.name == 'some name')
Multiple criteria may be specified as comma separated; the effect
is that they will be joined together using the :func:`.and_`
function::
session.query(MyClass).\
filter(MyClass.name == 'some name', MyClass.id > 5)
The criterion is any SQL expression object applicable to the
WHERE clause of a select. String expressions are coerced
into SQL expression constructs via the :func:`_expression.text`
construct.
.. seealso::
:meth:`_query.Query.filter_by` - filter on keyword expressions.
"""
for criterion in list(criterion):
criterion = expression._expression_literal_as_text(criterion)
criterion = self._adapt_clause(criterion, True, True)
if self._criterion is not None:
self._criterion = self._criterion & criterion
else:
self._criterion = criterion
def filter_by(self, **kwargs):
r"""apply the given filtering criterion to a copy
of this :class:`_query.Query`, using keyword expressions.
e.g.::
session.query(MyClass).filter_by(name = 'some name')
Multiple criteria may be specified as comma separated; the effect
is that they will be joined together using the :func:`.and_`
function::
session.query(MyClass).\
filter_by(name = 'some name', id = 5)
The keyword expressions are extracted from the primary
entity of the query, or the last entity that was the
target of a call to :meth:`_query.Query.join`.
.. seealso::
:meth:`_query.Query.filter` - filter on SQL expressions.
"""
zero = self._joinpoint_zero()
if zero is None:
raise sa_exc.InvalidRequestError(
"Can't use filter_by when the first entity '%s' of a query "
"is not a mapped class. Please use the filter method instead, "
"or change the order of the entities in the query"
% self._query_entity_zero()
)
clauses = [
_entity_descriptor(zero, key) == value
for key, value in kwargs.items()
]
return self.filter(*clauses)
@_generative(_no_statement_condition, _no_limit_offset)
def order_by(self, *criterion):
"""apply one or more ORDER BY criterion to the query and return
the newly resulting ``Query``
All existing ORDER BY settings can be suppressed by
passing ``None`` - this will suppress any ordering configured
on the :func:`.mapper` object using the deprecated
:paramref:`.mapper.order_by` parameter.
"""
if len(criterion) == 1:
if criterion[0] is False:
if "_order_by" in self.__dict__:
self._order_by = False
return
if criterion[0] is None:
self._order_by = None
return
criterion = self._adapt_col_list(criterion)
if self._order_by is False or self._order_by is None:
self._order_by = criterion
else:
self._order_by = self._order_by + criterion
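# Illustrative usage sketch (not part of the Query implementation): applying
# an ordering, then cancelling all ORDER BY criteria before wrapping the
# query in a subquery, as many backends disallow ORDER BY inside subqueries.
#
#     q = session.query(User).order_by(User.name, User.id.desc())
#     subq = q.order_by(None).subquery()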
@_generative(_no_statement_condition, _no_limit_offset)
def group_by(self, *criterion):
"""apply one or more GROUP BY criterion to the query and return
the newly resulting :class:`_query.Query`
All existing GROUP BY settings can be suppressed by
passing ``None`` - this will suppress any GROUP BY configured
on mappers as well.
.. versionadded:: 1.1 GROUP BY can be cancelled by passing None,
in the same way as ORDER BY.
"""
if len(criterion) == 1:
if criterion[0] is None:
self._group_by = False
return
criterion = list(chain(*[_orm_columns(c) for c in criterion]))
criterion = self._adapt_col_list(criterion)
if self._group_by is False:
self._group_by = criterion
else:
self._group_by = self._group_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def having(self, criterion):
r"""apply a HAVING criterion to the query and return the
newly resulting :class:`_query.Query`.
:meth:`_query.Query.having` is used in conjunction with
:meth:`_query.Query.group_by`.
HAVING criterion makes it possible to use filters on aggregate
functions like COUNT, SUM, AVG, MAX, and MIN, e.g.::
q = session.query(User.id).\
join(User.addresses).\
group_by(User.id).\
having(func.count(Address.id) > 2)
"""
criterion = expression._expression_literal_as_text(criterion)
if criterion is not None and not isinstance(
criterion, sql.ClauseElement
):
raise sa_exc.ArgumentError(
"having() argument must be of type "
"sqlalchemy.sql.ClauseElement or string"
)
criterion = self._adapt_clause(criterion, True, True)
if self._having is not None:
self._having = self._having & criterion
else:
self._having = criterion
def _set_op(self, expr_fn, *q):
return self._from_selectable(
expr_fn(*([self] + list(q)))
)._set_enable_single_crit(False)
def union(self, *q):
"""Produce a UNION of this Query against one or more queries.
e.g.::
q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
q3 = q1.union(q2)
The method accepts multiple Query objects so as to control
the level of nesting. A series of ``union()`` calls such as::
x.union(y).union(z).all()
will nest on each ``union()``, and produces::
SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION
SELECT * FROM y) UNION SELECT * FROM Z)
Whereas::
x.union(y, z).all()
produces::
SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION
SELECT * FROM Z)
Note that many database backends do not allow ORDER BY to
be rendered on a query called within UNION, EXCEPT, etc.
To disable all ORDER BY clauses including those configured
on mappers, issue ``query.order_by(None)`` - the resulting
:class:`_query.Query` object will not render ORDER BY within
its SELECT statement.
"""
return self._set_op(expression.union, *q)
def union_all(self, *q):
"""Produce a UNION ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.union_all, *q)
def intersect(self, *q):
"""Produce an INTERSECT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.intersect, *q)
def intersect_all(self, *q):
"""Produce an INTERSECT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.intersect_all, *q)
def except_(self, *q):
"""Produce an EXCEPT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.except_, *q)
def except_all(self, *q):
"""Produce an EXCEPT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.except_all, *q)
def join(self, *props, **kwargs):
r"""Create a SQL JOIN against this :class:`_query.Query`
object's criterion
and apply generatively, returning the newly resulting
:class:`_query.Query`.
**Simple Relationship Joins**
Consider a mapping between two classes ``User`` and ``Address``,
with a relationship ``User.addresses`` representing a collection
of ``Address`` objects associated with each ``User``. The most
common usage of :meth:`_query.Query.join`
is to create a JOIN along this
relationship, using the ``User.addresses`` attribute as an indicator
for how this should occur::
q = session.query(User).join(User.addresses)
Where above, the call to :meth:`_query.Query.join` along
``User.addresses`` will result in SQL approximately equivalent to::
SELECT user.id, user.name
FROM user JOIN address ON user.id = address.user_id
In the above example we refer to ``User.addresses`` as passed to
:meth:`_query.Query.join` as the "on clause", that is, it indicates
how the "ON" portion of the JOIN should be constructed.
To construct a chain of joins, multiple :meth:`_query.Query.join`
calls may be used. The relationship-bound attribute implies both
the left and right side of the join at once::
q = session.query(User).\
join(User.orders).\
join(Order.items).\
join(Item.keywords)
.. note:: as seen in the above example, **the order in which each
call to the join() method occurs is important**. Query would not,
for example, know how to join correctly if we were to specify
``User``, then ``Item``, then ``Order``, in our chain of joins; in
such a case, depending on the arguments passed, it may raise an
error that it doesn't know how to join, or it may produce invalid
SQL in which case the database will raise an error. In correct
practice, the
:meth:`_query.Query.join` method is invoked in such a way that lines
up with how we would want the JOIN clauses in SQL to be
rendered, and each call should represent a clear link from what
precedes it.
**Joins to a Target Entity or Selectable**
A second form of :meth:`_query.Query.join` allows any mapped entity or
core selectable construct as a target. In this usage,
:meth:`_query.Query.join` will attempt to create a JOIN along the
natural foreign key relationship between two entities::
q = session.query(User).join(Address)
In the above calling form, :meth:`_query.Query.join` is called upon to
create the "on clause" automatically for us. This calling form will
ultimately raise an error if either there are no foreign keys between
the two entities, or if there are multiple foreign key linkages between
the target entity and the entity or entities already present on the
left side such that creating a join requires more information. Note
that when indicating a join to a target without any ON clause, ORM
configured relationships are not taken into account.
**Joins to a Target with an ON Clause**
The third calling form allows both the target entity as well
as the ON clause to be passed explicitly. An example that includes
a SQL expression as the ON clause is as follows::
q = session.query(User).join(Address, User.id==Address.user_id)
The above form may also use a relationship-bound attribute as the
ON clause as well::
q = session.query(User).join(Address, User.addresses)
The above syntax can be useful for the case where we wish
to join to an alias of a particular target entity. If we wanted
to join to ``Address`` twice, it could be achieved using two
aliases set up using the :func:`~sqlalchemy.orm.aliased` function::
a1 = aliased(Address)
a2 = aliased(Address)
q = session.query(User).\
join(a1, User.addresses).\
join(a2, User.addresses).\
filter(a1.email_address=='ed@foo.com').\
filter(a2.email_address=='ed@bar.com')
The relationship-bound calling form can also specify a target entity
using the :meth:`_orm.PropComparator.of_type` method; a query
equivalent to the one above would be::
a1 = aliased(Address)
a2 = aliased(Address)
q = session.query(User).\
join(User.addresses.of_type(a1)).\
join(User.addresses.of_type(a2)).\
filter(a1.email_address == 'ed@foo.com').\
filter(a2.email_address == 'ed@bar.com')
**Joining to Tables and Subqueries**
The target of a join may also be any table or SELECT statement,
which may be related to a target entity or not. Use the
appropriate ``.subquery()`` method in order to make a subquery
out of a query::
subq = session.query(Address).\
filter(Address.email_address == 'ed@foo.com').\
subquery()
q = session.query(User).join(
subq, User.id == subq.c.user_id
)
Joining to a subquery in terms of a specific relationship and/or
target entity may be achieved by linking the subquery to the
entity using :func:`_orm.aliased`::
subq = session.query(Address).\
filter(Address.email_address == 'ed@foo.com').\
subquery()
address_subq = aliased(Address, subq)
q = session.query(User).join(
User.addresses.of_type(address_subq)
)
**Controlling what to Join From**
In cases where the left side of the current state of
:class:`_query.Query` is not in line with what we want to join from,
the :meth:`_query.Query.select_from` method may be used::
q = session.query(Address).select_from(User).\
join(User.addresses).\
filter(User.name == 'ed')
Which will produce SQL similar to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
**Legacy Features of Query.join()**
The :meth:`_query.Query.join` method currently supports several
usage patterns and arguments that are considered to be legacy
as of SQLAlchemy 1.3. A deprecation path will follow
in the 1.4 series for the following features:
* Joining on relationship names rather than attributes::
session.query(User).join("addresses")
**Why it's legacy**: the string name does not provide enough context
for :meth:`_query.Query.join` to always know what is desired,
notably in that there is no indication of what the left side
of the join should be. This gives rise to flags like
``from_joinpoint`` as well as the ability to place several
join clauses in a single :meth:`_query.Query.join` call
which don't solve the problem fully while also
adding new calling styles that are unnecessary and expensive to
accommodate internally.
**Modern calling pattern**: Use the actual relationship,
e.g. ``User.addresses`` in the above case::
session.query(User).join(User.addresses)
* Automatic aliasing with the ``aliased=True`` flag::
session.query(Node).join(Node.children, aliased=True).\
filter(Node.name == 'some name')
**Why it's legacy**: the automatic aliasing feature of
:class:`_query.Query` is intensely complicated, both in its internal
implementation as well as in its observed behavior, and is almost
never used. It is difficult to know upon inspection where and when
its aliasing of a target entity, ``Node`` in the above case, will be
applied and when it won't, and additionally the feature has to use
very elaborate heuristics to achieve this implicit behavior.
**Modern calling pattern**: Use the :func:`_orm.aliased` construct
explicitly::
from sqlalchemy.orm import aliased
n1 = aliased(Node)
session.query(Node).join(Node.children.of_type(n1)).\
filter(n1.name == 'some name')
* Multiple joins in one call::
session.query(User).join("orders", "items")
session.query(User).join(User.orders, Order.items)
session.query(User).join(
(Order, User.orders),
(Item, Item.order_id == Order.id)
)
# ... and several more forms actually
**Why it's legacy**: being able to chain multiple ON clauses in one
call to :meth:`_query.Query.join` is yet another attempt to solve
the problem of being able to specify what entity to join from,
and is the source of a large variety of potential calling patterns
that are internally expensive and complicated to parse and
accommodate.
**Modern calling pattern**: Use relationship-bound attributes
or SQL-oriented ON clauses within separate calls, so that
each call to :meth:`_query.Query.join` knows what the left
side should be::
session.query(User).join(User.orders).join(
Item, Item.order_id == Order.id)
:param \*props: Incoming arguments for :meth:`_query.Query.join`.
In modern use, the props collection should be considered to be a one-
or two-argument form: either a single "target" entity or ORM
attribute-bound relationship, or a target entity plus an "on
clause" which may be a SQL expression or ORM attribute-bound
relationship.
:param isouter=False: If True, the join used will be a left outer join,
just as if the :meth:`_query.Query.outerjoin` method were called.
:param full=False: render FULL OUTER JOIN; implies ``isouter``.
.. versionadded:: 1.1
:param from_joinpoint=False: When using ``aliased=True``, a setting
of True here will cause the join to be from the most recent
joined target, rather than starting back from the original
FROM clauses of the query.
.. note:: This flag is considered legacy.
:param aliased=False: If True, indicate that the JOIN target should be
anonymously aliased. Subsequent calls to :meth:`_query.Query.filter`
and similar will adapt the incoming criterion to the target
alias, until :meth:`_query.Query.reset_joinpoint` is called.
.. note:: This flag is considered legacy.
.. seealso::
:ref:`ormtutorial_joins` in the ORM tutorial.
:ref:`inheritance_toplevel` for details on how
:meth:`_query.Query.join` is used for inheritance relationships.
:func:`_orm.join` - a standalone ORM-level join function,
used internally by :meth:`_query.Query.join`, which in previous
SQLAlchemy versions was the primary ORM-level joining interface.
"""
aliased, from_joinpoint, isouter, full = (
kwargs.pop("aliased", False),
kwargs.pop("from_joinpoint", False),
kwargs.pop("isouter", False),
kwargs.pop("full", False),
)
if kwargs:
raise TypeError(
"unknown arguments: %s" % ", ".join(sorted(kwargs))
)
return self._join(
props,
outerjoin=isouter,
full=full,
create_aliases=aliased,
from_joinpoint=from_joinpoint,
)
def outerjoin(self, *props, **kwargs):
"""Create a left outer join against this ``Query`` object's criterion
and apply generatively, returning the newly resulting ``Query``.
Usage is the same as the ``join()`` method.
"""
aliased, from_joinpoint, full = (
kwargs.pop("aliased", False),
kwargs.pop("from_joinpoint", False),
kwargs.pop("full", False),
)
if kwargs:
raise TypeError(
"unknown arguments: %s" % ", ".join(sorted(kwargs))
)
return self._join(
props,
outerjoin=True,
full=full,
create_aliases=aliased,
from_joinpoint=from_joinpoint,
)
def _update_joinpoint(self, jp):
self._joinpoint = jp
# copy backwards to the root of the _joinpath
# dict, so that no existing dict in the path is mutated
while "prev" in jp:
f, prev = jp["prev"]
prev = prev.copy()
prev[f] = jp.copy()
jp["prev"] = (f, prev)
jp = prev
self._joinpath = jp
@_generative(_no_statement_condition, _no_limit_offset)
def _join(self, keys, outerjoin, full, create_aliases, from_joinpoint):
"""consumes arguments from join() or outerjoin(), places them into a
consistent format with which to form the actual JOIN constructs.
"""
if not from_joinpoint:
self._reset_joinpoint()
if (
len(keys) == 2
and isinstance(
keys[0], (expression.FromClause, type, AliasedClass)
)
and isinstance(
keys[1],
(str, expression.ClauseElement, interfaces.PropComparator),
)
):
# detect 2-arg form of join and
# convert to a tuple.
keys = (keys,)
# Query.join() accepts a list of join paths all at once.
# step one is to iterate through these paths and determine the
# intent of each path individually. as we encounter a path token,
# we add a new ORMJoin construct to the self._from_obj tuple,
# either by adding a new element to it, or by replacing an existing
# element with a new ORMJoin.
keylist = util.to_list(keys)
for idx, arg1 in enumerate(keylist):
if isinstance(arg1, tuple):
# "tuple" form of join, multiple
# tuples are accepted as well. The simpler
# "2-arg" form is preferred.
arg1, arg2 = arg1
else:
arg2 = None
# determine onclause/right_entity. there
# is a little bit of legacy behavior still at work here
# which means they might be in either order.
if isinstance(
arg1, (interfaces.PropComparator, util.string_types)
):
right, onclause = arg2, arg1
else:
right, onclause = arg1, arg2
if onclause is None:
r_info = inspect(right)
if not r_info.is_selectable and not hasattr(r_info, "mapper"):
raise sa_exc.ArgumentError(
"Expected mapped entity or "
"selectable/table as join target"
)
if isinstance(onclause, interfaces.PropComparator):
of_type = getattr(onclause, "_of_type", None)
else:
of_type = None
if isinstance(onclause, util.string_types):
# string given, e.g. query(Foo).join("bar").
# we look to the left entity or what we last joined
# towards
onclause = _entity_descriptor(self._joinpoint_zero(), onclause)
# check for q.join(Class.propname, from_joinpoint=True)
# and Class corresponds at the mapper level to the current
# joinpoint. this match intentionally looks for a non-aliased
# class-bound descriptor as the onclause and if it matches the
# current joinpoint at the mapper level, it's used. This
# is a very old use case that is intended to make it easier
# to work with the aliased=True flag, which is also something
# that probably shouldn't exist on join() due to its high
# complexity/usefulness ratio
elif from_joinpoint and isinstance(
onclause, interfaces.PropComparator
):
jp0 = self._joinpoint_zero()
info = inspect(jp0)
if getattr(info, "mapper", None) is onclause._parententity:
onclause = _entity_descriptor(jp0, onclause.key)
if isinstance(onclause, interfaces.PropComparator):
# descriptor/property given (or determined); this tells
# us explicitly what the expected "left" side of the join is.
if right is None:
if of_type:
right = of_type
else:
right = onclause.property.entity
left = onclause._parententity
alias = self._polymorphic_adapters.get(left, None)
# could be None or could be ColumnAdapter also
if isinstance(alias, ORMAdapter) and alias.mapper.isa(left):
left = alias.aliased_class
onclause = getattr(left, onclause.key)
prop = onclause.property
if not isinstance(onclause, attributes.QueryableAttribute):
onclause = prop
if not create_aliases:
# check for this path already present.
# don't render in that case.
edge = (left, right, prop.key)
if edge in self._joinpoint:
# The child's prev reference might be stale --
# it could point to a parent older than the
# current joinpoint. If this is the case,
# then we need to update it and then fix the
# tree's spine with _update_joinpoint. Copy
# and then mutate the child, which might be
# shared by a different query object.
jp = self._joinpoint[edge].copy()
jp["prev"] = (edge, self._joinpoint)
self._update_joinpoint(jp)
# warn only on the last element of the list
if idx == len(keylist) - 1:
util.warn(
"Pathed join target %s has already "
"been joined to; skipping" % prop
)
continue
else:
# no descriptor/property given; we will need to figure out
# what the effective "left" side is
prop = left = None
# figure out the final "left" and "right" sides and create an
# ORMJoin to add to our _from_obj tuple
self._join_left_to_right(
left, right, onclause, prop, create_aliases, outerjoin, full
)
def _join_left_to_right(
self, left, right, onclause, prop, create_aliases, outerjoin, full
):
"""given raw "left", "right", "onclause" parameters consumed from
a particular key within _join(), add a real ORMJoin object to
our _from_obj list (or augment an existing one)
"""
self._polymorphic_adapters = self._polymorphic_adapters.copy()
if left is None:
# left not given (e.g. no relationship object/name specified)
# figure out the best "left" side based on our existing froms /
# entities
assert prop is None
(
left,
replace_from_obj_index,
use_entity_index,
) = self._join_determine_implicit_left_side(left, right, onclause)
else:
# left is given via a relationship/name. Determine where in our
# "froms" list it should be spliced/appended as well as what
# existing entity it corresponds to.
assert prop is not None
(
replace_from_obj_index,
use_entity_index,
) = self._join_place_explicit_left_side(left)
if left is right and not create_aliases:
raise sa_exc.InvalidRequestError(
"Can't construct a join from %s to %s, they "
"are the same entity" % (left, right)
)
# the right side as given often needs to be adapted. additionally
# a lot of things can be wrong with it. handle all that and
# get back the new effective "right" side
r_info, right, onclause = self._join_check_and_adapt_right_side(
left, right, onclause, prop, create_aliases
)
if replace_from_obj_index is not None:
# splice into an existing element in the
# self._from_obj list
left_clause = self._from_obj[replace_from_obj_index]
self._from_obj = (
self._from_obj[:replace_from_obj_index]
+ (
orm_join(
left_clause,
right,
onclause,
isouter=outerjoin,
full=full,
),
)
+ self._from_obj[replace_from_obj_index + 1 :]
)
else:
# add a new element to the self._from_obj list
if use_entity_index is not None:
# why doesn't this work as .entity_zero_or_selectable?
left_clause = self._entities[use_entity_index].selectable
else:
left_clause = left
self._from_obj = self._from_obj + (
orm_join(
left_clause, right, onclause, isouter=outerjoin, full=full
),
)
def _join_determine_implicit_left_side(self, left, right, onclause):
"""When join conditions don't express the left side explicitly,
determine if an existing FROM or entity in this query
can serve as the left hand side.
"""
# when we are here, it means join() was called without an ORM-
# specific way of telling us what the "left" side is, e.g.:
#
# join(RightEntity)
#
# or
#
# join(RightEntity, RightEntity.foo == LeftEntity.bar)
#
r_info = inspect(right)
replace_from_obj_index = use_entity_index = None
if self._from_obj:
# we have a list of FROMs already. So by definition this
# join has to connect to one of those FROMs.
indexes = sql_util.find_left_clause_to_join_from(
self._from_obj, r_info.selectable, onclause
)
if len(indexes) == 1:
replace_from_obj_index = indexes[0]
left = self._from_obj[replace_from_obj_index]
elif len(indexes) > 1:
raise sa_exc.InvalidRequestError(
"Can't determine which FROM clause to join "
"from, there are multiple FROMS which can "
"join to this entity. Please use the .select_from() "
"method to establish an explicit left side, as well as "
"providing an explicit ON clause if not present already to "
"help resolve the ambiguity."
)
else:
raise sa_exc.InvalidRequestError(
"Don't know how to join to %r. "
"Please use the .select_from() "
"method to establish an explicit left side, as well as "
"providing an explicit ON clause if not present already to "
"help resolve the ambiguity." % (right,)
)
elif self._entities:
# we have no explicit FROMs, so the implicit left has to
# come from our list of entities.
potential = {}
for entity_index, ent in enumerate(self._entities):
entity = ent.entity_zero_or_selectable
if entity is None:
continue
ent_info = inspect(entity)
if ent_info is r_info: # left and right are the same, skip
continue
# by using a dictionary with the selectables as keys this
# de-duplicates those selectables as occurs when the query is
# against a series of columns from the same selectable
if isinstance(ent, _MapperEntity):
potential[ent.selectable] = (entity_index, entity)
else:
potential[ent_info.selectable] = (None, entity)
all_clauses = list(potential.keys())
indexes = sql_util.find_left_clause_to_join_from(
all_clauses, r_info.selectable, onclause
)
if len(indexes) == 1:
use_entity_index, left = potential[all_clauses[indexes[0]]]
elif len(indexes) > 1:
raise sa_exc.InvalidRequestError(
"Can't determine which FROM clause to join "
"from, there are multiple FROMS which can "
"join to this entity. Please use the .select_from() "
"method to establish an explicit left side, as well as "
"providing an explicit ON clause if not present already to "
"help resolve the ambiguity."
)
else:
raise sa_exc.InvalidRequestError(
"Don't know how to join to %r. "
"Please use the .select_from() "
"method to establish an explicit left side, as well as "
"providing an explicit ON clause if not present already to "
"help resolve the ambiguity." % (right,)
)
else:
raise sa_exc.InvalidRequestError(
"No entities to join from; please use "
"select_from() to establish the left "
"entity/selectable of this join"
)
return left, replace_from_obj_index, use_entity_index
def _join_place_explicit_left_side(self, left):
"""When join conditions express a left side explicitly, determine
where in our existing list of FROM clauses we should join towards,
or if we need to make a new join, and if so is it from one of our
existing entities.
"""
# when we are here, it means join() was called with an indicator
# as to an exact left side, which means a path to a
# RelationshipProperty was given, e.g.:
#
# join(RightEntity, LeftEntity.right)
#
# or
#
# join(LeftEntity.right)
#
# as well as string forms:
#
# join(RightEntity, "right")
#
# etc.
#
replace_from_obj_index = use_entity_index = None
l_info = inspect(left)
if self._from_obj:
indexes = sql_util.find_left_clause_that_matches_given(
self._from_obj, l_info.selectable
)
if len(indexes) > 1:
raise sa_exc.InvalidRequestError(
"Can't identify which entity in which to assign the "
"left side of this join. Please use a more specific "
"ON clause."
)
# have an index, means the left side is already present in
# an existing FROM in the self._from_obj tuple
if indexes:
replace_from_obj_index = indexes[0]
# no index, means we need to add a new element to the
# self._from_obj tuple
# no from element present, so we will have to add to the
# self._from_obj tuple. Determine if this left side matches up
# with existing mapper entities, in which case we want to apply the
# aliasing / adaptation rules present on that entity if any
if (
replace_from_obj_index is None
and self._entities
and hasattr(l_info, "mapper")
):
for idx, ent in enumerate(self._entities):
# TODO: should we be checking for multiple mapper entities
# matching?
if isinstance(ent, _MapperEntity) and ent.corresponds_to(left):
use_entity_index = idx
break
return replace_from_obj_index, use_entity_index
def _join_check_and_adapt_right_side(
self, left, right, onclause, prop, create_aliases
):
"""transform the "right" side of the join as well as the onclause
according to polymorphic mapping translations, aliasing on the query
or on the join, special cases where the right and left side have
overlapping tables.
"""
l_info = inspect(left)
r_info = inspect(right)
overlap = False
if not create_aliases:
right_mapper = getattr(r_info, "mapper", None)
# if the target is a joined inheritance mapping,
# be more liberal about auto-aliasing.
if right_mapper and (
right_mapper.with_polymorphic
or isinstance(right_mapper.persist_selectable, expression.Join)
):
for from_obj in self._from_obj or [l_info.selectable]:
if sql_util.selectables_overlap(
l_info.selectable, from_obj
) and sql_util.selectables_overlap(
from_obj, r_info.selectable
):
overlap = True
break
if (
overlap or not create_aliases
) and l_info.selectable is r_info.selectable:
raise sa_exc.InvalidRequestError(
"Can't join table/selectable '%s' to itself"
% l_info.selectable
)
right_mapper, right_selectable, right_is_aliased = (
getattr(r_info, "mapper", None),
r_info.selectable,
getattr(r_info, "is_aliased_class", False),
)
if (
right_mapper
and prop
and not right_mapper.common_parent(prop.mapper)
):
raise sa_exc.InvalidRequestError(
"Join target %s does not correspond to "
"the right side of join condition %s" % (right, onclause)
)
# _join_entities is used as a hint for single-table inheritance
# purposes at the moment
if hasattr(r_info, "mapper"):
self._join_entities += (r_info,)
need_adapter = False
# test for joining to an unmapped selectable as the target
if r_info.is_clause_element:
if prop:
right_mapper = prop.mapper
if right_selectable._is_lateral:
# orm_only is disabled to suit the case where we have to
# adapt an explicit correlate(Entity) - the select() loses
# the ORM-ness in this case right now, ideally it would not
right = self._adapt_clause(right, True, False)
elif prop:
# joining to selectable with a mapper property given
# as the ON clause
if not right_selectable.is_derived_from(
right_mapper.persist_selectable
):
raise sa_exc.InvalidRequestError(
"Selectable '%s' is not derived from '%s'"
% (
right_selectable.description,
right_mapper.persist_selectable.description,
)
)
# if the destination selectable is a plain select(),
# turn it into an alias().
if isinstance(right_selectable, expression.SelectBase):
right_selectable = right_selectable.alias()
need_adapter = True
# make the right hand side target into an ORM entity
right = aliased(right_mapper, right_selectable)
elif create_aliases:
# it *could* work, but it doesn't right now and I'd rather
# get rid of aliased=True completely
raise sa_exc.InvalidRequestError(
"The aliased=True parameter on query.join() only works "
"with an ORM entity, not a plain selectable, as the "
"target."
)
aliased_entity = (
right_mapper
and not right_is_aliased
and (
right_mapper.with_polymorphic
and isinstance(
right_mapper._with_polymorphic_selectable, expression.Alias
)
or overlap
# test for overlap:
# orm/inheritance/relationships.py
# SelfReferentialM2MTest
)
)
if not need_adapter and (create_aliases or aliased_entity):
right = aliased(right, flat=True)
need_adapter = True
if need_adapter:
assert right_mapper
# if an alias() of the right side was generated,
# apply an adapter to all subsequent filter() calls
# until reset_joinpoint() is called.
adapter = ORMAdapter(
right, equivalents=right_mapper._equivalent_columns
)
# current adapter takes highest precedence
self._filter_aliases = (adapter,) + self._filter_aliases
# if an alias() on the right side was generated,
# which is intended to wrap the right side in a subquery,
# ensure that columns retrieved from this target in the result
# set are also adapted.
if not create_aliases:
self._mapper_loads_polymorphically_with(right_mapper, adapter)
# if the onclause is a ClauseElement, adapt it with any
# adapters that are in place right now
if isinstance(onclause, expression.ClauseElement):
onclause = self._adapt_clause(onclause, True, True)
# if joining on a MapperProperty path,
# track the path to prevent redundant joins
if not create_aliases and prop:
self._update_joinpoint(
{
"_joinpoint_entity": right,
"prev": ((left, right, prop.key), self._joinpoint),
}
)
else:
self._joinpoint = {"_joinpoint_entity": right}
return right, inspect(right), onclause
def _reset_joinpoint(self):
self._joinpoint = self._joinpath
self._filter_aliases = ()
@_generative(_no_statement_condition)
def reset_joinpoint(self):
"""Return a new :class:`_query.Query`, where the "join point" has
been reset back to the base FROM entities of the query.
This method is usually used in conjunction with the
``aliased=True`` feature of the :meth:`_query.Query.join`
method. See the example in :meth:`_query.Query.join` for how
this is used.
"""
self._reset_joinpoint()
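# Illustrative usage sketch (not part of the Query implementation): the
# legacy ``aliased=True`` / reset_joinpoint() pattern described above;
# assumes a self-referential ``Node`` mapping with a ``children``
# relationship.
#
#     q = (
#         session.query(Node)
#         .join(Node.children, aliased=True)
#         .filter(Node.name == "child name")   # adapted to the alias
#         .reset_joinpoint()
#         .filter(Node.name == "root name")    # back to the base Node
#     )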
@_generative(_no_clauseelement_condition)
def select_from(self, *from_obj):
r"""Set the FROM clause of this :class:`_query.Query` explicitly.
:meth:`_query.Query.select_from` is often used in conjunction with
:meth:`_query.Query.join` in order to control which entity is selected
from on the "left" side of the join.
The entity or selectable object here effectively replaces the
"left edge" of any calls to :meth:`_query.Query.join`, when no
joinpoint is otherwise established - usually, the default "join
point" is the leftmost entity in the :class:`_query.Query` object's
list of entities to be selected.
A typical example::
q = session.query(Address).select_from(User).\
join(User.addresses).\
filter(User.name == 'ed')
Which produces SQL equivalent to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
:param \*from_obj: collection of one or more entities to apply
to the FROM clause. Entities can be mapped classes,
:class:`.AliasedClass` objects, :class:`_orm.Mapper` objects
as well as core :class:`_expression.FromClause`
elements like subqueries.
.. versionchanged:: 0.9
This method no longer applies the given FROM object
to be the selectable from which matching entities
select from; the :meth:`.select_entity_from` method
now accomplishes this. See that method for a description
of this behavior.
.. seealso::
:meth:`_query.Query.join`
:meth:`_query.Query.select_entity_from`
"""
self._set_select_from(from_obj, False)
@_generative(_no_clauseelement_condition)
def select_entity_from(self, from_obj):
r"""Set the FROM clause of this :class:`_query.Query` to a
core selectable, applying it as a replacement FROM clause
for corresponding mapped entities.
The :meth:`_query.Query.select_entity_from`
method supplies an alternative
approach to the use case of applying an :func:`.aliased` construct
explicitly throughout a query. Instead of referring to the
:func:`.aliased` construct explicitly,
:meth:`_query.Query.select_entity_from` automatically *adapts* all
occurrences of the entity to the target selectable.
Given a case for :func:`.aliased` such as selecting ``User``
objects from a SELECT statement::
select_stmt = select([User]).where(User.id == 7)
user_alias = aliased(User, select_stmt)
q = session.query(user_alias).\
filter(user_alias.name == 'ed')
Above, we apply the ``user_alias`` object explicitly throughout the
query. When it's not feasible for ``user_alias`` to be referenced
explicitly in many places, :meth:`_query.Query.select_entity_from`
may be
used at the start of the query to adapt the existing ``User`` entity::
q = session.query(User).\
select_entity_from(select_stmt).\
filter(User.name == 'ed')
Above, the generated SQL will show that the ``User`` entity is
adapted to our statement, even in the case of the WHERE clause:
.. sourcecode:: sql
SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
FROM (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE anon_1.name = :name_1
The :meth:`_query.Query.select_entity_from` method is similar to the
:meth:`_query.Query.select_from` method,
in that it sets the FROM clause
of the query. The difference is that it additionally applies
adaptation to the other parts of the query that refer to the
primary entity. If above we had used :meth:`_query.Query.select_from`
instead, the SQL generated would have been:
.. sourcecode:: sql
-- uses plain select_from(), not select_entity_from()
SELECT "user".id AS user_id, "user".name AS user_name
FROM "user", (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE "user".name = :name_1
To supply textual SQL to the :meth:`_query.Query.select_entity_from`
method,
we can make use of the :func:`_expression.text` construct. However,
the
:func:`_expression.text`
construct needs to be aligned with the columns of our
entity, which is achieved by making use of the
:meth:`_expression.TextClause.columns` method::
text_stmt = text("select id, name from user").columns(
User.id, User.name)
q = session.query(User).select_entity_from(text_stmt)
:meth:`_query.Query.select_entity_from` itself accepts an
:func:`.aliased`
object, so that the special options of :func:`.aliased` such as
:paramref:`.aliased.adapt_on_names` may be used within the
scope of the :meth:`_query.Query.select_entity_from`
method's adaptation
services. Suppose
a view ``user_view`` also returns rows from ``user``. If
we reflect this view into a :class:`_schema.Table`, this view has no
relationship to the :class:`_schema.Table` to which we are mapped;
however, we can use name matching to select from it::
user_view = Table('user_view', metadata,
autoload_with=engine)
user_view_alias = aliased(
User, user_view, adapt_on_names=True)
q = session.query(User).\
select_entity_from(user_view_alias).\
order_by(User.name)
.. versionchanged:: 1.1.7 The :meth:`_query.Query.select_entity_from`
method now accepts an :func:`.aliased` object as an alternative
to a :class:`_expression.FromClause` object.
:param from_obj: a :class:`_expression.FromClause`
object that will replace
the FROM clause of this :class:`_query.Query`.
It also may be an instance
of :func:`.aliased`.
.. seealso::
:meth:`_query.Query.select_from`
"""
self._set_select_from([from_obj], True)
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = util.decode_slice(item)
if (
isinstance(stop, int)
and isinstance(start, int)
and stop - start <= 0
):
return []
# perhaps we should execute a count() here so that we
# can still use LIMIT/OFFSET ?
elif (isinstance(start, int) and start < 0) or (
isinstance(stop, int) and stop < 0
):
return list(self)[item]
res = self.slice(start, stop)
if step is not None:
return list(res)[None : None : item.step]
else:
return list(res)
else:
if item == -1:
return list(self)[-1]
else:
return list(self[item : item + 1])[0]
@_generative(_no_statement_condition)
def slice(self, start, stop):
"""Computes the "slice" of the :class:`_query.Query` represented by
the given indices and returns the resulting :class:`_query.Query`.
The start and stop indices behave like the arguments to Python's
built-in :func:`range` function. This method provides an
alternative to using ``LIMIT``/``OFFSET`` to get a slice of the
query.
For example, ::
session.query(User).order_by(User.id).slice(1, 3)
renders as
.. sourcecode:: sql
SELECT users.id AS users_id,
users.name AS users_name
FROM users ORDER BY users.id
LIMIT ? OFFSET ?
(2, 1)
.. seealso::
:meth:`_query.Query.limit`
:meth:`_query.Query.offset`
"""
if start is not None and stop is not None:
self._offset = self._offset if self._offset is not None else 0
if start != 0:
self._offset += start
self._limit = stop - start
elif start is None and stop is not None:
self._limit = stop
elif start is not None and stop is None:
self._offset = self._offset if self._offset is not None else 0
if start != 0:
self._offset += start
if isinstance(self._offset, int) and self._offset == 0:
self._offset = None
@_generative(_no_statement_condition)
def limit(self, limit):
"""Apply a ``LIMIT`` to the query and return the newly resulting
``Query``.
"""
self._limit = limit
@_generative(_no_statement_condition)
def offset(self, offset):
"""Apply an ``OFFSET`` to the query and return the newly resulting
``Query``.
"""
self._offset = offset
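# Illustrative usage sketch (not part of the Query implementation): simple
# LIMIT/OFFSET pagination; an ORDER BY is applied so that pages are stable.
#
#     page = (
#         session.query(User)
#         .order_by(User.id)
#         .limit(20)
#         .offset(40)
#         .all()
#     )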
@_generative(_no_statement_condition)
def distinct(self, *expr):
r"""Apply a ``DISTINCT`` to the query and return the newly resulting
``Query``.
.. note::
The :meth:`.distinct` call includes logic that will automatically
add columns from the ORDER BY of the query to the columns
clause of the SELECT statement, to satisfy the common need
of the database backend that ORDER BY columns be part of the
SELECT list when DISTINCT is used. These columns *are not*
added to the list of columns actually fetched by the
:class:`_query.Query`, however, so would not affect results.
The columns are passed through when using the
:attr:`_query.Query.statement` accessor, however.
:param \*expr: optional column expressions. When present,
the PostgreSQL dialect will render a ``DISTINCT ON (<expressions>)``
construct.
"""
if not expr:
self._distinct = True
else:
expr = self._adapt_col_list(expr)
if isinstance(self._distinct, list):
self._distinct += expr
else:
self._distinct = expr
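# Illustrative usage sketch (not part of the Query implementation):
#
#     # plain DISTINCT
#     names = session.query(User.name).distinct().all()
#
#     # DISTINCT ON (user.name); the expression form is rendered by the
#     # PostgreSQL dialect only
#     q = session.query(User).distinct(User.name).order_by(User.name, User.id)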
@_generative()
def prefix_with(self, *prefixes):
r"""Apply the prefixes to the query and return the newly resulting
``Query``.
:param \*prefixes: optional prefixes, typically strings,
not using any commas. This is particularly useful for MySQL keywords
and optimizer hints:
e.g.::
query = sess.query(User.name).\
prefix_with('HIGH_PRIORITY').\
prefix_with('SQL_SMALL_RESULT', 'ALL').\
prefix_with('/*+ BKA(user) */')
Would render::
SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL /*+ BKA(user) */
users.name AS users_name FROM users
.. seealso::
:meth:`_expression.HasPrefixes.prefix_with`
"""
if self._prefixes:
self._prefixes += prefixes
else:
self._prefixes = prefixes
@_generative()
def suffix_with(self, *suffixes):
r"""Apply the suffix to the query and return the newly resulting
``Query``.
:param \*suffixes: optional suffixes, typically strings,
not using any commas.
.. versionadded:: 1.0.0
.. seealso::
:meth:`_query.Query.prefix_with`
:meth:`_expression.HasSuffixes.suffix_with`
"""
if self._suffixes:
self._suffixes += suffixes
else:
self._suffixes = suffixes
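# Illustrative usage sketch (not part of the Query implementation): the
# suffix shown is a hypothetical backend-specific clause appended verbatim
# after the rendered statement; for row locking specifically,
# Query.with_for_update() is the supported API.
#
#     q = session.query(User.name).suffix_with("FOR UPDATE SKIP LOCKED")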
def all(self):
"""Return the results represented by this :class:`_query.Query`
as a list.
This results in an execution of the underlying SQL statement.
.. warning:: The :class:`_query.Query` object,
when asked to return either
a sequence or iterator that consists of full ORM-mapped entities,
will **deduplicate entries based on primary key**. See the FAQ for
more details.
.. seealso::
:ref:`faq_query_deduplicating`
"""
return list(self)
@_generative(_no_clauseelement_condition)
def from_statement(self, statement):
"""Execute the given SELECT statement and return results.
This method bypasses all internal statement compilation, and the
statement is executed without modification.
The statement is typically either a :func:`_expression.text`
or :func:`_expression.select` construct, and should return the set
of columns
appropriate to the entity class represented by this
:class:`_query.Query`.
.. seealso::
:ref:`orm_tutorial_literal_sql` - usage examples in the
ORM tutorial
"""
statement = expression._expression_literal_as_text(statement)
if not isinstance(
statement, (expression.TextClause, expression.SelectBase)
):
raise sa_exc.ArgumentError(
"from_statement accepts text(), select(), "
"and union() objects only."
)
self._statement = statement
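# Illustrative usage sketch (not part of the Query implementation): loading
# ``User`` objects from hand-written SQL; assumes the mapped table is named
# "users" and that ``from sqlalchemy import text`` is in scope.
#
#     stmt = text("SELECT * FROM users WHERE users.name = :name")
#     users = session.query(User).from_statement(stmt).params(name="ed").all()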
def first(self):
"""Return the first result of this ``Query`` or
None if the result doesn't contain any row.
first() applies a limit of one within the generated SQL, so that
only one primary entity row is generated on the server side
(note this may consist of multiple result rows if join-loaded
collections are present).
Calling :meth:`_query.Query.first`
results in an execution of the underlying
query.
.. seealso::
:meth:`_query.Query.one`
:meth:`_query.Query.one_or_none`
"""
if self._statement is not None:
ret = list(self)[0:1]
else:
ret = list(self[0:1])
if len(ret) > 0:
return ret[0]
else:
return None
def one_or_none(self):
"""Return at most one result or raise an exception.
Returns ``None`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that returns only scalar values
as opposed to full identity-mapped entities.
Calling :meth:`_query.Query.one_or_none`
results in an execution of the
underlying query.
.. versionadded:: 1.0.9
Added :meth:`_query.Query.one_or_none`
.. seealso::
:meth:`_query.Query.first`
:meth:`_query.Query.one`
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
return None
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one_or_none()"
)
def one(self):
"""Return exactly one result or raise an exception.
Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that returns only scalar values
as opposed to full identity-mapped entities.
Calling :meth:`.one` results in an execution of the underlying query.
.. seealso::
:meth:`_query.Query.first`
:meth:`_query.Query.one_or_none`
"""
try:
ret = self.one_or_none()
except orm_exc.MultipleResultsFound as err:
util.raise_(
orm_exc.MultipleResultsFound(
"Multiple rows were found for one()"
),
replace_context=err,
)
else:
if ret is None:
raise orm_exc.NoResultFound("No row was found for one()")
return ret
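# Illustrative usage sketch (not part of the Query implementation):
#
#     from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
#
#     try:
#         user = session.query(User).filter(User.id == 5).one()
#     except NoResultFound:
#         user = None            # same outcome as .one_or_none()
#     except MultipleResultsFound:
#         raise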
def scalar(self):
"""Return the first element of the first result or None
if no rows are present. If multiple rows are returned,
raises MultipleResultsFound.
>>> session.query(Item).scalar()
<Item>
>>> session.query(Item.id).scalar()
1
>>> session.query(Item.id).filter(Item.id < 0).scalar()
None
>>> session.query(Item.id, Item.name).scalar()
1
>>> session.query(func.count(Parent.id)).scalar()
20
This results in an execution of the underlying query.
"""
try:
ret = self.one()
if not isinstance(ret, tuple):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
def __iter__(self):
context = self._compile_context()
context.statement.use_labels = True
if self._autoflush and not self._populate_existing:
self.session._autoflush()
return self._execute_and_instances(context)
def __str__(self):
context = self._compile_context()
try:
bind = (
self._get_bind_args(context, self.session.get_bind)
if self.session
else None
)
except sa_exc.UnboundExecutionError:
bind = None
return str(context.statement.compile(bind))
def _connection_from_session(self, **kw):
conn = self.session.connection(**kw)
if self._execution_options:
conn = conn.execution_options(**self._execution_options)
return conn
def _execute_and_instances(self, querycontext):
conn = self._get_bind_args(
querycontext, self._connection_from_session, close_with_result=True
)
result = conn.execute(querycontext.statement, self._params)
return loading.instances(querycontext.query, result, querycontext)
def _execute_crud(self, stmt, mapper):
conn = self._connection_from_session(
mapper=mapper, clause=stmt, close_with_result=True
)
return conn.execute(stmt, self._params)
def _get_bind_args(self, querycontext, fn, **kw):
return fn(
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
)
@property
def column_descriptions(self):
"""Return metadata about the columns which would be
returned by this :class:`_query.Query`.
Format is a list of dictionaries::
user_alias = aliased(User, name='user2')
q = sess.query(User, User.id, user_alias)
# this expression:
q.column_descriptions
# would return:
[
{
'name':'User',
'type':User,
'aliased':False,
'expr':User,
'entity': User
},
{
'name':'id',
'type':Integer(),
'aliased':False,
'expr':User.id,
'entity': User
},
{
'name':'user2',
'type':User,
'aliased':True,
'expr':user_alias,
'entity': user_alias
}
]
"""
return [
{
"name": ent._label_name,
"type": ent.type,
"aliased": getattr(insp_ent, "is_aliased_class", False),
"expr": ent.expr,
"entity": getattr(insp_ent, "entity", None)
if ent.entity_zero is not None
and not insp_ent.is_clause_element
else None,
}
for ent, insp_ent in [
(
_ent,
(
inspect(_ent.entity_zero)
if _ent.entity_zero is not None
else None
),
)
for _ent in self._entities
]
]
def instances(self, cursor, __context=None):
"""Given a ResultProxy cursor as returned by connection.execute(),
return an ORM result as an iterator.
e.g.::
result = engine.execute("select * from users")
for u in session.query(User).instances(result):
print(u)
"""
context = __context
if context is None:
context = QueryContext(self)
return loading.instances(self, cursor, context)
def merge_result(self, iterator, load=True):
"""Merge a result into this :class:`_query.Query` object's Session.
Given an iterator returned by a :class:`_query.Query`
of the same structure
as this one, return an identical iterator of results, with all mapped
instances merged into the session using :meth:`.Session.merge`. This
is an optimized method which will merge all mapped instances,
preserving the structure of the result rows and unmapped columns with
less method overhead than that of calling :meth:`.Session.merge`
explicitly for each value.
The structure of the results is determined based on the column list of
this :class:`_query.Query` - if these do not correspond,
unchecked errors
will occur.
The 'load' argument is the same as that of :meth:`.Session.merge`.
For an example of how :meth:`_query.Query.merge_result` is used, see
the source code for the example :ref:`examples_caching`, where
:meth:`_query.Query.merge_result` is used to efficiently restore state
from a cache back into a target :class:`.Session`.
"""
return loading.merge_result(self, iterator, load)
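# Illustrative usage sketch (not part of the Query implementation):
# re-attaching rows produced elsewhere (e.g. pulled from a cache) into the
# current Session without re-querying the database; ``some_cache`` is a
# hypothetical mapping of cached result lists.
#
#     cached_rows = some_cache["user_query"]
#     merged = session.query(User).merge_result(cached_rows, load=False)
#     users = list(merged)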
@property
def _select_args(self):
return {
"limit": self._limit,
"offset": self._offset,
"distinct": self._distinct,
"prefixes": self._prefixes,
"suffixes": self._suffixes,
"group_by": self._group_by or None,
"having": self._having,
}
@property
def _should_nest_selectable(self):
kwargs = self._select_args
return (
kwargs.get("limit") is not None
or kwargs.get("offset") is not None
or kwargs.get("distinct", False)
or kwargs.get("group_by", False)
)
def exists(self):
"""A convenience method that turns a query into an EXISTS subquery
of the form EXISTS (SELECT 1 FROM ... WHERE ...).
e.g.::
q = session.query(User).filter(User.name == 'fred')
session.query(q.exists())
Producing SQL similar to::
SELECT EXISTS (
SELECT 1 FROM users WHERE users.name = :name_1
) AS anon_1
The EXISTS construct is usually used in the WHERE clause::
session.query(User.id).filter(q.exists()).scalar()
Note that some databases such as SQL Server don't allow an
EXISTS expression to be present in the columns clause of a
SELECT. To select a simple boolean value based on the EXISTS
as a WHERE clause, use :func:`.literal`::
from sqlalchemy import literal
session.query(literal(True)).filter(q.exists()).scalar()
"""
# .add_columns() for the case that we are a query().select_from(X),
# so that ".statement" can be produced (#2995) but also without
# omitting the FROM clause from a query(X) (#2818);
# .with_only_columns() after we have a core select() so that
# we get just "SELECT 1" without any entities.
return sql.exists(
self.enable_eagerloads(False)
.add_columns(sql.literal_column("1"))
.with_labels()
.statement.with_only_columns([1])
)
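# Illustrative sketch (assumed model names): wrapping the EXISTS form shown
# in the docstring above in a small helper.
#
#     def user_exists(session, name):
#         q = session.query(User).filter(User.name == name)
#         return session.query(q.exists()).scalar()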
def count(self):
r"""Return a count of rows this the SQL formed by this :class:`Query`
would return.
This generates the SQL for this Query as follows::
SELECT count(1) AS count_1 FROM (
SELECT <rest of query follows...>
) AS anon_1
The above SQL returns a single row, which is the aggregate value
of the count function; the :meth:`_query.Query.count`
method then returns
that single integer value.
.. warning::
It is important to note that the value returned by
count() is **not the same as the number of ORM objects that this
Query would return from a method such as the .all() method**.
The :class:`_query.Query` object,
when asked to return full entities,
will **deduplicate entries based on primary key**, meaning if the
same primary key value would appear in the results more than once,
only one object of that primary key would be present. This does
not apply to a query that is against individual columns.
.. seealso::
:ref:`faq_query_deduplicating`
:ref:`orm_tutorial_query_returning`
For fine-grained control over specific columns to count, to skip the
usage of a subquery or otherwise control the FROM clause, or to use
other aggregate functions, use :attr:`~sqlalchemy.sql.expression.func`
expressions in conjunction with :meth:`~.Session.query`, i.e.::
from sqlalchemy import func
# count User records, without
# using a subquery.
session.query(func.count(User.id))
# return count of user "id" grouped
# by "name"
session.query(func.count(User.id)).\
group_by(User.name)
from sqlalchemy import distinct
# count distinct "name" values
session.query(func.count(distinct(User.name)))
"""
col = sql.func.count(sql.literal_column("*"))
return self.from_self(col).scalar()
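# Illustrative sketch: counting without the subquery that Query.count()
# emits, using func.count() as recommended in the docstring above.
# ``User`` and ``session`` are assumed names.
#
#     from sqlalchemy import func
#     session.query(func.count(User.id)).scalar()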
def delete(self, synchronize_session="evaluate"):
r"""Perform a bulk delete query.
Deletes rows matched by this query from the database.
E.g.::
sess.query(User).filter(User.age == 25).\
delete(synchronize_session=False)
sess.query(User).filter(User.age == 25).\
delete(synchronize_session='evaluate')
.. warning:: The :meth:`_query.Query.delete`
method is a "bulk" operation,
which bypasses ORM unit-of-work automation in favor of greater
performance. **Please read all caveats and warnings below.**
:param synchronize_session: chooses the strategy for the removal of
matched objects from the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, objects may still remain in
the session which were in fact deleted, which can lead to confusing
results if they are accessed via get() or already loaded
collections.
``'fetch'`` - performs a select query before the delete to find
objects that are matched by the delete query and need to be
removed from the session. Matched objects are removed from the
session.
``'evaluate'`` - Evaluate the query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an error is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
.. warning:: **Additional Caveats for bulk query deletes**
* This method does **not work for joined
inheritance mappings**, since the **multiple table
deletes are not supported by SQL** and because the
**join condition of an inheritance mapper is not
automatically rendered**. Care must be taken in any
multiple-table delete to first accommodate via some other means
how the related table will be deleted, as well as to
explicitly include the joining
condition between those tables, even in mappings where
this is normally automatic. E.g. if a class ``Engineer``
subclasses ``Employee``, a DELETE against the ``Employee``
table would look like::
session.query(Engineer).\
filter(Engineer.id == Employee.id).\
filter(Employee.name == 'dilbert').\
delete()
However the above SQL will not delete from the Engineer table,
unless an ON DELETE CASCADE rule is established in the database
to handle it.
Short story, **do not use this method for joined inheritance
mappings unless you have taken the additional steps to make
this feasible**.
* The polymorphic identity WHERE criteria is **not** included
for single- or
joined-table deletes - this must be added **manually** even
for single table inheritance.
* The method does **not** offer in-Python cascading of
relationships - it is assumed that ON DELETE CASCADE/SET
NULL/etc. is configured for any foreign key references
which require it, otherwise the database may emit an
integrity violation if foreign key references are being
enforced.
After the DELETE, dependent objects in the
:class:`.Session` which were impacted by an ON DELETE
may not contain the current state, or may have been
deleted. This issue is resolved once the
:class:`.Session` is expired, which normally occurs upon
:meth:`.Session.commit` or can be forced by using
:meth:`.Session.expire_all`. Accessing an expired
object whose row has been deleted will invoke a SELECT
to locate the row; when the row is not found, an
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is
raised.
* The ``'fetch'`` strategy results in an additional
SELECT statement emitted and will significantly reduce
performance.
* The ``'evaluate'`` strategy performs a scan of
all matching objects within the :class:`.Session`; if the
contents of the :class:`.Session` are expired, such as
via a preceding :meth:`.Session.commit` call, **this will
result in SELECT queries emitted for every matching object**.
* The :meth:`.MapperEvents.before_delete` and
:meth:`.MapperEvents.after_delete`
events **are not invoked** from this method. Instead, the
:meth:`.SessionEvents.after_bulk_delete` method is provided to
act upon a mass DELETE of entity rows.
.. seealso::
:meth:`_query.Query.update`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
delete_op = persistence.BulkDelete.factory(self, synchronize_session)
delete_op.exec_()
return delete_op.rowcount
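# Illustrative sketch: a bulk delete using the 'fetch' strategy so that
# matched objects already present in the session are removed from it.
# ``User`` and ``sess`` are assumed names.
#
#     sess.query(User).filter(User.age == 25).delete(
#         synchronize_session="fetch"
#     )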
def update(self, values, synchronize_session="evaluate", update_args=None):
r"""Perform a bulk update query.
Updates rows matched by this query in the database.
E.g.::
sess.query(User).filter(User.age == 25).\
update({User.age: User.age - 10}, synchronize_session=False)
sess.query(User).filter(User.age == 25).\
update({"age": User.age - 10}, synchronize_session='evaluate')
.. warning:: The :meth:`_query.Query.update`
method is a "bulk" operation,
which bypasses ORM unit-of-work automation in favor of greater
performance. **Please read all caveats and warnings below.**
:param values: a dictionary with attribute names, or alternatively
mapped attributes or SQL expressions, as keys, and literal
values or SQL expressions as values. If :ref:`parameter-ordered
mode <updates_order_parameters>` is desired, the values can be
passed as a list of 2-tuples;
this requires that the
:paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
flag is passed to the :paramref:`.Query.update.update_args` dictionary
as well.
.. versionchanged:: 1.0.0 - string names in the values dictionary
are now resolved against the mapped entity; previously, these
strings were passed as literal column names with no mapper-level
translation.
:param synchronize_session: chooses the strategy to update the
attributes on objects in the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, updated objects may still
remain in the session with stale values on their attributes, which
can lead to confusing results.
``'fetch'`` - performs a select query before the update to find
objects that are matched by the update query. The updated
attributes are expired on matched objects.
``'evaluate'`` - Evaluate the Query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an exception is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:param update_args: Optional dictionary, if present will be passed
to the underlying :func:`_expression.update`
construct as the ``**kw`` for
the object. May be used to pass dialect-specific arguments such
as ``mysql_limit``, as well as other special arguments such as
:paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`.
.. versionadded:: 1.0.0
:return: the count of rows matched as returned by the database's
"row count" feature.
.. warning:: **Additional Caveats for bulk query updates**
* The method does **not** offer in-Python cascading of
relationships - it is assumed that ON UPDATE CASCADE is
configured for any foreign key references which require
it, otherwise the database may emit an integrity
violation if foreign key references are being enforced.
After the UPDATE, dependent objects in the
:class:`.Session` which were impacted by an ON UPDATE
CASCADE may not contain the current state; this issue is
resolved once the :class:`.Session` is expired, which
normally occurs upon :meth:`.Session.commit` or can be
forced by using :meth:`.Session.expire_all`.
* The ``'fetch'`` strategy results in an additional
SELECT statement emitted and will significantly reduce
performance.
* The ``'evaluate'`` strategy performs a scan of
all matching objects within the :class:`.Session`; if the
contents of the :class:`.Session` are expired, such as
via a preceding :meth:`.Session.commit` call, **this will
result in SELECT queries emitted for every matching object**.
* The method supports multiple table updates, as detailed
in :ref:`multi_table_updates`, and this behavior does
extend to support updates of joined-inheritance and
other multiple table mappings. However, the **join
condition of an inheritance mapper is not
automatically rendered**. Care must be taken in any
multiple-table update to explicitly include the joining
condition between those tables, even in mappings where
this is normally automatic. E.g. if a class ``Engineer``
subclasses ``Employee``, an UPDATE of the ``Engineer``
local table using criteria against the ``Employee``
local table might look like::
session.query(Engineer).\
filter(Engineer.id == Employee.id).\
filter(Employee.name == 'dilbert').\
update({"engineer_type": "programmer"})
* The polymorphic identity WHERE criteria is **not** included
for single- or
joined- table updates - this must be added **manually**, even
for single table inheritance.
* The :meth:`.MapperEvents.before_update` and
:meth:`.MapperEvents.after_update`
events **are not invoked from this method**. Instead, the
:meth:`.SessionEvents.after_bulk_update` method is provided to
act upon a mass UPDATE of entity rows.
.. seealso::
:meth:`_query.Query.delete`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
update_args = update_args or {}
update_op = persistence.BulkUpdate.factory(
self, synchronize_session, values, update_args
)
update_op.exec_()
return update_op.rowcount
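# Illustrative sketch: a parameter-ordered bulk UPDATE, passing values as a
# list of 2-tuples and enabling preserve_parameter_order via update_args,
# as described in the docstring above.  ``User`` and ``sess`` are assumed.
#
#     sess.query(User).filter(User.age == 25).update(
#         [(User.age, User.age - 10), (User.name, User.name + " (adjusted)")],
#         synchronize_session=False,
#         update_args={"preserve_parameter_order": True},
#     )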
def _compile_context(self, labels=True):
if self.dispatch.before_compile:
for fn in self.dispatch.before_compile:
new_query = fn(self)
if new_query is not None and new_query is not self:
self = new_query
if not fn._bake_ok:
self._bake_ok = False
context = QueryContext(self)
if context.statement is not None:
return context
context.labels = labels
context._for_update_arg = self._for_update_arg
for entity in self._entities:
entity.setup_context(self, context)
for rec in context.create_eager_joins:
strategy = rec[0]
strategy(context, *rec[1:])
if context.from_clause:
# "load from explicit FROMs" mode,
# i.e. when select_from() or join() is used
context.froms = list(context.from_clause)
# else "load from discrete FROMs" mode,
# i.e. when each _MappedEntity has its own FROM
if self._enable_single_crit:
self._adjust_for_single_inheritance(context)
if not context.primary_columns:
if self._only_load_props:
raise sa_exc.InvalidRequestError(
"No column-based properties specified for "
"refresh operation. Use session.expire() "
"to reload collections and related items."
)
else:
raise sa_exc.InvalidRequestError(
"Query contains no columns with which to " "SELECT from."
)
if context.multi_row_eager_loaders and self._should_nest_selectable:
context.statement = self._compound_eager_statement(context)
else:
context.statement = self._simple_statement(context)
return context
def _compound_eager_statement(self, context):
# for eager joins present and LIMIT/OFFSET/DISTINCT,
# wrap the query inside a select,
# then append eager joins onto that
if context.order_by:
order_by_col_expr = sql_util.expand_column_list_from_order_by(
context.primary_columns, context.order_by
)
else:
context.order_by = None
order_by_col_expr = []
inner = sql.select(
context.primary_columns + order_by_col_expr,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
# TODO: this order_by is only needed if
# LIMIT/OFFSET is present in self._select_args,
# else the application on the outside is enough
order_by=context.order_by,
**self._select_args
)
# put FOR UPDATE on the inner query, where MySQL will honor it,
# as well as if it has an OF so PostgreSQL can use it.
inner._for_update_arg = context._for_update_arg
for hint in self._with_hints:
inner = inner.with_hint(*hint)
if self._correlate:
inner = inner.correlate(*self._correlate)
inner = inner.alias()
equivs = self.__all_equivs()
context.adapter = sql_util.ColumnAdapter(inner, equivs)
statement = sql.select(
[inner] + context.secondary_columns, use_labels=context.labels
)
# Oracle however does not allow FOR UPDATE on the subquery,
# and the Oracle dialect ignores it, plus for PostgreSQL, MySQL
# we expect that all elements of the row are locked, so also put it
# on the outside (except in the case of PG when OF is used)
if (
context._for_update_arg is not None
and context._for_update_arg.of is None
):
statement._for_update_arg = context._for_update_arg
from_clause = inner
for eager_join in context.eager_joins.values():
# EagerLoader places a 'stop_on' attribute on the join,
# giving us a marker as to where the "splice point" of
# the join should be
from_clause = sql_util.splice_joins(
from_clause, eager_join, eager_join.stop_on
)
statement.append_from(from_clause)
if context.order_by:
statement.append_order_by(
*context.adapter.copy_and_process(context.order_by)
)
statement.append_order_by(*context.eager_order_by)
return statement
def _simple_statement(self, context):
if not context.order_by:
context.order_by = None
if self._distinct is True and context.order_by:
context.primary_columns += (
sql_util.expand_column_list_from_order_by
)(context.primary_columns, context.order_by)
context.froms += tuple(context.eager_joins.values())
statement = sql.select(
context.primary_columns + context.secondary_columns,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
order_by=context.order_by,
**self._select_args
)
statement._for_update_arg = context._for_update_arg
for hint in self._with_hints:
statement = statement.with_hint(*hint)
if self._correlate:
statement = statement.correlate(*self._correlate)
if context.eager_order_by:
statement.append_order_by(*context.eager_order_by)
return statement
def _adjust_for_single_inheritance(self, context):
"""Apply single-table-inheritance filtering.
For all distinct single-table-inheritance mappers represented in
the columns clause of this query, as well as the "select from entity",
add criterion to the WHERE
clause of the given QueryContext such that only the appropriate
subtypes are selected from the total results.
"""
search = set(self._mapper_adapter_map.values())
if (
self._select_from_entity
and self._select_from_entity not in self._mapper_adapter_map
):
insp = inspect(self._select_from_entity)
if insp.is_aliased_class:
adapter = insp._adapter
else:
adapter = None
search = search.union([(self._select_from_entity, adapter)])
for (ext_info, adapter) in search:
if ext_info in self._join_entities:
continue
single_crit = ext_info.mapper._single_table_criterion
if single_crit is not None:
if adapter:
single_crit = adapter.traverse(single_crit)
single_crit = self._adapt_clause(single_crit, False, False)
context.whereclause = sql.and_(
sql.True_._ifnone(context.whereclause), single_crit
)
class LockmodeArg(ForUpdateArg):
@classmethod
def parse_legacy_query(self, mode):
if mode in (None, False):
return None
if mode == "read":
read = True
nowait = False
elif mode == "update":
read = nowait = False
elif mode == "update_nowait":
nowait = True
read = False
else:
raise sa_exc.ArgumentError(
"Unknown with_lockmode argument: %r" % mode
)
return LockmodeArg(read=read, nowait=nowait)
class _QueryEntity(object):
"""represent an entity column returned within a Query result."""
def __new__(cls, *args, **kwargs):
if cls is _QueryEntity:
entity = args[1]
if not isinstance(entity, util.string_types) and _is_mapped_class(
entity
):
cls = _MapperEntity
elif isinstance(entity, Bundle):
cls = _BundleEntity
else:
cls = _ColumnEntity
return object.__new__(cls)
def _clone(self):
q = self.__class__.__new__(self.__class__)
q.__dict__ = self.__dict__.copy()
return q
class _MapperEntity(_QueryEntity):
"""mapper/class/AliasedClass entity"""
def __init__(self, query, entity):
if not query._primary_entity:
query._primary_entity = self
query._entities.append(self)
query._has_mapper_entities = True
self.entities = [entity]
self.expr = entity
supports_single_entity = True
use_id_for_hash = True
def setup_entity(self, ext_info, aliased_adapter):
self.mapper = ext_info.mapper
self.aliased_adapter = aliased_adapter
self.selectable = ext_info.selectable
self.is_aliased_class = ext_info.is_aliased_class
self._with_polymorphic = ext_info.with_polymorphic_mappers
self._polymorphic_discriminator = ext_info.polymorphic_on
self.entity_zero = ext_info
if ext_info.is_aliased_class:
self._label_name = self.entity_zero.name
else:
self._label_name = self.mapper.class_.__name__
self.path = self.entity_zero._path_registry
def set_with_polymorphic(
self, query, cls_or_mappers, selectable, polymorphic_on
):
"""Receive an update from a call to query.with_polymorphic().
Note the newer style of using a free-standing with_polymorphic()
construct doesn't make use of this method.
"""
if self.is_aliased_class:
# TODO: invalidrequest ?
raise NotImplementedError(
"Can't use with_polymorphic() against " "an Aliased object"
)
if cls_or_mappers is None:
query._reset_polymorphic_adapter(self.mapper)
return
mappers, from_obj = self.mapper._with_polymorphic_args(
cls_or_mappers, selectable
)
self._with_polymorphic = mappers
self._polymorphic_discriminator = polymorphic_on
self.selectable = from_obj
query._mapper_loads_polymorphically_with(
self.mapper,
sql_util.ColumnAdapter(from_obj, self.mapper._equivalent_columns),
)
@property
def type(self):
return self.mapper.class_
@property
def entity_zero_or_selectable(self):
return self.entity_zero
def corresponds_to(self, entity):
return _entity_corresponds_to(self.entity_zero, entity)
def adapt_to_selectable(self, query, sel):
query._entities.append(self)
def _get_entity_clauses(self, query, context):
adapter = None
if not self.is_aliased_class:
if query._polymorphic_adapters:
adapter = query._polymorphic_adapters.get(self.mapper, None)
else:
adapter = self.aliased_adapter
if adapter:
if query._from_obj_alias:
ret = adapter.wrap(query._from_obj_alias)
else:
ret = adapter
else:
ret = query._from_obj_alias
return ret
def row_processor(self, query, context, result):
adapter = self._get_entity_clauses(query, context)
if context.adapter and adapter:
adapter = adapter.wrap(context.adapter)
elif not adapter:
adapter = context.adapter
# polymorphic mappers which have concrete tables in
# their hierarchy usually
# require row aliasing unconditionally.
if not adapter and self.mapper._requires_row_aliasing:
adapter = sql_util.ColumnAdapter(
self.selectable, self.mapper._equivalent_columns
)
if query._primary_entity is self:
only_load_props = query._only_load_props
refresh_state = context.refresh_state
else:
only_load_props = refresh_state = None
_instance = loading._instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
only_load_props=only_load_props,
refresh_state=refresh_state,
polymorphic_discriminator=self._polymorphic_discriminator,
)
return _instance, self._label_name
def setup_context(self, query, context):
adapter = self._get_entity_clauses(query, context)
# if self._adapted_selectable is None:
context.froms += (self.selectable,)
if context.order_by is False and self.mapper.order_by:
context.order_by = self.mapper.order_by
# apply adaptation to the mapper's order_by if needed.
if adapter:
context.order_by = adapter.adapt_list(
util.to_list(context.order_by)
)
loading._setup_entity_query(
context,
self.mapper,
self,
self.path,
adapter,
context.primary_columns,
with_polymorphic=self._with_polymorphic,
only_load_props=query._only_load_props,
polymorphic_discriminator=self._polymorphic_discriminator,
)
def __str__(self):
return str(self.mapper)
@inspection._self_inspects
class Bundle(InspectionAttr):
"""A grouping of SQL expressions that are returned by a
:class:`_query.Query`
under one namespace.
The :class:`.Bundle` essentially allows nesting of the tuple-based
results returned by a column-oriented :class:`_query.Query` object.
It also
is extensible via simple subclassing, where the primary capability
to override is that of how the set of expressions should be returned,
allowing post-processing as well as custom return types, without
involving ORM identity-mapped classes.
.. versionadded:: 0.9.0
.. seealso::
:ref:`bundles`
"""
single_entity = False
"""If True, queries for a single Bundle will be returned as a single
entity, rather than an element within a keyed tuple."""
is_clause_element = False
is_mapper = False
is_aliased_class = False
def __init__(self, name, *exprs, **kw):
r"""Construct a new :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
for row in session.query(bn).filter(
bn.c.x == 5).filter(bn.c.y == 4):
print(row.mybundle.x, row.mybundle.y)
:param name: name of the bundle.
:param \*exprs: columns or SQL expressions comprising the bundle.
:param single_entity=False: if True, rows for this :class:`.Bundle`
can be returned as a "single entity" outside of any enclosing tuple
in the same manner as a mapped entity.
"""
self.name = self._label = name
self.exprs = exprs
self.c = self.columns = ColumnCollection()
self.columns.update(
(getattr(col, "key", col._label), col) for col in exprs
)
self.single_entity = kw.pop("single_entity", self.single_entity)
columns = None
"""A namespace of SQL expressions referred to by this :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
q = sess.query(bn).filter(bn.c.x == 5)
Nesting of bundles is also supported::
b1 = Bundle("b1",
Bundle('b2', MyClass.a, MyClass.b),
Bundle('b3', MyClass.x, MyClass.y)
)
q = sess.query(b1).filter(
b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)
.. seealso::
:attr:`.Bundle.c`
"""
c = None
"""An alias for :attr:`.Bundle.columns`."""
def _clone(self):
cloned = self.__class__.__new__(self.__class__)
cloned.__dict__.update(self.__dict__)
return cloned
def __clause_element__(self):
return expression.ClauseList(group=False, *self.exprs)
@property
def clauses(self):
return self.__clause_element__().clauses
def label(self, name):
"""Provide a copy of this :class:`.Bundle` passing a new label."""
cloned = self._clone()
cloned.name = name
return cloned
def create_row_processor(self, query, procs, labels):
"""Produce the "row processing" function for this :class:`.Bundle`.
May be overridden by subclasses.
.. seealso::
:ref:`bundles` - includes an example of subclassing.
"""
keyed_tuple = util.lightweight_named_tuple("result", labels)
def proc(row):
return keyed_tuple([proc(row) for proc in procs])
return proc
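# Illustrative sketch of the subclassing hook referenced above: a Bundle
# that returns each row as a plain dict instead of a named tuple.
# ``MyClass`` and ``session`` are assumed names.
#
#     class DictBundle(Bundle):
#         def create_row_processor(self, query, procs, labels):
#             def proc(row):
#                 return dict(zip(labels, (p(row) for p in procs)))
#             return proc
#
#     bn = DictBundle("mybundle", MyClass.x, MyClass.y)
#     for row in session.query(bn):
#         print(row.mybundle["x"], row.mybundle["y"])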
class _BundleEntity(_QueryEntity):
use_id_for_hash = False
def __init__(self, query, bundle, setup_entities=True):
query._entities.append(self)
self.bundle = self.expr = bundle
self.type = type(bundle)
self._label_name = bundle.name
self._entities = []
if setup_entities:
for expr in bundle.exprs:
if isinstance(expr, Bundle):
_BundleEntity(self, expr)
else:
_ColumnEntity(self, expr)
self.supports_single_entity = self.bundle.single_entity
@property
def mapper(self):
ezero = self.entity_zero
if ezero is not None:
return ezero.mapper
else:
return None
@property
def entities(self):
entities = []
for ent in self._entities:
entities.extend(ent.entities)
return entities
@property
def entity_zero(self):
for ent in self._entities:
ezero = ent.entity_zero
if ezero is not None:
return ezero
else:
return None
def corresponds_to(self, entity):
# TODO: we might be able to implement this but for now
# we are working around it
return False
@property
def entity_zero_or_selectable(self):
for ent in self._entities:
ezero = ent.entity_zero_or_selectable
if ezero is not None:
return ezero
else:
return None
def adapt_to_selectable(self, query, sel):
c = _BundleEntity(query, self.bundle, setup_entities=False)
# c._label_name = self._label_name
# c.entity_zero = self.entity_zero
# c.entities = self.entities
for ent in self._entities:
ent.adapt_to_selectable(c, sel)
def setup_entity(self, ext_info, aliased_adapter):
for ent in self._entities:
ent.setup_entity(ext_info, aliased_adapter)
def setup_context(self, query, context):
for ent in self._entities:
ent.setup_context(query, context)
def row_processor(self, query, context, result):
procs, labels = zip(
*[
ent.row_processor(query, context, result)
for ent in self._entities
]
)
proc = self.bundle.create_row_processor(query, procs, labels)
return proc, self._label_name
class _ColumnEntity(_QueryEntity):
"""Column/expression based entity."""
def __init__(self, query, column, namespace=None):
self.expr = column
self.namespace = namespace
search_entities = True
check_column = False
if isinstance(column, util.string_types):
util.warn_deprecated(
"Plain string expression passed to Query() should be "
"explicitly declared using literal_column(); "
"automatic coercion of this value will be removed in "
"SQLAlchemy 1.4"
)
column = sql.literal_column(column)
self._label_name = column.name
search_entities = False
check_column = True
_entity = None
elif isinstance(
column, (attributes.QueryableAttribute, interfaces.PropComparator)
):
_entity = getattr(column, "_parententity", None)
if _entity is not None:
search_entities = False
self._label_name = column.key
column = column._query_clause_element()
check_column = True
if isinstance(column, Bundle):
_BundleEntity(query, column)
return
if not isinstance(column, sql.ColumnElement):
if hasattr(column, "_select_iterable"):
# break out an object like Table into
# individual columns
for c in column._select_iterable:
if c is column:
break
_ColumnEntity(query, c, namespace=column)
else:
return
raise sa_exc.InvalidRequestError(
"SQL expression, column, or mapped entity "
"expected - got '%r'" % (column,)
)
elif not check_column:
self._label_name = getattr(column, "key", None)
search_entities = True
self.type = type_ = column.type
self.use_id_for_hash = not type_.hashable
# If the Column is unnamed, give it a
# label() so that mutable column expressions
# can be located in the result even
# if the expression's identity has been changed
# due to adaption.
if not column._label and not getattr(column, "is_literal", False):
column = column.label(self._label_name)
query._entities.append(self)
self.column = column
self.froms = set()
# look for ORM entities represented within the
# given expression. Try to count only entities
# for columns whose FROM object is in the actual list
# of FROMs for the overall expression - this helps
# subqueries which were built from ORM constructs from
# leaking out their entities into the main select construct
self.actual_froms = list(column._from_objects)
actual_froms = set(self.actual_froms)
if not search_entities:
self.entity_zero = _entity
if _entity:
self.entities = [_entity]
self.mapper = _entity.mapper
else:
self.entities = []
self.mapper = None
self._from_entities = set(self.entities)
else:
all_elements = [
elem
for elem in sql_util.surface_column_elements(
column, include_scalar_selects=False
)
if "parententity" in elem._annotations
]
self.entities = util.unique_list(
[
elem._annotations["parententity"]
for elem in all_elements
if "parententity" in elem._annotations
]
)
self._from_entities = set(
[
elem._annotations["parententity"]
for elem in all_elements
if "parententity" in elem._annotations
and actual_froms.intersection(elem._from_objects)
]
)
if self.entities:
self.entity_zero = self.entities[0]
self.mapper = self.entity_zero.mapper
elif self.namespace is not None:
self.entity_zero = self.namespace
self.mapper = None
else:
self.entity_zero = None
self.mapper = None
supports_single_entity = False
@property
def entity_zero_or_selectable(self):
if self.entity_zero is not None:
return self.entity_zero
elif self.actual_froms:
return self.actual_froms[0]
else:
return None
def adapt_to_selectable(self, query, sel):
c = _ColumnEntity(query, sel.corresponding_column(self.column))
c._label_name = self._label_name
c.entity_zero = self.entity_zero
c.entities = self.entities
def setup_entity(self, ext_info, aliased_adapter):
if "selectable" not in self.__dict__:
self.selectable = ext_info.selectable
if set(self.actual_froms).intersection(
ext_info.selectable._from_objects
):
self.froms.add(ext_info.selectable)
def corresponds_to(self, entity):
if self.entity_zero is None:
return False
elif _is_aliased_class(entity):
# TODO: polymorphic subclasses ?
return entity is self.entity_zero
else:
return not _is_aliased_class(
self.entity_zero
) and entity.common_parent(self.entity_zero)
def row_processor(self, query, context, result):
if ("fetch_column", self) in context.attributes:
column = context.attributes[("fetch_column", self)]
else:
column = query._adapt_clause(self.column, False, True)
if column._annotations:
# annotated columns perform more slowly in compiler and
# result due to the __eq__() method, so use deannotated
column = column._deannotate()
if context.adapter:
column = context.adapter.columns[column]
getter = result._getter(column)
return getter, self._label_name
def setup_context(self, query, context):
column = query._adapt_clause(self.column, False, True)
if column._annotations:
# annotated columns perform more slowly in compiler and
# result due to the __eq__() method, so use deannotated
column = column._deannotate()
context.froms += tuple(self.froms)
context.primary_columns.append(column)
context.attributes[("fetch_column", self)] = column
def __str__(self):
return str(self.column)
class QueryContext(object):
__slots__ = (
"multi_row_eager_loaders",
"adapter",
"froms",
"for_update",
"query",
"session",
"autoflush",
"populate_existing",
"invoke_all_eagers",
"version_check",
"refresh_state",
"primary_columns",
"secondary_columns",
"eager_order_by",
"eager_joins",
"create_eager_joins",
"propagate_options",
"attributes",
"statement",
"from_clause",
"whereclause",
"order_by",
"labels",
"_for_update_arg",
"runid",
"partials",
"post_load_paths",
"identity_token",
)
def __init__(self, query):
if query._statement is not None:
if (
isinstance(query._statement, expression.SelectBase)
and not query._statement._textual
and not query._statement.use_labels
):
self.statement = query._statement.apply_labels()
else:
self.statement = query._statement
else:
self.statement = None
self.from_clause = query._from_obj
self.whereclause = query._criterion
self.order_by = query._order_by
self.multi_row_eager_loaders = False
self.adapter = None
self.froms = ()
self.for_update = None
self.query = query
self.session = query.session
self.autoflush = query._autoflush
self.populate_existing = query._populate_existing
self.invoke_all_eagers = query._invoke_all_eagers
self.version_check = query._version_check
self.refresh_state = query._refresh_state
self.primary_columns = []
self.secondary_columns = []
self.eager_order_by = []
self.eager_joins = {}
self.create_eager_joins = []
self.propagate_options = set(
o for o in query._with_options if o.propagate_to_loaders
)
self.attributes = query._attributes.copy()
if self.refresh_state is not None:
self.identity_token = query._refresh_identity_token
else:
self.identity_token = None
class AliasOption(interfaces.MapperOption):
def __init__(self, alias):
r"""Return a :class:`.MapperOption` that will indicate to the
:class:`_query.Query`
that the main table has been aliased.
This is a seldom-used option to suit the
very rare case that :func:`.contains_eager`
is being used in conjunction with a user-defined SELECT
statement that aliases the parent table. E.g.::
# define an aliased UNION called 'ulist'
ulist = users.select(users.c.user_id==7).\
union(users.select(users.c.user_id>7)).\
alias('ulist')
# add on an eager load of "addresses"
statement = ulist.outerjoin(addresses).\
select().apply_labels()
# create query, indicating "ulist" will be an
# alias for the main table, "addresses"
# property should be eager loaded
query = session.query(User).options(
contains_alias(ulist),
contains_eager(User.addresses))
# then get results via the statement
results = query.from_statement(statement).all()
:param alias: is the string name of an alias, or a
:class:`_expression.Alias` object representing
the alias.
"""
self.alias = alias
def process_query(self, query):
if isinstance(self.alias, util.string_types):
alias = query._mapper_zero().persist_selectable.alias(self.alias)
else:
alias = self.alias
query._from_obj_alias = sql_util.ColumnAdapter(alias)
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/attributes.py
# orm/attributes.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines instrumentation for class attributes and their interaction
with instances.
This module is usually not directly visible to user applications, but
defines a large part of the ORM's interactivity.
"""
import operator
from . import collections
from . import exc as orm_exc
from . import interfaces
from .base import ATTR_EMPTY
from .base import ATTR_WAS_SET
from .base import CALLABLES_OK
from .base import INIT_OK
from .base import instance_dict
from .base import instance_state
from .base import instance_str
from .base import LOAD_AGAINST_COMMITTED
from .base import manager_of_class
from .base import NEVER_SET
from .base import NO_AUTOFLUSH
from .base import NO_CHANGE # noqa
from .base import NO_RAISE
from .base import NO_VALUE
from .base import NON_PERSISTENT_OK # noqa
from .base import PASSIVE_CLASS_MISMATCH # noqa
from .base import PASSIVE_NO_FETCH
from .base import PASSIVE_NO_FETCH_RELATED # noqa
from .base import PASSIVE_NO_INITIALIZE
from .base import PASSIVE_NO_RESULT
from .base import PASSIVE_OFF
from .base import PASSIVE_ONLY_PERSISTENT
from .base import PASSIVE_RETURN_NEVER_SET
from .base import RELATED_OBJECT_OK # noqa
from .base import SQL_OK # noqa
from .base import state_str
from .. import event
from .. import inspection
from .. import util
@inspection._self_inspects
class QueryableAttribute(
interfaces._MappedAttribute,
interfaces.InspectionAttr,
interfaces.PropComparator,
):
"""Base class for :term:`descriptor` objects that intercept
attribute events on behalf of a :class:`.MapperProperty`
object. The actual :class:`.MapperProperty` is accessible
via the :attr:`.QueryableAttribute.property`
attribute.
.. seealso::
:class:`.InstrumentedAttribute`
:class:`.MapperProperty`
:attr:`_orm.Mapper.all_orm_descriptors`
:attr:`_orm.Mapper.attrs`
"""
is_attribute = True
def __init__(
self,
class_,
key,
impl=None,
comparator=None,
parententity=None,
of_type=None,
):
self.class_ = class_
self.key = key
self.impl = impl
self.comparator = comparator
self._parententity = parententity
self._of_type = of_type
manager = manager_of_class(class_)
# manager is None in the case of AliasedClass
if manager:
# propagate existing event listeners from
# immediate superclass
for base in manager._bases:
if key in base:
self.dispatch._update(base[key].dispatch)
if base[key].dispatch._active_history:
self.dispatch._active_history = True
@util.memoized_property
def _supports_population(self):
return self.impl.supports_population
@property
def _impl_uses_objects(self):
return self.impl.uses_objects
def get_history(self, instance, passive=PASSIVE_OFF):
return self.impl.get_history(
instance_state(instance), instance_dict(instance), passive
)
def __selectable__(self):
# TODO: conditionally attach this method based on clause_element ?
return self
@util.memoized_property
def info(self):
"""Return the 'info' dictionary for the underlying SQL element.
The behavior here is as follows:
* If the attribute is a column-mapped property, i.e.
:class:`.ColumnProperty`, which is mapped directly
to a schema-level :class:`_schema.Column` object, this attribute
will return the :attr:`.SchemaItem.info` dictionary associated
with the core-level :class:`_schema.Column` object.
* If the attribute is a :class:`.ColumnProperty` but is mapped to
any other kind of SQL expression other than a
:class:`_schema.Column`,
the attribute will refer to the :attr:`.MapperProperty.info`
dictionary associated directly with the :class:`.ColumnProperty`,
assuming the SQL expression itself does not have its own ``.info``
attribute (which should be the case, unless a user-defined SQL
construct has defined one).
* If the attribute refers to any other kind of
:class:`.MapperProperty`, including :class:`.RelationshipProperty`,
the attribute will refer to the :attr:`.MapperProperty.info`
dictionary associated with that :class:`.MapperProperty`.
* To access the :attr:`.MapperProperty.info` dictionary of the
:class:`.MapperProperty` unconditionally, including for a
:class:`.ColumnProperty` that's associated directly with a
:class:`_schema.Column`, the attribute can be referred to using
:attr:`.QueryableAttribute.property` attribute, as
``MyClass.someattribute.property.info``.
.. seealso::
:attr:`.SchemaItem.info`
:attr:`.MapperProperty.info`
"""
return self.comparator.info
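# Illustrative sketch (assumed mapping): the two access paths described in
# the docstring above.
#
#     class User(Base):
#         __tablename__ = "user"
#         id = Column(Integer, primary_key=True)
#         name = Column(String, info={"label": "Full name"})
#
#     User.name.info            # Column-level info -> {"label": "Full name"}
#     User.name.property.info   # MapperProperty-level info dict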
@util.memoized_property
def parent(self):
"""Return an inspection instance representing the parent.
This will be either an instance of :class:`_orm.Mapper`
or :class:`.AliasedInsp`, depending upon the nature
of the parent entity which this attribute is associated
with.
"""
return inspection.inspect(self._parententity)
@property
def expression(self):
return self.comparator.__clause_element__()
def __clause_element__(self):
return self.comparator.__clause_element__()
def _query_clause_element(self):
"""like __clause_element__(), but called specifically
by :class:`_query.Query` to allow special behavior."""
return self.comparator._query_clause_element()
def _bulk_update_tuples(self, value):
"""Return setter tuples for a bulk UPDATE."""
return self.comparator._bulk_update_tuples(value)
def adapt_to_entity(self, adapt_to_entity):
assert not self._of_type
return self.__class__(
adapt_to_entity.entity,
self.key,
impl=self.impl,
comparator=self.comparator.adapt_to_entity(adapt_to_entity),
parententity=adapt_to_entity,
)
def of_type(self, cls):
return QueryableAttribute(
self.class_,
self.key,
self.impl,
self.comparator.of_type(cls),
self._parententity,
of_type=cls,
)
def label(self, name):
return self._query_clause_element().label(name)
def operate(self, op, *other, **kwargs):
return op(self.comparator, *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
return op(other, self.comparator, **kwargs)
def hasparent(self, state, optimistic=False):
return self.impl.hasparent(state, optimistic=optimistic) is not False
def __getattr__(self, key):
try:
return getattr(self.comparator, key)
except AttributeError as err:
util.raise_(
AttributeError(
"Neither %r object nor %r object associated with %s "
"has an attribute %r"
% (
type(self).__name__,
type(self.comparator).__name__,
self,
key,
)
),
replace_context=err,
)
def __str__(self):
return "%s.%s" % (self.class_.__name__, self.key)
@util.memoized_property
def property(self):
"""Return the :class:`.MapperProperty` associated with this
:class:`.QueryableAttribute`.
Return values here will commonly be instances of
:class:`.ColumnProperty` or :class:`.RelationshipProperty`.
"""
return self.comparator.property
class InstrumentedAttribute(QueryableAttribute):
"""Class bound instrumented attribute which adds basic
:term:`descriptor` methods.
See :class:`.QueryableAttribute` for a description of most features.
"""
def __set__(self, instance, value):
self.impl.set(
instance_state(instance), instance_dict(instance), value, None
)
def __delete__(self, instance):
self.impl.delete(instance_state(instance), instance_dict(instance))
def __get__(self, instance, owner):
if instance is None:
return self
dict_ = instance_dict(instance)
if self._supports_population and self.key in dict_:
return dict_[self.key]
else:
return self.impl.get(instance_state(instance), dict_)
def create_proxied_attribute(descriptor):
"""Create an QueryableAttribute / user descriptor hybrid.
Returns a new QueryableAttribute type that delegates descriptor
behavior and getattr() to the given descriptor.
"""
# TODO: can move this to descriptor_props if the need for this
# function is removed from ext/hybrid.py
class Proxy(QueryableAttribute):
"""Presents the :class:`.QueryableAttribute` interface as a
proxy on top of a Python descriptor / :class:`.PropComparator`
combination.
"""
def __init__(
self,
class_,
key,
descriptor,
comparator,
adapt_to_entity=None,
doc=None,
original_property=None,
):
self.class_ = class_
self.key = key
self.descriptor = descriptor
self.original_property = original_property
self._comparator = comparator
self._adapt_to_entity = adapt_to_entity
self.__doc__ = doc
_is_internal_proxy = True
@property
def _impl_uses_objects(self):
return (
self.original_property is not None
and getattr(self.class_, self.key).impl.uses_objects
)
@property
def property(self):
return self.comparator.property
@util.memoized_property
def comparator(self):
if util.callable(self._comparator):
self._comparator = self._comparator()
if self._adapt_to_entity:
self._comparator = self._comparator.adapt_to_entity(
self._adapt_to_entity
)
return self._comparator
def adapt_to_entity(self, adapt_to_entity):
return self.__class__(
adapt_to_entity.entity,
self.key,
self.descriptor,
self._comparator,
adapt_to_entity,
)
def __get__(self, instance, owner):
retval = self.descriptor.__get__(instance, owner)
# detect if this is a plain Python @property, which just returns
# itself for class level access. If so, then return us.
# Otherwise, return the object returned by the descriptor.
if retval is self.descriptor and instance is None:
return self
else:
return retval
def __str__(self):
return "%s.%s" % (self.class_.__name__, self.key)
def __getattr__(self, attribute):
"""Delegate __getattr__ to the original descriptor and/or
comparator."""
try:
return getattr(descriptor, attribute)
except AttributeError as err:
if attribute == "comparator":
util.raise_(
AttributeError("comparator"), replace_context=err
)
try:
# comparator itself might be unreachable
comparator = self.comparator
except AttributeError as err2:
util.raise_(
AttributeError(
"Neither %r object nor unconfigured comparator "
"object associated with %s has an attribute %r"
% (type(descriptor).__name__, self, attribute)
),
replace_context=err2,
)
else:
try:
return getattr(comparator, attribute)
except AttributeError as err3:
util.raise_(
AttributeError(
"Neither %r object nor %r object "
"associated with %s has an attribute %r"
% (
type(descriptor).__name__,
type(comparator).__name__,
self,
attribute,
)
),
replace_context=err3,
)
Proxy.__name__ = type(descriptor).__name__ + "Proxy"
util.monkeypatch_proxied_specials(
Proxy, type(descriptor), name="descriptor", from_instance=descriptor
)
return Proxy
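# Illustrative sketch: the hybrid extension is one consumer of this kind of
# proxy, exposing a plain Python descriptor at class level while still
# answering comparator/expression access.  Model names below are assumed.
#
#     from sqlalchemy.ext.hybrid import hybrid_property
#
#     class Interval(Base):
#         __tablename__ = "interval"
#         id = Column(Integer, primary_key=True)
#         start = Column(Integer)
#         end = Column(Integer)
#
#         @hybrid_property
#         def length(self):
#             return self.end - self.start
#
#     session.query(Interval).filter(Interval.length > 5)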
OP_REMOVE = util.symbol("REMOVE")
OP_APPEND = util.symbol("APPEND")
OP_REPLACE = util.symbol("REPLACE")
OP_BULK_REPLACE = util.symbol("BULK_REPLACE")
OP_MODIFIED = util.symbol("MODIFIED")
class Event(object):
"""A token propagated throughout the course of a chain of attribute
events.
Serves as an indicator of the source of the event and also provides
a means of controlling propagation across a chain of attribute
operations.
The :class:`.Event` object is sent as the ``initiator`` argument
when dealing with events such as :meth:`.AttributeEvents.append`,
:meth:`.AttributeEvents.set`,
and :meth:`.AttributeEvents.remove`.
The :class:`.Event` object is currently interpreted by the backref
event handlers, and is used to control the propagation of operations
across two mutually-dependent attributes.
.. versionadded:: 0.9.0
:attribute impl: The :class:`.AttributeImpl` which is the current event
initiator.
:attribute op: The symbol :attr:`.OP_APPEND`, :attr:`.OP_REMOVE`,
:attr:`.OP_REPLACE`, or :attr:`.OP_BULK_REPLACE`, indicating the
source operation.
"""
__slots__ = "impl", "op", "parent_token"
def __init__(self, attribute_impl, op):
self.impl = attribute_impl
self.op = op
self.parent_token = self.impl.parent_token
def __eq__(self, other):
return (
isinstance(other, Event)
and other.impl is self.impl
and other.op == self.op
)
@property
def key(self):
return self.impl.key
def hasparent(self, state):
return self.impl.hasparent(state)
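# Illustrative sketch (assumed mapping): the Event token arrives as the
# ``initiator`` argument of attribute event listeners, as described above.
#
#     from sqlalchemy import event
#
#     @event.listens_for(User.name, "set", retval=True)
#     def uppercase_name(target, value, oldvalue, initiator):
#         # `initiator` is an attributes.Event carrying .impl, .op and
#         # .parent_token for the originating attribute operation
#         return value.upper() if value is not None else value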
class AttributeImpl(object):
"""internal implementation for instrumented attributes."""
def __init__(
self,
class_,
key,
callable_,
dispatch,
trackparent=False,
extension=None,
compare_function=None,
active_history=False,
parent_token=None,
expire_missing=True,
send_modified_events=True,
accepts_scalar_loader=None,
**kwargs
):
r"""Construct an AttributeImpl.
:param \class_: associated class
:param key: string name of the attribute
:param \callable_:
optional function which generates a callable based on a parent
instance, which produces the "default" values for a scalar or
collection attribute when it's first accessed, if not present
already.
:param trackparent:
if True, attempt to track if an instance has a parent attached
to it via this attribute.
:param extension:
a single or list of AttributeExtension object(s) which will
receive set/delete/append/remove/etc. events.
The event package is now used.
.. deprecated:: 1.3
The :paramref:`.AttributeImpl.extension` parameter is deprecated
and will be removed in a future release, corresponding to the
"extension" parameter on the :class:`.MapperProprty` classes
like :func:`.column_property` and :func:`_orm.relationship` The
events system is now used.
:param compare_function:
a function that compares two values which are normally
assignable to this attribute.
:param active_history:
indicates that get_history() should always return the "old" value,
even if it means executing a lazy callable upon attribute change.
:param parent_token:
Usually references the MapperProperty, used as a key for
the hasparent() function to identify an "owning" attribute.
Allows multiple AttributeImpls to all match a single
owner attribute.
:param expire_missing:
if False, don't add an "expiry" callable to this attribute
during state.expire_attributes(None), if no value is present
for this key.
:param send_modified_events:
if False, the InstanceState._modified_event method will have no
effect; this means the attribute will never show up as changed in a
history entry.
"""
self.class_ = class_
self.key = key
self.callable_ = callable_
self.dispatch = dispatch
self.trackparent = trackparent
self.parent_token = parent_token or self
self.send_modified_events = send_modified_events
if compare_function is None:
self.is_equal = operator.eq
else:
self.is_equal = compare_function
if accepts_scalar_loader is not None:
self.accepts_scalar_loader = accepts_scalar_loader
else:
self.accepts_scalar_loader = self.default_accepts_scalar_loader
# TODO: pass in the manager here
# instead of doing a lookup
attr = manager_of_class(class_)[key]
for ext in util.to_list(extension or []):
ext._adapt_listener(attr, ext)
if active_history:
self.dispatch._active_history = True
self.expire_missing = expire_missing
self._modified_token = Event(self, OP_MODIFIED)
__slots__ = (
"class_",
"key",
"callable_",
"dispatch",
"trackparent",
"parent_token",
"send_modified_events",
"is_equal",
"expire_missing",
"_modified_token",
"accepts_scalar_loader",
)
def __str__(self):
return "%s.%s" % (self.class_.__name__, self.key)
def _get_active_history(self):
"""Backwards compat for impl.active_history"""
return self.dispatch._active_history
def _set_active_history(self, value):
self.dispatch._active_history = value
active_history = property(_get_active_history, _set_active_history)
def hasparent(self, state, optimistic=False):
"""Return the boolean value of a `hasparent` flag attached to
the given state.
The `optimistic` flag determines what the default return value
should be if no `hasparent` flag can be located.
As this function is used to determine if an instance is an
*orphan*, instances that were loaded from storage should be
assumed to not be orphans, until a True/False value for this
flag is set.
An instance attribute that is loaded by a callable function
will also not have a `hasparent` flag.
"""
msg = "This AttributeImpl is not configured to track parents."
assert self.trackparent, msg
return (
state.parents.get(id(self.parent_token), optimistic) is not False
)
def sethasparent(self, state, parent_state, value):
"""Set a boolean flag on the given item corresponding to
whether or not it is attached to a parent object via the
attribute represented by this ``InstrumentedAttribute``.
"""
msg = "This AttributeImpl is not configured to track parents."
assert self.trackparent, msg
id_ = id(self.parent_token)
if value:
state.parents[id_] = parent_state
else:
if id_ in state.parents:
last_parent = state.parents[id_]
if (
last_parent is not False
and last_parent.key != parent_state.key
):
if last_parent.obj() is None:
raise orm_exc.StaleDataError(
"Removing state %s from parent "
"state %s along attribute '%s', "
"but the parent record "
"has gone stale, can't be sure this "
"is the most recent parent."
% (
state_str(state),
state_str(parent_state),
self.key,
)
)
return
state.parents[id_] = False
def get_history(self, state, dict_, passive=PASSIVE_OFF):
raise NotImplementedError()
def get_all_pending(self, state, dict_, passive=PASSIVE_NO_INITIALIZE):
"""Return a list of tuples of (state, obj)
for all objects in this attribute's current state
+ history.
Only applies to object-based attributes.
This is an inlining of existing functionality
which roughly corresponds to:
get_state_history(
state,
key,
passive=PASSIVE_NO_INITIALIZE).sum()
"""
raise NotImplementedError()
def initialize(self, state, dict_):
"""Initialize the given state's attribute with an empty value."""
value = None
for fn in self.dispatch.init_scalar:
ret = fn(state, value, dict_)
if ret is not ATTR_EMPTY:
value = ret
return value
def get(self, state, dict_, passive=PASSIVE_OFF):
"""Retrieve a value from the given object.
If a callable is assembled on this object's attribute, and
passive is False, the callable will be executed and the
resulting value will be set as the new value for this attribute.
"""
if self.key in dict_:
return dict_[self.key]
else:
# if history present, don't load
key = self.key
if (
key not in state.committed_state
or state.committed_state[key] is NEVER_SET
):
if not passive & CALLABLES_OK:
return PASSIVE_NO_RESULT
if key in state.expired_attributes:
value = state._load_expired(state, passive)
elif key in state.callables:
callable_ = state.callables[key]
value = callable_(state, passive)
elif self.callable_:
value = self.callable_(state, passive)
else:
value = ATTR_EMPTY
if value is PASSIVE_NO_RESULT or value is NEVER_SET:
return value
elif value is ATTR_WAS_SET:
try:
return dict_[key]
except KeyError as err:
# TODO: no test coverage here.
util.raise_(
KeyError(
"Deferred loader for attribute "
"%r failed to populate "
"correctly" % key
),
replace_context=err,
)
elif value is not ATTR_EMPTY:
return self.set_committed_value(state, dict_, value)
if not passive & INIT_OK:
return NEVER_SET
else:
# Return a new, empty value
return self.initialize(state, dict_)
def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
self.set(state, dict_, value, initiator, passive=passive)
def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
self.set(
state, dict_, None, initiator, passive=passive, check_old=value
)
def pop(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
self.set(
state,
dict_,
None,
initiator,
passive=passive,
check_old=value,
pop=True,
)
def set(
self,
state,
dict_,
value,
initiator,
passive=PASSIVE_OFF,
check_old=None,
pop=False,
):
raise NotImplementedError()
def get_committed_value(self, state, dict_, passive=PASSIVE_OFF):
"""return the unchanged value of this attribute"""
if self.key in state.committed_state:
value = state.committed_state[self.key]
if value in (NO_VALUE, NEVER_SET):
return None
else:
return value
else:
return self.get(state, dict_, passive=passive)
def set_committed_value(self, state, dict_, value):
"""set an attribute value on the given instance and 'commit' it."""
dict_[self.key] = value
state._commit(dict_, [self.key])
return value
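# Illustrative sketch: the public module-level helper of the same name
# (sqlalchemy.orm.attributes.set_committed_value) routes to this method and
# is the usual way to populate an attribute without marking it dirty, e.g.
# to prime a collection loaded by hand.  ``user`` and ``addresses`` are
# assumed names.
#
#     from sqlalchemy.orm.attributes import set_committed_value
#     set_committed_value(user, "addresses", addresses)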
class ScalarAttributeImpl(AttributeImpl):
"""represents a scalar value-holding InstrumentedAttribute."""
default_accepts_scalar_loader = True
uses_objects = False
supports_population = True
collection = False
dynamic = False
__slots__ = "_replace_token", "_append_token", "_remove_token"
def __init__(self, *arg, **kw):
super(ScalarAttributeImpl, self).__init__(*arg, **kw)
self._replace_token = self._append_token = Event(self, OP_REPLACE)
self._remove_token = Event(self, OP_REMOVE)
def delete(self, state, dict_):
if self.dispatch._active_history:
old = self.get(state, dict_, PASSIVE_RETURN_NEVER_SET)
else:
old = dict_.get(self.key, NO_VALUE)
if self.dispatch.remove:
self.fire_remove_event(state, dict_, old, self._remove_token)
state._modified_event(dict_, self, old)
existing = dict_.pop(self.key, NO_VALUE)
if (
existing is NO_VALUE
and old is NO_VALUE
and not state.expired
and self.key not in state.expired_attributes
):
raise AttributeError("%s object does not have a value" % self)
def get_history(self, state, dict_, passive=PASSIVE_OFF):
if self.key in dict_:
return History.from_scalar_attribute(self, state, dict_[self.key])
else:
if passive & INIT_OK:
passive ^= INIT_OK
current = self.get(state, dict_, passive=passive)
if current is PASSIVE_NO_RESULT:
return HISTORY_BLANK
else:
return History.from_scalar_attribute(self, state, current)
def set(
self,
state,
dict_,
value,
initiator,
passive=PASSIVE_OFF,
check_old=None,
pop=False,
):
if self.dispatch._active_history:
old = self.get(state, dict_, PASSIVE_RETURN_NEVER_SET)
else:
old = dict_.get(self.key, NO_VALUE)
if self.dispatch.set:
value = self.fire_replace_event(
state, dict_, value, old, initiator
)
state._modified_event(dict_, self, old)
dict_[self.key] = value
def fire_replace_event(self, state, dict_, value, previous, initiator):
for fn in self.dispatch.set:
value = fn(
state, value, previous, initiator or self._replace_token
)
return value
def fire_remove_event(self, state, dict_, value, initiator):
for fn in self.dispatch.remove:
fn(state, value, initiator or self._remove_token)
@property
def type(self):
return self.property.columns[0].type
class ScalarObjectAttributeImpl(ScalarAttributeImpl):
"""represents a scalar-holding InstrumentedAttribute,
where the target object is also instrumented.
Adds events to delete/set operations.
"""
default_accepts_scalar_loader = False
uses_objects = True
supports_population = True
collection = False
__slots__ = ()
def delete(self, state, dict_):
if self.dispatch._active_history:
old = self.get(
state,
dict_,
passive=PASSIVE_ONLY_PERSISTENT
| NO_AUTOFLUSH
| LOAD_AGAINST_COMMITTED,
)
else:
old = self.get(
state,
dict_,
passive=PASSIVE_NO_FETCH ^ INIT_OK
| LOAD_AGAINST_COMMITTED
| NO_RAISE,
)
self.fire_remove_event(state, dict_, old, self._remove_token)
existing = dict_.pop(self.key, NO_VALUE)
# if the attribute is expired, we currently have no way to tell
# that an object-attribute was expired vs. not loaded. So
# for this test, we look to see if the object has a DB identity.
if (
existing is NO_VALUE
and old is not PASSIVE_NO_RESULT
and state.key is None
):
raise AttributeError("%s object does not have a value" % self)
def get_history(self, state, dict_, passive=PASSIVE_OFF):
if self.key in dict_:
return History.from_object_attribute(self, state, dict_[self.key])
else:
if passive & INIT_OK:
passive ^= INIT_OK
current = self.get(state, dict_, passive=passive)
if current is PASSIVE_NO_RESULT:
return HISTORY_BLANK
else:
return History.from_object_attribute(self, state, current)
def get_all_pending(self, state, dict_, passive=PASSIVE_NO_INITIALIZE):
if self.key in dict_:
current = dict_[self.key]
elif passive & CALLABLES_OK:
current = self.get(state, dict_, passive=passive)
else:
return []
# can't use __hash__(), can't use __eq__() here
if (
current is not None
and current is not PASSIVE_NO_RESULT
and current is not NEVER_SET
):
ret = [(instance_state(current), current)]
else:
ret = [(None, None)]
if self.key in state.committed_state:
original = state.committed_state[self.key]
if (
original is not None
and original is not PASSIVE_NO_RESULT
and original is not NEVER_SET
and original is not current
):
ret.append((instance_state(original), original))
return ret
def set(
self,
state,
dict_,
value,
initiator,
passive=PASSIVE_OFF,
check_old=None,
pop=False,
):
"""Set a value on the given InstanceState.
"""
if self.dispatch._active_history:
old = self.get(
state,
dict_,
passive=PASSIVE_ONLY_PERSISTENT
| NO_AUTOFLUSH
| LOAD_AGAINST_COMMITTED,
)
else:
old = self.get(
state,
dict_,
passive=PASSIVE_NO_FETCH ^ INIT_OK
| LOAD_AGAINST_COMMITTED
| NO_RAISE,
)
if (
check_old is not None
and old is not PASSIVE_NO_RESULT
and check_old is not old
):
if pop:
return
else:
raise ValueError(
"Object %s not associated with %s on attribute '%s'"
% (instance_str(check_old), state_str(state), self.key)
)
value = self.fire_replace_event(state, dict_, value, old, initiator)
dict_[self.key] = value
def fire_remove_event(self, state, dict_, value, initiator):
if self.trackparent and value is not None:
self.sethasparent(instance_state(value), state, False)
for fn in self.dispatch.remove:
fn(state, value, initiator or self._remove_token)
state._modified_event(dict_, self, value)
def fire_replace_event(self, state, dict_, value, previous, initiator):
if self.trackparent:
if previous is not value and previous not in (
None,
PASSIVE_NO_RESULT,
NEVER_SET,
):
self.sethasparent(instance_state(previous), state, False)
for fn in self.dispatch.set:
value = fn(
state, value, previous, initiator or self._replace_token
)
state._modified_event(dict_, self, previous)
if self.trackparent:
if value is not None:
self.sethasparent(instance_state(value), state, True)
return value
class CollectionAttributeImpl(AttributeImpl):
"""A collection-holding attribute that instruments changes in membership.
Only handles collections of instrumented objects.
InstrumentedCollectionAttribute holds an arbitrary, user-specified
container object (defaulting to a list) and brokers access to the
CollectionAdapter, a "view" onto that object that presents consistent bag
semantics to the orm layer independent of the user data implementation.
"""
default_accepts_scalar_loader = False
uses_objects = True
supports_population = True
collection = True
dynamic = False
__slots__ = (
"copy",
"collection_factory",
"_append_token",
"_remove_token",
"_bulk_replace_token",
"_duck_typed_as",
)
def __init__(
self,
class_,
key,
callable_,
dispatch,
typecallable=None,
trackparent=False,
extension=None,
copy_function=None,
compare_function=None,
**kwargs
):
super(CollectionAttributeImpl, self).__init__(
class_,
key,
callable_,
dispatch,
trackparent=trackparent,
extension=extension,
compare_function=compare_function,
**kwargs
)
if copy_function is None:
copy_function = self.__copy
self.copy = copy_function
self.collection_factory = typecallable
self._append_token = Event(self, OP_APPEND)
self._remove_token = Event(self, OP_REMOVE)
self._bulk_replace_token = Event(self, OP_BULK_REPLACE)
self._duck_typed_as = util.duck_type_collection(
self.collection_factory()
)
if getattr(self.collection_factory, "_sa_linker", None):
@event.listens_for(self, "init_collection")
def link(target, collection, collection_adapter):
collection._sa_linker(collection_adapter)
@event.listens_for(self, "dispose_collection")
def unlink(target, collection, collection_adapter):
collection._sa_linker(None)
def __copy(self, item):
return [y for y in collections.collection_adapter(item)]
def get_history(self, state, dict_, passive=PASSIVE_OFF):
current = self.get(state, dict_, passive=passive)
if current is PASSIVE_NO_RESULT:
return HISTORY_BLANK
else:
return History.from_collection(self, state, current)
def get_all_pending(self, state, dict_, passive=PASSIVE_NO_INITIALIZE):
# NOTE: passive is ignored here at the moment
if self.key not in dict_:
return []
current = dict_[self.key]
current = getattr(current, "_sa_adapter")
if self.key in state.committed_state:
original = state.committed_state[self.key]
if original not in (NO_VALUE, NEVER_SET):
current_states = [
((c is not None) and instance_state(c) or None, c)
for c in current
]
original_states = [
((c is not None) and instance_state(c) or None, c)
for c in original
]
current_set = dict(current_states)
original_set = dict(original_states)
return (
[
(s, o)
for s, o in current_states
if s not in original_set
]
+ [(s, o) for s, o in current_states if s in original_set]
+ [
(s, o)
for s, o in original_states
if s not in current_set
]
)
return [(instance_state(o), o) for o in current]
def fire_append_event(self, state, dict_, value, initiator):
for fn in self.dispatch.append:
value = fn(state, value, initiator or self._append_token)
state._modified_event(dict_, self, NEVER_SET, True)
if self.trackparent and value is not None:
self.sethasparent(instance_state(value), state, True)
return value
def fire_pre_remove_event(self, state, dict_, initiator):
"""A special event used for pop() operations.
The "remove" event needs to have the item to be removed passed to
it, which in the case of pop from a set, we don't have a way to access
the item before the operation. the event is used for all pop()
operations (even though set.pop is the one where it is really needed).
"""
state._modified_event(dict_, self, NEVER_SET, True)
def fire_remove_event(self, state, dict_, value, initiator):
if self.trackparent and value is not None:
self.sethasparent(instance_state(value), state, False)
for fn in self.dispatch.remove:
fn(state, value, initiator or self._remove_token)
state._modified_event(dict_, self, NEVER_SET, True)
def delete(self, state, dict_):
if self.key not in dict_:
return
state._modified_event(dict_, self, NEVER_SET, True)
collection = self.get_collection(state, state.dict)
collection.clear_with_event()
# key is always present because we checked above. e.g.
# del is a no-op if collection not present.
del dict_[self.key]
def initialize(self, state, dict_):
"""Initialize this attribute with an empty collection."""
_, user_data = self._initialize_collection(state)
dict_[self.key] = user_data
return user_data
def _initialize_collection(self, state):
adapter, collection = state.manager.initialize_collection(
self.key, state, self.collection_factory
)
self.dispatch.init_collection(state, collection, adapter)
return adapter, collection
def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
collection = self.get_collection(state, dict_, passive=passive)
if collection is PASSIVE_NO_RESULT:
value = self.fire_append_event(state, dict_, value, initiator)
assert (
self.key not in dict_
), "Collection was loaded during event handling."
state._get_pending_mutation(self.key).append(value)
else:
collection.append_with_event(value, initiator)
def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
collection = self.get_collection(state, state.dict, passive=passive)
if collection is PASSIVE_NO_RESULT:
self.fire_remove_event(state, dict_, value, initiator)
assert (
self.key not in dict_
), "Collection was loaded during event handling."
state._get_pending_mutation(self.key).remove(value)
else:
collection.remove_with_event(value, initiator)
def pop(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
try:
# TODO: better solution here would be to add
# a "popper" role to collections.py to complement
# "remover".
self.remove(state, dict_, value, initiator, passive=passive)
except (ValueError, KeyError, IndexError):
pass
def set(
self,
state,
dict_,
value,
initiator=None,
passive=PASSIVE_OFF,
pop=False,
_adapt=True,
):
iterable = orig_iterable = value
# pulling a new collection first so that an adaptation exception does
# not trigger a lazy load of the old collection.
new_collection, user_data = self._initialize_collection(state)
if _adapt:
if new_collection._converter is not None:
iterable = new_collection._converter(iterable)
else:
setting_type = util.duck_type_collection(iterable)
receiving_type = self._duck_typed_as
if setting_type is not receiving_type:
given = (
iterable is None
and "None"
or iterable.__class__.__name__
)
wanted = self._duck_typed_as.__name__
raise TypeError(
"Incompatible collection type: %s is not %s-like"
% (given, wanted)
)
# If the object is an adapted collection, return the (iterable)
# adapter.
if hasattr(iterable, "_sa_iterator"):
iterable = iterable._sa_iterator()
elif setting_type is dict:
if util.py3k:
iterable = iterable.values()
else:
iterable = getattr(
iterable, "itervalues", iterable.values
)()
else:
iterable = iter(iterable)
new_values = list(iterable)
evt = self._bulk_replace_token
self.dispatch.bulk_replace(state, new_values, evt)
old = self.get(state, dict_, passive=PASSIVE_ONLY_PERSISTENT)
if old is PASSIVE_NO_RESULT:
old = self.initialize(state, dict_)
elif old is orig_iterable:
# ignore re-assignment of the current collection, as happens
# implicitly with in-place operators (foo.collection |= other)
return
# place a copy of "old" in state.committed_state
state._modified_event(dict_, self, old, True)
old_collection = old._sa_adapter
dict_[self.key] = user_data
collections.bulk_replace(
new_values, old_collection, new_collection, initiator=evt
)
del old._sa_adapter
self.dispatch.dispose_collection(state, old, old_collection)
def _invalidate_collection(self, collection):
adapter = getattr(collection, "_sa_adapter")
adapter.invalidated = True
def set_committed_value(self, state, dict_, value):
"""Set an attribute value on the given instance and 'commit' it."""
collection, user_data = self._initialize_collection(state)
if value:
collection.append_multiple_without_event(value)
state.dict[self.key] = user_data
state._commit(dict_, [self.key])
if self.key in state._pending_mutations:
# pending items exist. issue a modified event,
# add/remove new items.
state._modified_event(dict_, self, user_data, True)
pending = state._pending_mutations.pop(self.key)
added = pending.added_items
removed = pending.deleted_items
for item in added:
collection.append_without_event(item)
for item in removed:
collection.remove_without_event(item)
return user_data
def get_collection(
self, state, dict_, user_data=None, passive=PASSIVE_OFF
):
"""Retrieve the CollectionAdapter associated with the given state.
Creates a new CollectionAdapter if one does not exist.
"""
if user_data is None:
user_data = self.get(state, dict_, passive=passive)
if user_data is PASSIVE_NO_RESULT:
return user_data
return getattr(user_data, "_sa_adapter")
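# Illustrative sketch (not part of this module; ``Address`` and the declarative
# setup are hypothetical): the user-facing collection may be any duck-typed
# container, and the CollectionAdapter gives the ORM a uniform view of it.
#
#     from sqlalchemy.orm import relationship
#
#     addresses = relationship("Address", collection_class=set)
#     # the application sees a plain set on the instance, while membership
#     # changes are tracked through the adapter installed on that set.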
def backref_listeners(attribute, key, uselist):
"""Apply listeners to synchronize a two-way relationship."""
# use easily recognizable names for stack traces.
# in the sections marked "tokens to test for a recursive loop",
# this is somewhat brittle and very performance-sensitive logic
# that is specific to how we might arrive at each event. a marker
# that can target us directly to arguments being invoked against
# the impl might be simpler, but could interfere with other systems.
parent_token = attribute.impl.parent_token
parent_impl = attribute.impl
def _acceptable_key_err(child_state, initiator, child_impl):
raise ValueError(
"Bidirectional attribute conflict detected: "
'Passing object %s to attribute "%s" '
'triggers a modify event on attribute "%s" '
'via the backref "%s".'
% (
state_str(child_state),
initiator.parent_token,
child_impl.parent_token,
attribute.impl.parent_token,
)
)
def emit_backref_from_scalar_set_event(state, child, oldchild, initiator):
if oldchild is child:
return child
if (
oldchild is not None
and oldchild is not PASSIVE_NO_RESULT
and oldchild is not NEVER_SET
):
# With lazy=None, there's no guarantee that the full collection is
# present when updating via a backref.
old_state, old_dict = (
instance_state(oldchild),
instance_dict(oldchild),
)
impl = old_state.manager[key].impl
# tokens to test for a recursive loop.
if not impl.collection and not impl.dynamic:
check_recursive_token = impl._replace_token
else:
check_recursive_token = impl._remove_token
if initiator is not check_recursive_token:
impl.pop(
old_state,
old_dict,
state.obj(),
parent_impl._append_token,
passive=PASSIVE_NO_FETCH,
)
if child is not None:
child_state, child_dict = (
instance_state(child),
instance_dict(child),
)
child_impl = child_state.manager[key].impl
if (
initiator.parent_token is not parent_token
and initiator.parent_token is not child_impl.parent_token
):
_acceptable_key_err(state, initiator, child_impl)
# tokens to test for a recursive loop.
check_append_token = child_impl._append_token
check_bulk_replace_token = (
child_impl._bulk_replace_token
if child_impl.collection
else None
)
if (
initiator is not check_append_token
and initiator is not check_bulk_replace_token
):
child_impl.append(
child_state,
child_dict,
state.obj(),
initiator,
passive=PASSIVE_NO_FETCH,
)
return child
def emit_backref_from_collection_append_event(state, child, initiator):
if child is None:
return
child_state, child_dict = instance_state(child), instance_dict(child)
child_impl = child_state.manager[key].impl
if (
initiator.parent_token is not parent_token
and initiator.parent_token is not child_impl.parent_token
):
_acceptable_key_err(state, initiator, child_impl)
# tokens to test for a recursive loop.
check_append_token = child_impl._append_token
check_bulk_replace_token = (
child_impl._bulk_replace_token if child_impl.collection else None
)
if (
initiator is not check_append_token
and initiator is not check_bulk_replace_token
):
child_impl.append(
child_state,
child_dict,
state.obj(),
initiator,
passive=PASSIVE_NO_FETCH,
)
return child
def emit_backref_from_collection_remove_event(state, child, initiator):
if (
child is not None
and child is not PASSIVE_NO_RESULT
and child is not NEVER_SET
):
child_state, child_dict = (
instance_state(child),
instance_dict(child),
)
child_impl = child_state.manager[key].impl
# tokens to test for a recursive loop.
if not child_impl.collection and not child_impl.dynamic:
check_remove_token = child_impl._remove_token
check_replace_token = child_impl._replace_token
check_for_dupes_on_remove = uselist and not parent_impl.dynamic
else:
check_remove_token = child_impl._remove_token
check_replace_token = (
child_impl._bulk_replace_token
if child_impl.collection
else None
)
check_for_dupes_on_remove = False
if (
initiator is not check_remove_token
and initiator is not check_replace_token
):
if not check_for_dupes_on_remove or not util.has_dupes(
# when this event is called, the item is usually
# present in the list, except for a pop() operation.
state.dict[parent_impl.key],
child,
):
child_impl.pop(
child_state,
child_dict,
state.obj(),
initiator,
passive=PASSIVE_NO_FETCH,
)
if uselist:
event.listen(
attribute,
"append",
emit_backref_from_collection_append_event,
retval=True,
raw=True,
)
else:
event.listen(
attribute,
"set",
emit_backref_from_scalar_set_event,
retval=True,
raw=True,
)
# TODO: need coverage in test/orm/ of remove event
event.listen(
attribute,
"remove",
emit_backref_from_collection_remove_event,
retval=True,
raw=True,
)
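# Illustrative sketch (hypothetical ``User``/``Address`` declarative classes,
# not part of this module): the listeners installed above are what keep both
# sides of a bidirectional relationship in sync when only one side is mutated.
#
#     class User(Base):
#         __tablename__ = "user"
#         id = Column(Integer, primary_key=True)
#         addresses = relationship("Address", back_populates="user")
#
#     class Address(Base):
#         __tablename__ = "address"
#         id = Column(Integer, primary_key=True)
#         user_id = Column(ForeignKey("user.id"))
#         user = relationship("User", back_populates="addresses")
#
#     u, a = User(), Address()
#     u.addresses.append(a)   # fires the collection "append" event
#     assert a.user is u      # the listener populated the other side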
_NO_HISTORY = util.symbol("NO_HISTORY")
_NO_STATE_SYMBOLS = frozenset(
[id(PASSIVE_NO_RESULT), id(NO_VALUE), id(NEVER_SET)]
)
History = util.namedtuple("History", ["added", "unchanged", "deleted"])
class History(History):
"""A 3-tuple of added, unchanged and deleted values,
representing the changes which have occurred on an instrumented
attribute.
The easiest way to get a :class:`.History` object for a particular
attribute on an object is to use the :func:`_sa.inspect` function::
from sqlalchemy import inspect
hist = inspect(myobject).attrs.myattribute.history
Each tuple member is an iterable sequence:
* ``added`` - the collection of items added to the attribute (the first
tuple element).
* ``unchanged`` - the collection of items that have not changed on the
attribute (the second tuple element).
* ``deleted`` - the collection of items that have been removed from the
attribute (the third tuple element).
"""
def __bool__(self):
return self != HISTORY_BLANK
__nonzero__ = __bool__
def empty(self):
"""Return True if this :class:`.History` has no changes
and no existing, unchanged state.
"""
return not bool((self.added or self.deleted) or self.unchanged)
def sum(self):
"""Return a collection of added + unchanged + deleted."""
return (
(self.added or []) + (self.unchanged or []) + (self.deleted or [])
)
def non_deleted(self):
"""Return a collection of added + unchanged."""
return (self.added or []) + (self.unchanged or [])
def non_added(self):
"""Return a collection of unchanged + deleted."""
return (self.unchanged or []) + (self.deleted or [])
def has_changes(self):
"""Return True if this :class:`.History` has changes."""
return bool(self.added or self.deleted)
def as_state(self):
return History(
[
(c is not None) and instance_state(c) or None
for c in self.added
],
[
(c is not None) and instance_state(c) or None
for c in self.unchanged
],
[
(c is not None) and instance_state(c) or None
for c in self.deleted
],
)
@classmethod
def from_scalar_attribute(cls, attribute, state, current):
original = state.committed_state.get(attribute.key, _NO_HISTORY)
if original is _NO_HISTORY:
if current is NEVER_SET:
return cls((), (), ())
else:
return cls((), [current], ())
# don't let ClauseElement expressions here trip things up
elif attribute.is_equal(current, original) is True:
return cls((), [current], ())
else:
# current convention on native scalars is to not
# include information
# about missing previous value in "deleted", but
# we do include None, which helps in some primary
# key situations
if id(original) in _NO_STATE_SYMBOLS:
deleted = ()
# indicate a "del" operation occurred when we don't have
# the previous value as: ([None], (), ())
if id(current) in _NO_STATE_SYMBOLS:
current = None
else:
deleted = [original]
if current is NEVER_SET:
return cls((), (), deleted)
else:
return cls([current], (), deleted)
@classmethod
def from_object_attribute(cls, attribute, state, current):
original = state.committed_state.get(attribute.key, _NO_HISTORY)
if original is _NO_HISTORY:
if current is NO_VALUE or current is NEVER_SET:
return cls((), (), ())
else:
return cls((), [current], ())
elif current is original and current is not NEVER_SET:
return cls((), [current], ())
else:
# current convention on related objects is to not
# include information
# about missing previous value in "deleted", and
# to also not include None - the dependency.py rules
# ignore the None in any case.
if id(original) in _NO_STATE_SYMBOLS or original is None:
deleted = ()
# indicate a "del" operation occurred when we don't have
# the previous value as: ([None], (), ())
if id(current) in _NO_STATE_SYMBOLS:
current = None
else:
deleted = [original]
if current is NO_VALUE or current is NEVER_SET:
return cls((), (), deleted)
else:
return cls([current], (), deleted)
@classmethod
def from_collection(cls, attribute, state, current):
original = state.committed_state.get(attribute.key, _NO_HISTORY)
if current is NO_VALUE or current is NEVER_SET:
return cls((), (), ())
current = getattr(current, "_sa_adapter")
if original in (NO_VALUE, NEVER_SET):
return cls(list(current), (), ())
elif original is _NO_HISTORY:
return cls((), list(current), ())
else:
current_states = [
((c is not None) and instance_state(c) or None, c)
for c in current
]
original_states = [
((c is not None) and instance_state(c) or None, c)
for c in original
]
current_set = dict(current_states)
original_set = dict(original_states)
return cls(
[o for s, o in current_states if s not in original_set],
[o for s, o in current_states if s in original_set],
[o for s, o in original_states if s not in current_set],
)
HISTORY_BLANK = History(None, None, None)
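# Illustrative sketch (``user`` is a hypothetical mapped instance whose ``name``
# value is already loaded): the History tuple is most easily obtained through
# the inspection API.
#
#     from sqlalchemy import inspect
#
#     user.name = "ed"                        # previously "jack"
#     hist = inspect(user).attrs.name.history
#     # hist.added     -> ['ed']
#     # hist.deleted   -> ['jack']
#     # hist.unchanged -> ()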
def get_history(obj, key, passive=PASSIVE_OFF):
"""Return a :class:`.History` record for the given object
and attribute key.
This is the **pre-flush** history for a given attribute, which is
reset each time the :class:`.Session` flushes changes to the
current database transaction.
.. note::
Prefer to use the :attr:`.AttributeState.history` and
:meth:`.AttributeState.load_history` accessors to retrieve the
:class:`.History` for instance attributes.
:param obj: an object whose class is instrumented by the
attributes package.
:param key: string attribute name.
:param passive: indicates loading behavior for the attribute
if the value is not already present. This is a
bitflag attribute, which defaults to the symbol
:attr:`.PASSIVE_OFF` indicating all necessary SQL
should be emitted.
.. seealso::
:attr:`.AttributeState.history`
:meth:`.AttributeState.load_history` - retrieve history
using loader callables if the value is not locally present.
"""
if passive is True:
util.warn_deprecated(
"Passing True for 'passive' is deprecated. "
"Use attributes.PASSIVE_NO_INITIALIZE"
)
passive = PASSIVE_NO_INITIALIZE
elif passive is False:
util.warn_deprecated(
"Passing False for 'passive' is "
"deprecated. Use attributes.PASSIVE_OFF"
)
passive = PASSIVE_OFF
return get_state_history(instance_state(obj), key, passive)
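# Illustrative sketch (``user`` is a hypothetical mapped instance): the same
# pre-flush history is available through the functional form defined above,
# which is convenient inside event handlers.
#
#     from sqlalchemy.orm import attributes
#
#     hist = attributes.get_history(user, "name")
#     if hist.has_changes():
#         print("pending change:", hist.added, "replacing", hist.deleted)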
def get_state_history(state, key, passive=PASSIVE_OFF):
return state.get_history(key, passive)
def has_parent(cls, obj, key, optimistic=False):
"""TODO"""
manager = manager_of_class(cls)
state = instance_state(obj)
return manager.has_parent(state, key, optimistic)
def register_attribute(class_, key, **kw):
comparator = kw.pop("comparator", None)
parententity = kw.pop("parententity", None)
doc = kw.pop("doc", None)
desc = register_descriptor(class_, key, comparator, parententity, doc=doc)
register_attribute_impl(class_, key, **kw)
return desc
def register_attribute_impl(
class_,
key,
uselist=False,
callable_=None,
useobject=False,
impl_class=None,
backref=None,
**kw
):
manager = manager_of_class(class_)
if uselist:
factory = kw.pop("typecallable", None)
typecallable = manager.instrument_collection_class(
key, factory or list
)
else:
typecallable = kw.pop("typecallable", None)
dispatch = manager[key].dispatch
if impl_class:
impl = impl_class(class_, key, typecallable, dispatch, **kw)
elif uselist:
impl = CollectionAttributeImpl(
class_, key, callable_, dispatch, typecallable=typecallable, **kw
)
elif useobject:
impl = ScalarObjectAttributeImpl(
class_, key, callable_, dispatch, **kw
)
else:
impl = ScalarAttributeImpl(class_, key, callable_, dispatch, **kw)
manager[key].impl = impl
if backref:
backref_listeners(manager[key], backref, uselist)
manager.post_configure_attribute(key)
return manager[key]
def register_descriptor(
class_, key, comparator=None, parententity=None, doc=None
):
manager = manager_of_class(class_)
descriptor = InstrumentedAttribute(
class_, key, comparator=comparator, parententity=parententity
)
descriptor.__doc__ = doc
manager.instrument_attribute(key, descriptor)
return descriptor
def unregister_attribute(class_, key):
manager_of_class(class_).uninstrument_attribute(key)
def init_collection(obj, key):
"""Initialize a collection attribute and return the collection adapter.
This function is used to provide direct access to collection internals
for a previously unloaded attribute. e.g.::
collection_adapter = init_collection(someobject, 'elements')
for elem in values:
collection_adapter.append_without_event(elem)
For an easier way to do the above, see
:func:`~sqlalchemy.orm.attributes.set_committed_value`.
:param obj: a mapped object
:param key: string attribute name where the collection is located.
"""
state = instance_state(obj)
dict_ = state.dict
return init_state_collection(state, dict_, key)
def init_state_collection(state, dict_, key):
"""Initialize a collection attribute and return the collection adapter."""
attr = state.manager[key].impl
user_data = attr.initialize(state, dict_)
return attr.get_collection(state, dict_, user_data)
def set_committed_value(instance, key, value):
"""Set the value of an attribute with no history events.
Cancels any previous history present. The value should be
a scalar value for scalar-holding attributes, or
an iterable for any collection-holding attribute.
This is the same underlying method used when a lazy loader
fires off and loads additional data from the database.
In particular, this method can be used by application code
which has loaded additional attributes or collections through
separate queries, which can then be attached to an instance
as though it were part of its original loaded state.
"""
state, dict_ = instance_state(instance), instance_dict(instance)
state.manager[key].impl.set_committed_value(state, dict_, value)
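# Illustrative sketch (``User``, ``Address`` and ``session`` are hypothetical):
# attaching a separately-loaded collection without generating history, just as
# a lazy loader would.
#
#     from sqlalchemy.orm.attributes import set_committed_value
#
#     user = session.query(User).get(5)
#     addresses = session.query(Address).filter_by(user_id=5).all()
#     set_committed_value(user, "addresses", addresses)
#     # user.addresses is populated and the object remains "clean"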
def set_attribute(instance, key, value, initiator=None):
"""Set the value of an attribute, firing history events.
This function may be used regardless of instrumentation
applied directly to the class, i.e. no descriptors are required.
Custom attribute management schemes will need to make use
of this method to establish attribute state as understood
by SQLAlchemy.
:param instance: the object that will be modified
:param key: string name of the attribute
:param value: value to assign
:param initiator: an instance of :class:`.Event` that would have
been propagated from a previous event listener. This argument
is used when the :func:`.set_attribute` function is being used within
an existing event listening function where an :class:`.Event` object
is being supplied; the object may be used to track the origin of the
chain of events.
.. versionadded:: 1.2.3
"""
state, dict_ = instance_state(instance), instance_dict(instance)
state.manager[key].impl.set(state, dict_, value, initiator)
def get_attribute(instance, key):
"""Get the value of an attribute, firing any callables required.
This function may be used regardless of instrumentation
applied directly to the class, i.e. no descriptors are required.
Custom attribute management schemes will need to make use
of this method in order to access attribute state as understood
by SQLAlchemy.
"""
state, dict_ = instance_state(instance), instance_dict(instance)
return state.manager[key].impl.get(state, dict_)
def del_attribute(instance, key):
"""Delete the value of an attribute, firing history events.
This function may be used regardless of instrumentation
applied directly to the class, i.e. no descriptors are required.
Custom attribute management schemes will need to make use
of this method to establish attribute state as understood
by SQLAlchemy.
"""
state, dict_ = instance_state(instance), instance_dict(instance)
state.manager[key].impl.delete(state, dict_)
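# Illustrative sketch (``user`` is a hypothetical mapped instance): the
# functional forms above mirror ordinary attribute access but can be driven
# from generic code, e.g. inside event listeners, without using descriptors.
#
#     from sqlalchemy.orm.attributes import (
#         del_attribute,
#         get_attribute,
#         set_attribute,
#     )
#
#     set_attribute(user, "name", "ed")   # same events as user.name = "ed"
#     get_attribute(user, "name")         # fires loader callables if needed
#     del_attribute(user, "name")         # same events as del user.name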
def flag_modified(instance, key):
"""Mark an attribute on an instance as 'modified'.
This sets the 'modified' flag on the instance and
establishes an unconditional change event for the given attribute.
The attribute must have a value present, else an
:class:`.InvalidRequestError` is raised.
To mark an object "dirty" without referring to any specific attribute
so that it is considered within a flush, use the
:func:`.attributes.flag_dirty` call.
.. seealso::
:func:`.attributes.flag_dirty`
"""
state, dict_ = instance_state(instance), instance_dict(instance)
impl = state.manager[key].impl
impl.dispatch.modified(state, impl._modified_token)
state._modified_event(dict_, impl, NO_VALUE, is_userland=True)
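# Illustrative sketch (``document`` is a hypothetical instance with a
# dictionary-valued ``data`` column whose type does not track mutations on its
# own): in-place mutation is invisible to the unit of work, so the change is
# flagged explicitly.
#
#     from sqlalchemy.orm.attributes import flag_modified
#
#     document.data["key1"] = "new value"   # not detected on its own
#     flag_modified(document, "data")       # included in the next flush
#     session.commit()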
def flag_dirty(instance):
"""Mark an instance as 'dirty' without any specific attribute mentioned.
This is a special operation that will allow the object to travel through
the flush process for interception by events such as
:meth:`.SessionEvents.before_flush`. Note that no SQL will be emitted in
the flush process for an object that has no changes, even if marked dirty
via this method. However, a :meth:`.SessionEvents.before_flush` handler
will be able to see the object in the :attr:`.Session.dirty` collection and
may establish changes on it, which will then be included in the SQL
emitted.
.. versionadded:: 1.2
.. seealso::
:func:`.attributes.flag_modified`
"""
state, dict_ = instance_state(instance), instance_dict(instance)
state._modified_event(dict_, None, NO_VALUE, is_userland=True)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/strategies.py
# orm/strategies.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""sqlalchemy.orm.interfaces.LoaderStrategy
implementations, and related MapperOptions."""
from __future__ import absolute_import
import collections
import itertools
from . import attributes
from . import exc as orm_exc
from . import interfaces
from . import loading
from . import properties
from . import query
from . import unitofwork
from . import util as orm_util
from .base import _DEFER_FOR_STATE
from .base import _SET_DEFERRED_EXPIRED
from .interfaces import LoaderStrategy
from .interfaces import StrategizedProperty
from .session import _state_session
from .state import InstanceState
from .util import _none_set
from .util import aliased
from .. import event
from .. import exc as sa_exc
from .. import inspect
from .. import log
from .. import sql
from .. import util
from ..sql import util as sql_util
from ..sql import visitors
def _register_attribute(
prop,
mapper,
useobject,
compare_function=None,
typecallable=None,
callable_=None,
proxy_property=None,
active_history=False,
impl_class=None,
**kw
):
attribute_ext = list(util.to_list(prop.extension, default=[]))
listen_hooks = []
uselist = useobject and prop.uselist
if useobject and prop.single_parent:
listen_hooks.append(single_parent_validator)
if prop.key in prop.parent.validators:
fn, opts = prop.parent.validators[prop.key]
listen_hooks.append(
lambda desc, prop: orm_util._validator_events(
desc, prop.key, fn, **opts
)
)
if useobject:
listen_hooks.append(unitofwork.track_cascade_events)
# need to assemble backref listeners
# after the singleparentvalidator, mapper validator
if useobject:
backref = prop.back_populates
if backref and prop._effective_sync_backref:
listen_hooks.append(
lambda desc, prop: attributes.backref_listeners(
desc, backref, uselist
)
)
# a single MapperProperty is shared down a class inheritance
# hierarchy, so we set up attribute instrumentation and backref event
# for each mapper down the hierarchy.
# typically, "mapper" is the same as prop.parent, due to the way
# the configure_mappers() process runs, however this is not strongly
# enforced, and in the case of a second configure_mappers() run the
# mapper here might not be prop.parent; also, a subclass mapper may
# be called here before a superclass mapper. That is, can't depend
# on mappers not already being set up so we have to check each one.
for m in mapper.self_and_descendants:
if prop is m._props.get(
prop.key
) and not m.class_manager._attr_has_impl(prop.key):
desc = attributes.register_attribute_impl(
m.class_,
prop.key,
parent_token=prop,
uselist=uselist,
compare_function=compare_function,
useobject=useobject,
extension=attribute_ext,
trackparent=useobject
and (
prop.single_parent
or prop.direction is interfaces.ONETOMANY
),
typecallable=typecallable,
callable_=callable_,
active_history=active_history,
impl_class=impl_class,
send_modified_events=not useobject or not prop.viewonly,
doc=prop.doc,
**kw
)
for hook in listen_hooks:
hook(desc, prop)
@properties.ColumnProperty.strategy_for(instrument=False, deferred=False)
class UninstrumentedColumnLoader(LoaderStrategy):
"""Represent a non-instrumented MapperProperty.
The polymorphic_on argument of mapper() often results in this,
if the argument is against the with_polymorphic selectable.
"""
__slots__ = ("columns",)
def __init__(self, parent, strategy_key):
super(UninstrumentedColumnLoader, self).__init__(parent, strategy_key)
self.columns = self.parent_property.columns
def setup_query(
self,
context,
query_entity,
path,
loadopt,
adapter,
column_collection=None,
**kwargs
):
for c in self.columns:
if adapter:
c = adapter.columns[c]
column_collection.append(c)
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
pass
@log.class_logger
@properties.ColumnProperty.strategy_for(instrument=True, deferred=False)
class ColumnLoader(LoaderStrategy):
"""Provide loading behavior for a :class:`.ColumnProperty`."""
__slots__ = "columns", "is_composite"
def __init__(self, parent, strategy_key):
super(ColumnLoader, self).__init__(parent, strategy_key)
self.columns = self.parent_property.columns
self.is_composite = hasattr(self.parent_property, "composite_class")
def setup_query(
self,
context,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
**kwargs
):
for c in self.columns:
if adapter:
c = adapter.columns[c]
column_collection.append(c)
fetch = self.columns[0]
if adapter:
fetch = adapter.columns[fetch]
memoized_populators[self.parent_property] = fetch
def init_class_attribute(self, mapper):
self.is_class_level = True
coltype = self.columns[0].type
# TODO: check all columns ? check for foreign key as well?
active_history = (
self.parent_property.active_history
or self.columns[0].primary_key
or mapper.version_id_col in set(self.columns)
)
_register_attribute(
self.parent_property,
mapper,
useobject=False,
compare_function=coltype.compare_values,
active_history=active_history,
)
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
# look through list of columns represented here
# to see which, if any, is present in the row.
for col in self.columns:
if adapter:
col = adapter.columns[col]
getter = result._getter(col, False)
if getter:
populators["quick"].append((self.key, getter))
break
else:
populators["expire"].append((self.key, True))
@log.class_logger
@properties.ColumnProperty.strategy_for(query_expression=True)
class ExpressionColumnLoader(ColumnLoader):
def __init__(self, parent, strategy_key):
super(ExpressionColumnLoader, self).__init__(parent, strategy_key)
null = sql.null()
self._have_default_expression = any(
not c.compare(null) for c in self.parent_property.columns
)
def setup_query(
self,
context,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
**kwargs
):
columns = None
if loadopt and "expression" in loadopt.local_opts:
columns = [loadopt.local_opts["expression"]]
elif self._have_default_expression:
columns = self.parent_property.columns
if columns is None:
return
for c in columns:
if adapter:
c = adapter.columns[c]
column_collection.append(c)
fetch = columns[0]
if adapter:
fetch = adapter.columns[fetch]
memoized_populators[self.parent_property] = fetch
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
# look through list of columns represented here
# to see which, if any, is present in the row.
if loadopt and "expression" in loadopt.local_opts:
columns = [loadopt.local_opts["expression"]]
for col in columns:
if adapter:
col = adapter.columns[col]
getter = result._getter(col, False)
if getter:
populators["quick"].append((self.key, getter))
break
else:
populators["expire"].append((self.key, True))
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self.parent_property,
mapper,
useobject=False,
compare_function=self.columns[0].type.compare_values,
accepts_scalar_loader=False,
)
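# Illustrative sketch (``A`` is a hypothetical mapped class): the loader above
# services attributes mapped with query_expression(), which receive a value
# only when a query supplies one via with_expression().
#
#     from sqlalchemy.orm import query_expression, with_expression
#
#     class A(Base):
#         __tablename__ = "a"
#         id = Column(Integer, primary_key=True)
#         x = Column(Integer)
#         expr = query_expression()
#
#     a1 = session.query(A).options(with_expression(A.expr, A.x * 2)).first()
#     a1.expr   # populated from the query; typically None when not supplied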
@log.class_logger
@properties.ColumnProperty.strategy_for(deferred=True, instrument=True)
@properties.ColumnProperty.strategy_for(do_nothing=True)
class DeferredColumnLoader(LoaderStrategy):
"""Provide loading behavior for a deferred :class:`.ColumnProperty`."""
__slots__ = "columns", "group"
def __init__(self, parent, strategy_key):
super(DeferredColumnLoader, self).__init__(parent, strategy_key)
if hasattr(self.parent_property, "composite_class"):
raise NotImplementedError(
"Deferred loading for composite " "types not implemented yet"
)
self.columns = self.parent_property.columns
self.group = self.parent_property.group
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
# this path currently does not check the result
# for the column; this is because in most cases we are
# working just with the setup_query() directive which does
# not support this, and the behavior here should be consistent.
if not self.is_class_level:
set_deferred_for_local_state = (
self.parent_property._deferred_column_loader
)
populators["new"].append((self.key, set_deferred_for_local_state))
else:
populators["expire"].append((self.key, False))
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self.parent_property,
mapper,
useobject=False,
compare_function=self.columns[0].type.compare_values,
callable_=self._load_for_state,
expire_missing=False,
)
def setup_query(
self,
context,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
only_load_props=None,
**kw
):
if (
(
loadopt
and "undefer_pks" in loadopt.local_opts
and set(self.columns).intersection(
self.parent._should_undefer_in_wildcard
)
)
or (
loadopt
and self.group
and loadopt.local_opts.get(
"undefer_group_%s" % self.group, False
)
)
or (only_load_props and self.key in only_load_props)
):
self.parent_property._get_strategy(
(("deferred", False), ("instrument", True))
).setup_query(
context,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
**kw
)
elif self.is_class_level:
memoized_populators[self.parent_property] = _SET_DEFERRED_EXPIRED
else:
memoized_populators[self.parent_property] = _DEFER_FOR_STATE
def _load_for_state(self, state, passive):
if not state.key:
return attributes.ATTR_EMPTY
if not passive & attributes.SQL_OK:
return attributes.PASSIVE_NO_RESULT
localparent = state.manager.mapper
if self.group:
toload = [
p.key
for p in localparent.iterate_properties
if isinstance(p, StrategizedProperty)
and isinstance(p.strategy, DeferredColumnLoader)
and p.group == self.group
]
else:
toload = [self.key]
# narrow the keys down to just those which have no history
group = [k for k in toload if k in state.unmodified]
session = _state_session(state)
if session is None:
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session; "
"deferred load operation of attribute '%s' cannot proceed"
% (orm_util.state_str(state), self.key)
)
query = session.query(localparent)
if (
loading.load_on_ident(
query, state.key, only_load_props=group, refresh_state=state
)
is None
):
raise orm_exc.ObjectDeletedError(state)
return attributes.ATTR_WAS_SET
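# Illustrative sketch (``Book`` is a hypothetical mapped class): the strategy
# above services columns mapped with deferred(), loading them only on first
# access or when undeferred up front.
#
#     from sqlalchemy.orm import deferred, undefer
#
#     class Book(Base):
#         __tablename__ = "book"
#         id = Column(Integer, primary_key=True)
#         summary = deferred(Column(Text))
#
#     book = session.query(Book).first()   # SELECT omits "summary"
#     book.summary                         # emits a second SELECT on access
#
#     session.query(Book).options(undefer(Book.summary)).all()   # load up front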
class LoadDeferredColumns(object):
"""serializable loader object used by DeferredColumnLoader"""
def __init__(self, key):
self.key = key
def __call__(self, state, passive=attributes.PASSIVE_OFF):
key = self.key
localparent = state.manager.mapper
prop = localparent._props[key]
strategy = prop._strategies[DeferredColumnLoader]
return strategy._load_for_state(state, passive)
class AbstractRelationshipLoader(LoaderStrategy):
"""LoaderStratgies which deal with related objects."""
__slots__ = "mapper", "target", "uselist", "entity"
def __init__(self, parent, strategy_key):
super(AbstractRelationshipLoader, self).__init__(parent, strategy_key)
self.mapper = self.parent_property.mapper
self.entity = self.parent_property.entity
self.target = self.parent_property.target
self.uselist = self.parent_property.uselist
@log.class_logger
@properties.RelationshipProperty.strategy_for(do_nothing=True)
class DoNothingLoader(LoaderStrategy):
"""Relationship loader that makes no change to the object's state.
Compared to NoLoader, this loader does not initialize the
collection/attribute to empty/none; the usual default LazyLoader will
take effect.
"""
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="noload")
@properties.RelationshipProperty.strategy_for(lazy=None)
class NoLoader(AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.RelationshipProperty`
with "lazy=None".
"""
__slots__ = ()
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self.parent_property,
mapper,
useobject=True,
typecallable=self.parent_property.collection_class,
)
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
def invoke_no_load(state, dict_, row):
if self.uselist:
state.manager.get_impl(self.key).initialize(state, dict_)
else:
dict_[self.key] = None
populators["new"].append((self.key, invoke_no_load))
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy=True)
@properties.RelationshipProperty.strategy_for(lazy="select")
@properties.RelationshipProperty.strategy_for(lazy="raise")
@properties.RelationshipProperty.strategy_for(lazy="raise_on_sql")
@properties.RelationshipProperty.strategy_for(lazy="baked_select")
class LazyLoader(AbstractRelationshipLoader, util.MemoizedSlots):
"""Provide loading behavior for a :class:`.RelationshipProperty`
with "lazy=True", that is loads when first accessed.
"""
__slots__ = (
"_lazywhere",
"_rev_lazywhere",
"use_get",
"is_aliased_class",
"_bind_to_col",
"_equated_columns",
"_rev_bind_to_col",
"_rev_equated_columns",
"_simple_lazy_clause",
"_raise_always",
"_raise_on_sql",
"_bakery",
)
def __init__(self, parent, strategy_key):
super(LazyLoader, self).__init__(parent, strategy_key)
self._raise_always = self.strategy_opts["lazy"] == "raise"
self._raise_on_sql = self.strategy_opts["lazy"] == "raise_on_sql"
self.is_aliased_class = inspect(self.entity).is_aliased_class
join_condition = self.parent_property._join_condition
(
self._lazywhere,
self._bind_to_col,
self._equated_columns,
) = join_condition.create_lazy_clause()
(
self._rev_lazywhere,
self._rev_bind_to_col,
self._rev_equated_columns,
) = join_condition.create_lazy_clause(reverse_direction=True)
self.logger.info("%s lazy loading clause %s", self, self._lazywhere)
# determine if our "lazywhere" clause is the same as the mapper's
# get() clause. then we can just use mapper.get()
#
# TODO: the "not self.uselist" can be taken out entirely; a m2o
# load that populates for a list (very unusual, but is possible with
# the API) can still set for "None" and the attribute system will
# populate as an empty list.
self.use_get = (
not self.is_aliased_class
and not self.uselist
and self.entity._get_clause[0].compare(
self._lazywhere,
use_proxies=True,
equivalents=self.mapper._equivalent_columns,
)
)
if self.use_get:
for col in list(self._equated_columns):
if col in self.mapper._equivalent_columns:
for c in self.mapper._equivalent_columns[col]:
self._equated_columns[c] = self._equated_columns[col]
self.logger.info(
"%s will use query.get() to " "optimize instance loads", self
)
def init_class_attribute(self, mapper):
self.is_class_level = True
active_history = (
self.parent_property.active_history
or self.parent_property.direction is not interfaces.MANYTOONE
or not self.use_get
)
# MANYTOONE currently only needs the
# "old" value for delete-orphan
# cascades. the required _SingleParentValidator
# will enable active_history
# in that case. otherwise we don't need the
# "old" value during backref operations.
_register_attribute(
self.parent_property,
mapper,
useobject=True,
callable_=self._load_for_state,
typecallable=self.parent_property.collection_class,
active_history=active_history,
)
def _memoized_attr__simple_lazy_clause(self):
criterion, bind_to_col = (self._lazywhere, self._bind_to_col)
params = []
def visit_bindparam(bindparam):
bindparam.unique = False
visitors.traverse(criterion, {}, {"bindparam": visit_bindparam})
def visit_bindparam(bindparam):
if bindparam._identifying_key in bind_to_col:
params.append(
(
bindparam.key,
bind_to_col[bindparam._identifying_key],
None,
)
)
elif bindparam.callable is None:
params.append((bindparam.key, None, bindparam.value))
criterion = visitors.cloned_traverse(
criterion, {}, {"bindparam": visit_bindparam}
)
return criterion, params
def _generate_lazy_clause(self, state, passive):
criterion, param_keys = self._simple_lazy_clause
if state is None:
return sql_util.adapt_criterion_to_null(
criterion, [key for key, ident, value in param_keys]
)
mapper = self.parent_property.parent
o = state.obj() # strong ref
dict_ = attributes.instance_dict(o)
if passive & attributes.INIT_OK:
passive ^= attributes.INIT_OK
params = {}
for key, ident, value in param_keys:
if ident is not None:
if passive and passive & attributes.LOAD_AGAINST_COMMITTED:
value = mapper._get_committed_state_attr_by_column(
state, dict_, ident, passive
)
else:
value = mapper._get_state_attr_by_column(
state, dict_, ident, passive
)
params[key] = value
return criterion, params
def _invoke_raise_load(self, state, passive, lazy):
raise sa_exc.InvalidRequestError(
"'%s' is not available due to lazy='%s'" % (self, lazy)
)
def _load_for_state(self, state, passive):
if not state.key and (
(
not self.parent_property.load_on_pending
and not state._load_pending
)
or not state.session_id
):
return attributes.ATTR_EMPTY
pending = not state.key
primary_key_identity = None
if (not passive & attributes.SQL_OK and not self.use_get) or (
not passive & attributes.NON_PERSISTENT_OK and pending
):
return attributes.PASSIVE_NO_RESULT
if (
# we were given lazy="raise"
self._raise_always
# the no_raise history-related flag was not passed
and not passive & attributes.NO_RAISE
and (
# if we are use_get and related_object_ok is disabled,
# which means we are at most looking in the identity map
# for history purposes or otherwise returning
# PASSIVE_NO_RESULT, don't raise. This is also a
# history-related flag
not self.use_get
or passive & attributes.RELATED_OBJECT_OK
)
):
self._invoke_raise_load(state, passive, "raise")
session = _state_session(state)
if not session:
if passive & attributes.NO_RAISE:
return attributes.PASSIVE_NO_RESULT
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session; "
"lazy load operation of attribute '%s' cannot proceed"
% (orm_util.state_str(state), self.key)
)
# if we have a simple primary key load, check the
# identity map without generating a Query at all
if self.use_get:
primary_key_identity = self._get_ident_for_use_get(
session, state, passive
)
if attributes.PASSIVE_NO_RESULT in primary_key_identity:
return attributes.PASSIVE_NO_RESULT
elif attributes.NEVER_SET in primary_key_identity:
return attributes.NEVER_SET
if _none_set.issuperset(primary_key_identity):
return None
# look for this identity in the identity map. Delegate to the
# Query class in use, as it may have special rules for how it
# does this, including how it decides what the correct
# identity_token would be for this identity.
instance = session.query()._identity_lookup(
self.entity,
primary_key_identity,
passive=passive,
lazy_loaded_from=state,
)
if instance is not None:
if instance is attributes.PASSIVE_CLASS_MISMATCH:
return None
else:
return instance
elif (
not passive & attributes.SQL_OK
or not passive & attributes.RELATED_OBJECT_OK
):
return attributes.PASSIVE_NO_RESULT
return self._emit_lazyload(
session, state, primary_key_identity, passive
)
def _get_ident_for_use_get(self, session, state, passive):
instance_mapper = state.manager.mapper
if passive & attributes.LOAD_AGAINST_COMMITTED:
get_attr = instance_mapper._get_committed_state_attr_by_column
else:
get_attr = instance_mapper._get_state_attr_by_column
dict_ = state.dict
return [
get_attr(state, dict_, self._equated_columns[pk], passive=passive)
for pk in self.mapper.primary_key
]
@util.dependencies("sqlalchemy.ext.baked")
def _memoized_attr__bakery(self, baked):
return baked.bakery(size=50)
@util.dependencies("sqlalchemy.orm.strategy_options")
def _emit_lazyload(
self, strategy_options, session, state, primary_key_identity, passive
):
# emit lazy load now using BakedQuery, to cut way down on the overhead
# of generating queries.
# there are two big things we are trying to guard against here:
#
# 1. two different lazy loads that need to have a different result,
# being cached on the same key. The results between two lazy loads
# can be different due to the options passed to the query, which
# take effect for descendant objects. Therefore we have to make
# sure paths and load options generate good cache keys, and if they
# don't, we don't cache.
# 2. a lazy load that gets cached on a key that includes some
# "throwaway" object, like a per-query AliasedClass, meaning
# the cache key will never be seen again and the cache itself
# will fill up. (the cache is an LRU cache, so while we won't
# run out of memory, it will perform terribly when it's full. A
# warning is emitted if this occurs.) We must prevent the
# generation of a cache key that is including a throwaway object
# in the key.
# note that "lazy='select'" and "lazy=True" make two separate
# lazy loaders. Currently the LRU cache is local to the LazyLoader,
# however add ourselves to the initial cache key just to future
# proof in case it moves
q = self._bakery(lambda session: session.query(self.entity), self)
q.add_criteria(
lambda q: q._adapt_all_clauses()._with_invoke_all_eagers(False),
self.parent_property,
)
if not self.parent_property.bake_queries:
q.spoil(full=True)
if self.parent_property.secondary is not None:
q.add_criteria(
lambda q: q.select_from(
self.mapper, self.parent_property.secondary
)
)
pending = not state.key
# don't autoflush on pending
if pending or passive & attributes.NO_AUTOFLUSH:
q.add_criteria(lambda q: q.autoflush(False))
if state.load_options:
# here, if any of the options cannot return a cache key,
# the BakedQuery "spoils" and caching will not occur. a path
# that features Cls.attribute.of_type(some_alias) will cancel
# caching, for example, since "some_alias" is user-defined and
# is usually a throwaway object.
effective_path = state.load_path[self.parent_property]
q._add_lazyload_options(state.load_options, effective_path)
if self.use_get:
if self._raise_on_sql:
self._invoke_raise_load(state, passive, "raise_on_sql")
return (
q(session)
.with_post_criteria(lambda q: q._set_lazyload_from(state))
._load_on_pk_identity(
session.query(self.mapper), primary_key_identity
)
)
if self.parent_property.order_by:
q.add_criteria(
lambda q: q.order_by(
*util.to_list(self.parent_property.order_by)
)
)
for rev in self.parent_property._reverse_property:
# reverse props that are MANYTOONE are loading *this*
# object from get(), so don't need to eager out to those.
if (
rev.direction is interfaces.MANYTOONE
and rev._use_get
and not isinstance(rev.strategy, LazyLoader)
):
q.add_criteria(
lambda q: q.options(
strategy_options.Load.for_existing_path(
q._current_path[rev.parent]
).lazyload(rev.key)
)
)
lazy_clause, params = self._generate_lazy_clause(state, passive)
if pending:
if util.has_intersection(orm_util._none_set, params.values()):
return None
elif util.has_intersection(orm_util._never_set, params.values()):
return None
if self._raise_on_sql:
self._invoke_raise_load(state, passive, "raise_on_sql")
q.add_criteria(lambda q: q.filter(lazy_clause))
# set parameters in the query such that we don't overwrite
# parameters that are already set within it
def set_default_params(q):
params.update(q._params)
q._params = params
return q
result = (
q(session)
.with_post_criteria(lambda q: q._set_lazyload_from(state))
.with_post_criteria(set_default_params)
.all()
)
if self.uselist:
return result
else:
l = len(result)
if l:
if l > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for lazily-loaded attribute '%s' "
% self.parent_property
)
return result[0]
else:
return None
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
key = self.key
if not self.is_class_level:
# we are not the primary manager for this attribute
# on this class - set up a
# per-instance lazyloader, which will override the
# class-level behavior.
# this currently only happens when using a
# "lazyload" option on a "no load"
# attribute - "eager" attributes always have a
# class-level lazyloader installed.
set_lazy_callable = (
InstanceState._instance_level_callable_processor
)(mapper.class_manager, LoadLazyAttribute(key, self), key)
populators["new"].append((self.key, set_lazy_callable))
elif context.populate_existing or mapper.always_refresh:
def reset_for_lazy_callable(state, dict_, row):
# we are the primary manager for this attribute on
# this class - reset its
# per-instance attribute state, so that the class-level
# lazy loader is
# executed when next referenced on this instance.
# this is needed in
# populate_existing() types of scenarios to reset
# any existing state.
state._reset(dict_, key)
populators["new"].append((self.key, reset_for_lazy_callable))
class LoadLazyAttribute(object):
"""serializable loader object used by LazyLoader"""
def __init__(self, key, initiating_strategy):
self.key = key
self.strategy_key = initiating_strategy.strategy_key
def __call__(self, state, passive=attributes.PASSIVE_OFF):
key = self.key
instance_mapper = state.manager.mapper
prop = instance_mapper._props[key]
strategy = prop._strategies[self.strategy_key]
return strategy._load_for_state(state, passive)
@properties.RelationshipProperty.strategy_for(lazy="immediate")
class ImmediateLoader(AbstractRelationshipLoader):
__slots__ = ()
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
def setup_query(
self,
context,
entity,
path,
loadopt,
adapter,
column_collection=None,
parentmapper=None,
**kwargs
):
pass
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
def load_immediate(state, dict_, row):
state.get_impl(self.key).get(state, dict_)
populators["delayed"].append((self.key, load_immediate))
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="subquery")
class SubqueryLoader(AbstractRelationshipLoader):
__slots__ = ("join_depth",)
def __init__(self, parent, strategy_key):
super(SubqueryLoader, self).__init__(parent, strategy_key)
self.join_depth = self.parent_property.join_depth
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
def setup_query(
self,
context,
entity,
path,
loadopt,
adapter,
column_collection=None,
parentmapper=None,
**kwargs
):
if not context.query._enable_eagerloads:
return
elif context.query._yield_per:
context.query._no_yield_per("subquery")
path = path[self.parent_property]
# build up a path indicating the path from the leftmost
# entity to the thing we're subquery loading.
with_poly_entity = path.get(
context.attributes, "path_with_polymorphic", None
)
if with_poly_entity is not None:
effective_entity = with_poly_entity
else:
effective_entity = self.entity
subq_path = context.attributes.get(
("subquery_path", None), orm_util.PathRegistry.root
)
subq_path = subq_path + path
# if not via query option, check for
# a cycle
if not path.contains(context.attributes, "loader"):
if self.join_depth:
if (
(
context.query._current_path.length
if context.query._current_path
else 0
)
+ path.length
) / 2 > self.join_depth:
return
elif subq_path.contains_mapper(self.mapper):
return
(
leftmost_mapper,
leftmost_attr,
leftmost_relationship,
) = self._get_leftmost(subq_path)
orig_query = context.attributes.get(
("orig_query", SubqueryLoader), context.query
)
# generate a new Query from the original, then
# produce a subquery from it.
left_alias = self._generate_from_original_query(
orig_query,
leftmost_mapper,
leftmost_attr,
leftmost_relationship,
entity.entity_zero,
)
# generate another Query that will join the
# left alias to the target relationships.
# basically doing a longhand
# "from_self()". (from_self() itself not quite industrial
# strength enough for all contingencies...but very close)
q = orig_query.session.query(effective_entity)
q._attributes = {
("orig_query", SubqueryLoader): orig_query,
("subquery_path", None): subq_path,
}
q = q._set_enable_single_crit(False)
to_join, local_attr, parent_alias = self._prep_for_joins(
left_alias, subq_path
)
q = q.add_columns(*local_attr)
q = self._apply_joins(
q, to_join, left_alias, parent_alias, effective_entity
)
q = self._setup_options(q, subq_path, orig_query, effective_entity)
q = self._setup_outermost_orderby(q)
# add new query to attributes to be picked up
# by create_row_processor
path.set(context.attributes, "subquery", q)
def _get_leftmost(self, subq_path):
subq_path = subq_path.path
subq_mapper = orm_util._class_to_mapper(subq_path[0])
# determine attributes of the leftmost mapper
if (
self.parent.isa(subq_mapper)
and self.parent_property is subq_path[1]
):
leftmost_mapper, leftmost_prop = self.parent, self.parent_property
else:
leftmost_mapper, leftmost_prop = subq_mapper, subq_path[1]
leftmost_cols = leftmost_prop.local_columns
leftmost_attr = [
getattr(
subq_path[0].entity, leftmost_mapper._columntoproperty[c].key
)
for c in leftmost_cols
]
return leftmost_mapper, leftmost_attr, leftmost_prop
def _generate_from_original_query(
self,
orig_query,
leftmost_mapper,
leftmost_attr,
leftmost_relationship,
orig_entity,
):
# reformat the original query
# to look only for significant columns
q = orig_query._clone().correlate(None)
# set the query's "FROM" list explicitly to what the
# FROM list would be in any case, as we will be limiting
# the columns in the SELECT list which may no longer include
# all entities mentioned in things like WHERE, JOIN, etc.
if not q._from_obj:
q._set_select_from(
list(
set(
[
ent["entity"]
for ent in orig_query.column_descriptions
if ent["entity"] is not None
]
)
),
False,
)
# select from the identity columns of the outer (specifically, these
# are the 'local_cols' of the property). This will remove
# other columns from the query that might suggest the right entity
# which is why we do _set_select_from above.
target_cols = q._adapt_col_list(leftmost_attr)
q._set_entities(target_cols)
distinct_target_key = leftmost_relationship.distinct_target_key
if distinct_target_key is True:
q._distinct = True
elif distinct_target_key is None:
# if target_cols refer to a non-primary key or only
# part of a composite primary key, set the q as distinct
for t in set(c.table for c in target_cols):
if not set(target_cols).issuperset(t.primary_key):
q._distinct = True
break
if q._order_by is False:
q._order_by = leftmost_mapper.order_by
# don't need ORDER BY if no limit/offset
if q._limit is None and q._offset is None:
q._order_by = None
# the original query now becomes a subquery
# which we'll join onto.
embed_q = q.with_labels().subquery()
left_alias = orm_util.AliasedClass(
leftmost_mapper, embed_q, use_mapper_path=True
)
return left_alias
def _prep_for_joins(self, left_alias, subq_path):
# figure out what's being joined. a.k.a. the fun part
to_join = []
pairs = list(subq_path.pairs())
for i, (mapper, prop) in enumerate(pairs):
if i > 0:
# look at the previous mapper in the chain -
# if it is as or more specific than this prop's
# mapper, use that instead.
# note we have an assumption here that
# the non-first element is always going to be a mapper,
# not an AliasedClass
prev_mapper = pairs[i - 1][1].mapper
to_append = prev_mapper if prev_mapper.isa(mapper) else mapper
else:
to_append = mapper
to_join.append((to_append, prop.key))
# determine the immediate parent class we are joining from,
# which needs to be aliased.
if len(to_join) < 2:
# in the case of a one level eager load, this is the
# leftmost "left_alias".
parent_alias = left_alias
else:
info = inspect(to_join[-1][0])
if info.is_aliased_class:
parent_alias = info.entity
else:
# alias a plain mapper as we may be
# joining multiple times
parent_alias = orm_util.AliasedClass(
info.entity, use_mapper_path=True
)
local_cols = self.parent_property.local_columns
local_attr = [
getattr(parent_alias, self.parent._columntoproperty[c].key)
for c in local_cols
]
return to_join, local_attr, parent_alias
def _apply_joins(
self, q, to_join, left_alias, parent_alias, effective_entity
):
ltj = len(to_join)
if ltj == 1:
to_join = [
getattr(left_alias, to_join[0][1]).of_type(effective_entity)
]
elif ltj == 2:
to_join = [
getattr(left_alias, to_join[0][1]).of_type(parent_alias),
getattr(parent_alias, to_join[-1][1]).of_type(
effective_entity
),
]
elif ltj > 2:
middle = [
(
orm_util.AliasedClass(item[0])
if not inspect(item[0]).is_aliased_class
else item[0].entity,
item[1],
)
for item in to_join[1:-1]
]
inner = []
while middle:
item = middle.pop(0)
attr = getattr(item[0], item[1])
if middle:
attr = attr.of_type(middle[0][0])
else:
attr = attr.of_type(parent_alias)
inner.append(attr)
to_join = (
[getattr(left_alias, to_join[0][1]).of_type(inner[0].parent)]
+ inner
+ [
getattr(parent_alias, to_join[-1][1]).of_type(
effective_entity
)
]
)
for attr in to_join:
q = q.join(attr, from_joinpoint=True)
return q
def _setup_options(self, q, subq_path, orig_query, effective_entity):
# propagate loader options etc. to the new query.
# these will fire relative to subq_path.
q = q._with_current_path(subq_path)
q = q._conditional_options(*orig_query._with_options)
if orig_query._populate_existing:
q._populate_existing = orig_query._populate_existing
return q
def _setup_outermost_orderby(self, q):
if self.parent_property.order_by:
# if there's an ORDER BY, alias it the same
# way joinedloader does, but we have to pull out
# the "eagerjoin" from the query.
# this really only picks up the "secondary" table
# right now.
eagerjoin = q._from_obj[0]
eager_order_by = eagerjoin._target_adapter.copy_and_process(
util.to_list(self.parent_property.order_by)
)
q = q.order_by(*eager_order_by)
return q
class _SubqCollections(object):
"""Given a :class:`_query.Query` used to emit the "subquery load",
provide a load interface that executes the query at the
first moment a value is needed.
"""
_data = None
def __init__(self, subq):
self.subq = subq
def get(self, key, default):
if self._data is None:
self._load()
return self._data.get(key, default)
def _load(self):
self._data = collections.defaultdict(list)
for k, v in itertools.groupby(self.subq, lambda x: x[1:]):
self._data[k].extend(vv[0] for vv in v)
def loader(self, state, dict_, row):
if self._data is None:
self._load()
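    # Illustrative sketch (assumption, not library code): each subquery row is
    # shaped (related_entity, *parent_key_cols); _load() groups rows by that
    # trailing key, e.g.
    #
    #     rows = [("child1", 1), ("child2", 1), ("child3", 2)]
    #     data = collections.defaultdict(list)
    #     for k, v in itertools.groupby(rows, lambda x: x[1:]):
    #         data[k].extend(vv[0] for vv in v)
    #     # data == {(1,): ["child1", "child2"], (2,): ["child3"]}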
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." % self
)
path = path[self.parent_property]
subq = path.get(context.attributes, "subquery")
if subq is None:
return
assert subq.session is context.session, (
"Subquery session doesn't refer to that of "
"our context. Are there broken context caching "
"schemes being used?"
)
local_cols = self.parent_property.local_columns
# cache the loaded collections in the context
# so that inheriting mappers don't re-load when they
# call upon create_row_processor again
collections = path.get(context.attributes, "collections")
if collections is None:
collections = self._SubqCollections(subq)
path.set(context.attributes, "collections", collections)
if adapter:
local_cols = [adapter.columns[c] for c in local_cols]
if self.uselist:
self._create_collection_loader(
context, collections, local_cols, populators
)
else:
self._create_scalar_loader(
context, collections, local_cols, populators
)
def _create_collection_loader(
self, context, collections, local_cols, populators
):
def load_collection_from_subq(state, dict_, row):
collection = collections.get(
tuple([row[col] for col in local_cols]), ()
)
state.get_impl(self.key).set_committed_value(
state, dict_, collection
)
def load_collection_from_subq_existing_row(state, dict_, row):
if self.key not in dict_:
load_collection_from_subq(state, dict_, row)
populators["new"].append((self.key, load_collection_from_subq))
populators["existing"].append(
(self.key, load_collection_from_subq_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append((self.key, collections.loader))
def _create_scalar_loader(
self, context, collections, local_cols, populators
):
def load_scalar_from_subq(state, dict_, row):
collection = collections.get(
tuple([row[col] for col in local_cols]), (None,)
)
if len(collection) > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded attribute '%s' " % self
)
scalar = collection[0]
state.get_impl(self.key).set_committed_value(state, dict_, scalar)
def load_scalar_from_subq_existing_row(state, dict_, row):
if self.key not in dict_:
load_scalar_from_subq(state, dict_, row)
populators["new"].append((self.key, load_scalar_from_subq))
populators["existing"].append(
(self.key, load_scalar_from_subq_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append((self.key, collections.loader))
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="joined")
@properties.RelationshipProperty.strategy_for(lazy=False)
class JoinedLoader(AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.RelationshipProperty`
using joined eager loading.
"""
__slots__ = "join_depth", "_aliased_class_pool"
def __init__(self, parent, strategy_key):
super(JoinedLoader, self).__init__(parent, strategy_key)
self.join_depth = self.parent_property.join_depth
self._aliased_class_pool = []
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
def setup_query(
self,
context,
query_entity,
path,
loadopt,
adapter,
column_collection=None,
parentmapper=None,
chained_from_outerjoin=False,
**kwargs
):
"""Add a left outer join to the statement that's being constructed."""
if not context.query._enable_eagerloads:
return
elif context.query._yield_per and self.uselist:
context.query._no_yield_per("joined collection")
path = path[self.parent_property]
with_polymorphic = None
user_defined_adapter = (
self._init_user_defined_eager_proc(loadopt, context)
if loadopt
else False
)
if user_defined_adapter is not False:
(
clauses,
adapter,
add_to_collection,
) = self._setup_query_on_user_defined_adapter(
context, query_entity, path, adapter, user_defined_adapter
)
else:
# if not via query option, check for
# a cycle
if not path.contains(context.attributes, "loader"):
if self.join_depth:
if path.length / 2 > self.join_depth:
return
elif path.contains_mapper(self.mapper):
return
(
clauses,
adapter,
add_to_collection,
chained_from_outerjoin,
) = self._generate_row_adapter(
context,
query_entity,
path,
loadopt,
adapter,
column_collection,
parentmapper,
chained_from_outerjoin,
)
with_poly_entity = path.get(
context.attributes, "path_with_polymorphic", None
)
if with_poly_entity is not None:
with_polymorphic = inspect(
with_poly_entity
).with_polymorphic_mappers
else:
with_polymorphic = None
path = path[self.entity]
loading._setup_entity_query(
context,
self.mapper,
query_entity,
path,
clauses,
add_to_collection,
with_polymorphic=with_polymorphic,
parentmapper=self.mapper,
chained_from_outerjoin=chained_from_outerjoin,
)
if with_poly_entity is not None and None in set(
context.secondary_columns
):
raise sa_exc.InvalidRequestError(
"Detected unaliased columns when generating joined "
"load. Make sure to use aliased=True or flat=True "
"when using joined loading with with_polymorphic()."
)
def _init_user_defined_eager_proc(self, loadopt, context):
# check if the opt applies at all
if "eager_from_alias" not in loadopt.local_opts:
# nope
return False
path = loadopt.path.parent
# the option applies. check if the "user_defined_eager_row_processor"
# has been built up.
adapter = path.get(
context.attributes, "user_defined_eager_row_processor", False
)
if adapter is not False:
# just return it
return adapter
# otherwise figure it out.
alias = loadopt.local_opts["eager_from_alias"]
root_mapper, prop = path[-2:]
if alias is not None:
if isinstance(alias, str):
alias = prop.target.alias(alias)
adapter = sql_util.ColumnAdapter(
alias, equivalents=prop.mapper._equivalent_columns
)
else:
if path.contains(context.attributes, "path_with_polymorphic"):
with_poly_entity = path.get(
context.attributes, "path_with_polymorphic"
)
adapter = orm_util.ORMAdapter(
with_poly_entity,
equivalents=prop.mapper._equivalent_columns,
)
else:
adapter = context.query._polymorphic_adapters.get(
prop.mapper, None
)
path.set(
context.attributes, "user_defined_eager_row_processor", adapter
)
return adapter
def _setup_query_on_user_defined_adapter(
self, context, entity, path, adapter, user_defined_adapter
):
# apply some more wrapping to the "user defined adapter"
# if we are setting up the query for SQL render.
adapter = entity._get_entity_clauses(context.query, context)
if adapter and user_defined_adapter:
user_defined_adapter = user_defined_adapter.wrap(adapter)
path.set(
context.attributes,
"user_defined_eager_row_processor",
user_defined_adapter,
)
elif adapter:
user_defined_adapter = adapter
path.set(
context.attributes,
"user_defined_eager_row_processor",
user_defined_adapter,
)
add_to_collection = context.primary_columns
return user_defined_adapter, adapter, add_to_collection
def _gen_pooled_aliased_class(self, context):
# keep a local pool of AliasedClass objects that get re-used.
# we need one unique AliasedClass per query per appearance of our
# entity in the query.
if inspect(self.entity).is_aliased_class:
alt_selectable = inspect(self.entity).selectable
else:
alt_selectable = None
key = ("joinedloader_ac", self)
if key not in context.attributes:
context.attributes[key] = idx = 0
else:
context.attributes[key] = idx = context.attributes[key] + 1
if idx >= len(self._aliased_class_pool):
to_adapt = orm_util.AliasedClass(
self.mapper,
alias=alt_selectable.alias(flat=True)
if alt_selectable is not None
else None,
flat=True,
use_mapper_path=True,
)
# load up the .columns collection on the Alias() before
# the object becomes shared among threads. this prevents
# races for column identities.
inspect(to_adapt).selectable.c
self._aliased_class_pool.append(to_adapt)
return self._aliased_class_pool[idx]
def _generate_row_adapter(
self,
context,
entity,
path,
loadopt,
adapter,
column_collection,
parentmapper,
chained_from_outerjoin,
):
with_poly_entity = path.get(
context.attributes, "path_with_polymorphic", None
)
if with_poly_entity:
to_adapt = with_poly_entity
else:
to_adapt = self._gen_pooled_aliased_class(context)
clauses = inspect(to_adapt)._memo(
("joinedloader_ormadapter", self),
orm_util.ORMAdapter,
to_adapt,
equivalents=self.mapper._equivalent_columns,
adapt_required=True,
allow_label_resolve=False,
anonymize_labels=True,
)
assert clauses.aliased_class is not None
if self.parent_property.uselist:
context.multi_row_eager_loaders = True
innerjoin = (
loadopt.local_opts.get("innerjoin", self.parent_property.innerjoin)
if loadopt is not None
else self.parent_property.innerjoin
)
if not innerjoin:
# if this is an outer join, all non-nested eager joins from
# this path must also be outer joins
chained_from_outerjoin = True
context.create_eager_joins.append(
(
self._create_eager_join,
entity,
path,
adapter,
parentmapper,
clauses,
innerjoin,
chained_from_outerjoin,
)
)
add_to_collection = context.secondary_columns
path.set(context.attributes, "eager_row_processor", clauses)
return clauses, adapter, add_to_collection, chained_from_outerjoin
def _create_eager_join(
self,
context,
query_entity,
path,
adapter,
parentmapper,
clauses,
innerjoin,
chained_from_outerjoin,
):
if parentmapper is None:
localparent = query_entity.mapper
else:
localparent = parentmapper
# whether or not the Query will wrap the selectable in a subquery,
# and then attach eager load joins to that (i.e., in the case of
# LIMIT/OFFSET etc.)
should_nest_selectable = (
context.multi_row_eager_loaders
and context.query._should_nest_selectable
)
query_entity_key = None
if (
query_entity not in context.eager_joins
and not should_nest_selectable
and context.from_clause
):
indexes = sql_util.find_left_clause_that_matches_given(
context.from_clause, query_entity.selectable
)
if len(indexes) > 1:
# for the eager load case, I can't reproduce this right
# now. For query.join() I can.
raise sa_exc.InvalidRequestError(
"Can't identify which query entity in which to joined "
"eager load from. Please use an exact match when "
"specifying the join path."
)
if indexes:
clause = context.from_clause[indexes[0]]
# join to an existing FROM clause on the query.
# key it to its list index in the eager_joins dict.
# Query._compile_context will adapt as needed and
# append to the FROM clause of the select().
query_entity_key, default_towrap = indexes[0], clause
if query_entity_key is None:
query_entity_key, default_towrap = (
query_entity,
query_entity.selectable,
)
towrap = context.eager_joins.setdefault(
query_entity_key, default_towrap
)
if adapter:
if getattr(adapter, "aliased_class", None):
# joining from an adapted entity. The adapted entity
# might be a "with_polymorphic", so resolve that to our
# specific mapper's entity before looking for our attribute
# name on it.
efm = inspect(adapter.aliased_class)._entity_for_mapper(
localparent
if localparent.isa(self.parent)
else self.parent
)
# look for our attribute on the adapted entity, else fall back
# to our straight property
onclause = getattr(efm.entity, self.key, self.parent_property)
else:
onclause = getattr(
orm_util.AliasedClass(
self.parent, adapter.selectable, use_mapper_path=True
),
self.key,
self.parent_property,
)
else:
onclause = self.parent_property
assert clauses.aliased_class is not None
attach_on_outside = (
not chained_from_outerjoin
or not innerjoin
or innerjoin == "unnested"
or query_entity.entity_zero.represents_outer_join
)
if attach_on_outside:
# this is the "classic" eager join case.
eagerjoin = orm_util._ORMJoin(
towrap,
clauses.aliased_class,
onclause,
isouter=not innerjoin
or query_entity.entity_zero.represents_outer_join
or (chained_from_outerjoin and isinstance(towrap, sql.Join)),
_left_memo=self.parent,
_right_memo=self.mapper,
)
else:
# all other cases are innerjoin=='nested' approach
eagerjoin = self._splice_nested_inner_join(
path, towrap, clauses, onclause
)
context.eager_joins[query_entity_key] = eagerjoin
# send a hint to the Query as to where it may "splice" this join
eagerjoin.stop_on = query_entity.selectable
if not parentmapper:
# for parentclause that is the non-eager end of the join,
# ensure all the parent cols in the primaryjoin are actually
# in the
# columns clause (i.e. are not deferred), so that aliasing applied
# by the Query propagates those columns outward.
# This has the effect
# of "undefering" those columns.
for col in sql_util._find_columns(
self.parent_property.primaryjoin
):
if localparent.persist_selectable.c.contains_column(col):
if adapter:
col = adapter.columns[col]
context.primary_columns.append(col)
if self.parent_property.order_by:
context.eager_order_by += (
eagerjoin._target_adapter.copy_and_process
)(util.to_list(self.parent_property.order_by))
def _splice_nested_inner_join(
self, path, join_obj, clauses, onclause, splicing=False
):
if splicing is False:
# first call is always handed a join object
# from the outside
assert isinstance(join_obj, orm_util._ORMJoin)
elif isinstance(join_obj, sql.selectable.FromGrouping):
return self._splice_nested_inner_join(
path, join_obj.element, clauses, onclause, splicing
)
elif not isinstance(join_obj, orm_util._ORMJoin):
if path[-2] is splicing:
return orm_util._ORMJoin(
join_obj,
clauses.aliased_class,
onclause,
isouter=False,
_left_memo=splicing,
_right_memo=path[-1].mapper,
)
else:
# only here if splicing == True
return None
target_join = self._splice_nested_inner_join(
path, join_obj.right, clauses, onclause, join_obj._right_memo
)
if target_join is None:
right_splice = False
target_join = self._splice_nested_inner_join(
path, join_obj.left, clauses, onclause, join_obj._left_memo
)
if target_join is None:
# should only return None when recursively called,
# e.g. splicing==True
assert (
splicing is not False
), "assertion failed attempting to produce joined eager loads"
return None
else:
right_splice = True
if right_splice:
# for a right splice, attempt to flatten out
# a JOIN b JOIN c JOIN .. to avoid needless
# parenthesis nesting
if not join_obj.isouter and not target_join.isouter:
eagerjoin = join_obj._splice_into_center(target_join)
else:
eagerjoin = orm_util._ORMJoin(
join_obj.left,
target_join,
join_obj.onclause,
isouter=join_obj.isouter,
_left_memo=join_obj._left_memo,
)
else:
eagerjoin = orm_util._ORMJoin(
target_join,
join_obj.right,
join_obj.onclause,
isouter=join_obj.isouter,
_right_memo=join_obj._right_memo,
)
eagerjoin._target_adapter = target_join._target_adapter
return eagerjoin
def _create_eager_adapter(self, context, result, adapter, path, loadopt):
user_defined_adapter = (
self._init_user_defined_eager_proc(loadopt, context)
if loadopt
else False
)
if user_defined_adapter is not False:
decorator = user_defined_adapter
# user defined eagerloads are part of the "primary"
# portion of the load.
# the adapters applied to the Query should be honored.
if context.adapter and decorator:
decorator = decorator.wrap(context.adapter)
elif context.adapter:
decorator = context.adapter
else:
decorator = path.get(context.attributes, "eager_row_processor")
if decorator is None:
return False
if self.mapper._result_has_identity_key(result, decorator):
return decorator
else:
# no identity key - don't return a row
# processor, will cause a degrade to lazy
return False
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." % self
)
our_path = path[self.parent_property]
eager_adapter = self._create_eager_adapter(
context, result, adapter, our_path, loadopt
)
if eager_adapter is not False:
key = self.key
_instance = loading._instance_processor(
self.mapper,
context,
result,
our_path[self.entity],
eager_adapter,
)
if not self.uselist:
self._create_scalar_loader(context, key, _instance, populators)
else:
self._create_collection_loader(
context, key, _instance, populators
)
else:
self.parent_property._get_strategy(
(("lazy", "select"),)
).create_row_processor(
context, path, loadopt, mapper, result, adapter, populators
)
def _create_collection_loader(self, context, key, _instance, populators):
def load_collection_from_joined_new_row(state, dict_, row):
collection = attributes.init_state_collection(state, dict_, key)
result_list = util.UniqueAppender(
collection, "append_without_event"
)
context.attributes[(state, key)] = result_list
inst = _instance(row)
if inst is not None:
result_list.append(inst)
def load_collection_from_joined_existing_row(state, dict_, row):
if (state, key) in context.attributes:
result_list = context.attributes[(state, key)]
else:
# appender_key can be absent from context.attributes
# with isnew=False when self-referential eager loading
# is used; the same instance may be present in two
# distinct sets of result columns
collection = attributes.init_state_collection(
state, dict_, key
)
result_list = util.UniqueAppender(
collection, "append_without_event"
)
context.attributes[(state, key)] = result_list
inst = _instance(row)
if inst is not None:
result_list.append(inst)
def load_collection_from_joined_exec(state, dict_, row):
_instance(row)
populators["new"].append(
(self.key, load_collection_from_joined_new_row)
)
populators["existing"].append(
(self.key, load_collection_from_joined_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append(
(self.key, load_collection_from_joined_exec)
)
def _create_scalar_loader(self, context, key, _instance, populators):
def load_scalar_from_joined_new_row(state, dict_, row):
# set a scalar object instance directly on the parent
# object, bypassing InstrumentedAttribute event handlers.
dict_[key] = _instance(row)
def load_scalar_from_joined_existing_row(state, dict_, row):
# call _instance on the row, even though the object has
# been created, so that we further descend into properties
existing = _instance(row)
# conflicting value already loaded, this shouldn't happen
if key in dict_:
if existing is not dict_[key]:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded attribute '%s' "
% self
)
else:
# this case is when one row has multiple loads of the
# same entity (e.g. via aliasing), one has an attribute
# that the other doesn't.
dict_[key] = existing
def load_scalar_from_joined_exec(state, dict_, row):
_instance(row)
populators["new"].append((self.key, load_scalar_from_joined_new_row))
populators["existing"].append(
(self.key, load_scalar_from_joined_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append(
(self.key, load_scalar_from_joined_exec)
)
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="selectin")
class SelectInLoader(AbstractRelationshipLoader, util.MemoizedSlots):
__slots__ = (
"join_depth",
"omit_join",
"_parent_alias",
"_query_info",
"_fallback_query_info",
"_bakery",
)
query_info = collections.namedtuple(
"queryinfo",
[
"load_only_child",
"load_with_join",
"in_expr",
"pk_cols",
"zero_idx",
"child_lookup_cols",
],
)
_chunksize = 500
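    # Hedged note (assumption): this strategy backs the selectinload() option,
    # e.g. session.query(Parent).options(selectinload(Parent.children)); it emits
    # an additional SELECT with an IN clause of at most _chunksize (500) parent
    # identifiers per round trip.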
def __init__(self, parent, strategy_key):
super(SelectInLoader, self).__init__(parent, strategy_key)
self.join_depth = self.parent_property.join_depth
is_m2o = self.parent_property.direction is interfaces.MANYTOONE
if self.parent_property.omit_join is not None:
self.omit_join = self.parent_property.omit_join
else:
lazyloader = self.parent_property._get_strategy(
(("lazy", "select"),)
)
if is_m2o:
self.omit_join = lazyloader.use_get
else:
self.omit_join = self.parent._get_clause[0].compare(
lazyloader._rev_lazywhere,
use_proxies=True,
equivalents=self.parent._equivalent_columns,
)
if self.omit_join:
if is_m2o:
self._query_info = self._init_for_omit_join_m2o()
self._fallback_query_info = self._init_for_join()
else:
self._query_info = self._init_for_omit_join()
else:
self._query_info = self._init_for_join()
def _init_for_omit_join(self):
pk_to_fk = dict(
self.parent_property._join_condition.local_remote_pairs
)
pk_to_fk.update(
(equiv, pk_to_fk[k])
for k in list(pk_to_fk)
for equiv in self.parent._equivalent_columns.get(k, ())
)
pk_cols = fk_cols = [
pk_to_fk[col] for col in self.parent.primary_key if col in pk_to_fk
]
if len(fk_cols) > 1:
in_expr = sql.tuple_(*fk_cols)
zero_idx = False
else:
in_expr = fk_cols[0]
zero_idx = True
return self.query_info(False, False, in_expr, pk_cols, zero_idx, None)
def _init_for_omit_join_m2o(self):
pk_cols = self.mapper.primary_key
if len(pk_cols) > 1:
in_expr = sql.tuple_(*pk_cols)
zero_idx = False
else:
in_expr = pk_cols[0]
zero_idx = True
lazyloader = self.parent_property._get_strategy((("lazy", "select"),))
lookup_cols = [lazyloader._equated_columns[pk] for pk in pk_cols]
return self.query_info(
True, False, in_expr, pk_cols, zero_idx, lookup_cols
)
def _init_for_join(self):
self._parent_alias = aliased(self.parent.class_)
pa_insp = inspect(self._parent_alias)
pk_cols = [
pa_insp._adapt_element(col) for col in self.parent.primary_key
]
if len(pk_cols) > 1:
in_expr = sql.tuple_(*pk_cols)
zero_idx = False
else:
in_expr = pk_cols[0]
zero_idx = True
return self.query_info(False, True, in_expr, pk_cols, zero_idx, None)
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
@util.dependencies("sqlalchemy.ext.baked")
def _memoized_attr__bakery(self, baked):
return baked.bakery(size=50)
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." % self
)
selectin_path = (
context.query._current_path or orm_util.PathRegistry.root
) + path
if not orm_util._entity_isa(path[-1], self.parent):
return
if loading.PostLoad.path_exists(
context, selectin_path, self.parent_property
):
return
path_w_prop = path[self.parent_property]
selectin_path_w_prop = selectin_path[self.parent_property]
# build up a path indicating the path from the leftmost
# entity to the thing we're subquery loading.
with_poly_entity = path_w_prop.get(
context.attributes, "path_with_polymorphic", None
)
if with_poly_entity is not None:
effective_entity = with_poly_entity
else:
effective_entity = self.entity
if not path_w_prop.contains(context.attributes, "loader"):
if self.join_depth:
if selectin_path_w_prop.length / 2 > self.join_depth:
return
elif selectin_path_w_prop.contains_mapper(self.mapper):
return
loading.PostLoad.callable_for_path(
context,
selectin_path,
self.parent,
self.parent_property,
self._load_for_path,
effective_entity,
)
@util.dependencies("sqlalchemy.ext.baked")
def _load_for_path(
self, baked, context, path, states, load_only, effective_entity
):
if load_only and self.key not in load_only:
return
query_info = self._query_info
if query_info.load_only_child:
our_states = collections.defaultdict(list)
none_states = []
mapper = self.parent
for state, overwrite in states:
state_dict = state.dict
related_ident = tuple(
mapper._get_state_attr_by_column(
state,
state_dict,
lk,
passive=attributes.PASSIVE_NO_FETCH,
)
for lk in query_info.child_lookup_cols
)
# if the loaded parent objects do not have the foreign key
# to the related item loaded, then degrade into the joined
# version of selectinload
if attributes.PASSIVE_NO_RESULT in related_ident:
query_info = self._fallback_query_info
break
# organize states into lists keyed to particular foreign
# key values.
if None not in related_ident:
our_states[related_ident].append(
(state, state_dict, overwrite)
)
else:
# For FK values that have None, add them to a
# separate collection that will be populated separately
none_states.append((state, state_dict, overwrite))
# note the above conditional may have changed query_info
if not query_info.load_only_child:
our_states = [
(state.key[1], state, state.dict, overwrite)
for state, overwrite in states
]
pk_cols = query_info.pk_cols
in_expr = query_info.in_expr
if not query_info.load_with_join:
# in "omit join" mode, the primary key column and the
# "in" expression are in terms of the related entity. So
# if the related entity is polymorphic or otherwise aliased,
# we need to adapt our "pk_cols" and "in_expr" to that
# entity. in non-"omit join" mode, these are against the
# parent entity and do not need adaption.
insp = inspect(effective_entity)
if insp.is_aliased_class:
pk_cols = [insp._adapt_element(col) for col in pk_cols]
in_expr = insp._adapt_element(in_expr)
q = self._bakery(
lambda session: session.query(
query.Bundle("pk", *pk_cols), effective_entity
),
self,
)
if not query_info.load_with_join:
# the Bundle we have in the "omit_join" case is against raw, non
# annotated columns, so to ensure the Query knows its primary
# entity, we add it explicitly. If we made the Bundle against
# annotated columns, we hit a performance issue in this specific
# case, which is detailed in issue #4347.
q.add_criteria(lambda q: q.select_from(effective_entity))
else:
# in the non-omit_join case, the Bundle is against the annotated/
# mapped column of the parent entity, but the #4347 issue does not
# occur in this case.
pa = self._parent_alias
q.add_criteria(
lambda q: q.select_from(pa).join(
getattr(pa, self.parent_property.key).of_type(
effective_entity
)
)
)
if query_info.load_only_child:
q.add_criteria(
lambda q: q.filter(
in_expr.in_(sql.bindparam("primary_keys", expanding=True))
)
)
else:
q.add_criteria(
lambda q: q.filter(
in_expr.in_(sql.bindparam("primary_keys", expanding=True))
)
)
orig_query = context.query
q._add_lazyload_options(
orig_query._with_options, path[self.parent_property]
)
if orig_query._populate_existing:
q.add_criteria(lambda q: q.populate_existing())
if self.parent_property.order_by:
if not query_info.load_with_join:
eager_order_by = self.parent_property.order_by
if insp.is_aliased_class:
eager_order_by = [
insp._adapt_element(elem) for elem in eager_order_by
]
q.add_criteria(lambda q: q.order_by(*eager_order_by))
else:
def _setup_outermost_orderby(q):
# imitate the same method that subquery eager loading uses,
# looking for the adapted "secondary" table
eagerjoin = q._from_obj[0]
return q.order_by(
*eagerjoin._target_adapter.copy_and_process(
util.to_list(self.parent_property.order_by)
)
)
q.add_criteria(_setup_outermost_orderby)
if query_info.load_only_child:
self._load_via_child(
our_states, none_states, query_info, q, context
)
else:
self._load_via_parent(our_states, query_info, q, context)
def _load_via_child(self, our_states, none_states, query_info, q, context):
uselist = self.uselist
# this sort is really for the benefit of the unit tests
our_keys = sorted(our_states)
while our_keys:
chunk = our_keys[0 : self._chunksize]
our_keys = our_keys[self._chunksize :]
data = {
k: v
for k, v in q(context.session).params(
primary_keys=[
key[0] if query_info.zero_idx else key for key in chunk
]
)
}
for key in chunk:
# for a real foreign key and no concurrent changes to the
# DB while running this method, "key" is always present in
# data. However, for primaryjoins without real foreign keys
# a non-None primaryjoin condition may still refer to no
# related object.
related_obj = data.get(key, None)
for state, dict_, overwrite in our_states[key]:
if not overwrite and self.key in dict_:
continue
state.get_impl(self.key).set_committed_value(
state,
dict_,
related_obj if not uselist else [related_obj],
)
# populate none states with empty value / collection
for state, dict_, overwrite in none_states:
if not overwrite and self.key in dict_:
continue
# note it's OK if this is a uselist=True attribute, the empty
# collection will be populated
state.get_impl(self.key).set_committed_value(state, dict_, None)
def _load_via_parent(self, our_states, query_info, q, context):
uselist = self.uselist
_empty_result = () if uselist else None
while our_states:
chunk = our_states[0 : self._chunksize]
our_states = our_states[self._chunksize :]
primary_keys = [
key[0] if query_info.zero_idx else key
for key, state, state_dict, overwrite in chunk
]
data = collections.defaultdict(list)
for k, v in itertools.groupby(
q(context.session).params(primary_keys=primary_keys),
lambda x: x[0],
):
data[k].extend(vv[1] for vv in v)
for key, state, state_dict, overwrite in chunk:
if not overwrite and self.key in state_dict:
continue
collection = data.get(key, _empty_result)
if not uselist and collection:
if len(collection) > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded "
"attribute '%s' " % self
)
state.get_impl(self.key).set_committed_value(
state, state_dict, collection[0]
)
else:
# note that empty tuple set on uselist=False sets the
# value to None
state.get_impl(self.key).set_committed_value(
state, state_dict, collection
)
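    # Worked example (assumption): with _chunksize = 500, loading the related
    # collections for 1200 parent states issues three IN-based SELECTs of 500,
    # 500 and 200 keys respectively; each batch is grouped by parent key and
    # committed onto the corresponding states as above.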
def single_parent_validator(desc, prop):
def _do_check(state, value, oldvalue, initiator):
if value is not None and initiator.key == prop.key:
hasparent = initiator.hasparent(attributes.instance_state(value))
if hasparent and oldvalue is not value:
raise sa_exc.InvalidRequestError(
"Instance %s is already associated with an instance "
"of %s via its %s attribute, and is only allowed a "
"single parent."
% (orm_util.instance_str(value), state.class_, prop),
code="bbf1",
)
return value
def append(state, value, initiator):
return _do_check(state, value, None, initiator)
def set_(state, value, oldvalue, initiator):
return _do_check(state, value, oldvalue, initiator)
event.listen(
desc, "append", append, raw=True, retval=True, active_history=True
)
event.listen(desc, "set", set_, raw=True, retval=True, active_history=True)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/strategy_options.py
|
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
"""
from . import util as orm_util
from .attributes import QueryableAttribute
from .base import _class_to_mapper
from .base import _is_aliased_class
from .base import _is_mapped_class
from .base import InspectionAttr
from .interfaces import MapperOption
from .interfaces import PropComparator
from .path_registry import _DEFAULT_TOKEN
from .path_registry import _WILDCARD_TOKEN
from .path_registry import PathRegistry
from .path_registry import TokenRegistry
from .util import _orm_full_deannotate
from .. import exc as sa_exc
from .. import inspect
from .. import util
from ..sql import expression as sql_expr
from ..sql.base import _generative
from ..sql.base import Generative
class Load(Generative, MapperOption):
"""Represents loader options which modify the state of a
:class:`_query.Query` in order to affect how various mapped attributes are
loaded.
The :class:`_orm.Load` object is in most cases used implicitly behind the
scenes when one makes use of a query option like :func:`_orm.joinedload`,
:func:`.defer`, or similar. However, the :class:`_orm.Load` object
can also be used directly, and in some cases can be useful.
To use :class:`_orm.Load` directly, instantiate it with the target mapped
class as the argument. This style of usage is
useful when dealing with a :class:`_query.Query`
that has multiple entities::
myopt = Load(MyClass).joinedload("widgets")
The above ``myopt`` can now be used with :meth:`_query.Query.options`,
where it
will only take effect for the ``MyClass`` entity::
session.query(MyClass, MyOtherClass).options(myopt)
One case where :class:`_orm.Load`
is useful as public API is when specifying
"wildcard" options that only take effect for a certain class::
session.query(Order).options(Load(Order).lazyload('*'))
Above, all relationships on ``Order`` will be lazy-loaded, but other
attributes on those descendant objects will load using their normal
loader strategy.
.. seealso::
:ref:`deferred_options`
:ref:`deferred_loading_w_multiple`
:ref:`relationship_loader_options`
"""
def __init__(self, entity):
insp = inspect(entity)
self.path = insp._path_registry
# note that this .context is shared among all descendant
# Load objects
self.context = util.OrderedDict()
self.local_opts = {}
self.is_class_strategy = False
@classmethod
def for_existing_path(cls, path):
load = cls.__new__(cls)
load.path = path
load.context = {}
load.local_opts = {}
load._of_type = None
return load
def _generate_cache_key(self, path):
if path.path[0].is_aliased_class:
return False
serialized = []
for (key, loader_path), obj in self.context.items():
if key != "loader":
continue
for local_elem, obj_elem in zip(self.path.path, loader_path):
if local_elem is not obj_elem:
break
else:
endpoint = obj._of_type or obj.path.path[-1]
chopped = self._chop_path(loader_path, path)
if (
# means loader_path and path are unrelated,
# this does not need to be part of a cache key
chopped
is None
) or (
# means no additional path with loader_path + path
# and the endpoint isn't using of_type so isn't modified
# into an alias or other unsafe entity
not chopped
and not obj._of_type
):
continue
serialized_path = []
for token in chopped:
if isinstance(token, util.string_types):
serialized_path.append(token)
elif token.is_aliased_class:
return False
elif token.is_property:
serialized_path.append(token.key)
else:
assert token.is_mapper
serialized_path.append(token.class_)
if not serialized_path or endpoint != serialized_path[-1]:
if endpoint.is_mapper:
serialized_path.append(endpoint.class_)
elif endpoint.is_aliased_class:
return False
serialized.append(
(
tuple(serialized_path)
+ (obj.strategy or ())
+ (
tuple(
[
(key, obj.local_opts[key])
for key in sorted(obj.local_opts)
]
)
if obj.local_opts
else ()
)
)
)
if not serialized:
return None
else:
return tuple(serialized)
def _generate(self):
cloned = super(Load, self)._generate()
cloned.local_opts = {}
return cloned
is_opts_only = False
is_class_strategy = False
strategy = None
propagate_to_loaders = False
_of_type = None
def process_query(self, query):
self._process(query, True)
def process_query_conditionally(self, query):
self._process(query, False)
def _process(self, query, raiseerr):
current_path = query._current_path
if current_path:
for (token, start_path), loader in self.context.items():
chopped_start_path = self._chop_path(start_path, current_path)
if chopped_start_path is not None:
query._attributes[(token, chopped_start_path)] = loader
else:
query._attributes.update(self.context)
def _generate_path(
self, path, attr, for_strategy, wildcard_key, raiseerr=True
):
existing_of_type = self._of_type
self._of_type = None
if raiseerr and not path.has_entity:
if isinstance(path, TokenRegistry):
raise sa_exc.ArgumentError(
"Wildcard token cannot be followed by another entity"
)
else:
raise sa_exc.ArgumentError(
"Mapped attribute '%s' does not "
"refer to a mapped entity" % (path.prop,)
)
if isinstance(attr, util.string_types):
default_token = attr.endswith(_DEFAULT_TOKEN)
if attr.endswith(_WILDCARD_TOKEN) or default_token:
if default_token:
self.propagate_to_loaders = False
if wildcard_key:
attr = "%s:%s" % (wildcard_key, attr)
# TODO: AliasedInsp inside the path for of_type is not
# working for a with_polymorphic entity because the
# relationship loaders don't render the with_poly into the
# path. See #4469 which will try to improve this
if existing_of_type and not existing_of_type.is_aliased_class:
path = path.parent[existing_of_type]
path = path.token(attr)
self.path = path
return path
if existing_of_type:
ent = inspect(existing_of_type)
else:
ent = path.entity
try:
# use getattr on the class to work around
# synonyms, hybrids, etc.
attr = getattr(ent.class_, attr)
except AttributeError as err:
if raiseerr:
util.raise_(
sa_exc.ArgumentError(
'Can\'t find property named "%s" on '
"%s in this Query." % (attr, ent)
),
replace_context=err,
)
else:
return None
else:
attr = found_property = attr.property
path = path[attr]
elif _is_mapped_class(attr):
# TODO: this does not appear to be a valid codepath. "attr"
# would never be a mapper. This block is present in 1.2
# as well however does not seem to be accessed in any tests.
if not orm_util._entity_corresponds_to_use_path_impl(
attr.parent, path[-1]
):
if raiseerr:
raise sa_exc.ArgumentError(
"Attribute '%s' does not "
"link from element '%s'" % (attr, path.entity)
)
else:
return None
else:
prop = found_property = attr.property
if not orm_util._entity_corresponds_to_use_path_impl(
attr.parent, path[-1]
):
if raiseerr:
raise sa_exc.ArgumentError(
'Attribute "%s" does not '
'link from element "%s".%s'
% (
attr,
path.entity,
(
" Did you mean to use "
"%s.of_type(%s)?"
% (path[-2], attr.class_.__name__)
if len(path) > 1
and path.entity.is_mapper
and attr.parent.is_aliased_class
else ""
),
)
)
else:
return None
if getattr(attr, "_of_type", None):
ac = attr._of_type
ext_info = of_type_info = inspect(ac)
existing = path.entity_path[prop].get(
self.context, "path_with_polymorphic"
)
if not ext_info.is_aliased_class:
ac = orm_util.with_polymorphic(
ext_info.mapper.base_mapper,
ext_info.mapper,
aliased=True,
_use_mapper_path=True,
_existing_alias=inspect(existing)
if existing is not None
else None,
)
ext_info = inspect(ac)
path.entity_path[prop].set(
self.context, "path_with_polymorphic", ac
)
path = path[prop][ext_info]
self._of_type = of_type_info
else:
path = path[prop]
if for_strategy is not None:
found_property._get_strategy(for_strategy)
if path.has_entity:
path = path.entity_path
self.path = path
return path
def __str__(self):
return "Load(strategy=%r)" % (self.strategy,)
def _coerce_strat(self, strategy):
if strategy is not None:
strategy = tuple(sorted(strategy.items()))
return strategy
def _apply_to_parent(self, parent, applied, bound):
raise NotImplementedError(
"Only 'unbound' loader options may be used with the "
"Load.options() method"
)
@_generative
def options(self, *opts):
r"""Apply a series of options as sub-options to this
:class:`_orm.Load`
object.
E.g.::
query = session.query(Author)
query = query.options(
joinedload(Author.book).options(
load_only("summary", "excerpt"),
joinedload(Book.citations).options(
joinedload(Citation.author)
)
)
)
:param \*opts: A series of loader option objects (ultimately
:class:`_orm.Load` objects) which should be applied to the path
specified by this :class:`_orm.Load` object.
.. versionadded:: 1.3.6
.. seealso::
:func:`.defaultload`
:ref:`relationship_loader_options`
:ref:`deferred_loading_w_multiple`
"""
apply_cache = {}
bound = not isinstance(self, _UnboundLoad)
if bound:
raise NotImplementedError(
"The options() method is currently only supported "
"for 'unbound' loader options"
)
for opt in opts:
opt._apply_to_parent(self, apply_cache, bound)
@_generative
def set_relationship_strategy(
self, attr, strategy, propagate_to_loaders=True
):
strategy = self._coerce_strat(strategy)
self.propagate_to_loaders = propagate_to_loaders
cloned = self._clone_for_bind_strategy(attr, strategy, "relationship")
self.path = cloned.path
self._of_type = cloned._of_type
cloned.is_class_strategy = self.is_class_strategy = False
self.propagate_to_loaders = cloned.propagate_to_loaders
@_generative
def set_column_strategy(self, attrs, strategy, opts=None, opts_only=False):
strategy = self._coerce_strat(strategy)
self.is_class_strategy = False
for attr in attrs:
cloned = self._clone_for_bind_strategy(
attr, strategy, "column", opts_only=opts_only, opts=opts
)
cloned.propagate_to_loaders = True
@_generative
def set_generic_strategy(self, attrs, strategy):
strategy = self._coerce_strat(strategy)
for attr in attrs:
cloned = self._clone_for_bind_strategy(attr, strategy, None)
cloned.propagate_to_loaders = True
@_generative
def set_class_strategy(self, strategy, opts):
strategy = self._coerce_strat(strategy)
cloned = self._clone_for_bind_strategy(None, strategy, None)
cloned.is_class_strategy = True
cloned.propagate_to_loaders = True
cloned.local_opts.update(opts)
def _clone_for_bind_strategy(
self, attr, strategy, wildcard_key, opts_only=False, opts=None
):
"""Create an anonymous clone of the Load/_UnboundLoad that is suitable
to be placed in the context / _to_bind collection of this Load
object. The clone will then lose references to context/_to_bind
in order to not create reference cycles.
"""
cloned = self._generate()
cloned._generate_path(self.path, attr, strategy, wildcard_key)
cloned.strategy = strategy
cloned.local_opts = self.local_opts
if opts:
cloned.local_opts.update(opts)
if opts_only:
cloned.is_opts_only = True
if strategy or cloned.is_opts_only:
cloned._set_path_strategy()
return cloned
def _set_for_path(self, context, path, replace=True, merge_opts=False):
if merge_opts or not replace:
existing = path.get(self.context, "loader")
if existing:
if merge_opts:
existing.local_opts.update(self.local_opts)
else:
path.set(context, "loader", self)
else:
existing = path.get(self.context, "loader")
path.set(context, "loader", self)
if existing and existing.is_opts_only:
self.local_opts.update(existing.local_opts)
def _set_path_strategy(self):
if not self.is_class_strategy and self.path.has_entity:
effective_path = self.path.parent
else:
effective_path = self.path
if effective_path.is_token:
for path in effective_path.generate_for_superclasses():
self._set_for_path(
self.context,
path,
replace=True,
merge_opts=self.is_opts_only,
)
else:
self._set_for_path(
self.context,
effective_path,
replace=True,
merge_opts=self.is_opts_only,
)
# remove cycles; _set_path_strategy is always invoked on an
# anonymous clone of the Load / UnboundLoad object since #5056
self.context = None
def __getstate__(self):
d = self.__dict__.copy()
if d["context"] is not None:
d["context"] = PathRegistry.serialize_context_dict(
d["context"], ("loader",)
)
d["path"] = self.path.serialize()
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.path = PathRegistry.deserialize(self.path)
if self.context is not None:
self.context = PathRegistry.deserialize_context_dict(self.context)
def _chop_path(self, to_chop, path):
i = -1
for i, (c_token, p_token) in enumerate(zip(to_chop, path.path)):
if isinstance(c_token, util.string_types):
# TODO: this is approximated from the _UnboundLoad
# version and probably has issues, not fully covered.
if i == 0 and c_token.endswith(":" + _DEFAULT_TOKEN):
return to_chop
elif (
c_token != "relationship:%s" % (_WILDCARD_TOKEN,)
and c_token != p_token.key
):
return None
if c_token is p_token:
continue
elif (
isinstance(c_token, InspectionAttr)
and c_token.is_mapper
and p_token.is_mapper
and c_token.isa(p_token)
):
continue
else:
return None
return to_chop[i + 1 :]
class _UnboundLoad(Load):
"""Represent a loader option that isn't tied to a root entity.
The loader option will produce an entity-linked :class:`_orm.Load`
object when it is passed :meth:`_query.Query.options`.
This provides compatibility with the traditional system
of freestanding options, e.g. ``joinedload('x.y.z')``.
"""
def __init__(self):
self.path = ()
self._to_bind = []
self.local_opts = {}
_is_chain_link = False
def _generate_cache_key(self, path):
serialized = ()
for val in self._to_bind:
for local_elem, val_elem in zip(self.path, val.path):
if local_elem is not val_elem:
break
else:
opt = val._bind_loader([path.path[0]], None, None, False)
if opt:
c_key = opt._generate_cache_key(path)
if c_key is False:
return False
elif c_key:
serialized += c_key
if not serialized:
return None
else:
return serialized
def _set_path_strategy(self):
self._to_bind.append(self)
# remove cycles; _set_path_strategy is always invoked on an
# anonymous clone of the Load / UnboundLoad object since #5056
self._to_bind = None
def _apply_to_parent(self, parent, applied, bound, to_bind=None):
if self in applied:
return applied[self]
if to_bind is None:
to_bind = self._to_bind
cloned = self._generate()
applied[self] = cloned
cloned.strategy = self.strategy
if self.path:
attr = self.path[-1]
if isinstance(attr, util.string_types) and attr.endswith(
_DEFAULT_TOKEN
):
attr = attr.split(":")[0] + ":" + _WILDCARD_TOKEN
cloned._generate_path(
parent.path + self.path[0:-1], attr, self.strategy, None
)
# these assertions can go away once the "sub options" API is
# mature
assert cloned.propagate_to_loaders == self.propagate_to_loaders
assert cloned.is_class_strategy == self.is_class_strategy
assert cloned.is_opts_only == self.is_opts_only
new_to_bind = {
elem._apply_to_parent(parent, applied, bound, to_bind)
for elem in to_bind
}
cloned._to_bind = parent._to_bind
cloned._to_bind.extend(new_to_bind)
cloned.local_opts.update(self.local_opts)
return cloned
def _generate_path(self, path, attr, for_strategy, wildcard_key):
if (
wildcard_key
and isinstance(attr, util.string_types)
and attr in (_WILDCARD_TOKEN, _DEFAULT_TOKEN)
):
if attr == _DEFAULT_TOKEN:
self.propagate_to_loaders = False
attr = "%s:%s" % (wildcard_key, attr)
if path and _is_mapped_class(path[-1]) and not self.is_class_strategy:
path = path[0:-1]
if attr:
path = path + (attr,)
self.path = path
return path
def __getstate__(self):
d = self.__dict__.copy()
d["path"] = self._serialize_path(self.path, filter_aliased_class=True)
return d
def __setstate__(self, state):
ret = []
for key in state["path"]:
if isinstance(key, tuple):
if len(key) == 2:
# support legacy
cls, propkey = key
of_type = None
else:
cls, propkey, of_type = key
prop = getattr(cls, propkey)
if of_type:
prop = prop.of_type(of_type)
ret.append(prop)
else:
ret.append(key)
state["path"] = tuple(ret)
self.__dict__ = state
def _process(self, query, raiseerr):
dedupes = query._attributes["_unbound_load_dedupes"]
for val in self._to_bind:
if val not in dedupes:
dedupes.add(val)
val._bind_loader(
[ent.entity_zero for ent in query._mapper_entities],
query._current_path,
query._attributes,
raiseerr,
)
@classmethod
def _from_keys(cls, meth, keys, chained, kw):
opt = _UnboundLoad()
def _split_key(key):
if isinstance(key, util.string_types):
# coerce fooload('*') into "default loader strategy"
if key == _WILDCARD_TOKEN:
return (_DEFAULT_TOKEN,)
# coerce fooload(".*") into "wildcard on default entity"
elif key.startswith("." + _WILDCARD_TOKEN):
key = key[1:]
return key.split(".")
else:
return (key,)
all_tokens = [token for key in keys for token in _split_key(key)]
for token in all_tokens[0:-1]:
# set _is_chain_link first so that clones of the
# object also inherit this flag
opt._is_chain_link = True
if chained:
opt = meth(opt, token, **kw)
else:
opt = opt.defaultload(token)
opt = meth(opt, all_tokens[-1], **kw)
opt._is_chain_link = False
return opt
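    # Illustrative sketch (assumption, not exact API): a dotted option string
    # such as joinedload("orders.items") reaches this method as
    # keys=("orders.items",) with chained=False and is built up roughly as
    #
    #     opt = _UnboundLoad()
    #     opt = opt.defaultload("orders")   # intermediate link, _is_chain_link=True
    #     opt = opt.joinedload("items")     # requested strategy on the final token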
def _chop_path(self, to_chop, path):
i = -1
for i, (c_token, (p_entity, p_prop)) in enumerate(
zip(to_chop, path.pairs())
):
if isinstance(c_token, util.string_types):
if i == 0 and c_token.endswith(":" + _DEFAULT_TOKEN):
return to_chop
elif (
c_token != "relationship:%s" % (_WILDCARD_TOKEN,)
and c_token != p_prop.key
):
return None
elif isinstance(c_token, PropComparator):
if c_token.property is not p_prop or (
c_token._parententity is not p_entity
and (
not c_token._parententity.is_mapper
or not c_token._parententity.isa(p_entity)
)
):
return None
else:
i += 1
return to_chop[i:]
def _serialize_path(self, path, filter_aliased_class=False):
ret = []
for token in path:
if isinstance(token, QueryableAttribute):
if (
filter_aliased_class
and token._of_type
and inspect(token._of_type).is_aliased_class
):
ret.append((token._parentmapper.class_, token.key, None))
else:
ret.append(
(token._parentmapper.class_, token.key, token._of_type)
)
elif isinstance(token, PropComparator):
ret.append((token._parentmapper.class_, token.key, None))
else:
ret.append(token)
return ret
def _bind_loader(self, entities, current_path, context, raiseerr):
"""Convert from an _UnboundLoad() object into a Load() object.
The _UnboundLoad() uses an informal "path" and does not necessarily
refer to a lead entity as it may use string tokens. The Load()
OTOH refers to a complete path. This method reconciles from a
given Query into a Load.
Example::
query = session.query(User).options(
joinedload("orders").joinedload("items"))
The above options will be an _UnboundLoad object along the lines
of (note this is not the exact API of _UnboundLoad)::
_UnboundLoad(
_to_bind=[
_UnboundLoad(["orders"], {"lazy": "joined"}),
_UnboundLoad(["orders", "items"], {"lazy": "joined"}),
]
)
After this method, we get something more like this (again this is
not exact API)::
Load(
User,
(User, User.orders.property))
Load(
User,
(User, User.orders.property, Order, Order.items.property))
"""
start_path = self.path
if self.is_class_strategy and current_path:
start_path += (entities[0],)
# _current_path implies we're in a
# secondary load with an existing path
if current_path:
start_path = self._chop_path(start_path, current_path)
if not start_path:
return None
# look at the first token and try to locate within the Query
# what entity we are referring towards.
token = start_path[0]
if isinstance(token, util.string_types):
entity = self._find_entity_basestring(entities, token, raiseerr)
elif isinstance(token, PropComparator):
prop = token.property
entity = self._find_entity_prop_comparator(
entities, prop, token._parententity, raiseerr
)
elif self.is_class_strategy and _is_mapped_class(token):
entity = inspect(token)
if entity not in entities:
entity = None
else:
raise sa_exc.ArgumentError(
"mapper option expects " "string key or list of attributes"
)
if not entity:
return
path_element = entity
# transfer our entity-less state into a Load() object
# with a real entity path. Start with the lead entity
# we just located, then go through the rest of our path
# tokens and populate into the Load().
loader = Load(path_element)
if context is not None:
loader.context = context
else:
context = loader.context
loader.strategy = self.strategy
loader.is_opts_only = self.is_opts_only
loader.is_class_strategy = self.is_class_strategy
path = loader.path
if not loader.is_class_strategy:
for idx, token in enumerate(start_path):
if not loader._generate_path(
loader.path,
token,
self.strategy if idx == len(start_path) - 1 else None,
None,
raiseerr,
):
return
loader.local_opts.update(self.local_opts)
if not loader.is_class_strategy and loader.path.has_entity:
effective_path = loader.path.parent
else:
effective_path = loader.path
# prioritize "first class" options over those
# that were "links in the chain", e.g. "x" and "y" in
# someload("x.y.z") versus someload("x") / someload("x.y")
if effective_path.is_token:
for path in effective_path.generate_for_superclasses():
loader._set_for_path(
context,
path,
replace=not self._is_chain_link,
merge_opts=self.is_opts_only,
)
else:
loader._set_for_path(
context,
effective_path,
replace=not self._is_chain_link,
merge_opts=self.is_opts_only,
)
return loader
def _find_entity_prop_comparator(self, entities, prop, mapper, raiseerr):
if _is_aliased_class(mapper):
searchfor = mapper
else:
searchfor = _class_to_mapper(mapper)
for ent in entities:
if orm_util._entity_corresponds_to(ent, searchfor):
return ent
else:
if raiseerr:
if not list(entities):
raise sa_exc.ArgumentError(
"Query has only expression-based entities, "
'which do not apply to %s "%s"'
% (util.clsname_as_plain_name(type(prop)), prop)
)
else:
raise sa_exc.ArgumentError(
'Mapped attribute "%s" does not apply to any of the '
"root entities in this query, e.g. %s. Please "
"specify the full path "
"from one of the root entities to the target "
"attribute. "
% (prop, ", ".join(str(x) for x in entities))
)
else:
return None
def _find_entity_basestring(self, entities, token, raiseerr):
if token.endswith(":" + _WILDCARD_TOKEN):
if len(list(entities)) != 1:
if raiseerr:
raise sa_exc.ArgumentError(
"Can't apply wildcard ('*') or load_only() "
"loader option to multiple entities %s. Specify "
"loader options for each entity individually, such "
"as %s."
% (
", ".join(str(ent) for ent in entities),
", ".join(
"Load(%s).some_option('*')" % ent
for ent in entities
),
)
)
elif token.endswith(_DEFAULT_TOKEN):
raiseerr = False
for ent in entities:
# return only the first _MapperEntity when searching
# based on string prop name. Ideally object
# attributes are used to specify more exactly.
return ent
else:
if raiseerr:
raise sa_exc.ArgumentError(
"Query has only expression-based entities - "
'can\'t find property named "%s".' % (token,)
)
else:
return None
class loader_option(object):
def __init__(self):
pass
def __call__(self, fn):
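# attach the decorated function to the Load class as a new method,
# refusing to overwrite an existing method of the same name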
self.name = name = fn.__name__
self.fn = fn
if hasattr(Load, name):
raise TypeError("Load class already has a %s method." % (name))
setattr(Load, name, fn)
return self
def _add_unbound_fn(self, fn):
self._unbound_fn = fn
fn_doc = self.fn.__doc__
self.fn.__doc__ = """Produce a new :class:`_orm.Load` object with the
:func:`_orm.%(name)s` option applied.
See :func:`_orm.%(name)s` for usage examples.
""" % {
"name": self.name
}
fn.__doc__ = fn_doc
return self
def _add_unbound_all_fn(self, fn):
fn.__doc__ = """Produce a standalone "all" option for
:func:`_orm.%(name)s`.
.. deprecated:: 0.9
The :func:`_orm.%(name)s_all` function is deprecated, and will be removed
in a future release. Please use method chaining with
:func:`_orm.%(name)s` instead, as in::
session.query(MyClass).options(
%(name)s("someattribute").%(name)s("anotherattribute")
)
""" % {
"name": self.name
}
fn = util.deprecated(
"0.9",
"The :func:`.%(name)s_all` function is deprecated, and will be "
"removed in a future release. Please use method chaining with "
":func:`.%(name)s` instead" % {"name": self.name},
add_deprecation_to_docstring=False,
)(fn)
self._unbound_all_fn = fn
return self
@loader_option()
def contains_eager(loadopt, attr, alias=None):
r"""Indicate that the given attribute should be eagerly loaded from
columns stated manually in the query.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
The option is used in conjunction with an explicit join that loads
the desired rows, i.e.::
sess.query(Order).\
join(Order.user).\
options(contains_eager(Order.user))
The above query would join from the ``Order`` entity to its related
``User`` entity, and the returned ``Order`` objects would have the
``Order.user`` attribute pre-populated.
When making use of aliases with :func:`.contains_eager`, the path
should be specified using :meth:`.PropComparator.of_type`::
user_alias = aliased(User)
sess.query(Order).\
join((user_alias, Order.user)).\
options(contains_eager(Order.user.of_type(user_alias)))
:meth:`.PropComparator.of_type` is also used to indicate a join
against specific subclasses of an inheriting mapper, or
of a :func:`.with_polymorphic` construct::
# employees of a particular subtype
sess.query(Company).\
outerjoin(Company.employees.of_type(Manager)).\
options(
contains_eager(
Company.employees.of_type(Manager),
)
)
# employees of multiple subtypes
wp = with_polymorphic(Employee, [Manager, Engineer])
sess.query(Company).\
outerjoin(Company.employees.of_type(wp)).\
options(
contains_eager(
Company.employees.of_type(wp),
)
)
The :paramref:`.contains_eager.alias` parameter is used for a similar
purpose; however, the :meth:`.PropComparator.of_type` approach works
in all cases and is more effective and explicit.
.. seealso::
:ref:`loading_toplevel`
:ref:`contains_eager`
"""
if alias is not None:
if not isinstance(alias, str):
info = inspect(alias)
alias = info.selectable
elif getattr(attr, "_of_type", None):
ot = inspect(attr._of_type)
alias = ot.selectable
cloned = loadopt.set_relationship_strategy(
attr, {"lazy": "joined"}, propagate_to_loaders=False
)
cloned.local_opts["eager_from_alias"] = alias
return cloned
@contains_eager._add_unbound_fn
def contains_eager(*keys, **kw):
return _UnboundLoad()._from_keys(
_UnboundLoad.contains_eager, keys, True, kw
)
@loader_option()
def load_only(loadopt, *attrs):
"""Indicate that for a particular entity, only the given list
of column-based attribute names should be loaded; all others will be
deferred.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
Example - given a class ``User``, load only the ``name`` and ``fullname``
attributes::
session.query(User).options(load_only("name", "fullname"))
Example - given a relationship ``User.addresses -> Address``, specify
subquery loading for the ``User.addresses`` collection, but on each
``Address`` object load only the ``email_address`` attribute::
session.query(User).options(
subqueryload("addresses").load_only("email_address")
)
For a :class:`_query.Query` that has multiple entities,
the lead entity can be
specifically referred to using the :class:`_orm.Load` constructor::
session.query(User, Address).join(User.addresses).options(
Load(User).load_only("name", "fullname"),
Load(Address).load_only("email_address")
)
.. versionadded:: 0.9.0
"""
cloned = loadopt.set_column_strategy(
attrs, {"deferred": False, "instrument": True}
)
cloned.set_column_strategy(
"*", {"deferred": True, "instrument": True}, {"undefer_pks": True}
)
return cloned
@load_only._add_unbound_fn
def load_only(*attrs):
return _UnboundLoad().load_only(*attrs)
@loader_option()
def joinedload(loadopt, attr, innerjoin=None):
"""Indicate that the given attribute should be loaded using joined
eager loading.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
examples::
# joined-load the "orders" collection on "User"
query(User).options(joinedload(User.orders))
# joined-load Order.items and then Item.keywords
query(Order).options(
joinedload(Order.items).joinedload(Item.keywords))
# lazily load Order.items, but when Items are loaded,
# joined-load the keywords collection
query(Order).options(
lazyload(Order.items).joinedload(Item.keywords))
:param innerjoin: if ``True``, indicates that the joined eager load should
use an inner join instead of the default of left outer join::
query(Order).options(joinedload(Order.user, innerjoin=True))
In order to chain multiple eager joins together where some may be
OUTER and others INNER, right-nested joins are used to link them::
query(A).options(
joinedload(A.bs, innerjoin=False).
joinedload(B.cs, innerjoin=True)
)
The above query, linking A.bs via "outer" join and B.cs via "inner" join
would render the joins as "a LEFT OUTER JOIN (b JOIN c)". When using
older versions of SQLite (< 3.7.16), this form of JOIN is translated to
use full subqueries as this syntax is otherwise not directly supported.
The ``innerjoin`` flag can also be stated with the term ``"unnested"``.
This indicates that an INNER JOIN should be used, *unless* the join
is linked to a LEFT OUTER JOIN to the left, in which case it
will render as LEFT OUTER JOIN. For example, supposing ``A.bs``
is an outerjoin::
query(A).options(
joinedload(A.bs).
joinedload(B.cs, innerjoin="unnested")
)
The above join will render as "a LEFT OUTER JOIN b LEFT OUTER JOIN c",
rather than as "a LEFT OUTER JOIN (b JOIN c)".
.. note:: The "unnested" flag does **not** affect the JOIN rendered
from a many-to-many association table, e.g. a table configured
as :paramref:`_orm.relationship.secondary`, to the target table; for
correctness of results, these joins are always INNER and are
therefore right-nested if linked to an OUTER join.
.. versionchanged:: 1.0.0 ``innerjoin=True`` now implies
``innerjoin="nested"``, whereas in 0.9 it implied
``innerjoin="unnested"``. In order to achieve the pre-1.0 "unnested"
inner join behavior, use the value ``innerjoin="unnested"``.
See :ref:`migration_3008`.
.. note::
The joins produced by :func:`_orm.joinedload` are **anonymously
aliased**. The criteria by which the join proceeds cannot be
modified, nor can the :class:`_query.Query`
refer to these joins in any way,
including ordering. See :ref:`zen_of_eager_loading` for further
detail.
To produce a specific SQL JOIN which is explicitly available, use
:meth:`_query.Query.join`.
To combine explicit JOINs with eager loading
of collections, use :func:`_orm.contains_eager`; see
:ref:`contains_eager`.
.. seealso::
:ref:`loading_toplevel`
:ref:`joined_eager_loading`
"""
loader = loadopt.set_relationship_strategy(attr, {"lazy": "joined"})
if innerjoin is not None:
loader.local_opts["innerjoin"] = innerjoin
return loader
@joinedload._add_unbound_fn
def joinedload(*keys, **kw):
return _UnboundLoad._from_keys(_UnboundLoad.joinedload, keys, False, kw)
@joinedload._add_unbound_all_fn
def joinedload_all(*keys, **kw):
return _UnboundLoad._from_keys(_UnboundLoad.joinedload, keys, True, kw)
@loader_option()
def subqueryload(loadopt, attr):
"""Indicate that the given attribute should be loaded using
subquery eager loading.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
examples::
# subquery-load the "orders" collection on "User"
query(User).options(subqueryload(User.orders))
# subquery-load Order.items and then Item.keywords
query(Order).options(
subqueryload(Order.items).subqueryload(Item.keywords))
# lazily load Order.items, but when Items are loaded,
# subquery-load the keywords collection
query(Order).options(
lazyload(Order.items).subqueryload(Item.keywords))
.. seealso::
:ref:`loading_toplevel`
:ref:`subquery_eager_loading`
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "subquery"})
@subqueryload._add_unbound_fn
def subqueryload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.subqueryload, keys, False, {})
@subqueryload._add_unbound_all_fn
def subqueryload_all(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.subqueryload, keys, True, {})
@loader_option()
def selectinload(loadopt, attr):
"""Indicate that the given attribute should be loaded using
SELECT IN eager loading.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
examples::
# selectin-load the "orders" collection on "User"
query(User).options(selectinload(User.orders))
# selectin-load Order.items and then Item.keywords
query(Order).options(
selectinload(Order.items).selectinload(Item.keywords))
# lazily load Order.items, but when Items are loaded,
# selectin-load the keywords collection
query(Order).options(
lazyload(Order.items).selectinload(Item.keywords))
.. versionadded:: 1.2
.. seealso::
:ref:`loading_toplevel`
:ref:`selectin_eager_loading`
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "selectin"})
@selectinload._add_unbound_fn
def selectinload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.selectinload, keys, False, {})
@selectinload._add_unbound_all_fn
def selectinload_all(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.selectinload, keys, True, {})
@loader_option()
def lazyload(loadopt, attr):
"""Indicate that the given attribute should be loaded using "lazy"
loading.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
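An illustrative sketch (entity names are hypothetical), forcing a
relationship back to lazy loading for one query::

    session.query(Order).options(lazyload(Order.items))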
.. seealso::
:ref:`loading_toplevel`
:ref:`lazy_loading`
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "select"})
@lazyload._add_unbound_fn
def lazyload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.lazyload, keys, False, {})
@lazyload._add_unbound_all_fn
def lazyload_all(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.lazyload, keys, True, {})
@loader_option()
def immediateload(loadopt, attr):
"""Indicate that the given attribute should be loaded using
an immediate load with a per-attribute SELECT statement.
The :func:`.immediateload` option is superseded in general
by the :func:`.selectinload` option, which performs the same task
more efficiently by emitting a SELECT for all loaded objects.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
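An illustrative sketch (entity names are hypothetical)::

    # emit an additional SELECT for each User's "addresses"
    # collection as soon as the parent User row is loaded
    session.query(User).options(immediateload(User.addresses))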
.. seealso::
:ref:`loading_toplevel`
:ref:`selectin_eager_loading`
"""
loader = loadopt.set_relationship_strategy(attr, {"lazy": "immediate"})
return loader
@immediateload._add_unbound_fn
def immediateload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.immediateload, keys, False, {})
@loader_option()
def noload(loadopt, attr):
"""Indicate that the given relationship attribute should remain unloaded.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
:func:`_orm.noload` applies to :func:`_orm.relationship` attributes; for
column-based attributes, see :func:`_orm.defer`.
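An illustrative sketch (entity names are hypothetical)::

    # User objects are returned with the "addresses" collection
    # left empty; no SQL is emitted to populate it
    session.query(User).options(noload(User.addresses))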
.. seealso::
:ref:`loading_toplevel`
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "noload"})
@noload._add_unbound_fn
def noload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.noload, keys, False, {})
@loader_option()
def raiseload(loadopt, attr, sql_only=False):
"""Indicate that the given relationship attribute should disallow lazy loads.
A relationship attribute configured with :func:`_orm.raiseload` will
raise an :exc:`~sqlalchemy.exc.InvalidRequestError` upon access. The
typical way this is useful is when an application is attempting to ensure
that all relationship attributes that are accessed in a particular context
would have been already loaded via eager loading. Instead of having
to read through SQL logs to ensure lazy loads aren't occurring, this
strategy will cause them to raise immediately.
:param sql_only: if True, raise only if the lazy load would emit SQL,
but not if it is only checking the identity map, or determining that
the related value should just be None due to missing keys. When False,
the strategy will raise for all varieties of lazyload.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
:func:`_orm.raiseload` applies to :func:`_orm.relationship`
attributes only.
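An illustrative sketch (entity names are hypothetical)::

    # any lazy load of Order.customer on the returned objects
    # raises instead of emitting SQL
    session.query(Order).options(raiseload(Order.customer))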
.. versionadded:: 1.1
.. seealso::
:ref:`loading_toplevel`
:ref:`prevent_lazy_with_raiseload`
"""
return loadopt.set_relationship_strategy(
attr, {"lazy": "raise_on_sql" if sql_only else "raise"}
)
@raiseload._add_unbound_fn
def raiseload(*keys, **kw):
return _UnboundLoad._from_keys(_UnboundLoad.raiseload, keys, False, kw)
@loader_option()
def defaultload(loadopt, attr):
"""Indicate an attribute should load using its default loader style.
This method is used to link to other loader options further into
a chain of attributes without altering the loader style of the links
along the chain. For example, to set joined eager loading for an
element of an element::
session.query(MyClass).options(
defaultload(MyClass.someattribute).
joinedload(MyOtherClass.someotherattribute)
)
:func:`.defaultload` is also useful for setting column-level options
on a related class, namely that of :func:`.defer` and :func:`.undefer`::
session.query(MyClass).options(
defaultload(MyClass.someattribute).
defer("some_column").
undefer("some_other_column")
)
.. seealso::
:meth:`_orm.Load.options` - allows for complex hierarchical
loader option structures with less verbosity than with individual
:func:`.defaultload` directives.
:ref:`relationship_loader_options`
:ref:`deferred_loading_w_multiple`
"""
return loadopt.set_relationship_strategy(attr, None)
@defaultload._add_unbound_fn
def defaultload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.defaultload, keys, False, {})
@loader_option()
def defer(loadopt, key):
r"""Indicate that the given column-oriented attribute should be deferred,
e.g. not loaded until accessed.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
e.g.::
from sqlalchemy.orm import defer
session.query(MyClass).options(
defer("attribute_one"),
defer("attribute_two"))
session.query(MyClass).options(
defer(MyClass.attribute_one),
defer(MyClass.attribute_two))
To specify a deferred load of an attribute on a related class,
the path can be specified one token at a time, specifying the loading
style for each link along the chain. To leave the loading style
for a link unchanged, use :func:`_orm.defaultload`::
session.query(MyClass).options(defaultload("someattr").defer("some_column"))
A :class:`_orm.Load` object that is present on a certain path can have
:meth:`_orm.Load.defer` called multiple times;
each will operate on the same
parent entity::
session.query(MyClass).options(
defaultload("someattr").
defer("some_column").
defer("some_other_column").
defer("another_column")
)
:param key: Attribute to be deferred.
:param \*addl_attrs: This option supports the old 0.8 style
of specifying a path as a series of attributes, which is now superseded
by the method-chained style.
.. deprecated:: 0.9 The \*addl_attrs on :func:`_orm.defer` is
deprecated and will be removed in a future release. Please
use method chaining in conjunction with defaultload() to
indicate a path.
.. seealso::
:ref:`deferred`
:func:`_orm.undefer`
"""
return loadopt.set_column_strategy(
(key,), {"deferred": True, "instrument": True}
)
@defer._add_unbound_fn
def defer(key, *addl_attrs):
if addl_attrs:
util.warn_deprecated(
"The *addl_attrs on orm.defer is deprecated. Please use "
"method chaining in conjunction with defaultload() to "
"indicate a path."
)
return _UnboundLoad._from_keys(
_UnboundLoad.defer, (key,) + addl_attrs, False, {}
)
@loader_option()
def undefer(loadopt, key):
r"""Indicate that the given column-oriented attribute should be undeferred,
e.g. specified within the SELECT statement of the entity as a whole.
The column being undeferred is typically set up on the mapping as a
:func:`.deferred` attribute.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
Examples::
# undefer two columns
session.query(MyClass).options(undefer("col1"), undefer("col2"))
# undefer all columns specific to a single class using Load + *
session.query(MyClass, MyOtherClass).options(
Load(MyClass).undefer("*"))
# undefer a column on a related object
session.query(MyClass).options(
defaultload(MyClass.items).undefer('text'))
:param key: Attribute to be undeferred.
:param \*addl_attrs: This option supports the old 0.8 style
of specifying a path as a series of attributes, which is now superseded
by the method-chained style.
.. deprecated:: 0.9 The \*addl_attrs on :func:`_orm.undefer` is
deprecated and will be removed in a future release. Please
use method chaining in conjunction with defaultload() to
indicate a path.
.. seealso::
:ref:`deferred`
:func:`_orm.defer`
:func:`_orm.undefer_group`
"""
return loadopt.set_column_strategy(
(key,), {"deferred": False, "instrument": True}
)
@undefer._add_unbound_fn
def undefer(key, *addl_attrs):
if addl_attrs:
util.warn_deprecated(
"The *addl_attrs on orm.undefer is deprecated. Please use "
"method chaining in conjunction with defaultload() to "
"indicate a path."
)
return _UnboundLoad._from_keys(
_UnboundLoad.undefer, (key,) + addl_attrs, False, {}
)
@loader_option()
def undefer_group(loadopt, name):
"""Indicate that columns within the given deferred group name should be
undeferred.
The columns being undeferred are set up on the mapping as
:func:`.deferred` attributes and include a "group" name.
E.g.::
session.query(MyClass).options(undefer_group("large_attrs"))
To undefer a group of attributes on a related entity, the path can be
spelled out using relationship loader options, such as
:func:`_orm.defaultload`::
session.query(MyClass).options(
defaultload("someattr").undefer_group("large_attrs"))
.. versionchanged:: 0.9.0 :func:`_orm.undefer_group` is now specific to a
particular entity load path.
.. seealso::
:ref:`deferred`
:func:`_orm.defer`
:func:`_orm.undefer`
"""
return loadopt.set_column_strategy(
"*", None, {"undefer_group_%s" % name: True}, opts_only=True
)
@undefer_group._add_unbound_fn
def undefer_group(name):
return _UnboundLoad().undefer_group(name)
@loader_option()
def with_expression(loadopt, key, expression):
r"""Apply an ad-hoc SQL expression to a "deferred expression" attribute.
This option is used in conjunction with the :func:`_orm.query_expression`
mapper-level construct that indicates an attribute which should be the
target of an ad-hoc SQL expression.
E.g.::
sess.query(SomeClass).options(
with_expression(SomeClass.x_y_expr, SomeClass.x + SomeClass.y)
)
.. versionadded:: 1.2
:param key: Attribute to be populated with the ad-hoc SQL expression.
:param expression: SQL expression to be applied to the attribute.
.. seealso::
:ref:`mapper_querytime_expression`
"""
expression = sql_expr._labeled(_orm_full_deannotate(expression))
return loadopt.set_column_strategy(
(key,), {"query_expression": True}, opts={"expression": expression}
)
@with_expression._add_unbound_fn
def with_expression(key, expression):
return _UnboundLoad._from_keys(
_UnboundLoad.with_expression, (key,), False, {"expression": expression}
)
@loader_option()
def selectin_polymorphic(loadopt, classes):
"""Indicate an eager load should take place for all attributes
specific to a subclass.
This uses an additional SELECT with IN against all matched primary
key values, and is the per-query analogue to the ``"selectin"``
setting on the :paramref:`.mapper.polymorphic_load` parameter.
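An illustrative sketch (class names are hypothetical)::

    # load Employee rows, emitting an additional SELECT ... IN
    # for the columns local to the Engineer subclass
    session.query(Employee).options(
        selectin_polymorphic(Employee, [Engineer]))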
.. versionadded:: 1.2
.. seealso::
:ref:`polymorphic_selectin`
"""
loadopt.set_class_strategy(
{"selectinload_polymorphic": True},
opts={
"entities": tuple(
sorted((inspect(cls) for cls in classes), key=id)
)
},
)
return loadopt
@selectin_polymorphic._add_unbound_fn
def selectin_polymorphic(base_cls, classes):
ul = _UnboundLoad()
ul.is_class_strategy = True
ul.path = (inspect(base_cls),)
ul.selectin_polymorphic(classes)
return ul
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/properties.py
|
# orm/properties.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""MapperProperty implementations.
This is a private module which defines the behavior of individual ORM-
mapped attributes.
"""
from __future__ import absolute_import
from . import attributes
from .interfaces import PropComparator
from .interfaces import StrategizedProperty
from .util import _orm_full_deannotate
from .. import log
from .. import util
from ..sql import expression
__all__ = ["ColumnProperty"]
@log.class_logger
class ColumnProperty(StrategizedProperty):
"""Describes an object attribute that corresponds to a table column.
Public constructor is the :func:`_orm.column_property` function.
"""
strategy_wildcard_key = "column"
__slots__ = (
"_orig_columns",
"columns",
"group",
"deferred",
"instrument",
"comparator_factory",
"descriptor",
"extension",
"active_history",
"expire_on_flush",
"info",
"doc",
"strategy_key",
"_creation_order",
"_is_polymorphic_discriminator",
"_mapped_by_synonym",
"_deferred_column_loader",
)
@util.deprecated_params(
extension=(
"0.7",
":class:`.AttributeExtension` is deprecated in favor of the "
":class:`.AttributeEvents` listener interface. The "
":paramref:`.column_property.extension` parameter will be "
"removed in a future release.",
)
)
def __init__(self, *columns, **kwargs):
r"""Provide a column-level property for use with a mapping.
Column-based properties can normally be applied to the mapper's
``properties`` dictionary using the :class:`_schema.Column`
element directly.
Use this function when the given column is not directly present within
the mapper's selectable; examples include SQL expressions, functions,
and scalar SELECT queries.
The :func:`_orm.column_property` function returns an instance of
:class:`.ColumnProperty`.
Columns that aren't present in the mapper's selectable won't be
persisted by the mapper and are effectively "read-only" attributes.
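A brief sketch (class and column names are illustrative)::

    class User(Base):
        __tablename__ = 'user'

        id = Column(Integer, primary_key=True)
        firstname = Column(String(50))
        lastname = Column(String(50))
        fullname = column_property(firstname + " " + lastname)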
:param \*cols:
list of Column objects to be mapped.
:param active_history=False:
When ``True``, indicates that the "previous" value for a
scalar attribute should be loaded when replaced, if not
already loaded. Normally, history tracking logic for
simple non-primary-key scalar values only needs to be
aware of the "new" value in order to perform a flush. This
flag is available for applications that make use of
:func:`.attributes.get_history` or :meth:`.Session.is_modified`
which also need to know
the "previous" value of the attribute.
:param comparator_factory: a class which extends
:class:`.ColumnProperty.Comparator` which provides custom SQL
clause generation for comparison operations.
:param group:
a group name for this property when marked as deferred.
:param deferred:
when True, the column property is "deferred", meaning that
it does not load immediately, and is instead loaded when the
attribute is first accessed on an instance. See also
:func:`~sqlalchemy.orm.deferred`.
:param doc:
optional string that will be applied as the doc on the
class-bound descriptor.
:param expire_on_flush=True:
Disable expiry on flush. A column_property() which refers
to a SQL expression (and not a single table-bound column)
is considered to be a "read only" property; populating it
has no effect on the state of data, and it can only return
database state. For this reason a column_property()'s value
is expired whenever the parent object is involved in a
flush, that is, has any kind of "dirty" state within a flush.
Setting this parameter to ``False`` will have the effect of
leaving any existing value present after the flush proceeds.
Note however that the :class:`.Session` with default expiration
settings still expires
all attributes after a :meth:`.Session.commit` call.
:param info: Optional data dictionary which will be populated into the
:attr:`.MapperProperty.info` attribute of this object.
:param extension:
an :class:`.AttributeExtension` instance, or list of extensions,
which will be prepended to the list of attribute listeners for the
resulting descriptor placed on the class.
.. seealso::
:ref:`column_property_options` - to map columns while including
mapping options
:ref:`mapper_column_property_sql_expressions` - to map SQL
expressions
"""
super(ColumnProperty, self).__init__()
self._orig_columns = [expression._labeled(c) for c in columns]
self.columns = [
expression._labeled(_orm_full_deannotate(c)) for c in columns
]
self.group = kwargs.pop("group", None)
self.deferred = kwargs.pop("deferred", False)
self.instrument = kwargs.pop("_instrument", True)
self.comparator_factory = kwargs.pop(
"comparator_factory", self.__class__.Comparator
)
self.descriptor = kwargs.pop("descriptor", None)
self.extension = kwargs.pop("extension", None)
self.active_history = kwargs.pop("active_history", False)
self.expire_on_flush = kwargs.pop("expire_on_flush", True)
if "info" in kwargs:
self.info = kwargs.pop("info")
if "doc" in kwargs:
self.doc = kwargs.pop("doc")
else:
for col in reversed(self.columns):
doc = getattr(col, "doc", None)
if doc is not None:
self.doc = doc
break
else:
self.doc = None
if kwargs:
raise TypeError(
"%s received unexpected keyword argument(s): %s"
% (self.__class__.__name__, ", ".join(sorted(kwargs.keys())))
)
util.set_creation_order(self)
self.strategy_key = (
("deferred", self.deferred),
("instrument", self.instrument),
)
@util.dependencies("sqlalchemy.orm.state", "sqlalchemy.orm.strategies")
def _memoized_attr__deferred_column_loader(self, state, strategies):
return state.InstanceState._instance_level_callable_processor(
self.parent.class_manager,
strategies.LoadDeferredColumns(self.key),
self.key,
)
def __clause_element__(self):
"""Allow the ColumnProperty to work in expression before it is turned
into an instrumented attribute.
"""
return self.expression
@property
def expression(self):
"""Return the primary column or expression for this ColumnProperty.
E.g.::
class File(Base):
# ...
name = Column(String(64))
extension = Column(String(8))
filename = column_property(name + '.' + extension)
path = column_property('C:/' + filename.expression)
.. seealso::
:ref:`mapper_column_property_sql_expressions_composed`
"""
return self.columns[0]
def instrument_class(self, mapper):
if not self.instrument:
return
attributes.register_descriptor(
mapper.class_,
self.key,
comparator=self.comparator_factory(self, mapper),
parententity=mapper,
doc=self.doc,
)
def do_init(self):
super(ColumnProperty, self).do_init()
if len(self.columns) > 1 and set(self.parent.primary_key).issuperset(
self.columns
):
util.warn(
(
"On mapper %s, primary key column '%s' is being combined "
"with distinct primary key column '%s' in attribute '%s'. "
"Use explicit properties to give each column its own "
"mapped attribute name."
)
% (self.parent, self.columns[1], self.columns[0], self.key)
)
def copy(self):
return ColumnProperty(
deferred=self.deferred,
group=self.group,
active_history=self.active_history,
*self.columns
)
def _getcommitted(
self, state, dict_, column, passive=attributes.PASSIVE_OFF
):
return state.get_impl(self.key).get_committed_value(
state, dict_, passive=passive
)
def merge(
self,
session,
source_state,
source_dict,
dest_state,
dest_dict,
load,
_recursive,
_resolve_conflict_map,
):
if not self.instrument:
return
elif self.key in source_dict:
value = source_dict[self.key]
if not load:
dest_dict[self.key] = value
else:
impl = dest_state.get_impl(self.key)
impl.set(dest_state, dest_dict, value, None)
elif dest_state.has_identity and self.key not in dest_dict:
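# the attribute is not loaded on the source; expire it on an
# already-persistent destination so it is re-fetched on next access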
dest_state._expire_attributes(
dest_dict, [self.key], no_loader=True
)
class Comparator(util.MemoizedSlots, PropComparator):
"""Produce boolean, comparison, and other operators for
:class:`.ColumnProperty` attributes.
See the documentation for :class:`.PropComparator` for a brief
overview.
.. seealso::
:class:`.PropComparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
__slots__ = "__clause_element__", "info", "expressions"
def _memoized_method___clause_element__(self):
if self.adapter:
return self.adapter(self.prop.columns[0])
else:
# no adapter, so we aren't aliased
# assert self._parententity is self._parentmapper
return self.prop.columns[0]._annotate(
{
"parententity": self._parententity,
"parentmapper": self._parententity,
}
)
def _memoized_attr_info(self):
"""The .info dictionary for this attribute."""
ce = self.__clause_element__()
try:
return ce.info
except AttributeError:
return self.prop.info
def _memoized_attr_expressions(self):
"""The full sequence of columns referenced by this
attribute, adjusted for any aliasing in progress.
.. versionadded:: 1.3.17
"""
if self.adapter:
return [self.adapter(col) for col in self.prop.columns]
else:
# no adapter, so we aren't aliased
# assert self._parententity is self._parentmapper
return [
col._annotate(
{
"parententity": self._parententity,
"parentmapper": self._parententity,
"orm_key": self.prop.key,
}
)
for col in self.prop.columns
]
def _fallback_getattr(self, key):
"""proxy attribute access down to the mapped column.
this allows user-defined comparison methods to be accessed.
"""
return getattr(self.__clause_element__(), key)
def operate(self, op, *other, **kwargs):
return op(self.__clause_element__(), *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
col = self.__clause_element__()
return op(col._bind_param(op, other), col, **kwargs)
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/util.py
|
# orm/util.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
import types
import weakref
from . import attributes # noqa
from .base import _class_to_mapper # noqa
from .base import _never_set # noqa
from .base import _none_set # noqa
from .base import attribute_str # noqa
from .base import class_mapper # noqa
from .base import InspectionAttr # noqa
from .base import instance_str # noqa
from .base import object_mapper # noqa
from .base import object_state # noqa
from .base import state_attribute_str # noqa
from .base import state_class_str # noqa
from .base import state_str # noqa
from .interfaces import MapperProperty # noqa
from .interfaces import PropComparator # noqa
from .path_registry import PathRegistry # noqa
from .. import event
from .. import exc as sa_exc
from .. import inspection
from .. import sql
from .. import util
from ..sql import expression
from ..sql import util as sql_util
all_cascades = frozenset(
(
"delete",
"delete-orphan",
"all",
"merge",
"expunge",
"save-update",
"refresh-expire",
"none",
)
)
class CascadeOptions(frozenset):
"""Keeps track of the options sent to relationship().cascade"""
_add_w_all_cascades = all_cascades.difference(
["all", "none", "delete-orphan"]
)
_allowed_cascades = all_cascades
_viewonly_cascades = ["expunge", "all", "none", "refresh-expire"]
__slots__ = (
"save_update",
"delete",
"refresh_expire",
"merge",
"expunge",
"delete_orphan",
)
def __new__(cls, value_list):
if isinstance(value_list, util.string_types) or value_list is None:
return cls.from_string(value_list)
values = set(value_list)
if values.difference(cls._allowed_cascades):
raise sa_exc.ArgumentError(
"Invalid cascade option(s): %s"
% ", ".join(
[
repr(x)
for x in sorted(
values.difference(cls._allowed_cascades)
)
]
)
)
if "all" in values:
values.update(cls._add_w_all_cascades)
if "none" in values:
values.clear()
values.discard("all")
self = frozenset.__new__(CascadeOptions, values)
self.save_update = "save-update" in values
self.delete = "delete" in values
self.refresh_expire = "refresh-expire" in values
self.merge = "merge" in values
self.expunge = "expunge" in values
self.delete_orphan = "delete-orphan" in values
if self.delete_orphan and not self.delete:
util.warn(
"The 'delete-orphan' cascade " "option requires 'delete'."
)
return self
def __repr__(self):
return "CascadeOptions(%r)" % (",".join([x for x in sorted(self)]))
@classmethod
def from_string(cls, arg):
values = [c for c in re.split(r"\s*,\s*", arg or "") if c]
return cls(values)
def _validator_events(desc, key, validator, include_removes, include_backrefs):
"""Runs a validation method on an attribute value to be set or
appended.
"""
if not include_backrefs:
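# when backref events are excluded, compare the initiating
# attribute's implementation to this attribute's own impl in
# order to detect events that arrived via the backref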
def detect_is_backref(state, initiator):
impl = state.manager[key].impl
return initiator.impl is not impl
if include_removes:
def append(state, value, initiator):
if initiator.op is not attributes.OP_BULK_REPLACE and (
include_backrefs or not detect_is_backref(state, initiator)
):
return validator(state.obj(), key, value, False)
else:
return value
def bulk_set(state, values, initiator):
if include_backrefs or not detect_is_backref(state, initiator):
obj = state.obj()
values[:] = [
validator(obj, key, value, False) for value in values
]
def set_(state, value, oldvalue, initiator):
if include_backrefs or not detect_is_backref(state, initiator):
return validator(state.obj(), key, value, False)
else:
return value
def remove(state, value, initiator):
if include_backrefs or not detect_is_backref(state, initiator):
validator(state.obj(), key, value, True)
else:
def append(state, value, initiator):
if initiator.op is not attributes.OP_BULK_REPLACE and (
include_backrefs or not detect_is_backref(state, initiator)
):
return validator(state.obj(), key, value)
else:
return value
def bulk_set(state, values, initiator):
if include_backrefs or not detect_is_backref(state, initiator):
obj = state.obj()
values[:] = [validator(obj, key, value) for value in values]
def set_(state, value, oldvalue, initiator):
if include_backrefs or not detect_is_backref(state, initiator):
return validator(state.obj(), key, value)
else:
return value
event.listen(desc, "append", append, raw=True, retval=True)
event.listen(desc, "bulk_replace", bulk_set, raw=True)
event.listen(desc, "set", set_, raw=True, retval=True)
if include_removes:
event.listen(desc, "remove", remove, raw=True, retval=True)
def polymorphic_union(
table_map, typecolname, aliasname="p_union", cast_nulls=True
):
"""Create a ``UNION`` statement used by a polymorphic mapper.
See :ref:`concrete_inheritance` for an example of how
this is used.
:param table_map: mapping of polymorphic identities to
:class:`_schema.Table` objects.
:param typecolname: string name of a "discriminator" column, which will be
derived from the query, producing the polymorphic identity for
each row. If ``None``, no polymorphic discriminator is generated.
:param aliasname: name of the :func:`~sqlalchemy.sql.expression.alias()`
construct generated.
:param cast_nulls: if True, non-existent columns, which are represented
as labeled NULLs, will be passed into CAST. This is a legacy behavior
that is problematic on some backends such as Oracle - in which case it
can be set to False.
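A brief sketch (table names are illustrative)::

    punion = polymorphic_union(
        {
            'employee': employee_table,
            'manager': manager_table,
        },
        'type',
        aliasname='pjoin',
    )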
"""
colnames = util.OrderedSet()
colnamemaps = {}
types = {}
for key in table_map:
table = table_map[key]
# mysql doesn't like selecting from a select;
# make it an alias of the select
if isinstance(table, sql.Select):
table = table.alias()
table_map[key] = table
m = {}
for c in table.c:
colnames.add(c.key)
m[c.key] = c
types[c.key] = c.type
colnamemaps[table] = m
def col(name, table):
try:
return colnamemaps[table][name]
except KeyError:
if cast_nulls:
return sql.cast(sql.null(), types[name]).label(name)
else:
return sql.type_coerce(sql.null(), types[name]).label(name)
result = []
for type_, table in table_map.items():
if typecolname is not None:
result.append(
sql.select(
[col(name, table) for name in colnames]
+ [
sql.literal_column(
sql_util._quote_ddl_expr(type_)
).label(typecolname)
],
from_obj=[table],
)
)
else:
result.append(
sql.select(
[col(name, table) for name in colnames], from_obj=[table]
)
)
return sql.union_all(*result).alias(aliasname)
def identity_key(*args, **kwargs):
"""Generate "identity key" tuples, as are used as keys in the
:attr:`.Session.identity_map` dictionary.
This function has several call styles:
* ``identity_key(class, ident, identity_token=token)``
This form receives a mapped class and a primary key scalar or
tuple as an argument.
E.g.::
>>> identity_key(MyClass, (1, 2))
(<class '__main__.MyClass'>, (1, 2), None)
:param class: mapped class (must be a positional argument)
:param ident: primary key, may be a scalar or tuple argument.
:param identity_token: optional identity token
.. versionadded:: 1.2 added identity_token
* ``identity_key(instance=instance)``
This form will produce the identity key for a given instance. The
instance need not be persistent, only that its primary key attributes
are populated (else the key will contain ``None`` for those missing
values).
E.g.::
>>> instance = MyClass(1, 2)
>>> identity_key(instance=instance)
(<class '__main__.MyClass'>, (1, 2), None)
In this form, the given instance is ultimately run through
:meth:`_orm.Mapper.identity_key_from_instance`, which will have the
effect of performing a database check for the corresponding row
if the object is expired.
:param instance: object instance (must be given as a keyword arg)
* ``identity_key(class, row=row, identity_token=token)``
This form is similar to the class/tuple form, except is passed a
database result row as a :class:`.RowProxy` object.
E.g.::
>>> row = engine.execute("select * from table where a=1 and b=2").\
first()
>>> identity_key(MyClass, row=row)
(<class '__main__.MyClass'>, (1, 2), None)
:param class: mapped class (must be a positional argument)
:param row: :class:`.RowProxy` row returned by a
:class:`_engine.ResultProxy`
(must be given as a keyword arg)
:param identity_token: optional identity token
.. versionadded:: 1.2 added identity_token
"""
if args:
row = None
largs = len(args)
if largs == 1:
class_ = args[0]
try:
row = kwargs.pop("row")
except KeyError:
ident = kwargs.pop("ident")
elif largs in (2, 3):
class_, ident = args
else:
raise sa_exc.ArgumentError(
"expected up to three positional arguments, " "got %s" % largs
)
identity_token = kwargs.pop("identity_token", None)
if kwargs:
raise sa_exc.ArgumentError(
"unknown keyword arguments: %s" % ", ".join(kwargs)
)
mapper = class_mapper(class_)
if row is None:
return mapper.identity_key_from_primary_key(
util.to_list(ident), identity_token=identity_token
)
else:
return mapper.identity_key_from_row(
row, identity_token=identity_token
)
else:
instance = kwargs.pop("instance")
if kwargs:
raise sa_exc.ArgumentError(
"unknown keyword arguments: %s" % ", ".join(kwargs.keys)
)
mapper = object_mapper(instance)
return mapper.identity_key_from_instance(instance)
class ORMAdapter(sql_util.ColumnAdapter):
"""ColumnAdapter subclass which excludes adaptation of entities from
non-matching mappers.
"""
def __init__(
self,
entity,
equivalents=None,
adapt_required=False,
allow_label_resolve=True,
anonymize_labels=False,
):
info = inspection.inspect(entity)
self.mapper = info.mapper
selectable = info.selectable
is_aliased_class = info.is_aliased_class
if is_aliased_class:
self.aliased_class = entity
else:
self.aliased_class = None
sql_util.ColumnAdapter.__init__(
self,
selectable,
equivalents,
adapt_required=adapt_required,
allow_label_resolve=allow_label_resolve,
anonymize_labels=anonymize_labels,
include_fn=self._include_fn,
)
def _include_fn(self, elem):
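# adapt only columns that carry no mapper annotation, or whose
# parent mapper is this adapter's mapper or inherits from it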
entity = elem._annotations.get("parentmapper", None)
return not entity or entity.isa(self.mapper)
class AliasedClass(object):
r"""Represents an "aliased" form of a mapped class for usage with Query.
The ORM equivalent of a :func:`~sqlalchemy.sql.expression.alias`
construct, this object mimics the mapped class using a
``__getattr__`` scheme and maintains a reference to a
real :class:`~sqlalchemy.sql.expression.Alias` object.
A primary purpose of :class:`.AliasedClass` is to serve as an alternate
within a SQL statement generated by the ORM, such that an existing
mapped entity can be used in multiple contexts. A simple example::
# find all pairs of users with the same name
user_alias = aliased(User)
session.query(User, user_alias).\
join((user_alias, User.id > user_alias.id)).\
filter(User.name == user_alias.name)
:class:`.AliasedClass` is also capable of mapping an existing mapped
class to an entirely new selectable, provided this selectable is column-
compatible with the existing mapped selectable, and it can also be
configured in a mapping as the target of a :func:`_orm.relationship`.
See the links below for examples.
The :class:`.AliasedClass` object is constructed typically using the
:func:`_orm.aliased` function. It also is produced with additional
configuration when using the :func:`_orm.with_polymorphic` function.
The resulting object is an instance of :class:`.AliasedClass`.
This object implements an attribute scheme which produces the
same attribute and method interface as the original mapped
class, allowing :class:`.AliasedClass` to be compatible
with any attribute technique which works on the original class,
including hybrid attributes (see :ref:`hybrids_toplevel`).
The :class:`.AliasedClass` can be inspected for its underlying
:class:`_orm.Mapper`, aliased selectable, and other information
using :func:`_sa.inspect`::
from sqlalchemy import inspect
my_alias = aliased(MyClass)
insp = inspect(my_alias)
The resulting inspection object is an instance of :class:`.AliasedInsp`.
.. seealso::
:func:`.aliased`
:func:`.with_polymorphic`
:ref:`relationship_aliased_class`
:ref:`relationship_to_window_function`
"""
def __init__(
self,
cls,
alias=None,
name=None,
flat=False,
adapt_on_names=False,
# TODO: None for default here?
with_polymorphic_mappers=(),
with_polymorphic_discriminator=None,
base_alias=None,
use_mapper_path=False,
represents_outer_join=False,
):
mapper = _class_to_mapper(cls)
if alias is None:
alias = mapper._with_polymorphic_selectable.alias(
name=name, flat=flat
)
self._aliased_insp = AliasedInsp(
self,
mapper,
alias,
name,
with_polymorphic_mappers
if with_polymorphic_mappers
else mapper.with_polymorphic_mappers,
with_polymorphic_discriminator
if with_polymorphic_discriminator is not None
else mapper.polymorphic_on,
base_alias,
use_mapper_path,
adapt_on_names,
represents_outer_join,
)
self.__name__ = "AliasedClass_%s" % mapper.class_.__name__
def __getattr__(self, key):
try:
_aliased_insp = self.__dict__["_aliased_insp"]
except KeyError:
raise AttributeError()
else:
target = _aliased_insp._target
# maintain all getattr mechanics
attr = getattr(target, key)
# attribute is a method that will be invoked against a
# "self"; so just return a new method with the same function and
# new self
if hasattr(attr, "__call__") and hasattr(attr, "__self__"):
return types.MethodType(attr.__func__, self)
# attribute is a descriptor that will be invoked against a
# "self"; so invoke the descriptor against this self
if hasattr(attr, "__get__"):
attr = attr.__get__(None, self)
# attributes within the QueryableAttribute system will want this
# to be invoked so the object can be adapted
if hasattr(attr, "adapt_to_entity"):
attr = attr.adapt_to_entity(_aliased_insp)
setattr(self, key, attr)
return attr
def __repr__(self):
return "<AliasedClass at 0x%x; %s>" % (
id(self),
self._aliased_insp._target.__name__,
)
def __str__(self):
return str(self._aliased_insp)
class AliasedInsp(InspectionAttr):
"""Provide an inspection interface for an
:class:`.AliasedClass` object.
The :class:`.AliasedInsp` object is returned
given an :class:`.AliasedClass` using the
:func:`_sa.inspect` function::
from sqlalchemy import inspect
from sqlalchemy.orm import aliased
my_alias = aliased(MyMappedClass)
insp = inspect(my_alias)
Attributes on :class:`.AliasedInsp`
include:
* ``entity`` - the :class:`.AliasedClass` represented.
* ``mapper`` - the :class:`_orm.Mapper` mapping the underlying class.
* ``selectable`` - the :class:`_expression.Alias`
construct which ultimately
represents an aliased :class:`_schema.Table` or
:class:`_expression.Select`
construct.
* ``name`` - the name of the alias. Also is used as the attribute
name when returned in a result tuple from :class:`_query.Query`.
* ``with_polymorphic_mappers`` - collection of :class:`_orm.Mapper`
objects
indicating all those mappers expressed in the select construct
for the :class:`.AliasedClass`.
* ``polymorphic_on`` - an alternate column or SQL expression which
will be used as the "discriminator" for a polymorphic load.
.. seealso::
:ref:`inspection_toplevel`
"""
def __init__(
self,
entity,
mapper,
selectable,
name,
with_polymorphic_mappers,
polymorphic_on,
_base_alias,
_use_mapper_path,
adapt_on_names,
represents_outer_join,
):
self._weak_entity = weakref.ref(entity)
self.mapper = mapper
self.selectable = (
self.persist_selectable
) = self.local_table = selectable
self.name = name
self.polymorphic_on = polymorphic_on
self._base_alias = weakref.ref(_base_alias or self)
self._use_mapper_path = _use_mapper_path
self.represents_outer_join = represents_outer_join
if with_polymorphic_mappers:
self._is_with_polymorphic = True
self.with_polymorphic_mappers = with_polymorphic_mappers
self._with_polymorphic_entities = []
for poly in self.with_polymorphic_mappers:
if poly is not mapper:
ent = AliasedClass(
poly.class_,
selectable,
base_alias=self,
adapt_on_names=adapt_on_names,
use_mapper_path=_use_mapper_path,
)
setattr(self.entity, poly.class_.__name__, ent)
self._with_polymorphic_entities.append(ent._aliased_insp)
else:
self._is_with_polymorphic = False
self.with_polymorphic_mappers = [mapper]
self._adapter = sql_util.ColumnAdapter(
selectable,
equivalents=mapper._equivalent_columns,
adapt_on_names=adapt_on_names,
anonymize_labels=True,
)
self._adapt_on_names = adapt_on_names
self._target = mapper.class_
@property
def entity(self):
return self._weak_entity()
is_aliased_class = True
"always returns True"
@property
def class_(self):
"""Return the mapped class ultimately represented by this
:class:`.AliasedInsp`."""
return self.mapper.class_
@property
def _path_registry(self):
if self._use_mapper_path:
return self.mapper._path_registry
else:
return PathRegistry.per_mapper(self)
def __getstate__(self):
return {
"entity": self.entity,
"mapper": self.mapper,
"alias": self.selectable,
"name": self.name,
"adapt_on_names": self._adapt_on_names,
"with_polymorphic_mappers": self.with_polymorphic_mappers,
"with_polymorphic_discriminator": self.polymorphic_on,
"base_alias": self._base_alias(),
"use_mapper_path": self._use_mapper_path,
"represents_outer_join": self.represents_outer_join,
}
def __setstate__(self, state):
self.__init__(
state["entity"],
state["mapper"],
state["alias"],
state["name"],
state["with_polymorphic_mappers"],
state["with_polymorphic_discriminator"],
state["base_alias"],
state["use_mapper_path"],
state["adapt_on_names"],
state["represents_outer_join"],
)
def _adapt_element(self, elem):
return self._adapter.traverse(elem)._annotate(
{"parententity": self, "parentmapper": self.mapper}
)
def _entity_for_mapper(self, mapper):
self_poly = self.with_polymorphic_mappers
if mapper in self_poly:
if mapper is self.mapper:
return self
else:
return getattr(
self.entity, mapper.class_.__name__
)._aliased_insp
elif mapper.isa(self.mapper):
return self
else:
assert False, "mapper %s doesn't correspond to %s" % (mapper, self)
@util.memoized_property
def _get_clause(self):
onclause, replacemap = self.mapper._get_clause
return (
self._adapter.traverse(onclause),
{
self._adapter.traverse(col): param
for col, param in replacemap.items()
},
)
@util.memoized_property
def _memoized_values(self):
return {}
def _memo(self, key, callable_, *args, **kw):
if key in self._memoized_values:
return self._memoized_values[key]
else:
self._memoized_values[key] = value = callable_(*args, **kw)
return value
def __repr__(self):
if self.with_polymorphic_mappers:
with_poly = "(%s)" % ", ".join(
mp.class_.__name__ for mp in self.with_polymorphic_mappers
)
else:
with_poly = ""
return "<AliasedInsp at 0x%x; %s%s>" % (
id(self),
self.class_.__name__,
with_poly,
)
def __str__(self):
if self._is_with_polymorphic:
return "with_polymorphic(%s, [%s])" % (
self._target.__name__,
", ".join(
mp.class_.__name__
for mp in self.with_polymorphic_mappers
if mp is not self.mapper
),
)
else:
return "aliased(%s)" % (self._target.__name__,)
inspection._inspects(AliasedClass)(lambda target: target._aliased_insp)
inspection._inspects(AliasedInsp)(lambda target: target)
def aliased(element, alias=None, name=None, flat=False, adapt_on_names=False):
"""Produce an alias of the given element, usually an :class:`.AliasedClass`
instance.
E.g.::
my_alias = aliased(MyClass)
session.query(MyClass, my_alias).filter(MyClass.id > my_alias.id)
The :func:`.aliased` function is used to create an ad-hoc mapping
of a mapped class to a new selectable. By default, a selectable
is generated from the normally mapped selectable (typically a
:class:`_schema.Table`) using the :meth:`_expression.FromClause.alias`
method.
However, :func:`.aliased` can also be used to link the class to
a new :func:`_expression.select` statement. Also, the
:func:`.with_polymorphic`
function is a variant of :func:`.aliased` that is intended to specify
a so-called "polymorphic selectable", that corresponds to the union
of several joined-inheritance subclasses at once.
For convenience, the :func:`.aliased` function also accepts plain
:class:`_expression.FromClause` constructs, such as a
:class:`_schema.Table` or
:func:`_expression.select` construct. In those cases, the
:meth:`_expression.FromClause.alias`
method is called on the object and the new :class:`_expression.Alias`
object
returned. The returned :class:`_expression.Alias`
is not ORM-mapped in this case.
:param element: element to be aliased. Is normally a mapped class,
but for convenience can also be a :class:`_expression.FromClause` element
.
:param alias: Optional selectable unit to map the element to. This is
usually used to link the object to a subquery, and should be an aliased
select construct as one would produce from the
:meth:`_query.Query.subquery` method or
the :meth:`_expression.Select.alias` methods of the
:func:`_expression.select` construct.
:param name: optional string name to use for the alias, if not specified
by the ``alias`` parameter. The name, among other things, forms the
attribute name that will be accessible via tuples returned by a
:class:`_query.Query` object.
:param flat: Boolean, will be passed through to the
:meth:`_expression.FromClause.alias` call so that aliases of
:class:`_expression.Join` objects
don't include an enclosing SELECT. This can lead to more efficient
queries in many circumstances. A JOIN against a nested JOIN will be
rewritten as a JOIN against an aliased SELECT subquery on backends that
don't support this syntax.
.. seealso:: :meth:`_expression.Join.alias`
:param adapt_on_names: if True, more liberal "matching" will be used when
mapping the mapped columns of the ORM entity to those of the
given selectable - a name-based match will be performed if the
given selectable doesn't otherwise have a column that corresponds
to one on the entity. The use case for this is when associating
an entity with some derived selectable such as one that uses
aggregate functions::
class UnitPrice(Base):
__tablename__ = 'unit_price'
...
unit_id = Column(Integer)
price = Column(Numeric)
aggregated_unit_price = Session.query(
func.sum(UnitPrice.price).label('price')
).group_by(UnitPrice.unit_id).subquery()
aggregated_unit_price = aliased(UnitPrice,
alias=aggregated_unit_price, adapt_on_names=True)
Above, functions on ``aggregated_unit_price`` which refer to
``.price`` will return the
``func.sum(UnitPrice.price).label('price')`` column, as it is
matched on the name "price". Ordinarily, the "price" function
wouldn't have any "column correspondence" to the actual
``UnitPrice.price`` column as it is not a proxy of the original.
"""
if isinstance(element, expression.FromClause):
if adapt_on_names:
raise sa_exc.ArgumentError(
"adapt_on_names only applies to ORM elements"
)
return element.alias(name, flat=flat)
else:
return AliasedClass(
element,
alias=alias,
flat=flat,
name=name,
adapt_on_names=adapt_on_names,
)
def with_polymorphic(
base,
classes,
selectable=False,
flat=False,
polymorphic_on=None,
aliased=False,
innerjoin=False,
_use_mapper_path=False,
_existing_alias=None,
):
"""Produce an :class:`.AliasedClass` construct which specifies
columns for descendant mappers of the given base.
Using this method will ensure that each descendant mapper's
tables are included in the FROM clause, and will allow filter()
criterion to be used against those tables. The resulting
instances will also have those columns already loaded so that
no "post fetch" of those columns will be required.
.. seealso::
:ref:`with_polymorphic` - full discussion of
:func:`_orm.with_polymorphic`.
:param base: Base class to be aliased.
:param classes: a single class or mapper, or list of
class/mappers, which inherit from the base class.
Alternatively, it may also be the string ``'*'``, in which case
all descending mapped classes will be added to the FROM clause.
:param aliased: when True, the selectable will be wrapped in an
alias, that is ``(SELECT * FROM <fromclauses>) AS anon_1``.
This can be important when using the with_polymorphic()
to create the target of a JOIN on a backend that does not
support parenthesized joins, such as SQLite and older
versions of MySQL. However if the
:paramref:`.with_polymorphic.selectable` parameter is in use
with an existing :class:`_expression.Alias` construct,
then you should not
set this flag.
:param flat: Boolean, will be passed through to the
:meth:`_expression.FromClause.alias` call so that aliases of
:class:`_expression.Join`
objects don't include an enclosing SELECT. This can lead to more
efficient queries in many circumstances. A JOIN against a nested JOIN
will be rewritten as a JOIN against an aliased SELECT subquery on
backends that don't support this syntax.
Setting ``flat`` to ``True`` implies the ``aliased`` flag is
also ``True``.
.. versionadded:: 0.9.0
.. seealso:: :meth:`_expression.Join.alias`
:param selectable: a table or select() statement that will
be used in place of the generated FROM clause. This argument is
required if any of the desired classes use concrete table
inheritance, since SQLAlchemy currently cannot generate UNIONs
among tables automatically. If used, the ``selectable`` argument
must represent the full set of tables and columns mapped by every
mapped class. Otherwise, the unaccounted mapped columns will
result in their table being appended directly to the FROM clause
which will usually lead to incorrect results.
:param polymorphic_on: a column to be used as the "discriminator"
column for the given selectable. If not given, the polymorphic_on
attribute of the base classes' mapper will be used, if any. This
is useful for mappings that don't have polymorphic loading
behavior by default.
:param innerjoin: if True, an INNER JOIN will be used. This should
only be specified if querying for one specific subtype only
"""
primary_mapper = _class_to_mapper(base)
if _existing_alias:
assert _existing_alias.mapper is primary_mapper
classes = util.to_set(classes)
new_classes = set(
[mp.class_ for mp in _existing_alias.with_polymorphic_mappers]
)
if classes == new_classes:
return _existing_alias
else:
classes = classes.union(new_classes)
mappers, selectable = primary_mapper._with_polymorphic_args(
classes, selectable, innerjoin=innerjoin
)
if aliased or flat:
selectable = selectable.alias(flat=flat)
return AliasedClass(
base,
selectable,
with_polymorphic_mappers=mappers,
with_polymorphic_discriminator=polymorphic_on,
use_mapper_path=_use_mapper_path,
represents_outer_join=not innerjoin,
)
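# Illustrative sketch, not part of the SQLAlchemy source: with_polymorphic()
# above is typically used to include subclass tables in the FROM clause of a
# query against an inheritance hierarchy.  ``Employee``, ``Engineer`` and
# ``Manager`` are assumed mapped classes (Engineer/Manager inherit from
# Employee); ``session`` is supplied by the caller.
def _example_with_polymorphic(session, Employee, Engineer, Manager):
    # build an entity whose SELECT includes both subclass tables
    emp_poly = with_polymorphic(Employee, [Engineer, Manager])
    # subclass-local columns can now be used in criteria against emp_poly
    return (
        session.query(emp_poly)
        .filter(emp_poly.Engineer.engineer_info == "some info")
        .all()
    )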
def _orm_annotate(element, exclude=None):
"""Deep copy the given ClauseElement, annotating each element with the
"_orm_adapt" flag.
Elements within the exclude collection will be cloned but not annotated.
"""
return sql_util._deep_annotate(element, {"_orm_adapt": True}, exclude)
def _orm_deannotate(element):
"""Remove annotations that link a column to a particular mapping.
Note this doesn't affect "remote" and "foreign" annotations
passed by the :func:`_orm.foreign` and :func:`_orm.remote`
annotators.
"""
return sql_util._deep_deannotate(
element, values=("_orm_adapt", "parententity")
)
def _orm_full_deannotate(element):
return sql_util._deep_deannotate(element)
class _ORMJoin(expression.Join):
"""Extend Join to support ORM constructs as input."""
__visit_name__ = expression.Join.__visit_name__
def __init__(
self,
left,
right,
onclause=None,
isouter=False,
full=False,
_left_memo=None,
_right_memo=None,
):
left_info = inspection.inspect(left)
left_orm_info = getattr(left, "_joined_from_info", left_info)
right_info = inspection.inspect(right)
adapt_to = right_info.selectable
self._joined_from_info = right_info
self._left_memo = _left_memo
self._right_memo = _right_memo
if isinstance(onclause, util.string_types):
onclause = getattr(left_orm_info.entity, onclause)
if isinstance(onclause, attributes.QueryableAttribute):
on_selectable = onclause.comparator._source_selectable()
prop = onclause.property
elif isinstance(onclause, MapperProperty):
prop = onclause
on_selectable = prop.parent.selectable
else:
prop = None
if prop:
if sql_util.clause_is_present(on_selectable, left_info.selectable):
adapt_from = on_selectable
else:
adapt_from = left_info.selectable
(
pj,
sj,
source,
dest,
secondary,
target_adapter,
) = prop._create_joins(
source_selectable=adapt_from,
dest_selectable=adapt_to,
source_polymorphic=True,
dest_polymorphic=True,
of_type_mapper=right_info.mapper,
alias_secondary=True,
)
if sj is not None:
if isouter:
# note this is an inner join from secondary->right
right = sql.join(secondary, right, sj)
onclause = pj
else:
left = sql.join(left, secondary, pj, isouter)
onclause = sj
else:
onclause = pj
self._target_adapter = target_adapter
expression.Join.__init__(self, left, right, onclause, isouter, full)
if (
not prop
and getattr(right_info, "mapper", None)
and right_info.mapper.single
):
# if single inheritance target and we are using a manual
# or implicit ON clause, augment it the same way we'd augment the
# WHERE.
single_crit = right_info.mapper._single_table_criterion
if single_crit is not None:
if right_info.is_aliased_class:
single_crit = right_info._adapter.traverse(single_crit)
self.onclause = self.onclause & single_crit
def _splice_into_center(self, other):
"""Splice a join into the center.
Given join(a, b) and join(b, c), return join(a, b).join(c)
"""
leftmost = other
while isinstance(leftmost, sql.Join):
leftmost = leftmost.left
assert self.right is leftmost
left = _ORMJoin(
self.left,
other.left,
self.onclause,
isouter=self.isouter,
_left_memo=self._left_memo,
_right_memo=other._left_memo,
)
return _ORMJoin(
left,
other.right,
other.onclause,
isouter=other.isouter,
_right_memo=other._right_memo,
)
def join(
self,
right,
onclause=None,
isouter=False,
full=False,
join_to_left=None,
):
return _ORMJoin(self, right, onclause, full=full, isouter=isouter)
def outerjoin(self, right, onclause=None, full=False, join_to_left=None):
return _ORMJoin(self, right, onclause, isouter=True, full=full)
def join(
left, right, onclause=None, isouter=False, full=False, join_to_left=None
):
r"""Produce an inner join between left and right clauses.
:func:`_orm.join` is an extension to the core join interface
provided by :func:`_expression.join()`, where the
left and right selectables may be not only core selectable
objects such as :class:`_schema.Table`, but also mapped classes or
:class:`.AliasedClass` instances. The "on" clause can
be a SQL expression, or an attribute or string name
referencing a configured :func:`_orm.relationship`.
:func:`_orm.join` is not commonly needed in modern usage,
as its functionality is encapsulated within that of the
:meth:`_query.Query.join` method, which features a
significant amount of automation beyond :func:`_orm.join`
by itself. Explicit usage of :func:`_orm.join`
with :class:`_query.Query` involves usage of the
:meth:`_query.Query.select_from` method, as in::
from sqlalchemy.orm import join
session.query(User).\
select_from(join(User, Address, User.addresses)).\
filter(Address.email_address=='foo@bar.com')
In modern SQLAlchemy the above join can be written more
succinctly as::
session.query(User).\
join(User.addresses).\
filter(Address.email_address=='foo@bar.com')
See :meth:`_query.Query.join` for information on modern usage
of ORM level joins.
.. deprecated:: 0.8
the ``join_to_left`` parameter is deprecated, and will be removed
in a future release. The parameter has no effect.
"""
return _ORMJoin(left, right, onclause, isouter, full)
def outerjoin(left, right, onclause=None, full=False, join_to_left=None):
"""Produce a left outer join between left and right clauses.
This is the "outer join" version of the :func:`_orm.join` function,
featuring the same behavior except that an OUTER JOIN is generated.
See that function's documentation for other usage details.
"""
return _ORMJoin(left, right, onclause, True, full)
def with_parent(instance, prop, from_entity=None):
"""Create filtering criterion that relates this query's primary entity
to the given related instance, using established
:func:`_orm.relationship()`
configuration.
The SQL rendered is the same as that rendered when a lazy loader
would fire off from the given parent on that attribute, meaning
that the appropriate state is taken from the parent object in
Python without the need to render joins to the parent table
in the rendered statement.
:param instance:
An instance which has some :func:`_orm.relationship`.
    :param prop:
String property name, or class-bound attribute, which indicates
what relationship from the instance should be used to reconcile the
parent/child relationship.
:param from_entity:
Entity in which to consider as the left side. This defaults to the
"zero" entity of the :class:`_query.Query` itself.
.. versionadded:: 1.2
"""
if isinstance(prop, util.string_types):
mapper = object_mapper(instance)
prop = getattr(mapper.class_, prop).property
elif isinstance(prop, attributes.QueryableAttribute):
prop = prop.property
return prop._with_parent(instance, from_entity=from_entity)
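# Illustrative sketch, not part of the SQLAlchemy source: with_parent() above
# renders the same criteria a lazy load of ``User.addresses`` would use,
# without joining to the parent table.  ``User`` and ``Address`` are assumed
# mapped classes related via ``User.addresses``; ``some_user`` is an
# already-loaded ``User`` instance.
def _example_with_parent(session, User, Address, some_user):
    # select only the Address rows that belong to some_user
    return (
        session.query(Address)
        .filter(with_parent(some_user, User.addresses))
        .all()
    )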
def has_identity(object_):
"""Return True if the given object has a database
identity.
This typically corresponds to the object being
in either the persistent or detached state.
.. seealso::
:func:`.was_deleted`
"""
state = attributes.instance_state(object_)
return state.has_identity
def was_deleted(object_):
"""Return True if the given object was deleted
within a session flush.
This is regardless of whether or not the object is
persistent or detached.
.. seealso::
:attr:`.InstanceState.was_deleted`
"""
state = attributes.instance_state(object_)
return state.was_deleted
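# Illustrative sketch, not part of the SQLAlchemy source: the two predicates
# above distinguish pending objects from persistent/detached ones, and detect
# objects removed by a flush.  ``obj`` is assumed to be a newly constructed,
# not-yet-persisted mapped instance.
def _example_identity_checks(session, obj):
    session.add(obj)
    assert not has_identity(obj)  # pending: no database identity yet
    session.flush()
    assert has_identity(obj)      # persistent after the flush
    session.delete(obj)
    session.flush()
    assert was_deleted(obj)       # deleted within the flush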
def _entity_corresponds_to(given, entity):
"""determine if 'given' corresponds to 'entity', in terms
of an entity passed to Query that would match the same entity
being referred to elsewhere in the query.
"""
if entity.is_aliased_class:
if given.is_aliased_class:
if entity._base_alias() is given._base_alias():
return True
return False
elif given.is_aliased_class:
if given._use_mapper_path:
return entity in given.with_polymorphic_mappers
else:
return entity is given
return entity.common_parent(given)
def _entity_corresponds_to_use_path_impl(given, entity):
"""determine if 'given' corresponds to 'entity', in terms
of a path of loader options where a mapped attribute is taken to
be a member of a parent entity.
e.g.::
someoption(A).someoption(A.b) # -> fn(A, A) -> True
someoption(A).someoption(C.d) # -> fn(A, C) -> False
a1 = aliased(A)
someoption(a1).someoption(A.b) # -> fn(a1, A) -> False
someoption(a1).someoption(a1.b) # -> fn(a1, a1) -> True
wp = with_polymorphic(A, [A1, A2])
someoption(wp).someoption(A1.foo) # -> fn(wp, A1) -> False
someoption(wp).someoption(wp.A1.foo) # -> fn(wp, wp.A1) -> True
"""
if given.is_aliased_class:
return (
entity.is_aliased_class
and not entity._use_mapper_path
and (given is entity or given in entity._with_polymorphic_entities)
)
elif not entity.is_aliased_class:
return given.common_parent(entity.mapper)
else:
return (
entity._use_mapper_path
and given in entity.with_polymorphic_mappers
)
def _entity_isa(given, mapper):
"""determine if 'given' "is a" mapper, in terms of the given
would load rows of type 'mapper'.
"""
if given.is_aliased_class:
return mapper in given.with_polymorphic_mappers or given.mapper.isa(
mapper
)
elif given.with_polymorphic_mappers:
return mapper in given.with_polymorphic_mappers
else:
return given.isa(mapper)
def randomize_unitofwork():
"""Use random-ordering sets within the unit of work in order
to detect unit of work sorting issues.
This is a utility function that can be used to help reproduce
inconsistent unit of work sorting issues. For example,
if two kinds of objects A and B are being inserted, and
B has a foreign key reference to A - the A must be inserted first.
However, if there is no relationship between A and B, the unit of work
won't know to perform this sorting, and an operation may or may not
fail, depending on how the ordering works out. Since Python sets
and dictionaries have non-deterministic ordering, such an issue may
occur on some runs and not on others, and in practice it tends to
have a great dependence on the state of the interpreter. This leads
to so-called "heisenbugs" where changing entirely irrelevant aspects
of the test program still cause the failure behavior to change.
By calling ``randomize_unitofwork()`` when a script first runs, the
ordering of a key series of sets within the unit of work implementation
are randomized, so that the script can be minimized down to the
fundamental mapping and operation that's failing, while still reproducing
the issue on at least some runs.
This utility is also available when running the test suite via the
``--reversetop`` flag.
"""
from sqlalchemy.orm import unitofwork, session, mapper, dependency
from sqlalchemy.util import topological
from sqlalchemy.testing.util import RandomSet
topological.set = (
unitofwork.set
) = session.set = mapper.set = dependency.set = RandomSet
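# Illustrative sketch, not part of the SQLAlchemy source: a minimal driver for
# chasing a unit-of-work ordering "heisenbug" with the helper above.  The
# ``run_scenario`` callable, supplied by the caller, performs the suspect
# mapping and flush operations.
def _example_randomize_unitofwork(run_scenario, attempts=10):
    # randomize set ordering once, before any mappers or sessions are used
    randomize_unitofwork()
    for _ in range(attempts):
        # with randomized ordering, the failure should appear on some runs
        run_scenario()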
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/events.py
# orm/events.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""ORM event interfaces.
"""
import weakref
from . import instrumentation
from . import interfaces
from . import mapperlib
from .attributes import QueryableAttribute
from .base import _mapper_or_none
from .query import Query
from .scoping import scoped_session
from .session import Session
from .session import sessionmaker
from .. import event
from .. import exc
from .. import util
from ..util.compat import inspect_getfullargspec
class InstrumentationEvents(event.Events):
"""Events related to class instrumentation events.
The listeners here support being established against
any new style class, that is any object that is a subclass
of 'type'. Events will then be fired off for events
against that class. If the "propagate=True" flag is passed
to event.listen(), the event will fire off for subclasses
of that class as well.
The Python ``type`` builtin is also accepted as a target,
which when used has the effect of events being emitted
for all classes.
Note the "propagate" flag here is defaulted to ``True``,
unlike the other class level events where it defaults
to ``False``. This means that new subclasses will also
be the subject of these events, when a listener
is established on a superclass.
"""
_target_class_doc = "SomeBaseClass"
_dispatch_target = instrumentation.InstrumentationFactory
@classmethod
def _accept_with(cls, target):
if isinstance(target, type):
return _InstrumentationEventsHold(target)
else:
return None
@classmethod
def _listen(cls, event_key, propagate=True, **kw):
target, identifier, fn = (
event_key.dispatch_target,
event_key.identifier,
event_key._listen_fn,
)
def listen(target_cls, *arg):
listen_cls = target()
if propagate and issubclass(target_cls, listen_cls):
return fn(target_cls, *arg)
elif not propagate and target_cls is listen_cls:
return fn(target_cls, *arg)
def remove(ref):
key = event.registry._EventKey(
None,
identifier,
listen,
instrumentation._instrumentation_factory,
)
getattr(
instrumentation._instrumentation_factory.dispatch, identifier
).remove(key)
target = weakref.ref(target.class_, remove)
event_key.with_dispatch_target(
instrumentation._instrumentation_factory
).with_wrapper(listen).base_listen(**kw)
@classmethod
def _clear(cls):
super(InstrumentationEvents, cls)._clear()
instrumentation._instrumentation_factory.dispatch._clear()
def class_instrument(self, cls):
"""Called after the given class is instrumented.
To get at the :class:`.ClassManager`, use
:func:`.manager_of_class`.
"""
def class_uninstrument(self, cls):
"""Called before the given class is uninstrumented.
To get at the :class:`.ClassManager`, use
:func:`.manager_of_class`.
"""
def attribute_instrument(self, cls, key, inst):
"""Called when an attribute is instrumented."""
class _InstrumentationEventsHold(object):
"""temporary marker object used to transfer from _accept_with() to
_listen() on the InstrumentationEvents class.
"""
def __init__(self, class_):
self.class_ = class_
dispatch = event.dispatcher(InstrumentationEvents)
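# Illustrative sketch, not part of the SQLAlchemy source: an
# InstrumentationEvents listener established against the ``type`` builtin, so
# that it fires for every class the ORM instruments (as described in the
# InstrumentationEvents docstring above).
def _example_instrumentation_listener():
    def on_class_instrument(cls):
        # called after ``cls`` has been instrumented by the ORM
        print("instrumented: %s" % cls)

    event.listen(type, "class_instrument", on_class_instrument)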
class InstanceEvents(event.Events):
"""Define events specific to object lifecycle.
e.g.::
from sqlalchemy import event
def my_load_listener(target, context):
print("on load!")
event.listen(SomeClass, 'load', my_load_listener)
Available targets include:
* mapped classes
* unmapped superclasses of mapped or to-be-mapped classes
(using the ``propagate=True`` flag)
* :class:`_orm.Mapper` objects
* the :class:`_orm.Mapper` class itself and the :func:`.mapper`
function indicate listening for all mappers.
Instance events are closely related to mapper events, but
are more specific to the instance and its instrumentation,
rather than its system of persistence.
When using :class:`.InstanceEvents`, several modifiers are
available to the :func:`.event.listen` function.
:param propagate=False: When True, the event listener should
be applied to all inheriting classes as well as the
class which is the target of this listener.
:param raw=False: When True, the "target" argument passed
to applicable event listener functions will be the
instance's :class:`.InstanceState` management
object, rather than the mapped instance itself.
:param restore_load_context=False: Applies to the
:meth:`.InstanceEvents.load` and :meth:`.InstanceEvents.refresh`
events. Restores the loader context of the object when the event
hook is complete, so that ongoing eager load operations continue
to target the object appropriately. A warning is emitted if the
object is moved to a new loader context from within one of these
events if this flag is not set.
.. versionadded:: 1.3.14
"""
_target_class_doc = "SomeClass"
_dispatch_target = instrumentation.ClassManager
@classmethod
def _new_classmanager_instance(cls, class_, classmanager):
_InstanceEventsHold.populate(class_, classmanager)
@classmethod
@util.dependencies("sqlalchemy.orm")
def _accept_with(cls, orm, target):
if isinstance(target, instrumentation.ClassManager):
return target
elif isinstance(target, mapperlib.Mapper):
return target.class_manager
elif target is orm.mapper:
return instrumentation.ClassManager
elif isinstance(target, type):
if issubclass(target, mapperlib.Mapper):
return instrumentation.ClassManager
else:
manager = instrumentation.manager_of_class(target)
if manager:
return manager
else:
return _InstanceEventsHold(target)
return None
@classmethod
def _listen(
cls,
event_key,
raw=False,
propagate=False,
restore_load_context=False,
**kw
):
target, fn = (event_key.dispatch_target, event_key._listen_fn)
if not raw or restore_load_context:
def wrap(state, *arg, **kw):
if not raw:
target = state.obj()
else:
target = state
if restore_load_context:
runid = state.runid
try:
return fn(target, *arg, **kw)
finally:
if restore_load_context:
state.runid = runid
event_key = event_key.with_wrapper(wrap)
event_key.base_listen(propagate=propagate, **kw)
if propagate:
for mgr in target.subclass_managers(True):
event_key.with_dispatch_target(mgr).base_listen(propagate=True)
@classmethod
def _clear(cls):
super(InstanceEvents, cls)._clear()
_InstanceEventsHold._clear()
def first_init(self, manager, cls):
"""Called when the first instance of a particular mapping is called.
This event is called when the ``__init__`` method of a class
is called the first time for that particular class. The event
invokes before ``__init__`` actually proceeds as well as before
the :meth:`.InstanceEvents.init` event is invoked.
"""
def init(self, target, args, kwargs):
"""Receive an instance when its constructor is called.
This method is only called during a userland construction of
an object, in conjunction with the object's constructor, e.g.
its ``__init__`` method. It is not called when an object is
loaded from the database; see the :meth:`.InstanceEvents.load`
event in order to intercept a database load.
The event is called before the actual ``__init__`` constructor
of the object is called. The ``kwargs`` dictionary may be
modified in-place in order to affect what is passed to
``__init__``.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param args: positional arguments passed to the ``__init__`` method.
This is passed as a tuple and is currently immutable.
:param kwargs: keyword arguments passed to the ``__init__`` method.
This structure *can* be altered in place.
.. seealso::
:meth:`.InstanceEvents.init_failure`
:meth:`.InstanceEvents.load`
"""
def init_failure(self, target, args, kwargs):
"""Receive an instance when its constructor has been called,
and raised an exception.
This method is only called during a userland construction of
an object, in conjunction with the object's constructor, e.g.
its ``__init__`` method. It is not called when an object is loaded
from the database.
The event is invoked after an exception raised by the ``__init__``
method is caught. After the event
is invoked, the original exception is re-raised outwards, so that
the construction of the object still raises an exception. The
actual exception and stack trace raised should be present in
``sys.exc_info()``.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param args: positional arguments that were passed to the ``__init__``
method.
:param kwargs: keyword arguments that were passed to the ``__init__``
method.
.. seealso::
:meth:`.InstanceEvents.init`
:meth:`.InstanceEvents.load`
"""
def load(self, target, context):
"""Receive an object instance after it has been created via
``__new__``, and after initial attribute population has
occurred.
This typically occurs when the instance is created based on
incoming result rows, and is only called once for that
instance's lifetime.
.. warning::
During a result-row load, this event is invoked when the
first row received for this instance is processed. When using
eager loading with collection-oriented attributes, the additional
rows that are to be loaded / processed in order to load subsequent
collection items have not occurred yet. This has the effect
            both that collections will not be fully loaded and that
if an operation occurs within this event handler that emits
another database load operation for the object, the "loading
context" for the object can change and interfere with the
existing eager loaders still in progress.
Examples of what can cause the "loading context" to change within
the event handler include, but are not necessarily limited to:
* accessing deferred attributes that weren't part of the row,
will trigger an "undefer" operation and refresh the object
* accessing attributes on a joined-inheritance subclass that
weren't part of the row, will trigger a refresh operation.
As of SQLAlchemy 1.3.14, a warning is emitted when this occurs. The
:paramref:`.InstanceEvents.restore_load_context` option may be
used on the event to prevent this warning; this will ensure that
the existing loading context is maintained for the object after the
event is called::
@event.listens_for(
SomeClass, "load", restore_load_context=True)
def on_load(instance, context):
instance.some_unloaded_attribute
.. versionchanged:: 1.3.14 Added
:paramref:`.InstanceEvents.restore_load_context`
and :paramref:`.SessionEvents.restore_load_context` flags which
apply to "on load" events, which will ensure that the loading
context for an object is restored when the event hook is
complete; a warning is emitted if the load context of the object
changes without this flag being set.
The :meth:`.InstanceEvents.load` event is also available in a
class-method decorator format called :func:`_orm.reconstructor`.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param context: the :class:`.QueryContext` corresponding to the
current :class:`_query.Query` in progress. This argument may be
``None`` if the load does not correspond to a :class:`_query.Query`,
such as during :meth:`.Session.merge`.
.. seealso::
:meth:`.InstanceEvents.init`
:meth:`.InstanceEvents.refresh`
:meth:`.SessionEvents.loaded_as_persistent`
:ref:`mapping_constructors`
"""
def refresh(self, target, context, attrs):
"""Receive an object instance after one or more attributes have
been refreshed from a query.
Contrast this to the :meth:`.InstanceEvents.load` method, which
is invoked when the object is first loaded from a query.
.. note:: This event is invoked within the loader process before
eager loaders may have been completed, and the object's state may
not be complete. Additionally, invoking row-level refresh
operations on the object will place the object into a new loader
context, interfering with the existing load context. See the note
on :meth:`.InstanceEvents.load` for background on making use of the
:paramref:`.InstanceEvents.restore_load_context` parameter, in
order to resolve this scenario.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param context: the :class:`.QueryContext` corresponding to the
current :class:`_query.Query` in progress.
:param attrs: sequence of attribute names which
were populated, or None if all column-mapped, non-deferred
attributes were populated.
.. seealso::
:meth:`.InstanceEvents.load`
"""
def refresh_flush(self, target, flush_context, attrs):
"""Receive an object instance after one or more attributes that
contain a column-level default or onupdate handler have been refreshed
during persistence of the object's state.
This event is the same as :meth:`.InstanceEvents.refresh` except
it is invoked within the unit of work flush process, and includes
only non-primary-key columns that have column level default or
onupdate handlers, including Python callables as well as server side
defaults and triggers which may be fetched via the RETURNING clause.
.. note::
While the :meth:`.InstanceEvents.refresh_flush` event is triggered
for an object that was INSERTed as well as for an object that was
UPDATEd, the event is geared primarily towards the UPDATE process;
it is mostly an internal artifact that INSERT actions can also
trigger this event, and note that **primary key columns for an
INSERTed row are explicitly omitted** from this event. In order to
intercept the newly INSERTed state of an object, the
:meth:`.SessionEvents.pending_to_persistent` and
:meth:`.MapperEvents.after_insert` are better choices.
.. versionadded:: 1.0.5
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
:param attrs: sequence of attribute names which
were populated.
.. seealso::
:ref:`orm_server_defaults`
:ref:`metadata_defaults_toplevel`
"""
def expire(self, target, attrs):
"""Receive an object instance after its attributes or some subset
have been expired.
        'attrs' is a sequence of attribute names. If None, the entire
state was expired.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param attrs: sequence of attribute
names which were expired, or None if all attributes were
expired.
"""
def pickle(self, target, state_dict):
"""Receive an object instance when its associated state is
being pickled.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param state_dict: the dictionary returned by
:class:`.InstanceState.__getstate__`, containing the state
to be pickled.
"""
def unpickle(self, target, state_dict):
"""Receive an object instance after its associated state has
been unpickled.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param state_dict: the dictionary sent to
:class:`.InstanceState.__setstate__`, containing the state
dictionary which was pickled.
"""
class _EventsHold(event.RefCollection):
"""Hold onto listeners against unmapped, uninstrumented classes.
Establish _listen() for that class' mapper/instrumentation when
those objects are created for that class.
"""
def __init__(self, class_):
self.class_ = class_
@classmethod
def _clear(cls):
cls.all_holds.clear()
class HoldEvents(object):
_dispatch_target = None
@classmethod
def _listen(
cls, event_key, raw=False, propagate=False, retval=False, **kw
):
target = event_key.dispatch_target
if target.class_ in target.all_holds:
collection = target.all_holds[target.class_]
else:
collection = target.all_holds[target.class_] = {}
event.registry._stored_in_collection(event_key, target)
collection[event_key._key] = (event_key, raw, propagate, retval)
if propagate:
stack = list(target.class_.__subclasses__())
while stack:
subclass = stack.pop(0)
stack.extend(subclass.__subclasses__())
subject = target.resolve(subclass)
if subject is not None:
# we are already going through __subclasses__()
# so leave generic propagate flag False
event_key.with_dispatch_target(subject).listen(
raw=raw, propagate=False, retval=retval, **kw
)
def remove(self, event_key):
target = event_key.dispatch_target
if isinstance(target, _EventsHold):
collection = target.all_holds[target.class_]
del collection[event_key._key]
@classmethod
def populate(cls, class_, subject):
for subclass in class_.__mro__:
if subclass in cls.all_holds:
collection = cls.all_holds[subclass]
for event_key, raw, propagate, retval in collection.values():
if propagate or subclass is class_:
# since we can't be sure in what order different
# classes in a hierarchy are triggered with
# populate(), we rely upon _EventsHold for all event
# assignment, instead of using the generic propagate
# flag.
event_key.with_dispatch_target(subject).listen(
raw=raw, propagate=False, retval=retval
)
class _InstanceEventsHold(_EventsHold):
all_holds = weakref.WeakKeyDictionary()
def resolve(self, class_):
return instrumentation.manager_of_class(class_)
class HoldInstanceEvents(_EventsHold.HoldEvents, InstanceEvents):
pass
dispatch = event.dispatcher(HoldInstanceEvents)
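# Illustrative sketch, not part of the SQLAlchemy source: typical
# InstanceEvents listeners for the "init" and "load" hooks documented above.
# ``SomeClass`` is an assumed mapped class whose __init__ accepts a
# ``status`` keyword argument.
def _example_instance_event_listeners(SomeClass):
    @event.listens_for(SomeClass, "init")
    def on_init(target, args, kwargs):
        # runs before __init__; the kwargs dictionary may be mutated in place
        kwargs.setdefault("status", "new")

    @event.listens_for(SomeClass, "load")
    def on_load(target, context):
        # runs once per instance as it is first loaded from a result row
        target.loaded_from_db = True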
class MapperEvents(event.Events):
"""Define events specific to mappings.
e.g.::
from sqlalchemy import event
def my_before_insert_listener(mapper, connection, target):
# execute a stored procedure upon INSERT,
# apply the value to the row to be inserted
target.calculated_value = connection.scalar(
"select my_special_function(%d)"
% target.special_number)
# associate the listener function with SomeClass,
# to execute during the "before_insert" hook
event.listen(
SomeClass, 'before_insert', my_before_insert_listener)
Available targets include:
* mapped classes
* unmapped superclasses of mapped or to-be-mapped classes
(using the ``propagate=True`` flag)
* :class:`_orm.Mapper` objects
* the :class:`_orm.Mapper` class itself and the :func:`.mapper`
function indicate listening for all mappers.
Mapper events provide hooks into critical sections of the
mapper, including those related to object instrumentation,
object loading, and object persistence. In particular, the
persistence methods :meth:`~.MapperEvents.before_insert`,
and :meth:`~.MapperEvents.before_update` are popular
places to augment the state being persisted - however, these
methods operate with several significant restrictions. The
user is encouraged to evaluate the
:meth:`.SessionEvents.before_flush` and
:meth:`.SessionEvents.after_flush` methods as more
flexible and user-friendly hooks in which to apply
additional database state during a flush.
When using :class:`.MapperEvents`, several modifiers are
available to the :func:`.event.listen` function.
:param propagate=False: When True, the event listener should
be applied to all inheriting mappers and/or the mappers of
inheriting classes, as well as any
mapper which is the target of this listener.
:param raw=False: When True, the "target" argument passed
to applicable event listener functions will be the
instance's :class:`.InstanceState` management
object, rather than the mapped instance itself.
:param retval=False: when True, the user-defined event function
must have a return value, the purpose of which is either to
control subsequent event propagation, or to otherwise alter
the operation in progress by the mapper. Possible return
values are:
* ``sqlalchemy.orm.interfaces.EXT_CONTINUE`` - continue event
processing normally.
* ``sqlalchemy.orm.interfaces.EXT_STOP`` - cancel all subsequent
event handlers in the chain.
* other values - the return value specified by specific listeners.
"""
_target_class_doc = "SomeClass"
_dispatch_target = mapperlib.Mapper
@classmethod
def _new_mapper_instance(cls, class_, mapper):
_MapperEventsHold.populate(class_, mapper)
@classmethod
@util.dependencies("sqlalchemy.orm")
def _accept_with(cls, orm, target):
if target is orm.mapper:
return mapperlib.Mapper
elif isinstance(target, type):
if issubclass(target, mapperlib.Mapper):
return target
else:
mapper = _mapper_or_none(target)
if mapper is not None:
return mapper
else:
return _MapperEventsHold(target)
else:
return target
@classmethod
def _listen(
cls, event_key, raw=False, retval=False, propagate=False, **kw
):
target, identifier, fn = (
event_key.dispatch_target,
event_key.identifier,
event_key._listen_fn,
)
if (
identifier in ("before_configured", "after_configured")
and target is not mapperlib.Mapper
):
util.warn(
"'before_configured' and 'after_configured' ORM events "
"only invoke with the mapper() function or Mapper class "
"as the target."
)
if not raw or not retval:
if not raw:
meth = getattr(cls, identifier)
try:
target_index = (
inspect_getfullargspec(meth)[0].index("target") - 1
)
except ValueError:
target_index = None
def wrap(*arg, **kw):
if not raw and target_index is not None:
arg = list(arg)
arg[target_index] = arg[target_index].obj()
if not retval:
fn(*arg, **kw)
return interfaces.EXT_CONTINUE
else:
return fn(*arg, **kw)
event_key = event_key.with_wrapper(wrap)
if propagate:
for mapper in target.self_and_descendants:
event_key.with_dispatch_target(mapper).base_listen(
propagate=True, **kw
)
else:
event_key.base_listen(**kw)
@classmethod
def _clear(cls):
super(MapperEvents, cls)._clear()
_MapperEventsHold._clear()
def instrument_class(self, mapper, class_):
r"""Receive a class when the mapper is first constructed,
before instrumentation is applied to the mapped class.
This event is the earliest phase of mapper construction.
Most attributes of the mapper are not yet initialized.
This listener can either be applied to the :class:`_orm.Mapper`
class overall, or to any un-mapped class which serves as a base
for classes that will be mapped (using the ``propagate=True`` flag)::
Base = declarative_base()
@event.listens_for(Base, "instrument_class", propagate=True)
def on_new_class(mapper, cls_):
" ... "
:param mapper: the :class:`_orm.Mapper` which is the target
of this event.
:param class\_: the mapped class.
"""
def before_mapper_configured(self, mapper, class_):
"""Called right before a specific mapper is to be configured.
This event is intended to allow a specific mapper to be skipped during
the configure step, by returning the :attr:`.orm.interfaces.EXT_SKIP`
symbol which indicates to the :func:`.configure_mappers` call that this
particular mapper (or hierarchy of mappers, if ``propagate=True`` is
used) should be skipped in the current configuration run. When one or
        more mappers are skipped, the "new mappers" flag will remain set,
meaning the :func:`.configure_mappers` function will continue to be
called when mappers are used, to continue to try to configure all
available mappers.
In comparison to the other configure-level events,
:meth:`.MapperEvents.before_configured`,
:meth:`.MapperEvents.after_configured`, and
:meth:`.MapperEvents.mapper_configured`, the
        :meth:`.MapperEvents.before_mapper_configured` event provides for a
meaningful return value when it is registered with the ``retval=True``
parameter.
.. versionadded:: 1.3
e.g.::
from sqlalchemy.orm import EXT_SKIP
Base = declarative_base()
DontConfigureBase = declarative_base()
@event.listens_for(
DontConfigureBase,
"before_mapper_configured", retval=True, propagate=True)
def dont_configure(mapper, cls):
return EXT_SKIP
.. seealso::
:meth:`.MapperEvents.before_configured`
:meth:`.MapperEvents.after_configured`
:meth:`.MapperEvents.mapper_configured`
"""
def mapper_configured(self, mapper, class_):
r"""Called when a specific mapper has completed its own configuration
within the scope of the :func:`.configure_mappers` call.
The :meth:`.MapperEvents.mapper_configured` event is invoked
for each mapper that is encountered when the
:func:`_orm.configure_mappers` function proceeds through the current
list of not-yet-configured mappers.
:func:`_orm.configure_mappers` is typically invoked
automatically as mappings are first used, as well as each time
new mappers have been made available and new mapper use is
detected.
When the event is called, the mapper should be in its final
state, but **not including backrefs** that may be invoked from
other mappers; they might still be pending within the
configuration operation. Bidirectional relationships that
are instead configured via the
:paramref:`.orm.relationship.back_populates` argument
*will* be fully available, since this style of relationship does not
rely upon other possibly-not-configured mappers to know that they
exist.
For an event that is guaranteed to have **all** mappers ready
to go including backrefs that are defined only on other
mappings, use the :meth:`.MapperEvents.after_configured`
event; this event invokes only after all known mappings have been
fully configured.
The :meth:`.MapperEvents.mapper_configured` event, unlike
:meth:`.MapperEvents.before_configured` or
:meth:`.MapperEvents.after_configured`,
is called for each mapper/class individually, and the mapper is
passed to the event itself. It also is called exactly once for
a particular mapper. The event is therefore useful for
configurational steps that benefit from being invoked just once
on a specific mapper basis, which don't require that "backref"
configurations are necessarily ready yet.
:param mapper: the :class:`_orm.Mapper` which is the target
of this event.
:param class\_: the mapped class.
.. seealso::
:meth:`.MapperEvents.before_configured`
:meth:`.MapperEvents.after_configured`
:meth:`.MapperEvents.before_mapper_configured`
"""
# TODO: need coverage for this event
def before_configured(self):
"""Called before a series of mappers have been configured.
The :meth:`.MapperEvents.before_configured` event is invoked
each time the :func:`_orm.configure_mappers` function is
invoked, before the function has done any of its work.
:func:`_orm.configure_mappers` is typically invoked
automatically as mappings are first used, as well as each time
new mappers have been made available and new mapper use is
detected.
This event can **only** be applied to the :class:`_orm.Mapper` class
or :func:`.mapper` function, and not to individual mappings or
mapped classes. It is only invoked for all mappings as a whole::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "before_configured")
def go():
# ...
Contrast this event to :meth:`.MapperEvents.after_configured`,
which is invoked after the series of mappers has been configured,
as well as :meth:`.MapperEvents.before_mapper_configured`
and :meth:`.MapperEvents.mapper_configured`, which are both invoked
on a per-mapper basis.
Theoretically this event is called once per
application, but is actually called any time new mappers
are to be affected by a :func:`_orm.configure_mappers`
call. If new mappings are constructed after existing ones have
already been used, this event will likely be called again. To ensure
that a particular event is only called once and no further, the
``once=True`` argument (new in 0.9.4) can be applied::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "before_configured", once=True)
def go():
# ...
.. versionadded:: 0.9.3
.. seealso::
:meth:`.MapperEvents.before_mapper_configured`
:meth:`.MapperEvents.mapper_configured`
:meth:`.MapperEvents.after_configured`
"""
def after_configured(self):
"""Called after a series of mappers have been configured.
The :meth:`.MapperEvents.after_configured` event is invoked
each time the :func:`_orm.configure_mappers` function is
invoked, after the function has completed its work.
:func:`_orm.configure_mappers` is typically invoked
automatically as mappings are first used, as well as each time
new mappers have been made available and new mapper use is
detected.
Contrast this event to the :meth:`.MapperEvents.mapper_configured`
event, which is called on a per-mapper basis while the configuration
operation proceeds; unlike that event, when this event is invoked,
all cross-configurations (e.g. backrefs) will also have been made
available for any mappers that were pending.
Also contrast to :meth:`.MapperEvents.before_configured`,
which is invoked before the series of mappers has been configured.
This event can **only** be applied to the :class:`_orm.Mapper` class
or :func:`.mapper` function, and not to individual mappings or
mapped classes. It is only invoked for all mappings as a whole::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "after_configured")
def go():
# ...
Theoretically this event is called once per
application, but is actually called any time new mappers
have been affected by a :func:`_orm.configure_mappers`
call. If new mappings are constructed after existing ones have
already been used, this event will likely be called again. To ensure
that a particular event is only called once and no further, the
``once=True`` argument (new in 0.9.4) can be applied::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "after_configured", once=True)
def go():
# ...
.. seealso::
:meth:`.MapperEvents.before_mapper_configured`
:meth:`.MapperEvents.mapper_configured`
:meth:`.MapperEvents.before_configured`
"""
def before_insert(self, mapper, connection, target):
"""Receive an object instance before an INSERT statement
is emitted corresponding to that instance.
This event is used to modify local, non-object related
attributes on the instance before an INSERT occurs, as well
as to emit additional SQL statements on the given
connection.
The event is often called for a batch of objects of the
same class before their INSERT statements are emitted at
once in a later step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`_engine.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`_orm.Mapper` which is the target
of this event.
:param connection: the :class:`_engine.Connection` being used to
emit INSERT statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def after_insert(self, mapper, connection, target):
"""Receive an object instance after an INSERT statement
is emitted corresponding to that instance.
This event is used to modify in-Python-only
state on the instance after an INSERT occurs, as well
as to emit additional SQL statements on the given
connection.
The event is often called for a batch of objects of the
same class after their INSERT statements have been
emitted at once in a previous step. In the extremely
rare case that this is not desirable, the
:func:`.mapper` can be configured with ``batch=False``,
which will cause batches of instances to be broken up
into individual (and more poorly performing)
event->persist->event steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`_engine.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`_orm.Mapper` which is the target
of this event.
:param connection: the :class:`_engine.Connection` being used to
emit INSERT statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def before_update(self, mapper, connection, target):
"""Receive an object instance before an UPDATE statement
is emitted corresponding to that instance.
This event is used to modify local, non-object related
attributes on the instance before an UPDATE occurs, as well
as to emit additional SQL statements on the given
connection.
This method is called for all instances that are
marked as "dirty", *even those which have no net changes
to their column-based attributes*. An object is marked
as dirty when any of its column-based attributes have a
"set attribute" operation called or when any of its
collections are modified. If, at update time, no
column-based attributes have any net changes, no UPDATE
statement will be issued. This means that an instance
being sent to :meth:`~.MapperEvents.before_update` is
*not* a guarantee that an UPDATE statement will be
issued, although you can affect the outcome here by
modifying attributes so that a net change in value does
exist.
To detect if the column-based attributes on the object have net
changes, and will therefore generate an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
The event is often called for a batch of objects of the
same class before their UPDATE statements are emitted at
once in a later step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`_engine.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`_orm.Mapper` which is the target
of this event.
:param connection: the :class:`_engine.Connection` being used to
emit UPDATE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def after_update(self, mapper, connection, target):
"""Receive an object instance after an UPDATE statement
is emitted corresponding to that instance.
This event is used to modify in-Python-only
state on the instance after an UPDATE occurs, as well
as to emit additional SQL statements on the given
connection.
This method is called for all instances that are
marked as "dirty", *even those which have no net changes
to their column-based attributes*, and for which
no UPDATE statement has proceeded. An object is marked
as dirty when any of its column-based attributes have a
"set attribute" operation called or when any of its
collections are modified. If, at update time, no
column-based attributes have any net changes, no UPDATE
statement will be issued. This means that an instance
being sent to :meth:`~.MapperEvents.after_update` is
*not* a guarantee that an UPDATE statement has been
issued.
To detect if the column-based attributes on the object have net
changes, and therefore resulted in an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
The event is often called for a batch of objects of the
same class after their UPDATE statements have been emitted at
once in a previous step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`_engine.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`_orm.Mapper` which is the target
of this event.
:param connection: the :class:`_engine.Connection` being used to
emit UPDATE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def before_delete(self, mapper, connection, target):
"""Receive an object instance before a DELETE statement
is emitted corresponding to that instance.
This event is used to emit additional SQL statements on
the given connection as well as to perform application
specific bookkeeping related to a deletion event.
The event is often called for a batch of objects of the
same class before their DELETE statements are emitted at
once in a later step.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`_engine.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`_orm.Mapper` which is the target
of this event.
:param connection: the :class:`_engine.Connection` being used to
emit DELETE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being deleted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def after_delete(self, mapper, connection, target):
"""Receive an object instance after a DELETE statement
has been emitted corresponding to that instance.
This event is used to emit additional SQL statements on
the given connection as well as to perform application
specific bookkeeping related to a deletion event.
The event is often called for a batch of objects of the
same class after their DELETE statements have been emitted at
once in a previous step.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`_engine.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`_orm.Mapper` which is the target
of this event.
:param connection: the :class:`_engine.Connection` being used to
emit DELETE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being deleted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
class _MapperEventsHold(_EventsHold):
all_holds = weakref.WeakKeyDictionary()
def resolve(self, class_):
return _mapper_or_none(class_)
class HoldMapperEvents(_EventsHold.HoldEvents, MapperEvents):
pass
dispatch = event.dispatcher(HoldMapperEvents)
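# Illustrative sketch, not part of the SQLAlchemy source: a MapperEvents
# "before_insert" listener that stamps a timestamp attribute just before the
# INSERT is emitted, staying within the "local attributes only" guidelines
# noted in the docstrings above.  ``SomeClass`` is an assumed mapped class
# with a ``created_at`` column.
def _example_before_insert_listener(SomeClass):
    import datetime

    @event.listens_for(SomeClass, "before_insert")
    def stamp_created_at(mapper, connection, target):
        # mutate only row-local state; heavier work belongs in
        # SessionEvents.before_flush
        target.created_at = datetime.datetime.utcnow()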
_sessionevents_lifecycle_event_names = set()
class SessionEvents(event.Events):
"""Define events specific to :class:`.Session` lifecycle.
e.g.::
from sqlalchemy import event
from sqlalchemy.orm import sessionmaker
def my_before_commit(session):
print("before commit!")
Session = sessionmaker()
event.listen(Session, "before_commit", my_before_commit)
The :func:`~.event.listen` function will accept
:class:`.Session` objects as well as the return result
of :class:`~.sessionmaker()` and :class:`~.scoped_session()`.
Additionally, it accepts the :class:`.Session` class which
will apply listeners to all :class:`.Session` instances
globally.
:param raw=False: When True, the "target" argument passed
to applicable event listener functions that work on individual
objects will be the instance's :class:`.InstanceState` management
object, rather than the mapped instance itself.
.. versionadded:: 1.3.14
:param restore_load_context=False: Applies to the
:meth:`.SessionEvents.loaded_as_persistent` event. Restores the loader
context of the object when the event hook is complete, so that ongoing
eager load operations continue to target the object appropriately. A
warning is emitted if the object is moved to a new loader context from
within this event if this flag is not set.
.. versionadded:: 1.3.14
"""
_target_class_doc = "SomeSessionOrFactory"
_dispatch_target = Session
def _lifecycle_event(fn):
_sessionevents_lifecycle_event_names.add(fn.__name__)
return fn
@classmethod
def _accept_with(cls, target):
if isinstance(target, scoped_session):
target = target.session_factory
if not isinstance(target, sessionmaker) and (
not isinstance(target, type) or not issubclass(target, Session)
):
raise exc.ArgumentError(
"Session event listen on a scoped_session "
"requires that its creation callable "
"is associated with the Session class."
)
if isinstance(target, sessionmaker):
return target.class_
elif isinstance(target, type):
if issubclass(target, scoped_session):
return Session
elif issubclass(target, Session):
return target
elif isinstance(target, Session):
return target
else:
return None
@classmethod
def _listen(cls, event_key, raw=False, restore_load_context=False, **kw):
is_instance_event = (
event_key.identifier in _sessionevents_lifecycle_event_names
)
if is_instance_event:
if not raw or restore_load_context:
fn = event_key._listen_fn
def wrap(session, state, *arg, **kw):
if not raw:
target = state.obj()
if target is None:
# existing behavior is that if the object is
# garbage collected, no event is emitted
return
else:
target = state
if restore_load_context:
runid = state.runid
try:
return fn(session, target, *arg, **kw)
finally:
if restore_load_context:
state.runid = runid
event_key = event_key.with_wrapper(wrap)
event_key.base_listen(**kw)
def after_transaction_create(self, session, transaction):
"""Execute when a new :class:`.SessionTransaction` is created.
This event differs from :meth:`~.SessionEvents.after_begin`
in that it occurs for each :class:`.SessionTransaction`
overall, as opposed to when transactions are begun
on individual database connections. It is also invoked
for nested transactions and subtransactions, and is always
matched by a corresponding
:meth:`~.SessionEvents.after_transaction_end` event
(assuming normal operation of the :class:`.Session`).
:param session: the target :class:`.Session`.
:param transaction: the target :class:`.SessionTransaction`.
To detect if this is the outermost
:class:`.SessionTransaction`, as opposed to a "subtransaction" or a
SAVEPOINT, test that the :attr:`.SessionTransaction.parent` attribute
is ``None``::
@event.listens_for(session, "after_transaction_create")
def after_transaction_create(session, transaction):
if transaction.parent is None:
# work with top-level transaction
To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the
:attr:`.SessionTransaction.nested` attribute::
@event.listens_for(session, "after_transaction_create")
def after_transaction_create(session, transaction):
if transaction.nested:
# work with SAVEPOINT transaction
.. seealso::
:class:`.SessionTransaction`
:meth:`~.SessionEvents.after_transaction_end`
"""
def after_transaction_end(self, session, transaction):
"""Execute when the span of a :class:`.SessionTransaction` ends.
This event differs from :meth:`~.SessionEvents.after_commit`
in that it corresponds to all :class:`.SessionTransaction`
objects in use, including those for nested transactions
and subtransactions, and is always matched by a corresponding
:meth:`~.SessionEvents.after_transaction_create` event.
:param session: the target :class:`.Session`.
:param transaction: the target :class:`.SessionTransaction`.
To detect if this is the outermost
:class:`.SessionTransaction`, as opposed to a "subtransaction" or a
SAVEPOINT, test that the :attr:`.SessionTransaction.parent` attribute
is ``None``::
@event.listens_for(session, "after_transaction_create")
def after_transaction_end(session, transaction):
if transaction.parent is None:
# work with top-level transaction
To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the
:attr:`.SessionTransaction.nested` attribute::
@event.listens_for(session, "after_transaction_create")
def after_transaction_end(session, transaction):
if transaction.nested:
# work with SAVEPOINT transaction
.. seealso::
:class:`.SessionTransaction`
:meth:`~.SessionEvents.after_transaction_create`
"""
def before_commit(self, session):
"""Execute before commit is called.
.. note::
The :meth:`~.SessionEvents.before_commit` hook is *not* per-flush,
that is, the :class:`.Session` can emit SQL to the database
many times within the scope of a transaction.
For interception of these events, use the
:meth:`~.SessionEvents.before_flush`,
:meth:`~.SessionEvents.after_flush`, or
:meth:`~.SessionEvents.after_flush_postexec`
events.
:param session: The target :class:`.Session`.
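        For example, a minimal listener sketch, assuming ``Session`` is a
        :class:`.sessionmaker`-produced class (the check shown is purely
        illustrative)::
            from sqlalchemy import event
            @event.listens_for(Session, "before_commit")
            def receive_before_commit(session):
                # session.new / session.dirty still reflect any unflushed
                # changes at this point
                if session.new or session.dirty:
                    print("committing with pending changes")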
.. seealso::
:meth:`~.SessionEvents.after_commit`
:meth:`~.SessionEvents.after_begin`
:meth:`~.SessionEvents.after_transaction_create`
:meth:`~.SessionEvents.after_transaction_end`
"""
def after_commit(self, session):
"""Execute after a commit has occurred.
.. note::
The :meth:`~.SessionEvents.after_commit` hook is *not* per-flush,
that is, the :class:`.Session` can emit SQL to the database
many times within the scope of a transaction.
For interception of these events, use the
:meth:`~.SessionEvents.before_flush`,
:meth:`~.SessionEvents.after_flush`, or
:meth:`~.SessionEvents.after_flush_postexec`
events.
.. note::
The :class:`.Session` is not in an active transaction
when the :meth:`~.SessionEvents.after_commit` event is invoked,
and therefore can not emit SQL. To emit SQL corresponding to
every transaction, use the :meth:`~.SessionEvents.before_commit`
event.
:param session: The target :class:`.Session`.
.. seealso::
:meth:`~.SessionEvents.before_commit`
:meth:`~.SessionEvents.after_begin`
:meth:`~.SessionEvents.after_transaction_create`
:meth:`~.SessionEvents.after_transaction_end`
"""
def after_rollback(self, session):
"""Execute after a real DBAPI rollback has occurred.
Note that this event only fires when the *actual* rollback against
the database occurs - it does *not* fire each time the
:meth:`.Session.rollback` method is called, if the underlying
DBAPI transaction has already been rolled back. In many
cases, the :class:`.Session` will not be in
an "active" state during this event, as the current
transaction is not valid. To acquire a :class:`.Session`
which is active after the outermost rollback has proceeded,
use the :meth:`.SessionEvents.after_soft_rollback` event, checking the
:attr:`.Session.is_active` flag.
:param session: The target :class:`.Session`.
"""
def after_soft_rollback(self, session, previous_transaction):
"""Execute after any rollback has occurred, including "soft"
rollbacks that don't actually emit at the DBAPI level.
This corresponds to both nested and outer rollbacks, i.e.
the innermost rollback that calls the DBAPI's
rollback() method, as well as the enclosing rollback
calls that only pop themselves from the transaction stack.
The given :class:`.Session` can be used to invoke SQL and
:meth:`.Session.query` operations after an outermost rollback
by first checking the :attr:`.Session.is_active` flag::
@event.listens_for(Session, "after_soft_rollback")
def do_something(session, previous_transaction):
if session.is_active:
session.execute("select * from some_table")
:param session: The target :class:`.Session`.
:param previous_transaction: The :class:`.SessionTransaction`
transactional marker object which was just closed. The current
:class:`.SessionTransaction` for the given :class:`.Session` is
available via the :attr:`.Session.transaction` attribute.
"""
def before_flush(self, session, flush_context, instances):
"""Execute before flush process has started.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
:param instances: Usually ``None``, this is the collection of
objects which can be passed to the :meth:`.Session.flush` method
(note this usage is deprecated).
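        For example, a sketch of an auditing listener, where ``Session`` is
        assumed to be a :class:`.sessionmaker` and ``Audited`` /
        ``AuditEntry`` are hypothetical mapped classes::
            from sqlalchemy import event
            @event.listens_for(Session, "before_flush")
            def receive_before_flush(session, flush_context, instances):
                # objects added here become part of the same flush
                for obj in session.dirty:
                    if isinstance(obj, Audited):
                        session.add(AuditEntry(target=obj))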
.. seealso::
:meth:`~.SessionEvents.after_flush`
:meth:`~.SessionEvents.after_flush_postexec`
:ref:`session_persistence_events`
"""
def after_flush(self, session, flush_context):
"""Execute after flush has completed, but before commit has been
called.
Note that the session's state is still in pre-flush, i.e. 'new',
'dirty', and 'deleted' lists still show pre-flush state as well
as the history settings on instance attributes.
.. warning:: This event runs after the :class:`.Session` has emitted
SQL to modify the database, but **before** it has altered its
internal state to reflect those changes, including that newly
inserted objects are placed into the identity map. ORM operations
emitted within this event such as loads of related items
may produce new identity map entries that will immediately
be replaced, sometimes causing confusing results. SQLAlchemy will
emit a warning for this condition as of version 1.3.9.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
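        For example, a sketch that inspects the pre-flush collections,
        assuming ``Session`` is a :class:`.sessionmaker`::
            from sqlalchemy import event
            @event.listens_for(Session, "after_flush")
            def receive_after_flush(session, flush_context):
                # 'new' still reflects the pre-flush state at this point
                for obj in session.new:
                    print("flushed new object: %s" % obj)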
.. seealso::
:meth:`~.SessionEvents.before_flush`
:meth:`~.SessionEvents.after_flush_postexec`
:ref:`session_persistence_events`
"""
def after_flush_postexec(self, session, flush_context):
"""Execute after flush has completed, and after the post-exec
state occurs.
This will be when the 'new', 'dirty', and 'deleted' lists are in
their final state. An actual commit() may or may not have
occurred, depending on whether or not the flush started its own
transaction or participated in a larger transaction.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
.. seealso::
:meth:`~.SessionEvents.before_flush`
:meth:`~.SessionEvents.after_flush`
:ref:`session_persistence_events`
"""
def after_begin(self, session, transaction, connection):
"""Execute after a transaction is begun on a connection
:param session: The target :class:`.Session`.
:param transaction: The :class:`.SessionTransaction`.
:param connection: The :class:`_engine.Connection` object
which will be used for SQL statements.
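        For example, a sketch that emits a transaction-scoped command on the
        new connection; ``Session`` is assumed to be a :class:`.sessionmaker`
        and the statement shown is PostgreSQL-specific and purely
        illustrative::
            from sqlalchemy import event
            @event.listens_for(Session, "after_begin")
            def receive_after_begin(session, transaction, connection):
                # the Connection is ready to emit SQL for this transaction
                connection.execute("SET LOCAL statement_timeout = 5000")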
.. seealso::
:meth:`~.SessionEvents.before_commit`
:meth:`~.SessionEvents.after_commit`
:meth:`~.SessionEvents.after_transaction_create`
:meth:`~.SessionEvents.after_transaction_end`
"""
@_lifecycle_event
def before_attach(self, session, instance):
"""Execute before an instance is attached to a session.
This is called before an add, delete or merge causes
the object to be part of the session.
.. seealso::
:meth:`~.SessionEvents.after_attach`
:ref:`session_lifecycle_events`
"""
@_lifecycle_event
def after_attach(self, session, instance):
"""Execute after an instance is attached to a session.
This is called after an add, delete or merge.
.. note::
As of 0.8, this event fires off *after* the item
has been fully associated with the session, which is
different than previous releases. For event
handlers that require the object not yet
be part of session state (such as handlers which
may autoflush while the target object is not
yet complete) consider the
new :meth:`.before_attach` event.
.. seealso::
:meth:`~.SessionEvents.before_attach`
:ref:`session_lifecycle_events`
"""
@event._legacy_signature(
"0.9",
["session", "query", "query_context", "result"],
lambda update_context: (
update_context.session,
update_context.query,
update_context.context,
update_context.result,
),
)
def after_bulk_update(self, update_context):
"""Execute after a bulk update operation to the session.
This is called as a result of the :meth:`_query.Query.update` method.
:param update_context: an "update context" object which contains
details about the update, including these attributes:
* ``session`` - the :class:`.Session` involved
         * ``query`` - the :class:`_query.Query`
object that this update operation
was called upon.
* ``values`` The "values" dictionary that was passed to
:meth:`_query.Query.update`.
* ``context`` The :class:`.QueryContext` object, corresponding
to the invocation of an ORM query.
* ``result`` the :class:`_engine.ResultProxy`
returned as a result of the
bulk UPDATE operation.
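        For example, a sketch that logs the number of rows matched, assuming
        ``Session`` is a :class:`.sessionmaker`::
            from sqlalchemy import event
            @event.listens_for(Session, "after_bulk_update")
            def receive_after_bulk_update(update_context):
                print(
                    "bulk UPDATE matched %d rows"
                    % update_context.result.rowcount
                )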
.. seealso::
:meth:`.QueryEvents.before_compile_update`
:meth:`.SessionEvents.after_bulk_delete`
"""
@event._legacy_signature(
"0.9",
["session", "query", "query_context", "result"],
lambda delete_context: (
delete_context.session,
delete_context.query,
delete_context.context,
delete_context.result,
),
)
def after_bulk_delete(self, delete_context):
"""Execute after a bulk delete operation to the session.
This is called as a result of the :meth:`_query.Query.delete` method.
:param delete_context: a "delete context" object which contains
         details about the delete, including these attributes:
         * ``session`` - the :class:`.Session` involved
         * ``query`` - the :class:`_query.Query`
           object that this delete operation
was called upon.
* ``context`` The :class:`.QueryContext` object, corresponding
to the invocation of an ORM query.
* ``result`` the :class:`_engine.ResultProxy`
returned as a result of the
bulk DELETE operation.
.. seealso::
:meth:`.QueryEvents.before_compile_delete`
:meth:`.SessionEvents.after_bulk_update`
"""
@_lifecycle_event
def transient_to_pending(self, session, instance):
"""Intercept the "transient to pending" transition for a specific object.
This event is a specialization of the
:meth:`.SessionEvents.after_attach` event which is only invoked
for this specific transition. It is invoked typically during the
:meth:`.Session.add` call.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
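        For example, a minimal sketch, assuming ``Session`` is a
        :class:`.sessionmaker`::
            from sqlalchemy import event
            @event.listens_for(Session, "transient_to_pending")
            def receive_transient_to_pending(session, instance):
                print("object %s is now pending in %s" % (instance, session))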
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
@_lifecycle_event
def pending_to_transient(self, session, instance):
"""Intercept the "pending to transient" transition for a specific object.
        This less common transition occurs when a pending object that has
not been flushed is evicted from the session; this can occur
when the :meth:`.Session.rollback` method rolls back the transaction,
or when the :meth:`.Session.expunge` method is used.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
@_lifecycle_event
def persistent_to_transient(self, session, instance):
"""Intercept the "persistent to transient" transition for a specific object.
        This less common transition occurs when a pending object that has
        been flushed is evicted from the session; this can occur
when the :meth:`.Session.rollback` method rolls back the transaction.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
@_lifecycle_event
def pending_to_persistent(self, session, instance):
"""Intercept the "pending to persistent"" transition for a specific object.
This event is invoked within the flush process, and is
similar to scanning the :attr:`.Session.new` collection within
the :meth:`.SessionEvents.after_flush` event. However, in this
case the object has already been moved to the persistent state
when the event is called.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
@_lifecycle_event
def detached_to_persistent(self, session, instance):
"""Intercept the "detached to persistent" transition for a specific object.
This event is a specialization of the
:meth:`.SessionEvents.after_attach` event which is only invoked
for this specific transition. It is invoked typically during the
:meth:`.Session.add` call, as well as during the
:meth:`.Session.delete` call if the object was not previously
associated with the
:class:`.Session` (note that an object marked as "deleted" remains
in the "persistent" state until the flush proceeds).
.. note::
If the object becomes persistent as part of a call to
:meth:`.Session.delete`, the object is **not** yet marked as
deleted when this event is called. To detect deleted objects,
check the ``deleted`` flag sent to the
        :meth:`.SessionEvents.persistent_to_detached` event after the
flush proceeds, or check the :attr:`.Session.deleted` collection
within the :meth:`.SessionEvents.before_flush` event if deleted
objects need to be intercepted before the flush.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
@_lifecycle_event
def loaded_as_persistent(self, session, instance):
"""Intercept the "loaded as persistent" transition for a specific object.
This event is invoked within the ORM loading process, and is invoked
very similarly to the :meth:`.InstanceEvents.load` event. However,
the event here is linkable to a :class:`.Session` class or instance,
rather than to a mapper or class hierarchy, and integrates
with the other session lifecycle events smoothly. The object
is guaranteed to be present in the session's identity map when
this event is called.
.. note:: This event is invoked within the loader process before
eager loaders may have been completed, and the object's state may
not be complete. Additionally, invoking row-level refresh
operations on the object will place the object into a new loader
context, interfering with the existing load context. See the note
on :meth:`.InstanceEvents.load` for background on making use of the
:paramref:`.SessionEvents.restore_load_context` parameter, which
works in the same manner as that of
:paramref:`.InstanceEvents.restore_load_context`, in order to
resolve this scenario.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
@_lifecycle_event
def persistent_to_deleted(self, session, instance):
"""Intercept the "persistent to deleted" transition for a specific object.
This event is invoked when a persistent object's identity
is deleted from the database within a flush, however the object
still remains associated with the :class:`.Session` until the
transaction completes.
If the transaction is rolled back, the object moves again
to the persistent state, and the
:meth:`.SessionEvents.deleted_to_persistent` event is called.
If the transaction is committed, the object becomes detached,
which will emit the :meth:`.SessionEvents.deleted_to_detached`
event.
Note that while the :meth:`.Session.delete` method is the primary
public interface to mark an object as deleted, many objects
get deleted due to cascade rules, which are not always determined
until flush time. Therefore, there's no way to catch
every object that will be deleted until the flush has proceeded.
        The :meth:`.SessionEvents.persistent_to_deleted` event is therefore
invoked at the end of a flush.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
@_lifecycle_event
def deleted_to_persistent(self, session, instance):
"""Intercept the "deleted to persistent" transition for a specific object.
This transition occurs only when an object that's been deleted
successfully in a flush is restored due to a call to
:meth:`.Session.rollback`. The event is not called under
any other circumstances.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
@_lifecycle_event
def deleted_to_detached(self, session, instance):
"""Intercept the "deleted to detached" transition for a specific object.
This event is invoked when a deleted object is evicted
from the session. The typical case when this occurs is when
the transaction for a :class:`.Session` in which the object
was deleted is committed; the object moves from the deleted
state to the detached state.
It is also invoked for objects that were deleted in a flush
when the :meth:`.Session.expunge_all` or :meth:`.Session.close`
        methods are called, as well as if the object is individually
expunged from its deleted state via :meth:`.Session.expunge`.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
@_lifecycle_event
def persistent_to_detached(self, session, instance):
"""Intercept the "persistent to detached" transition for a specific object.
This event is invoked when a persistent object is evicted
from the session. There are many conditions that cause this
to happen, including:
* using a method such as :meth:`.Session.expunge`
or :meth:`.Session.close`
* Calling the :meth:`.Session.rollback` method, when the object
was part of an INSERT statement for that session's transaction
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
:param deleted: boolean. If True, indicates this object moved
to the detached state because it was marked as deleted and flushed.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
class AttributeEvents(event.Events):
r"""Define events for object attributes.
These are typically defined on the class-bound descriptor for the
target class.
e.g.::
from sqlalchemy import event
@event.listens_for(MyClass.collection, 'append', propagate=True)
def my_append_listener(target, value, initiator):
print("received append event for target: %s" % target)
Listeners have the option to return a possibly modified version of the
value, when the :paramref:`.AttributeEvents.retval` flag is passed to
:func:`.event.listen` or :func:`.event.listens_for`::
def validate_phone(target, value, oldvalue, initiator):
"Strip non-numeric characters from a phone number"
return re.sub(r'\D', '', value)
# setup listener on UserContact.phone attribute, instructing
# it to use the return value
listen(UserContact.phone, 'set', validate_phone, retval=True)
A validation function like the above can also raise an exception
such as :exc:`ValueError` to halt the operation.
The :paramref:`.AttributeEvents.propagate` flag is also important when
applying listeners to mapped classes that also have mapped subclasses,
as when using mapper inheritance patterns::
@event.listens_for(MySuperClass.attr, 'set', propagate=True)
def receive_set(target, value, initiator):
print("value set: %s" % target)
The full list of modifiers available to the :func:`.event.listen`
and :func:`.event.listens_for` functions are below.
:param active_history=False: When True, indicates that the
"set" event would like to receive the "old" value being
replaced unconditionally, even if this requires firing off
database loads. Note that ``active_history`` can also be
set directly via :func:`.column_property` and
:func:`_orm.relationship`.
:param propagate=False: When True, the listener function will
be established not just for the class attribute given, but
for attributes of the same name on all current subclasses
of that class, as well as all future subclasses of that
class, using an additional listener that listens for
instrumentation events.
:param raw=False: When True, the "target" argument to the
event will be the :class:`.InstanceState` management
object, rather than the mapped instance itself.
:param retval=False: when True, the user-defined event
listening must return the "value" argument from the
function. This gives the listening function the opportunity
to change the value that is ultimately used for a "set"
or "append" event.
"""
_target_class_doc = "SomeClass.some_attribute"
_dispatch_target = QueryableAttribute
@staticmethod
def _set_dispatch(cls, dispatch_cls):
dispatch = event.Events._set_dispatch(cls, dispatch_cls)
dispatch_cls._active_history = False
return dispatch
@classmethod
def _accept_with(cls, target):
# TODO: coverage
if isinstance(target, interfaces.MapperProperty):
return getattr(target.parent.class_, target.key)
else:
return target
@classmethod
def _listen(
cls,
event_key,
active_history=False,
raw=False,
retval=False,
propagate=False,
):
target, fn = event_key.dispatch_target, event_key._listen_fn
if active_history:
target.dispatch._active_history = True
if not raw or not retval:
def wrap(target, *arg):
if not raw:
target = target.obj()
if not retval:
if arg:
value = arg[0]
else:
value = None
fn(target, *arg)
return value
else:
return fn(target, *arg)
event_key = event_key.with_wrapper(wrap)
event_key.base_listen(propagate=propagate)
if propagate:
manager = instrumentation.manager_of_class(target.class_)
for mgr in manager.subclass_managers(True):
event_key.with_dispatch_target(mgr[target.key]).base_listen(
propagate=True
)
if active_history:
mgr[target.key].dispatch._active_history = True
def append(self, target, value, initiator):
"""Receive a collection append event.
The append event is invoked for each element as it is appended
to the collection. This occurs for single-item appends as well
as for a "bulk replace" operation.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being appended. If this listener
is registered with ``retval=True``, the listener
function must return this value, or a new value which
replaces it.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event. May be modified
from its original value by backref handlers in order to control
chained event propagation, as well as be inspected for information
about the source of the event.
:return: if the event was registered with ``retval=True``,
the given value, or a new effective value, should be returned.
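        For example, a sketch that coerces incoming values, where
        ``SomeClass.collection`` and ``coerce_value()`` are hypothetical::
            from sqlalchemy import event
            @event.listens_for(SomeClass.collection, "append", retval=True)
            def receive_append(target, value, initiator):
                # the returned value replaces the one being appended
                return coerce_value(value)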
.. seealso::
:class:`.AttributeEvents` - background on listener options such
as propagation to subclasses.
:meth:`.AttributeEvents.bulk_replace`
"""
def bulk_replace(self, target, values, initiator):
"""Receive a collection 'bulk replace' event.
This event is invoked for a sequence of values as they are incoming
to a bulk collection set operation, which can be
modified in place before the values are treated as ORM objects.
This is an "early hook" that runs before the bulk replace routine
attempts to reconcile which objects are already present in the
collection and which are being removed by the net replace operation.
It is typical that this method be combined with use of the
:meth:`.AttributeEvents.append` event. When using both of these
events, note that a bulk replace operation will invoke
the :meth:`.AttributeEvents.append` event for all new items,
even after :meth:`.AttributeEvents.bulk_replace` has been invoked
for the collection as a whole. In order to determine if an
:meth:`.AttributeEvents.append` event is part of a bulk replace,
use the symbol :attr:`~.attributes.OP_BULK_REPLACE` to test the
incoming initiator::
from sqlalchemy.orm.attributes import OP_BULK_REPLACE
@event.listens_for(SomeObject.collection, "bulk_replace")
def process_collection(target, values, initiator):
values[:] = [_make_value(value) for value in values]
@event.listens_for(SomeObject.collection, "append", retval=True)
def process_collection(target, value, initiator):
# make sure bulk_replace didn't already do it
if initiator is None or initiator.op is not OP_BULK_REPLACE:
return _make_value(value)
else:
return value
.. versionadded:: 1.2
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
        :param values: a sequence (e.g. a list) of the values being set. The
handler can modify this list in place.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event.
.. seealso::
:class:`.AttributeEvents` - background on listener options such
as propagation to subclasses.
"""
def remove(self, target, value, initiator):
"""Receive a collection remove event.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being removed.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event. May be modified
from its original value by backref handlers in order to control
chained event propagation.
.. versionchanged:: 0.9.0 the ``initiator`` argument is now
passed as a :class:`.attributes.Event` object, and may be
modified by backref handlers within a chain of backref-linked
events.
:return: No return value is defined for this event.
.. seealso::
:class:`.AttributeEvents` - background on listener options such
as propagation to subclasses.
"""
def set(self, target, value, oldvalue, initiator):
"""Receive a scalar set event.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being set. If this listener
is registered with ``retval=True``, the listener
function must return this value, or a new value which
replaces it.
:param oldvalue: the previous value being replaced. This
may also be the symbol ``NEVER_SET`` or ``NO_VALUE``.
If the listener is registered with ``active_history=True``,
the previous value of the attribute will be loaded from
the database if the existing value is currently unloaded
or expired.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event. May be modified
from its original value by backref handlers in order to control
chained event propagation.
.. versionchanged:: 0.9.0 the ``initiator`` argument is now
passed as a :class:`.attributes.Event` object, and may be
modified by backref handlers within a chain of backref-linked
events.
:return: if the event was registered with ``retval=True``,
the given value, or a new effective value, should be returned.
.. seealso::
:class:`.AttributeEvents` - background on listener options such
as propagation to subclasses.
"""
def init_scalar(self, target, value, dict_):
r"""Receive a scalar "init" event.
This event is invoked when an uninitialized, unpersisted scalar
attribute is accessed, e.g. read::
x = my_object.some_attribute
The ORM's default behavior when this occurs for an un-initialized
attribute is to return the value ``None``; note this differs from
Python's usual behavior of raising ``AttributeError``. The
event here can be used to customize what value is actually returned,
with the assumption that the event listener would be mirroring
a default generator that is configured on the Core
:class:`_schema.Column`
object as well.
Since a default generator on a :class:`_schema.Column`
might also produce
a changing value such as a timestamp, the
:meth:`.AttributeEvents.init_scalar`
event handler can also be used to **set** the newly returned value, so
that a Core-level default generation function effectively fires off
only once, but at the moment the attribute is accessed on the
non-persisted object. Normally, no change to the object's state
is made when an uninitialized attribute is accessed (much older
SQLAlchemy versions did in fact change the object's state).
If a default generator on a column returned a particular constant,
a handler might be used as follows::
SOME_CONSTANT = 3.1415926
class MyClass(Base):
# ...
some_attribute = Column(Numeric, default=SOME_CONSTANT)
@event.listens_for(
MyClass.some_attribute, "init_scalar",
retval=True, propagate=True)
            def _init_some_attribute(target, value, dict_):
dict_['some_attribute'] = SOME_CONSTANT
return SOME_CONSTANT
Above, we initialize the attribute ``MyClass.some_attribute`` to the
value of ``SOME_CONSTANT``. The above code includes the following
features:
* By setting the value ``SOME_CONSTANT`` in the given ``dict_``,
we indicate that this value is to be persisted to the database.
This supersedes the use of ``SOME_CONSTANT`` in the default generator
for the :class:`_schema.Column`. The ``active_column_defaults.py``
example given at :ref:`examples_instrumentation` illustrates using
the same approach for a changing default, e.g. a timestamp
generator. In this particular example, it is not strictly
necessary to do this since ``SOME_CONSTANT`` would be part of the
INSERT statement in either case.
* By establishing the ``retval=True`` flag, the value we return
from the function will be returned by the attribute getter.
Without this flag, the event is assumed to be a passive observer
and the return value of our function is ignored.
* The ``propagate=True`` flag is significant if the mapped class
includes inheriting subclasses, which would also make use of this
event listener. Without this flag, an inheriting subclass will
not use our event handler.
In the above example, the attribute set event
:meth:`.AttributeEvents.set` as well as the related validation feature
provided by :obj:`_orm.validates` is **not** invoked when we apply our
value to the given ``dict_``. To have these events to invoke in
response to our newly generated value, apply the value to the given
object as a normal attribute set operation::
SOME_CONSTANT = 3.1415926
@event.listens_for(
MyClass.some_attribute, "init_scalar",
retval=True, propagate=True)
            def _init_some_attribute(target, value, dict_):
# will also fire off attribute set events
target.some_attribute = SOME_CONSTANT
return SOME_CONSTANT
When multiple listeners are set up, the generation of the value
is "chained" from one listener to the next by passing the value
returned by the previous listener that specifies ``retval=True``
as the ``value`` argument of the next listener.
.. versionadded:: 1.1
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value that is to be returned before this event
listener were invoked. This value begins as the value ``None``,
however will be the return value of the previous event handler
function if multiple listeners are present.
:param dict\_: the attribute dictionary of this mapped object.
This is normally the ``__dict__`` of the object, but in all cases
represents the destination that the attribute system uses to get
at the actual value of this attribute. Placing the value in this
dictionary has the effect that the value will be used in the
INSERT statement generated by the unit of work.
.. seealso::
:class:`.AttributeEvents` - background on listener options such
as propagation to subclasses.
:ref:`examples_instrumentation` - see the
``active_column_defaults.py`` example.
"""
def init_collection(self, target, collection, collection_adapter):
"""Receive a 'collection init' event.
This event is triggered for a collection-based attribute, when
the initial "empty collection" is first generated for a blank
attribute, as well as for when the collection is replaced with
a new one, such as via a set event.
E.g., given that ``User.addresses`` is a relationship-based
collection, the event is triggered here::
u1 = User()
u1.addresses.append(a1) # <- new collection
and also during replace operations::
u1.addresses = [a2, a3] # <- new collection
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param collection: the new collection. This will always be generated
from what was specified as
:paramref:`_orm.relationship.collection_class`, and will always
be empty.
:param collection_adapter: the :class:`.CollectionAdapter` that will
mediate internal access to the collection.
.. versionadded:: 1.0.0 the :meth:`.AttributeEvents.init_collection`
and :meth:`.AttributeEvents.dispose_collection` events supersede
the :class:`.orm.collection.linker` hook.
.. seealso::
:class:`.AttributeEvents` - background on listener options such
as propagation to subclasses.
"""
def dispose_collection(self, target, collection, collection_adapter):
"""Receive a 'collection dispose' event.
This event is triggered for a collection-based attribute when
a collection is replaced, that is::
u1.addresses.append(a1)
u1.addresses = [a2, a3] # <- old collection is disposed
The old collection received will contain its previous contents.
.. versionchanged:: 1.2 The collection passed to
:meth:`.AttributeEvents.dispose_collection` will now have its
contents before the dispose intact; previously, the collection
would be empty.
.. versionadded:: 1.0.0 the :meth:`.AttributeEvents.init_collection`
and :meth:`.AttributeEvents.dispose_collection` events supersede
the :class:`.collection.linker` hook.
.. seealso::
:class:`.AttributeEvents` - background on listener options such
as propagation to subclasses.
"""
def modified(self, target, initiator):
"""Receive a 'modified' event.
This event is triggered when the :func:`.attributes.flag_modified`
function is used to trigger a modify event on an attribute without
any specific value being set.
.. versionadded:: 1.2
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event.
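        For example, a sketch where ``SomeClass.data`` is a hypothetical
        mapped attribute and ``some_object`` an instance of it::
            from sqlalchemy import event
            from sqlalchemy.orm.attributes import flag_modified
            @event.listens_for(SomeClass.data, "modified")
            def receive_modified(target, initiator):
                print("'data' flagged as modified on %s" % target)
            # in-place mutation of a plain dict value is not detected by
            # itself; flagging it explicitly also invokes the listener above
            some_object.data["x"] = 2
            flag_modified(some_object, "data")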
.. seealso::
:class:`.AttributeEvents` - background on listener options such
as propagation to subclasses.
"""
class QueryEvents(event.Events):
"""Represent events within the construction of a :class:`_query.Query`
object.
The events here are intended to be used with an as-yet-unreleased
inspection system for :class:`_query.Query`. Some very basic operations
are possible now, however the inspection system is intended to allow
complex query manipulations to be automated.
.. versionadded:: 1.0.0
"""
_target_class_doc = "SomeQuery"
_dispatch_target = Query
def before_compile(self, query):
"""Receive the :class:`_query.Query`
object before it is composed into a
core :class:`_expression.Select` object.
This event is intended to allow changes to the query given::
@event.listens_for(Query, "before_compile", retval=True)
def no_deleted(query):
for desc in query.column_descriptions:
if desc['type'] is User:
entity = desc['entity']
query = query.filter(entity.deleted == False)
return query
The event should normally be listened with the ``retval=True``
parameter set, so that the modified query may be returned.
The :meth:`.QueryEvents.before_compile` event by default
will disallow "baked" queries from caching a query, if the event
hook returns a new :class:`_query.Query` object.
This affects both direct
use of the baked query extension as well as its operation within
lazy loaders and eager loaders for relationships. In order to
re-establish the query being cached, apply the event adding the
``bake_ok`` flag::
@event.listens_for(
Query, "before_compile", retval=True, bake_ok=True)
def my_event(query):
for desc in query.column_descriptions:
if desc['type'] is User:
entity = desc['entity']
query = query.filter(entity.deleted == False)
return query
When ``bake_ok`` is set to True, the event hook will only be invoked
once, and not called for subsequent invocations of a particular query
that is being cached.
.. versionadded:: 1.3.11 - added the "bake_ok" flag to the
:meth:`.QueryEvents.before_compile` event and disallowed caching via
the "baked" extension from occurring for event handlers that
return a new :class:`_query.Query` object if this flag is not set.
.. seealso::
:meth:`.QueryEvents.before_compile_update`
:meth:`.QueryEvents.before_compile_delete`
:ref:`baked_with_before_compile`
"""
def before_compile_update(self, query, update_context):
"""Allow modifications to the :class:`_query.Query` object within
:meth:`_query.Query.update`.
Like the :meth:`.QueryEvents.before_compile` event, if the event
is to be used to alter the :class:`_query.Query` object, it should
be configured with ``retval=True``, and the modified
:class:`_query.Query` object returned, as in ::
@event.listens_for(Query, "before_compile_update", retval=True)
def no_deleted(query, update_context):
for desc in query.column_descriptions:
if desc['type'] is User:
entity = desc['entity']
query = query.filter(entity.deleted == False)
update_context.values['timestamp'] = datetime.utcnow()
return query
The ``.values`` dictionary of the "update context" object can also
be modified in place as illustrated above.
:param query: a :class:`_query.Query` instance; this is also
the ``.query`` attribute of the given "update context"
object.
:param update_context: an "update context" object which is
the same kind of object as described in
:paramref:`.QueryEvents.after_bulk_update.update_context`.
The object has a ``.values`` attribute in an UPDATE context which is
the dictionary of parameters passed to :meth:`_query.Query.update`.
This
dictionary can be modified to alter the VALUES clause of the
resulting UPDATE statement.
.. versionadded:: 1.2.17
.. seealso::
:meth:`.QueryEvents.before_compile`
:meth:`.QueryEvents.before_compile_delete`
"""
def before_compile_delete(self, query, delete_context):
"""Allow modifications to the :class:`_query.Query` object within
:meth:`_query.Query.delete`.
Like the :meth:`.QueryEvents.before_compile` event, this event
should be configured with ``retval=True``, and the modified
:class:`_query.Query` object returned, as in ::
@event.listens_for(Query, "before_compile_delete", retval=True)
def no_deleted(query, delete_context):
for desc in query.column_descriptions:
if desc['type'] is User:
entity = desc['entity']
query = query.filter(entity.deleted == False)
return query
:param query: a :class:`_query.Query` instance; this is also
the ``.query`` attribute of the given "delete context"
object.
:param delete_context: a "delete context" object which is
the same kind of object as described in
:paramref:`.QueryEvents.after_bulk_delete.delete_context`.
.. versionadded:: 1.2.17
.. seealso::
:meth:`.QueryEvents.before_compile`
:meth:`.QueryEvents.before_compile_update`
"""
@classmethod
def _listen(cls, event_key, retval=False, bake_ok=False, **kw):
fn = event_key._listen_fn
if not retval:
def wrap(*arg, **kw):
if not retval:
query = arg[0]
fn(*arg, **kw)
return query
else:
return fn(*arg, **kw)
event_key = event_key.with_wrapper(wrap)
else:
# don't assume we can apply an attribute to the callable
def wrap(*arg, **kw):
return fn(*arg, **kw)
event_key = event_key.with_wrapper(wrap)
wrap._bake_ok = bake_ok
event_key.base_listen(**kw)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/mapper.py
|
# orm/mapper.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Logic to map Python classes to and from selectables.
Defines the :class:`~sqlalchemy.orm.mapper.Mapper` class, the central
configurational unit which associates a class with a database table.
This is a semi-private module; the main configurational API of the ORM is
available in :mod:`sqlalchemy.orm`.
"""
from __future__ import absolute_import
from collections import deque
from itertools import chain
import sys
import types
import weakref
from . import attributes
from . import exc as orm_exc
from . import instrumentation
from . import loading
from . import properties
from . import util as orm_util
from .base import _class_to_mapper
from .base import _INSTRUMENTOR
from .base import _state_mapper
from .base import class_mapper
from .base import state_str
from .interfaces import _MappedAttribute
from .interfaces import EXT_SKIP
from .interfaces import InspectionAttr
from .interfaces import MapperProperty
from .path_registry import PathRegistry
from .. import event
from .. import exc as sa_exc
from .. import inspection
from .. import log
from .. import schema
from .. import sql
from .. import util
from ..sql import expression
from ..sql import operators
from ..sql import util as sql_util
from ..sql import visitors
_mapper_registry = weakref.WeakKeyDictionary()
_already_compiling = False
_memoized_configured_property = util.group_expirable_memoized_property()
# a constant returned by _get_attr_by_column to indicate
# this mapper is not handling an attribute for a particular
# column
NO_ATTRIBUTE = util.symbol("NO_ATTRIBUTE")
# lock used to synchronize the "mapper configure" step
_CONFIGURE_MUTEX = util.threading.RLock()
@inspection._self_inspects
@log.class_logger
class Mapper(InspectionAttr):
"""Define the correlation of class attributes to database table
columns.
The :class:`_orm.Mapper` object is instantiated using the
:func:`~sqlalchemy.orm.mapper` function. For information
about instantiating new :class:`_orm.Mapper` objects, see
that function's documentation.
When :func:`.mapper` is used
explicitly to link a user defined class with table
metadata, this is referred to as *classical mapping*.
Modern SQLAlchemy usage tends to favor the
:mod:`sqlalchemy.ext.declarative` extension for class
configuration, which
makes usage of :func:`.mapper` behind the scenes.
Given a particular class known to be mapped by the ORM,
the :class:`_orm.Mapper` which maintains it can be acquired
using the :func:`_sa.inspect` function::
from sqlalchemy import inspect
mapper = inspect(MyClass)
A class which was mapped by the :mod:`sqlalchemy.ext.declarative`
extension will also have its mapper available via the ``__mapper__``
attribute.
"""
_new_mappers = False
_dispose_called = False
@util.deprecated_params(
extension=(
"0.7",
":class:`.MapperExtension` is deprecated in favor of the "
":class:`.MapperEvents` listener interface. The "
":paramref:`.mapper.extension` parameter will be "
"removed in a future release.",
),
order_by=(
"1.1",
"The :paramref:`.mapper.order_by` parameter "
"is deprecated, and will be removed in a future release. "
"Use :meth:`_query.Query.order_by` "
"to determine the ordering of a "
"result set.",
),
non_primary=(
"1.3",
"The :paramref:`.mapper.non_primary` parameter is deprecated, "
"and will be removed in a future release. The functionality "
"of non primary mappers is now better suited using the "
":class:`.AliasedClass` construct, which can also be used "
"as the target of a :func:`_orm.relationship` in 1.3.",
),
)
def __init__(
self,
class_,
local_table=None,
properties=None,
primary_key=None,
non_primary=False,
inherits=None,
inherit_condition=None,
inherit_foreign_keys=None,
extension=None,
order_by=False,
always_refresh=False,
version_id_col=None,
version_id_generator=None,
polymorphic_on=None,
_polymorphic_map=None,
polymorphic_identity=None,
concrete=False,
with_polymorphic=None,
polymorphic_load=None,
allow_partial_pks=True,
batch=True,
column_prefix=None,
include_properties=None,
exclude_properties=None,
passive_updates=True,
passive_deletes=False,
confirm_deleted_rows=True,
eager_defaults=False,
legacy_is_orphan=False,
_compiled_cache_size=100,
):
r"""Return a new :class:`_orm.Mapper` object.
This function is typically used behind the scenes
via the Declarative extension. When using Declarative,
many of the usual :func:`.mapper` arguments are handled
by the Declarative extension itself, including ``class_``,
``local_table``, ``properties``, and ``inherits``.
Other options are passed to :func:`.mapper` using
the ``__mapper_args__`` class variable::
class MyClass(Base):
__tablename__ = 'my_table'
id = Column(Integer, primary_key=True)
type = Column(String(50))
alt = Column("some_alt", Integer)
__mapper_args__ = {
'polymorphic_on' : type
}
Explicit use of :func:`.mapper`
is often referred to as *classical mapping*. The above
declarative example is equivalent in classical form to::
my_table = Table("my_table", metadata,
Column('id', Integer, primary_key=True),
Column('type', String(50)),
Column("some_alt", Integer)
)
class MyClass(object):
pass
mapper(MyClass, my_table,
polymorphic_on=my_table.c.type,
properties={
'alt':my_table.c.some_alt
})
.. seealso::
:ref:`classical_mapping` - discussion of direct usage of
:func:`.mapper`
:param class\_: The class to be mapped. When using Declarative,
this argument is automatically passed as the declared class
itself.
:param local_table: The :class:`_schema.Table` or other selectable
to which the class is mapped. May be ``None`` if
this mapper inherits from another mapper using single-table
inheritance. When using Declarative, this argument is
automatically passed by the extension, based on what
is configured via the ``__table__`` argument or via the
:class:`_schema.Table`
produced as a result of the ``__tablename__``
and :class:`_schema.Column` arguments present.
:param always_refresh: If True, all query operations for this mapped
class will overwrite all data within object instances that already
exist within the session, erasing any in-memory changes with
whatever information was loaded from the database. Usage of this
flag is highly discouraged; as an alternative, see the method
:meth:`_query.Query.populate_existing`.
:param allow_partial_pks: Defaults to True. Indicates that a
composite primary key with some NULL values should be considered as
possibly existing within the database. This affects whether a
mapper will assign an incoming row to an existing identity, as well
as if :meth:`.Session.merge` will check the database first for a
particular primary key value. A "partial primary key" can occur if
one has mapped to an OUTER JOIN, for example.
:param batch: Defaults to ``True``, indicating that save operations
of multiple entities can be batched together for efficiency.
Setting to False indicates
that an instance will be fully saved before saving the next
instance. This is used in the extremely rare case that a
:class:`.MapperEvents` listener requires being called
in between individual row persistence operations.
:param column_prefix: A string which will be prepended
to the mapped attribute name when :class:`_schema.Column`
objects are automatically assigned as attributes to the
mapped class. Does not affect explicitly specified
column-based properties.
See the section :ref:`column_prefix` for an example.
:param concrete: If True, indicates this mapper should use concrete
table inheritance with its parent mapper.
See the section :ref:`concrete_inheritance` for an example.
:param confirm_deleted_rows: defaults to True; when a DELETE occurs
          of one or more rows based on specific primary keys, a warning is
emitted when the number of rows matched does not equal the number
of rows expected. This parameter may be set to False to handle the
case where database ON DELETE CASCADE rules may be deleting some of
those rows automatically. The warning may be changed to an
exception in a future release.
.. versionadded:: 0.9.4 - added
:paramref:`.mapper.confirm_deleted_rows` as well as conditional
matched row checking on delete.
:param eager_defaults: if True, the ORM will immediately fetch the
value of server-generated default values after an INSERT or UPDATE,
rather than leaving them as expired to be fetched on next access.
This can be used for event schemes where the server-generated values
are needed immediately before the flush completes. By default,
this scheme will emit an individual ``SELECT`` statement per row
          inserted or updated; note that this can add significant performance
overhead. However, if the
target database supports :term:`RETURNING`, the default values will
be returned inline with the INSERT or UPDATE statement, which can
greatly enhance performance for an application that needs frequent
access to just-generated server defaults.
.. seealso::
:ref:`orm_server_defaults`
.. versionchanged:: 0.9.0 The ``eager_defaults`` option can now
make use of :term:`RETURNING` for backends which support it.
:param exclude_properties: A list or set of string column names to
be excluded from mapping.
See :ref:`include_exclude_cols` for an example.
:param extension: A :class:`.MapperExtension` instance or
list of :class:`.MapperExtension` instances which will be applied
to all operations by this :class:`_orm.Mapper`.
:param include_properties: An inclusive list or set of string column
names to map.
See :ref:`include_exclude_cols` for an example.
:param inherits: A mapped class or the corresponding
:class:`_orm.Mapper`
of one indicating a superclass to which this :class:`_orm.Mapper`
should *inherit* from. The mapped class here must be a subclass
of the other mapper's class. When using Declarative, this argument
is passed automatically as a result of the natural class
hierarchy of the declared classes.
.. seealso::
:ref:`inheritance_toplevel`
:param inherit_condition: For joined table inheritance, a SQL
expression which will
define how the two tables are joined; defaults to a natural join
between the two tables.
:param inherit_foreign_keys: When ``inherit_condition`` is used and
the columns present are missing a :class:`_schema.ForeignKey`
configuration, this parameter can be used to specify which columns
are "foreign". In most cases can be left as ``None``.
:param legacy_is_orphan: Boolean, defaults to ``False``.
When ``True``, specifies that "legacy" orphan consideration
is to be applied to objects mapped by this mapper, which means
that a pending (that is, not persistent) object is auto-expunged
from an owning :class:`.Session` only when it is de-associated
from *all* parents that specify a ``delete-orphan`` cascade towards
this mapper. The new default behavior is that the object is
auto-expunged when it is de-associated with *any* of its parents
that specify ``delete-orphan`` cascade. This behavior is more
consistent with that of a persistent object, and allows behavior to
be consistent in more scenarios independently of whether or not an
orphanable object has been flushed yet or not.
See the change note and example at :ref:`legacy_is_orphan_addition`
for more detail on this change.
:param non_primary: Specify that this :class:`_orm.Mapper`
is in addition
to the "primary" mapper, that is, the one used for persistence.
The :class:`_orm.Mapper` created here may be used for ad-hoc
mapping of the class to an alternate selectable, for loading
only.
:paramref:`_orm.Mapper.non_primary` is not an often used option, but
is useful in some specific :func:`_orm.relationship` cases.
.. seealso::
:ref:`relationship_non_primary_mapper`
:param order_by: A single :class:`_schema.Column` or list of
:class:`_schema.Column`
objects for which selection operations should use as the default
ordering for entities. By default mappers have no pre-defined
ordering.
:param passive_deletes: Indicates DELETE behavior of foreign key
columns when a joined-table inheritance entity is being deleted.
Defaults to ``False`` for a base mapper; for an inheriting mapper,
defaults to ``False`` unless the value is set to ``True``
on the superclass mapper.
When ``True``, it is assumed that ON DELETE CASCADE is configured
on the foreign key relationships that link this mapper's table
to its superclass table, so that when the unit of work attempts
to delete the entity, it need only emit a DELETE statement for the
superclass table, and not this table.
When ``False``, a DELETE statement is emitted for this mapper's
table individually. If the primary key attributes local to this
table are unloaded, then a SELECT must be emitted in order to
validate these attributes; note that the primary key columns
of a joined-table subclass are not part of the "primary key" of
the object as a whole.
Note that a value of ``True`` is **always** forced onto the
subclass mappers; that is, it's not possible for a superclass
to specify passive_deletes without this taking effect for
all subclass mappers.
.. versionadded:: 1.1
.. seealso::
:ref:`passive_deletes` - description of similar feature as
used with :func:`_orm.relationship`
:paramref:`.mapper.passive_updates` - supporting ON UPDATE
CASCADE for joined-table inheritance mappers
:param passive_updates: Indicates UPDATE behavior of foreign key
columns when a primary key column changes on a joined-table
inheritance mapping. Defaults to ``True``.
When True, it is assumed that ON UPDATE CASCADE is configured on
the foreign key in the database, and that the database will handle
propagation of an UPDATE from a source column to dependent columns
on joined-table rows.
When False, it is assumed that the database does not enforce
referential integrity and will not be issuing its own CASCADE
operation for an update. The unit of work process will
emit an UPDATE statement for the dependent columns during a
primary key change.
.. seealso::
:ref:`passive_updates` - description of a similar feature as
used with :func:`_orm.relationship`
:paramref:`.mapper.passive_deletes` - supporting ON DELETE
CASCADE for joined-table inheritance mappers
:param polymorphic_load: Specifies "polymorphic loading" behavior
for a subclass in an inheritance hierarchy (joined and single
table inheritance only). Valid values are:
* "'inline'" - specifies this class should be part of the
"with_polymorphic" mappers, e.g. its columns will be included
in a SELECT query against the base.
* "'selectin'" - specifies that when instances of this class
are loaded, an additional SELECT will be emitted to retrieve
the columns specific to this subclass. The SELECT uses
IN to fetch multiple subclasses at once.
.. versionadded:: 1.2
.. seealso::
:ref:`with_polymorphic_mapper_config`
:ref:`polymorphic_selectin`
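          For example, a Declarative subclass might request the "selectin"
          style as in the following sketch, where the ``Engineer`` /
          ``Employee`` classes are purely illustrative::
              class Engineer(Employee):
                  __tablename__ = 'engineer'
                  id = Column(ForeignKey('employee.id'), primary_key=True)
                  __mapper_args__ = {
                      'polymorphic_identity': 'engineer',
                      'polymorphic_load': 'selectin'
                  }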
:param polymorphic_on: Specifies the column, attribute, or
SQL expression used to determine the target class for an
incoming row, when inheriting classes are present.
This value is commonly a :class:`_schema.Column` object that's
present in the mapped :class:`_schema.Table`::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
__mapper_args__ = {
"polymorphic_on":discriminator,
"polymorphic_identity":"employee"
}
It may also be specified
as a SQL expression, as in this example where we
use the :func:`.case` construct to provide a conditional
approach::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
__mapper_args__ = {
"polymorphic_on":case([
(discriminator == "EN", "engineer"),
(discriminator == "MA", "manager"),
], else_="employee"),
"polymorphic_identity":"employee"
}
It may also refer to any attribute
configured with :func:`.column_property`, or to the
string name of one::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
employee_type = column_property(
case([
(discriminator == "EN", "engineer"),
(discriminator == "MA", "manager"),
], else_="employee")
)
__mapper_args__ = {
"polymorphic_on":employee_type,
"polymorphic_identity":"employee"
}
When setting ``polymorphic_on`` to reference an
attribute or expression that's not present in the
locally mapped :class:`_schema.Table`, yet the value
of the discriminator should be persisted to the database,
the value of the
discriminator is not automatically set on new
instances; this must be handled by the user,
either through manual means or via event listeners.
A typical approach to establishing such a listener
looks like::
from sqlalchemy import event
from sqlalchemy.orm import object_mapper
@event.listens_for(Employee, "init", propagate=True)
def set_identity(instance, *arg, **kw):
mapper = object_mapper(instance)
instance.discriminator = mapper.polymorphic_identity
Where above, we assign the value of ``polymorphic_identity``
for the mapped class to the ``discriminator`` attribute,
thus persisting the value to the ``discriminator`` column
in the database.
.. warning::
Currently, **only one discriminator column may be set**, typically
on the base-most class in the hierarchy. "Cascading" polymorphic
columns are not yet supported.
.. seealso::
:ref:`inheritance_toplevel`
:param polymorphic_identity: Specifies the value which
identifies this particular class as returned by the
column expression referred to by the ``polymorphic_on``
setting. As rows are received, the value corresponding
to the ``polymorphic_on`` column expression is compared
to this value, indicating which subclass should
be used for the newly reconstructed object.
:param properties: A dictionary mapping the string names of object
attributes to :class:`.MapperProperty` instances, which define the
persistence behavior of that attribute. Note that
:class:`_schema.Column`
objects present in
the mapped :class:`_schema.Table` are automatically placed into
``ColumnProperty`` instances upon mapping, unless overridden.
When using Declarative, this argument is passed automatically,
based on all those :class:`.MapperProperty` instances declared
in the declared class body.
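A brief sketch using the classical :func:`.mapper` form (the table,
class and relationship names here are hypothetical)::

    mapper(User, user_table, properties={
        # map a collection to a related class
        "addresses": relationship(Address),
        # map a read-only SQL expression to an attribute
        "fullname": column_property(
            user_table.c.firstname + " " + user_table.c.lastname
        )
    })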
:param primary_key: A list of :class:`_schema.Column`
objects which define
the primary key to be used against this mapper's selectable unit.
This is normally simply the primary key of the ``local_table``, but
can be overridden here.
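For example (a sketch; names are hypothetical), when mapping against
a view or other selectable that has no formal primary key, the
candidate key columns may be supplied directly::

    mapper(UserView, users_view, primary_key=[users_view.c.user_id])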
:param version_id_col: A :class:`_schema.Column`
that will be used to keep a running version id of rows
in the table. This is used to detect concurrent updates or
the presence of stale data in a flush. The methodology is to
detect when an UPDATE statement does not match the last known
version id; in that case, a
:class:`~sqlalchemy.orm.exc.StaleDataError` exception is
thrown.
By default, the column must be of :class:`.Integer` type,
unless ``version_id_generator`` specifies an alternative version
generator.
.. seealso::
:ref:`mapper_version_counter` - discussion of version counting
and rationale.
:param version_id_generator: Define how new version ids should
be generated. Defaults to ``None``, which indicates that
a simple integer counting scheme be employed. To provide a custom
versioning scheme, provide a callable function of the form::
def generate_version(version):
return next_version
Alternatively, server-side versioning functions such as triggers,
or programmatic versioning schemes outside of the version id
generator may be used, by specifying the value ``False``.
Please see :ref:`server_side_version_counter` for a discussion
of important points when using this option.
.. versionadded:: 0.9.0 ``version_id_generator`` supports
server-side version number generation.
.. seealso::
:ref:`custom_version_counter`
:ref:`server_side_version_counter`
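As a sketch of a non-integer scheme (the class and column names are
hypothetical), a string column may be paired with a UUID-style
generator which ignores the previous value::

    import uuid

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        version_uuid = Column(String(32), nullable=False)
        __mapper_args__ = {
            "version_id_col": version_uuid,
            "version_id_generator": lambda version: uuid.uuid4().hex
        }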
:param with_polymorphic: A tuple in the form ``(<classes>,
<selectable>)`` indicating the default style of "polymorphic"
loading, that is, which tables are queried at once. <classes> is
any single or list of mappers and/or classes indicating the
inherited classes that should be loaded at once. The special value
``'*'`` may be used to indicate all descending classes should be
loaded immediately. The second tuple argument <selectable>
indicates a selectable that will be used to query for multiple
classes.
.. seealso::
:ref:`with_polymorphic` - discussion of polymorphic querying
techniques.
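For example (a sketch; the ``Employee`` hierarchy is hypothetical), a
base class may request that all subclass tables be joined in by
default::

    class Employee(Base):
        __tablename__ = 'employee'
        id = Column(Integer, primary_key=True)
        type = Column(String(50))
        __mapper_args__ = {
            "polymorphic_on": type,
            "polymorphic_identity": "employee",
            "with_polymorphic": "*"
        }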
"""
self.class_ = util.assert_arg_type(class_, type, "class_")
self.class_manager = None
self._primary_key_argument = util.to_list(primary_key)
self.non_primary = non_primary
if order_by is not False:
self.order_by = util.to_list(order_by)
else:
self.order_by = order_by
self.always_refresh = always_refresh
if isinstance(version_id_col, MapperProperty):
self.version_id_prop = version_id_col
self.version_id_col = None
else:
self.version_id_col = version_id_col
if version_id_generator is False:
self.version_id_generator = False
elif version_id_generator is None:
self.version_id_generator = lambda x: (x or 0) + 1
else:
self.version_id_generator = version_id_generator
self.concrete = concrete
self.single = False
self.inherits = inherits
self.local_table = local_table
self.inherit_condition = inherit_condition
self.inherit_foreign_keys = inherit_foreign_keys
self._init_properties = properties or {}
self._delete_orphans = []
self.batch = batch
self.eager_defaults = eager_defaults
self.column_prefix = column_prefix
self.polymorphic_on = expression._clause_element_as_expr(
polymorphic_on
)
self._dependency_processors = []
self.validators = util.immutabledict()
self.passive_updates = passive_updates
self.passive_deletes = passive_deletes
self.legacy_is_orphan = legacy_is_orphan
self._clause_adapter = None
self._requires_row_aliasing = False
self._inherits_equated_pairs = None
self._memoized_values = {}
self._compiled_cache_size = _compiled_cache_size
self._reconstructor = None
self._deprecated_extensions = util.to_list(extension or [])
self.allow_partial_pks = allow_partial_pks
if self.inherits and not self.concrete:
self.confirm_deleted_rows = False
else:
self.confirm_deleted_rows = confirm_deleted_rows
if isinstance(self.local_table, expression.SelectBase):
raise sa_exc.InvalidRequestError(
"When mapping against a select() construct, map against "
"an alias() of the construct instead."
"This because several databases don't allow a "
"SELECT from a subquery that does not have an alias."
)
self._set_with_polymorphic(with_polymorphic)
self.polymorphic_load = polymorphic_load
# our 'polymorphic identity', a string name that when located in a
# result set row indicates this Mapper should be used to construct
# the object instance for that row.
self.polymorphic_identity = polymorphic_identity
# a dictionary of 'polymorphic identity' names, associating those
# names with Mappers that will be used to construct object instances
# upon a select operation.
if _polymorphic_map is None:
self.polymorphic_map = {}
else:
self.polymorphic_map = _polymorphic_map
if include_properties is not None:
self.include_properties = util.to_set(include_properties)
else:
self.include_properties = None
if exclude_properties:
self.exclude_properties = util.to_set(exclude_properties)
else:
self.exclude_properties = None
self.configured = False
# prevent this mapper from being constructed
# while a configure_mappers() is occurring (and defer a
# configure_mappers() until construction succeeds)
_CONFIGURE_MUTEX.acquire()
try:
self.dispatch._events._new_mapper_instance(class_, self)
self._configure_inheritance()
self._configure_legacy_instrument_class()
self._configure_class_instrumentation()
self._configure_listeners()
self._configure_properties()
self._configure_polymorphic_setter()
self._configure_pks()
Mapper._new_mappers = True
self._log("constructed")
self._expire_memoizations()
finally:
_CONFIGURE_MUTEX.release()
# major attributes initialized at the class level so that
# they can be Sphinx-documented.
is_mapper = True
"""Part of the inspection API."""
represents_outer_join = False
@property
def mapper(self):
"""Part of the inspection API.
Returns self.
"""
return self
@property
def entity(self):
r"""Part of the inspection API.
Returns self.class\_.
"""
return self.class_
local_table = None
"""The :class:`expression.Selectable` which this :class:`_orm.Mapper`
manages.
Typically is an instance of :class:`_schema.Table` or
:class:`_expression.Alias`.
May also be ``None``.
The "local" table is the
selectable that the :class:`_orm.Mapper` is directly responsible for
managing from an attribute access and flush perspective. For
non-inheriting mappers, the local table is the same as the
"mapped" table. For joined-table inheritance mappers, local_table
will be the particular sub-table of the overall "join" which
this :class:`_orm.Mapper` represents. If this mapper is a
single-table inheriting mapper, local_table will be ``None``.
.. seealso::
:attr:`_orm.Mapper.persist_selectable`.
"""
persist_selectable = None
"""The :class:`expression.Selectable` to which this :class:`_orm.Mapper`
is mapped.
Typically an instance of :class:`_schema.Table`, :class:`_expression.Join`
, or
:class:`_expression.Alias`.
The :attr:`_orm.Mapper.persist_selectable` is separate from
:attr:`_orm.Mapper.selectable` in that the former represents columns
that are mapped on this class or its superclasses, whereas the
latter may be a "polymorphic" selectable that contains additional columns
which are in fact mapped on subclasses only.
"persist selectable" is the "thing the mapper writes to" and
"selectable" is the "thing the mapper selects from".
:attr:`_orm.Mapper.persist_selectable` is also separate from
:attr:`_orm.Mapper.local_table`, which represents the set of columns that
are locally mapped on this class directly.
.. seealso::
:attr:`_orm.Mapper.selectable`.
:attr:`_orm.Mapper.local_table`.
"""
inherits = None
"""References the :class:`_orm.Mapper` which this :class:`_orm.Mapper`
inherits from, if any.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
configured = None
"""Represent ``True`` if this :class:`_orm.Mapper` has been configured.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
.. seealso::
:func:`.configure_mappers`.
"""
concrete = None
"""Represent ``True`` if this :class:`_orm.Mapper` is a concrete
inheritance mapper.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
tables = None
"""An iterable containing the collection of :class:`_schema.Table` objects
which this :class:`_orm.Mapper` is aware of.
If the mapper is mapped to a :class:`_expression.Join`, or an
:class:`_expression.Alias`
representing a :class:`_expression.Select`, the individual
:class:`_schema.Table`
objects that comprise the full construct will be represented here.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
primary_key = None
"""An iterable containing the collection of :class:`_schema.Column`
objects
which comprise the 'primary key' of the mapped table, from the
perspective of this :class:`_orm.Mapper`.
This list is against the selectable in
:attr:`_orm.Mapper.persist_selectable`.
In the case of inheriting mappers, some columns may be managed by a
superclass mapper. For example, in the case of a
:class:`_expression.Join`, the
primary key is determined by all of the primary key columns across all
tables referenced by the :class:`_expression.Join`.
The list is also not necessarily the same as the primary key column
collection associated with the underlying tables; the :class:`_orm.Mapper`
features a ``primary_key`` argument that can override what the
:class:`_orm.Mapper` considers as primary key columns.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
class_ = None
"""The Python class which this :class:`_orm.Mapper` maps.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
class_manager = None
"""The :class:`.ClassManager` which maintains event listeners
and class-bound descriptors for this :class:`_orm.Mapper`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
single = None
"""Represent ``True`` if this :class:`_orm.Mapper` is a single table
inheritance mapper.
:attr:`_orm.Mapper.local_table` will be ``None`` if this flag is set.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
non_primary = None
"""Represent ``True`` if this :class:`_orm.Mapper` is a "non-primary"
mapper, e.g. a mapper that is used only to select rows but not for
persistence management.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_on = None
"""The :class:`_schema.Column` or SQL expression specified as the
``polymorphic_on`` argument
for this :class:`_orm.Mapper`, within an inheritance scenario.
This attribute is normally a :class:`_schema.Column` instance but
may also be an expression, such as one derived from
:func:`.cast`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_map = None
"""A mapping of "polymorphic identity" identifiers mapped to
:class:`_orm.Mapper` instances, within an inheritance scenario.
The identifiers can be of any type which is comparable to the
type of column represented by :attr:`_orm.Mapper.polymorphic_on`.
An inheritance chain of mappers will all reference the same
polymorphic map object. The object is used to correlate incoming
result rows to target mappers.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_identity = None
"""Represent an identifier which is matched against the
:attr:`_orm.Mapper.polymorphic_on` column during result row loading.
Used only with inheritance, this object can be of any type which is
comparable to the type of column represented by
:attr:`_orm.Mapper.polymorphic_on`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
base_mapper = None
"""The base-most :class:`_orm.Mapper` in an inheritance chain.
In a non-inheriting scenario, this attribute will always be this
:class:`_orm.Mapper`. In an inheritance scenario, it references
the :class:`_orm.Mapper` which is parent to all other :class:`_orm.Mapper`
objects in the inheritance chain.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
columns = None
"""A collection of :class:`_schema.Column` or other scalar expression
objects maintained by this :class:`_orm.Mapper`.
The collection behaves the same as that of the ``c`` attribute on
any :class:`_schema.Table` object,
except that only those columns included in
this mapping are present, and are keyed based on the attribute name
defined in the mapping, not necessarily the ``key`` attribute of the
:class:`_schema.Column` itself. Additionally, scalar expressions mapped
by :func:`.column_property` are also present here.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
validators = None
"""An immutable dictionary of attributes which have been decorated
using the :func:`_orm.validates` decorator.
The dictionary contains string attribute names as keys
mapped to the actual validation method.
"""
c = None
"""A synonym for :attr:`_orm.Mapper.columns`."""
@property
@util.deprecated("1.3", "Use .persist_selectable")
def mapped_table(self):
return self.persist_selectable
@util.memoized_property
def _path_registry(self):
return PathRegistry.per_mapper(self)
def _configure_inheritance(self):
"""Configure settings related to inheriting and/or inherited mappers
being present."""
# a set of all mappers which inherit from this one.
self._inheriting_mappers = util.WeakSequence()
if self.inherits:
if isinstance(self.inherits, type):
self.inherits = class_mapper(self.inherits, configure=False)
if not issubclass(self.class_, self.inherits.class_):
raise sa_exc.ArgumentError(
"Class '%s' does not inherit from '%s'"
% (self.class_.__name__, self.inherits.class_.__name__)
)
if self.non_primary != self.inherits.non_primary:
np = not self.non_primary and "primary" or "non-primary"
raise sa_exc.ArgumentError(
"Inheritance of %s mapper for class '%s' is "
"only allowed from a %s mapper"
% (np, self.class_.__name__, np)
)
# inherit_condition is optional.
if self.local_table is None:
self.local_table = self.inherits.local_table
self.persist_selectable = self.inherits.persist_selectable
self.single = True
elif self.local_table is not self.inherits.local_table:
if self.concrete:
self.persist_selectable = self.local_table
for mapper in self.iterate_to_root():
if mapper.polymorphic_on is not None:
mapper._requires_row_aliasing = True
else:
if self.inherit_condition is None:
# figure out inherit condition from our table to the
# immediate table of the inherited mapper, not its
# full table which could pull in other stuff we don't
# want (allows test/inheritance.InheritTest4 to pass)
self.inherit_condition = sql_util.join_condition(
self.inherits.local_table, self.local_table
)
self.persist_selectable = sql.join(
self.inherits.persist_selectable,
self.local_table,
self.inherit_condition,
)
fks = util.to_set(self.inherit_foreign_keys)
self._inherits_equated_pairs = sql_util.criterion_as_pairs(
self.persist_selectable.onclause,
consider_as_foreign_keys=fks,
)
else:
self.persist_selectable = self.local_table
if self.polymorphic_identity is not None and not self.concrete:
self._identity_class = self.inherits._identity_class
else:
self._identity_class = self.class_
if self.version_id_col is None:
self.version_id_col = self.inherits.version_id_col
self.version_id_generator = self.inherits.version_id_generator
elif (
self.inherits.version_id_col is not None
and self.version_id_col is not self.inherits.version_id_col
):
util.warn(
"Inheriting version_id_col '%s' does not match inherited "
"version_id_col '%s' and will not automatically populate "
"the inherited versioning column. "
"version_id_col should only be specified on "
"the base-most mapper that includes versioning."
% (
self.version_id_col.description,
self.inherits.version_id_col.description,
)
)
if (
self.order_by is False
and not self.concrete
and self.inherits.order_by is not False
):
self.order_by = self.inherits.order_by
self.polymorphic_map = self.inherits.polymorphic_map
self.batch = self.inherits.batch
self.inherits._inheriting_mappers.append(self)
self.base_mapper = self.inherits.base_mapper
self.passive_updates = self.inherits.passive_updates
self.passive_deletes = (
self.inherits.passive_deletes or self.passive_deletes
)
self._all_tables = self.inherits._all_tables
if self.polymorphic_identity is not None:
if self.polymorphic_identity in self.polymorphic_map:
util.warn(
"Reassigning polymorphic association for identity %r "
"from %r to %r: Check for duplicate use of %r as "
"value for polymorphic_identity."
% (
self.polymorphic_identity,
self.polymorphic_map[self.polymorphic_identity],
self,
self.polymorphic_identity,
)
)
self.polymorphic_map[self.polymorphic_identity] = self
if self.polymorphic_load and self.concrete:
raise sa_exc.ArgumentError(
"polymorphic_load is not currently supported "
"with concrete table inheritance"
)
if self.polymorphic_load == "inline":
self.inherits._add_with_polymorphic_subclass(self)
elif self.polymorphic_load == "selectin":
pass
elif self.polymorphic_load is not None:
raise sa_exc.ArgumentError(
"unknown argument for polymorphic_load: %r"
% self.polymorphic_load
)
else:
self._all_tables = set()
self.base_mapper = self
self.persist_selectable = self.local_table
if self.polymorphic_identity is not None:
self.polymorphic_map[self.polymorphic_identity] = self
self._identity_class = self.class_
if self.persist_selectable is None:
raise sa_exc.ArgumentError(
"Mapper '%s' does not have a persist_selectable specified."
% self
)
def _set_with_polymorphic(self, with_polymorphic):
if with_polymorphic == "*":
self.with_polymorphic = ("*", None)
elif isinstance(with_polymorphic, (tuple, list)):
if isinstance(
with_polymorphic[0], util.string_types + (tuple, list)
):
self.with_polymorphic = with_polymorphic
else:
self.with_polymorphic = (with_polymorphic, None)
elif with_polymorphic is not None:
raise sa_exc.ArgumentError("Invalid setting for with_polymorphic")
else:
self.with_polymorphic = None
if isinstance(self.local_table, expression.SelectBase):
raise sa_exc.InvalidRequestError(
"When mapping against a select() construct, map against "
"an alias() of the construct instead."
"This because several databases don't allow a "
"SELECT from a subquery that does not have an alias."
)
if self.with_polymorphic and isinstance(
self.with_polymorphic[1], expression.SelectBase
):
self.with_polymorphic = (
self.with_polymorphic[0],
self.with_polymorphic[1].alias(),
)
if self.configured:
self._expire_memoizations()
def _add_with_polymorphic_subclass(self, mapper):
subcl = mapper.class_
if self.with_polymorphic is None:
self._set_with_polymorphic((subcl,))
elif self.with_polymorphic[0] != "*":
self._set_with_polymorphic(
(self.with_polymorphic[0] + (subcl,), self.with_polymorphic[1])
)
def _set_concrete_base(self, mapper):
"""Set the given :class:`_orm.Mapper` as the 'inherits' for this
:class:`_orm.Mapper`, assuming this :class:`_orm.Mapper` is concrete
and does not already have an inherits."""
assert self.concrete
assert not self.inherits
assert isinstance(mapper, Mapper)
self.inherits = mapper
self.inherits.polymorphic_map.update(self.polymorphic_map)
self.polymorphic_map = self.inherits.polymorphic_map
for mapper in self.iterate_to_root():
if mapper.polymorphic_on is not None:
mapper._requires_row_aliasing = True
self.batch = self.inherits.batch
for mp in self.self_and_descendants:
mp.base_mapper = self.inherits.base_mapper
self.inherits._inheriting_mappers.append(self)
self.passive_updates = self.inherits.passive_updates
self._all_tables = self.inherits._all_tables
for key, prop in mapper._props.items():
if key not in self._props and not self._should_exclude(
key, key, local=False, column=None
):
self._adapt_inherited_property(key, prop, False)
def _set_polymorphic_on(self, polymorphic_on):
self.polymorphic_on = polymorphic_on
self._configure_polymorphic_setter(True)
def _configure_legacy_instrument_class(self):
if self.inherits:
self.dispatch._update(self.inherits.dispatch)
super_extensions = set(
chain(
*[
m._deprecated_extensions
for m in self.inherits.iterate_to_root()
]
)
)
else:
super_extensions = set()
for ext in self._deprecated_extensions:
if ext not in super_extensions:
ext._adapt_instrument_class(self, ext)
def _configure_listeners(self):
if self.inherits:
super_extensions = set(
chain(
*[
m._deprecated_extensions
for m in self.inherits.iterate_to_root()
]
)
)
else:
super_extensions = set()
for ext in self._deprecated_extensions:
if ext not in super_extensions:
ext._adapt_listener(self, ext)
def _configure_class_instrumentation(self):
"""If this mapper is to be a primary mapper (i.e. the
non_primary flag is not set), associate this Mapper with the
given class and entity name.
Subsequent calls to ``class_mapper()`` for the ``class_`` / ``entity``
name combination will return this mapper. Also decorate the
`__init__` method on the mapped class to include optional
auto-session attachment logic.
"""
manager = attributes.manager_of_class(self.class_)
if self.non_primary:
if not manager or not manager.is_mapped:
raise sa_exc.InvalidRequestError(
"Class %s has no primary mapper configured. Configure "
"a primary mapper first before setting up a non primary "
"Mapper." % self.class_
)
self.class_manager = manager
self._identity_class = manager.mapper._identity_class
_mapper_registry[self] = True
return
if manager is not None:
assert manager.class_ is self.class_
if manager.is_mapped:
raise sa_exc.ArgumentError(
"Class '%s' already has a primary mapper defined. "
"Use non_primary=True to "
"create a non primary Mapper. clear_mappers() will "
"remove *all* current mappers from all classes."
% self.class_
)
# else:
# a ClassManager may already exist as
# ClassManager.instrument_attribute() creates
# new managers for each subclass if they don't yet exist.
_mapper_registry[self] = True
# note: this *must be called before instrumentation.register_class*
# to maintain the documented behavior of instrument_class
self.dispatch.instrument_class(self, self.class_)
if manager is None:
manager = instrumentation.register_class(self.class_)
self.class_manager = manager
manager.mapper = self
manager.deferred_scalar_loader = util.partial(
loading.load_scalar_attributes, self
)
# The remaining members can be added by any mapper,
# whether the entity name ("e_name") is None or not.
if manager.info.get(_INSTRUMENTOR, False):
return
event.listen(manager, "first_init", _event_on_first_init, raw=True)
event.listen(manager, "init", _event_on_init, raw=True)
for key, method in util.iterate_attributes(self.class_):
if key == "__init__" and hasattr(method, "_sa_original_init"):
method = method._sa_original_init
if isinstance(method, types.MethodType):
method = method.im_func
if isinstance(method, types.FunctionType):
if hasattr(method, "__sa_reconstructor__"):
self._reconstructor = method
event.listen(manager, "load", _event_on_load, raw=True)
elif hasattr(method, "__sa_validators__"):
validation_opts = method.__sa_validation_opts__
for name in method.__sa_validators__:
if name in self.validators:
raise sa_exc.InvalidRequestError(
"A validation function for mapped "
"attribute %r on mapper %s already exists."
% (name, self)
)
self.validators = self.validators.union(
{name: (method, validation_opts)}
)
manager.info[_INSTRUMENTOR] = self
@classmethod
def _configure_all(cls):
"""Class-level path to the :func:`.configure_mappers` call.
"""
configure_mappers()
def dispose(self):
# Disable any attribute-based compilation.
self.configured = True
self._dispose_called = True
if hasattr(self, "_configure_failed"):
del self._configure_failed
if (
not self.non_primary
and self.class_manager is not None
and self.class_manager.is_mapped
and self.class_manager.mapper is self
):
instrumentation.unregister_class(self.class_)
def _configure_pks(self):
self.tables = sql_util.find_tables(self.persist_selectable)
self._pks_by_table = {}
self._cols_by_table = {}
all_cols = util.column_set(
chain(*[col.proxy_set for col in self._columntoproperty])
)
pk_cols = util.column_set(c for c in all_cols if c.primary_key)
# identify primary key columns which are also mapped by this mapper.
tables = set(self.tables + [self.persist_selectable])
self._all_tables.update(tables)
for t in tables:
if t.primary_key and pk_cols.issuperset(t.primary_key):
# ordering is important since it determines the ordering of
# mapper.primary_key (and therefore query.get())
self._pks_by_table[t] = util.ordered_column_set(
t.primary_key
).intersection(pk_cols)
self._cols_by_table[t] = util.ordered_column_set(t.c).intersection(
all_cols
)
# if explicit PK argument sent, add those columns to the
# primary key mappings
if self._primary_key_argument:
for k in self._primary_key_argument:
if k.table not in self._pks_by_table:
self._pks_by_table[k.table] = util.OrderedSet()
self._pks_by_table[k.table].add(k)
# otherwise, see that we got a full PK for the mapped table
elif (
self.persist_selectable not in self._pks_by_table
or len(self._pks_by_table[self.persist_selectable]) == 0
):
raise sa_exc.ArgumentError(
"Mapper %s could not assemble any primary "
"key columns for mapped table '%s'"
% (self, self.persist_selectable.description)
)
elif self.local_table not in self._pks_by_table and isinstance(
self.local_table, schema.Table
):
util.warn(
"Could not assemble any primary "
"keys for locally mapped table '%s' - "
"no rows will be persisted in this Table."
% self.local_table.description
)
if (
self.inherits
and not self.concrete
and not self._primary_key_argument
):
# if inheriting, the "primary key" for this mapper is
# that of the inheriting (unless concrete or explicit)
self.primary_key = self.inherits.primary_key
else:
# determine primary key from argument or persist_selectable pks -
# reduce to the minimal set of columns
if self._primary_key_argument:
primary_key = sql_util.reduce_columns(
[
self.persist_selectable.corresponding_column(c)
for c in self._primary_key_argument
],
ignore_nonexistent_tables=True,
)
else:
primary_key = sql_util.reduce_columns(
self._pks_by_table[self.persist_selectable],
ignore_nonexistent_tables=True,
)
if len(primary_key) == 0:
raise sa_exc.ArgumentError(
"Mapper %s could not assemble any primary "
"key columns for mapped table '%s'"
% (self, self.persist_selectable.description)
)
self.primary_key = tuple(primary_key)
self._log("Identified primary key columns: %s", primary_key)
# determine cols that aren't expressed within our tables; mark these
# as "read only" properties which are refreshed upon INSERT/UPDATE
self._readonly_props = set(
self._columntoproperty[col]
for col in self._columntoproperty
if self._columntoproperty[col] not in self._identity_key_props
and (
not hasattr(col, "table")
or col.table not in self._cols_by_table
)
)
def _configure_properties(self):
# Column and other ClauseElement objects which are mapped
self.columns = self.c = util.OrderedProperties()
# object attribute names mapped to MapperProperty objects
self._props = util.OrderedDict()
# table columns mapped to lists of MapperProperty objects
# using a list allows a single column to be defined as
# populating multiple object attributes
self._columntoproperty = _ColumnMapping(self)
# load custom properties
if self._init_properties:
for key, prop in self._init_properties.items():
self._configure_property(key, prop, False)
# pull properties from the inherited mapper if any.
if self.inherits:
for key, prop in self.inherits._props.items():
if key not in self._props and not self._should_exclude(
key, key, local=False, column=None
):
self._adapt_inherited_property(key, prop, False)
# create properties for each column in the mapped table,
# for those columns which don't already map to a property
for column in self.persist_selectable.columns:
if column in self._columntoproperty:
continue
column_key = (self.column_prefix or "") + column.key
if self._should_exclude(
column.key,
column_key,
local=self.local_table.c.contains_column(column),
column=column,
):
continue
# adjust the "key" used for this column to that
# of the inheriting mapper
for mapper in self.iterate_to_root():
if column in mapper._columntoproperty:
column_key = mapper._columntoproperty[column].key
self._configure_property(
column_key, column, init=False, setparent=True
)
def _configure_polymorphic_setter(self, init=False):
"""Configure an attribute on the mapper representing the
'polymorphic_on' column, if applicable, and not
already generated by _configure_properties (which is typical).
Also create a setter function which will assign this
attribute to the value of the 'polymorphic_identity'
upon instance construction, also if applicable. This
routine will run when an instance is created.
"""
setter = False
if self.polymorphic_on is not None:
setter = True
if isinstance(self.polymorphic_on, util.string_types):
# polymorphic_on specified as a string - link
# it to mapped ColumnProperty
try:
self.polymorphic_on = self._props[self.polymorphic_on]
except KeyError as err:
util.raise_(
sa_exc.ArgumentError(
"Can't determine polymorphic_on "
"value '%s' - no attribute is "
"mapped to this name." % self.polymorphic_on
),
replace_context=err,
)
if self.polymorphic_on in self._columntoproperty:
# polymorphic_on is a column that is already mapped
# to a ColumnProperty
prop = self._columntoproperty[self.polymorphic_on]
elif isinstance(self.polymorphic_on, MapperProperty):
# polymorphic_on is directly a MapperProperty,
# ensure it's a ColumnProperty
if not isinstance(
self.polymorphic_on, properties.ColumnProperty
):
raise sa_exc.ArgumentError(
"Only direct column-mapped "
"property or SQL expression "
"can be passed for polymorphic_on"
)
prop = self.polymorphic_on
elif not expression._is_column(self.polymorphic_on):
# polymorphic_on is not a Column and not a ColumnProperty;
# not supported right now.
raise sa_exc.ArgumentError(
"Only direct column-mapped "
"property or SQL expression "
"can be passed for polymorphic_on"
)
else:
# polymorphic_on is a Column or SQL expression and
# doesn't appear to be mapped. this means it can be 1.
# only present in the with_polymorphic selectable or
# 2. a totally standalone SQL expression which we'd
# hope is compatible with this mapper's persist_selectable
col = self.persist_selectable.corresponding_column(
self.polymorphic_on
)
if col is None:
# polymorphic_on doesn't derive from any
# column/expression isn't present in the mapped
# table. we will make a "hidden" ColumnProperty
# for it. Just check that if it's directly a
# schema.Column and we have with_polymorphic, it's
# likely a user error if the schema.Column isn't
# represented somehow in either persist_selectable or
# with_polymorphic. Otherwise as of 0.7.4 we
# just go with it and assume the user wants it
# that way (i.e. a CASE statement)
setter = False
instrument = False
col = self.polymorphic_on
if isinstance(col, schema.Column) and (
self.with_polymorphic is None
or self.with_polymorphic[1].corresponding_column(col)
is None
):
raise sa_exc.InvalidRequestError(
"Could not map polymorphic_on column "
"'%s' to the mapped table - polymorphic "
"loads will not function properly"
% col.description
)
else:
# column/expression that polymorphic_on derives from
# is present in our mapped table
# and is probably mapped, but polymorphic_on itself
# is not. This happens when
# the polymorphic_on is only directly present in the
# with_polymorphic selectable, as when using
# polymorphic_union.
# we'll make a separate ColumnProperty for it.
instrument = True
key = getattr(col, "key", None)
if key:
if self._should_exclude(col.key, col.key, False, col):
raise sa_exc.InvalidRequestError(
"Cannot exclude or override the "
"discriminator column %r" % col.key
)
else:
self.polymorphic_on = col = col.label("_sa_polymorphic_on")
key = col.key
prop = properties.ColumnProperty(col, _instrument=instrument)
self._configure_property(key, prop, init=init, setparent=True)
# the actual polymorphic_on should be the first public-facing
# column in the property
self.polymorphic_on = prop.columns[0]
polymorphic_key = prop.key
else:
# no polymorphic_on was set.
# check inheriting mappers for one.
for mapper in self.iterate_to_root():
# determine if polymorphic_on of the parent
# should be propagated here. If the col
# is present in our mapped table, or if our mapped
# table is the same as the parent (i.e. single table
# inheritance), we can use it
if mapper.polymorphic_on is not None:
if self.persist_selectable is mapper.persist_selectable:
self.polymorphic_on = mapper.polymorphic_on
else:
self.polymorphic_on = (
self.persist_selectable
).corresponding_column(mapper.polymorphic_on)
# we can use the parent mapper's _set_polymorphic_identity
# directly; it ensures the polymorphic_identity of the
# instance's mapper is used so is portable to subclasses.
if self.polymorphic_on is not None:
self._set_polymorphic_identity = (
mapper._set_polymorphic_identity
)
self._validate_polymorphic_identity = (
mapper._validate_polymorphic_identity
)
else:
self._set_polymorphic_identity = None
return
if setter:
def _set_polymorphic_identity(state):
dict_ = state.dict
state.get_impl(polymorphic_key).set(
state,
dict_,
state.manager.mapper.polymorphic_identity,
None,
)
def _validate_polymorphic_identity(mapper, state, dict_):
if (
polymorphic_key in dict_
and dict_[polymorphic_key]
not in mapper._acceptable_polymorphic_identities
):
util.warn_limited(
"Flushing object %s with "
"incompatible polymorphic identity %r; the "
"object may not refresh and/or load correctly",
(state_str(state), dict_[polymorphic_key]),
)
self._set_polymorphic_identity = _set_polymorphic_identity
self._validate_polymorphic_identity = (
_validate_polymorphic_identity
)
else:
self._set_polymorphic_identity = None
_validate_polymorphic_identity = None
@_memoized_configured_property
def _version_id_prop(self):
if self.version_id_col is not None:
return self._columntoproperty[self.version_id_col]
else:
return None
@_memoized_configured_property
def _acceptable_polymorphic_identities(self):
identities = set()
stack = deque([self])
while stack:
item = stack.popleft()
if item.persist_selectable is self.persist_selectable:
identities.add(item.polymorphic_identity)
stack.extend(item._inheriting_mappers)
return identities
@_memoized_configured_property
def _prop_set(self):
return frozenset(self._props.values())
def _adapt_inherited_property(self, key, prop, init):
if not self.concrete:
self._configure_property(key, prop, init=False, setparent=False)
elif key not in self._props:
# determine if the class implements this attribute; if not,
# or if it is implemented by the attribute that is handling the
# given superclass-mapped property, then we need to report that we
# can't use this at the instance level since we are a concrete
# mapper and we don't map this. don't trip user-defined
# descriptors that might have side effects when invoked.
implementing_attribute = self.class_manager._get_class_attr_mro(
key, prop
)
if implementing_attribute is prop or (
isinstance(
implementing_attribute, attributes.InstrumentedAttribute
)
and implementing_attribute._parententity is prop.parent
):
self._configure_property(
key,
properties.ConcreteInheritedProperty(),
init=init,
setparent=True,
)
def _configure_property(self, key, prop, init=True, setparent=True):
self._log("_configure_property(%s, %s)", key, prop.__class__.__name__)
if not isinstance(prop, MapperProperty):
prop = self._property_from_column(key, prop)
if isinstance(prop, properties.ColumnProperty):
col = self.persist_selectable.corresponding_column(prop.columns[0])
# if the column is not present in the mapped table,
# test if a column has been added after the fact to the
# parent table (or their parent, etc.) [ticket:1570]
if col is None and self.inherits:
path = [self]
for m in self.inherits.iterate_to_root():
col = m.local_table.corresponding_column(prop.columns[0])
if col is not None:
for m2 in path:
m2.persist_selectable._reset_exported()
col = self.persist_selectable.corresponding_column(
prop.columns[0]
)
break
path.append(m)
# subquery expression, column not present in the mapped
# selectable.
if col is None:
col = prop.columns[0]
# column is coming in after _readonly_props was
# initialized; check for 'readonly'
if hasattr(self, "_readonly_props") and (
not hasattr(col, "table")
or col.table not in self._cols_by_table
):
self._readonly_props.add(prop)
else:
# if column is coming in after _cols_by_table was
# initialized, ensure the col is in the right set
if (
hasattr(self, "_cols_by_table")
and col.table in self._cols_by_table
and col not in self._cols_by_table[col.table]
):
self._cols_by_table[col.table].add(col)
# if this properties.ColumnProperty represents the "polymorphic
# discriminator" column, mark it. We'll need this when rendering
# columns in SELECT statements.
if not hasattr(prop, "_is_polymorphic_discriminator"):
prop._is_polymorphic_discriminator = (
col is self.polymorphic_on
or prop.columns[0] is self.polymorphic_on
)
self.columns[key] = col
for col in prop.columns + prop._orig_columns:
for col in col.proxy_set:
self._columntoproperty[col] = prop
prop.key = key
if setparent:
prop.set_parent(self, init)
if key in self._props and getattr(
self._props[key], "_mapped_by_synonym", False
):
syn = self._props[key]._mapped_by_synonym
raise sa_exc.ArgumentError(
"Can't call map_column=True for synonym %r=%r, "
"a ColumnProperty already exists keyed to the name "
"%r for column %r" % (syn, key, key, syn)
)
if (
key in self._props
and not isinstance(prop, properties.ColumnProperty)
and not isinstance(
self._props[key],
(
properties.ColumnProperty,
properties.ConcreteInheritedProperty,
),
)
):
util.warn(
"Property %s on %s being replaced with new "
"property %s; the old property will be discarded"
% (self._props[key], self, prop)
)
oldprop = self._props[key]
self._path_registry.pop(oldprop, None)
self._props[key] = prop
if not self.non_primary:
prop.instrument_class(self)
for mapper in self._inheriting_mappers:
mapper._adapt_inherited_property(key, prop, init)
if init:
prop.init()
prop.post_instrument_class(self)
if self.configured:
self._expire_memoizations()
def _property_from_column(self, key, prop):
"""generate/update a :class:`.ColumnProprerty` given a
:class:`_schema.Column` object. """
# we were passed a Column or a list of Columns;
# generate a properties.ColumnProperty
columns = util.to_list(prop)
column = columns[0]
if not expression._is_column(column):
raise sa_exc.ArgumentError(
"%s=%r is not an instance of MapperProperty or Column"
% (key, prop)
)
prop = self._props.get(key, None)
if isinstance(prop, properties.ColumnProperty):
if (
(
not self._inherits_equated_pairs
or (prop.columns[0], column)
not in self._inherits_equated_pairs
)
and not prop.columns[0].shares_lineage(column)
and prop.columns[0] is not self.version_id_col
and column is not self.version_id_col
):
warn_only = prop.parent is not self
msg = (
"Implicitly combining column %s with column "
"%s under attribute '%s'. Please configure one "
"or more attributes for these same-named columns "
"explicitly." % (prop.columns[-1], column, key)
)
if warn_only:
util.warn(msg)
else:
raise sa_exc.InvalidRequestError(msg)
# existing properties.ColumnProperty from an inheriting
# mapper. make a copy and append our column to it
prop = prop.copy()
prop.columns.insert(0, column)
self._log(
"inserting column to existing list "
"in properties.ColumnProperty %s" % (key)
)
return prop
elif prop is None or isinstance(
prop, properties.ConcreteInheritedProperty
):
mapped_column = []
for c in columns:
mc = self.persist_selectable.corresponding_column(c)
if mc is None:
mc = self.local_table.corresponding_column(c)
if mc is not None:
# if the column is in the local table but not the
# mapped table, this corresponds to adding a
# column after the fact to the local table.
# [ticket:1523]
self.persist_selectable._reset_exported()
mc = self.persist_selectable.corresponding_column(c)
if mc is None:
raise sa_exc.ArgumentError(
"When configuring property '%s' on %s, "
"column '%s' is not represented in the mapper's "
"table. Use the `column_property()` function to "
"force this column to be mapped as a read-only "
"attribute." % (key, self, c)
)
mapped_column.append(mc)
return properties.ColumnProperty(*mapped_column)
else:
raise sa_exc.ArgumentError(
"WARNING: when configuring property '%s' on %s, "
"column '%s' conflicts with property '%r'. "
"To resolve this, map the column to the class under a "
"different name in the 'properties' dictionary. Or, "
"to remove all awareness of the column entirely "
"(including its availability as a foreign key), "
"use the 'include_properties' or 'exclude_properties' "
"mapper arguments to control specifically which table "
"columns get mapped." % (key, self, column.key, prop)
)
def _post_configure_properties(self):
"""Call the ``init()`` method on all ``MapperProperties``
attached to this mapper.
This is a deferred configuration step which is intended
to execute once all mappers have been constructed.
"""
self._log("_post_configure_properties() started")
l = [(key, prop) for key, prop in self._props.items()]
for key, prop in l:
self._log("initialize prop %s", key)
if prop.parent is self and not prop._configure_started:
prop.init()
if prop._configure_finished:
prop.post_instrument_class(self)
self._log("_post_configure_properties() complete")
self.configured = True
def add_properties(self, dict_of_properties):
"""Add the given dictionary of properties to this mapper,
using `add_property`.
"""
for key, value in dict_of_properties.items():
self.add_property(key, value)
def add_property(self, key, prop):
"""Add an individual MapperProperty to this mapper.
If the mapper has not been configured yet, just adds the
property to the initial properties dictionary sent to the
constructor. If this Mapper has already been configured, then
the given MapperProperty is configured immediately.
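E.g., an illustrative sketch (the ``User`` / ``Address`` classes are
hypothetical)::

    from sqlalchemy.orm import class_mapper, relationship

    class_mapper(User).add_property("addresses", relationship(Address))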
"""
self._init_properties[key] = prop
self._configure_property(key, prop, init=self.configured)
def _expire_memoizations(self):
for mapper in self.iterate_to_root():
_memoized_configured_property.expire_instance(mapper)
@property
def _log_desc(self):
return (
"("
+ self.class_.__name__
+ "|"
+ (
self.local_table is not None
and self.local_table.description
or str(self.local_table)
)
+ (self.non_primary and "|non-primary" or "")
+ ")"
)
def _log(self, msg, *args):
self.logger.info("%s " + msg, *((self._log_desc,) + args))
def _log_debug(self, msg, *args):
self.logger.debug("%s " + msg, *((self._log_desc,) + args))
def __repr__(self):
return "<Mapper at 0x%x; %s>" % (id(self), self.class_.__name__)
def __str__(self):
return "mapped class %s%s->%s" % (
self.class_.__name__,
self.non_primary and " (non-primary)" or "",
self.local_table.description
if self.local_table is not None
else self.persist_selectable.description,
)
def _is_orphan(self, state):
orphan_possible = False
for mapper in self.iterate_to_root():
for (key, cls) in mapper._delete_orphans:
orphan_possible = True
has_parent = attributes.manager_of_class(cls).has_parent(
state, key, optimistic=state.has_identity
)
if self.legacy_is_orphan and has_parent:
return False
elif not self.legacy_is_orphan and not has_parent:
return True
if self.legacy_is_orphan:
return orphan_possible
else:
return False
def has_property(self, key):
return key in self._props
def get_property(self, key, _configure_mappers=True):
"""return a MapperProperty associated with the given key.
"""
if _configure_mappers and Mapper._new_mappers:
configure_mappers()
try:
return self._props[key]
except KeyError as err:
util.raise_(
sa_exc.InvalidRequestError(
"Mapper '%s' has no property '%s'" % (self, key)
),
replace_context=err,
)
def get_property_by_column(self, column):
"""Given a :class:`_schema.Column` object, return the
:class:`.MapperProperty` which maps this column."""
return self._columntoproperty[column]
@property
def iterate_properties(self):
"""return an iterator of all MapperProperty objects."""
if Mapper._new_mappers:
configure_mappers()
return iter(self._props.values())
def _mappers_from_spec(self, spec, selectable):
"""given a with_polymorphic() argument, return the set of mappers it
represents.
Trims the list of mappers to just those represented within the given
selectable, if present. This helps some more legacy-ish mappings.
"""
if spec == "*":
mappers = list(self.self_and_descendants)
elif spec:
mappers = set()
for m in util.to_list(spec):
m = _class_to_mapper(m)
if not m.isa(self):
raise sa_exc.InvalidRequestError(
"%r does not inherit from %r" % (m, self)
)
if selectable is None:
mappers.update(m.iterate_to_root())
else:
mappers.add(m)
mappers = [m for m in self.self_and_descendants if m in mappers]
else:
mappers = []
if selectable is not None:
tables = set(
sql_util.find_tables(selectable, include_aliases=True)
)
mappers = [m for m in mappers if m.local_table in tables]
return mappers
def _selectable_from_mappers(self, mappers, innerjoin):
"""given a list of mappers (assumed to be within this mapper's
inheritance hierarchy), construct an outerjoin amongst those mapper's
mapped tables.
"""
from_obj = self.persist_selectable
for m in mappers:
if m is self:
continue
if m.concrete:
raise sa_exc.InvalidRequestError(
"'with_polymorphic()' requires 'selectable' argument "
"when concrete-inheriting mappers are used."
)
elif not m.single:
if innerjoin:
from_obj = from_obj.join(
m.local_table, m.inherit_condition
)
else:
from_obj = from_obj.outerjoin(
m.local_table, m.inherit_condition
)
return from_obj
@_memoized_configured_property
def _single_table_criterion(self):
if self.single and self.inherits and self.polymorphic_on is not None:
return self.polymorphic_on._annotate({"parentmapper": self}).in_(
m.polymorphic_identity for m in self.self_and_descendants
)
else:
return None
@_memoized_configured_property
def _with_polymorphic_mappers(self):
if Mapper._new_mappers:
configure_mappers()
if not self.with_polymorphic:
return []
return self._mappers_from_spec(*self.with_polymorphic)
@_memoized_configured_property
def _with_polymorphic_selectable(self):
if not self.with_polymorphic:
return self.persist_selectable
spec, selectable = self.with_polymorphic
if selectable is not None:
return selectable
else:
return self._selectable_from_mappers(
self._mappers_from_spec(spec, selectable), False
)
with_polymorphic_mappers = _with_polymorphic_mappers
"""The list of :class:`_orm.Mapper` objects included in the
default "polymorphic" query.
"""
@_memoized_configured_property
def _insert_cols_evaluating_none(self):
return dict(
(
table,
frozenset(
col for col in columns if col.type.should_evaluate_none
),
)
for table, columns in self._cols_by_table.items()
)
@_memoized_configured_property
def _insert_cols_as_none(self):
return dict(
(
table,
frozenset(
col.key
for col in columns
if not col.primary_key
and not col.server_default
and not col.default
and not col.type.should_evaluate_none
),
)
for table, columns in self._cols_by_table.items()
)
@_memoized_configured_property
def _propkey_to_col(self):
return dict(
(
table,
dict(
(self._columntoproperty[col].key, col) for col in columns
),
)
for table, columns in self._cols_by_table.items()
)
@_memoized_configured_property
def _pk_keys_by_table(self):
return dict(
(table, frozenset([col.key for col in pks]))
for table, pks in self._pks_by_table.items()
)
@_memoized_configured_property
def _pk_attr_keys_by_table(self):
return dict(
(
table,
frozenset([self._columntoproperty[col].key for col in pks]),
)
for table, pks in self._pks_by_table.items()
)
@_memoized_configured_property
def _server_default_cols(self):
return dict(
(
table,
frozenset(
[
col.key
for col in columns
if col.server_default is not None
]
),
)
for table, columns in self._cols_by_table.items()
)
@_memoized_configured_property
def _server_default_plus_onupdate_propkeys(self):
result = set()
for table, columns in self._cols_by_table.items():
for col in columns:
if (
col.server_default is not None
or col.server_onupdate is not None
) and col in self._columntoproperty:
result.add(self._columntoproperty[col].key)
return result
@_memoized_configured_property
def _server_onupdate_default_cols(self):
return dict(
(
table,
frozenset(
[
col.key
for col in columns
if col.server_onupdate is not None
]
),
)
for table, columns in self._cols_by_table.items()
)
@property
def selectable(self):
"""The :func:`_expression.select` construct this :class:`_orm.Mapper`
selects from
by default.
Normally, this is equivalent to :attr:`.persist_selectable`, unless
the ``with_polymorphic`` feature is in use, in which case the
full "polymorphic" selectable is returned.
"""
return self._with_polymorphic_selectable
def _with_polymorphic_args(
self, spec=None, selectable=False, innerjoin=False
):
if self.with_polymorphic:
if not spec:
spec = self.with_polymorphic[0]
if selectable is False:
selectable = self.with_polymorphic[1]
elif selectable is False:
selectable = None
mappers = self._mappers_from_spec(spec, selectable)
if selectable is not None:
return mappers, selectable
else:
return mappers, self._selectable_from_mappers(mappers, innerjoin)
@_memoized_configured_property
def _polymorphic_properties(self):
return list(
self._iterate_polymorphic_properties(
self._with_polymorphic_mappers
)
)
def _iterate_polymorphic_properties(self, mappers=None):
"""Return an iterator of MapperProperty objects which will render into
a SELECT."""
if mappers is None:
mappers = self._with_polymorphic_mappers
if not mappers:
for c in self.iterate_properties:
yield c
else:
# in the polymorphic case, filter out discriminator columns
# from other mappers, as these are sometimes dependent on that
# mapper's polymorphic selectable (which we don't want rendered)
for c in util.unique_list(
chain(
*[
list(mapper.iterate_properties)
for mapper in [self] + mappers
]
)
):
if getattr(c, "_is_polymorphic_discriminator", False) and (
self.polymorphic_on is None
or c.columns[0] is not self.polymorphic_on
):
continue
yield c
@_memoized_configured_property
def attrs(self):
"""A namespace of all :class:`.MapperProperty` objects
associated with this mapper.
This is an object that provides each property based on
its key name. For instance, the mapper for a
``User`` class which has a ``User.name`` attribute would
provide ``mapper.attrs.name``, which would be the
:class:`.ColumnProperty` representing the ``name``
column. The namespace object can also be iterated,
which would yield each :class:`.MapperProperty`.
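An illustrative sketch using runtime inspection (the ``User`` class
is hypothetical)::

    from sqlalchemy import inspect

    mapper = inspect(User)
    name_prop = mapper.attrs["name"]
    for prop in mapper.attrs:
        print(prop.key)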
:class:`_orm.Mapper` has several pre-filtered views
of this attribute which limit the types of properties
returned, including :attr:`.synonyms`, :attr:`.column_attrs`,
:attr:`.relationships`, and :attr:`.composites`.
.. warning::
The :attr:`_orm.Mapper.attrs` accessor namespace is an
instance of :class:`.OrderedProperties`. This is
a dictionary-like object which includes a small number of
named methods such as :meth:`.OrderedProperties.items`
and :meth:`.OrderedProperties.values`. When
accessing attributes dynamically, favor using the dict-access
scheme, e.g. ``mapper.attrs[somename]`` over
``getattr(mapper.attrs, somename)`` to avoid name collisions.
.. seealso::
:attr:`_orm.Mapper.all_orm_descriptors`
"""
if Mapper._new_mappers:
configure_mappers()
return util.ImmutableProperties(self._props)
@_memoized_configured_property
def all_orm_descriptors(self):
"""A namespace of all :class:`.InspectionAttr` attributes associated
with the mapped class.
These attributes are in all cases Python :term:`descriptors`
associated with the mapped class or its superclasses.
This namespace includes attributes that are mapped to the class
as well as attributes declared by extension modules.
It includes any Python descriptor type that inherits from
:class:`.InspectionAttr`. This includes
:class:`.QueryableAttribute`, as well as extension types such as
:class:`.hybrid_property`, :class:`.hybrid_method` and
:class:`.AssociationProxy`.
To distinguish between mapped attributes and extension attributes,
the attribute :attr:`.InspectionAttr.extension_type` will refer
to a constant that distinguishes between different extension types.
When dealing with a :class:`.QueryableAttribute`, the
:attr:`.QueryableAttribute.property` attribute refers to the
:class:`.MapperProperty` property, which is what you get when
referring to the collection of mapped properties via
:attr:`_orm.Mapper.attrs`.
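An illustrative sketch (the ``User`` class is hypothetical)::

    from sqlalchemy import inspect

    for key, attr in inspect(User).all_orm_descriptors.items():
        print(key, attr.extension_type)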
.. warning::
The :attr:`_orm.Mapper.all_orm_descriptors`
accessor namespace is an
instance of :class:`.OrderedProperties`. This is
a dictionary-like object which includes a small number of
named methods such as :meth:`.OrderedProperties.items`
and :meth:`.OrderedProperties.values`. When
accessing attributes dynamically, favor using the dict-access
scheme, e.g. ``mapper.all_orm_descriptors[somename]`` over
``getattr(mapper.all_orm_descriptors, somename)`` to avoid name
collisions.
.. seealso::
:attr:`_orm.Mapper.attrs`
"""
return util.ImmutableProperties(
dict(self.class_manager._all_sqla_attributes())
)
@_memoized_configured_property
def synonyms(self):
"""Return a namespace of all :class:`.SynonymProperty`
properties maintained by this :class:`_orm.Mapper`.
.. seealso::
:attr:`_orm.Mapper.attrs` - namespace of all
:class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.SynonymProperty)
@_memoized_configured_property
def column_attrs(self):
"""Return a namespace of all :class:`.ColumnProperty`
properties maintained by this :class:`_orm.Mapper`.
.. seealso::
:attr:`_orm.Mapper.attrs` - namespace of all
:class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.ColumnProperty)
@_memoized_configured_property
def relationships(self):
"""A namespace of all :class:`.RelationshipProperty` properties
maintained by this :class:`_orm.Mapper`.
.. warning::
the :attr:`_orm.Mapper.relationships` accessor namespace is an
instance of :class:`.OrderedProperties`. This is
a dictionary-like object which includes a small number of
named methods such as :meth:`.OrderedProperties.items`
and :meth:`.OrderedProperties.values`. When
accessing attributes dynamically, favor using the dict-access
scheme, e.g. ``mapper.relationships[somename]`` over
``getattr(mapper.relationships, somename)`` to avoid name
collisions.
.. seealso::
:attr:`_orm.Mapper.attrs` - namespace of all
:class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.RelationshipProperty)
@_memoized_configured_property
def composites(self):
"""Return a namespace of all :class:`.CompositeProperty`
properties maintained by this :class:`_orm.Mapper`.
.. seealso::
:attr:`_orm.Mapper.attrs` - namespace of all
:class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.CompositeProperty)
def _filter_properties(self, type_):
if Mapper._new_mappers:
configure_mappers()
return util.ImmutableProperties(
util.OrderedDict(
(k, v) for k, v in self._props.items() if isinstance(v, type_)
)
)
@_memoized_configured_property
def _get_clause(self):
"""create a "get clause" based on the primary key. this is used
by query.get() and many-to-one lazyloads to load this item
by primary key.
"""
params = [
(primary_key, sql.bindparam(None, type_=primary_key.type))
for primary_key in self.primary_key
]
return (
sql.and_(*[k == v for (k, v) in params]),
util.column_dict(params),
)
@_memoized_configured_property
def _equivalent_columns(self):
"""Create a map of all equivalent columns, based on
the determination of column pairs that are equated to
one another based on inherit condition. This is designed
to work with the queries that util.polymorphic_union
comes up with, which often don't include the columns from
the base table directly (including the subclass table columns
only).
The resulting structure is a dictionary of columns mapped
to lists of equivalent columns, e.g.::
{
tablea.col1:
{tableb.col1, tablec.col1},
tablea.col2:
{tabled.col2}
}
"""
result = util.column_dict()
def visit_binary(binary):
if binary.operator == operators.eq:
if binary.left in result:
result[binary.left].add(binary.right)
else:
result[binary.left] = util.column_set((binary.right,))
if binary.right in result:
result[binary.right].add(binary.left)
else:
result[binary.right] = util.column_set((binary.left,))
for mapper in self.base_mapper.self_and_descendants:
if mapper.inherit_condition is not None:
visitors.traverse(
mapper.inherit_condition, {}, {"binary": visit_binary}
)
return result
def _is_userland_descriptor(self, obj):
if isinstance(
obj,
(
_MappedAttribute,
instrumentation.ClassManager,
expression.ColumnElement,
),
):
return False
else:
return True
def _should_exclude(self, name, assigned_name, local, column):
"""determine whether a particular property should be implicitly
present on the class.
This occurs when properties are propagated from an inherited class, or
are applied from the columns present in the mapped table.
"""
# check for class-bound attributes and/or descriptors,
# either local or from an inherited class
if local:
if self.class_.__dict__.get(
assigned_name, None
) is not None and self._is_userland_descriptor(
self.class_.__dict__[assigned_name]
):
return True
else:
attr = self.class_manager._get_class_attr_mro(assigned_name, None)
if attr is not None and self._is_userland_descriptor(attr):
return True
if (
self.include_properties is not None
and name not in self.include_properties
and (column is None or column not in self.include_properties)
):
self._log("not including property %s" % (name))
return True
if self.exclude_properties is not None and (
name in self.exclude_properties
or (column is not None and column in self.exclude_properties)
):
self._log("excluding property %s" % (name))
return True
return False
def common_parent(self, other):
"""Return true if the given mapper shares a
common inherited parent as this mapper."""
return self.base_mapper is other.base_mapper
def _canload(self, state, allow_subtypes):
s = self.primary_mapper()
if self.polymorphic_on is not None or allow_subtypes:
return _state_mapper(state).isa(s)
else:
return _state_mapper(state) is s
def isa(self, other):
"""Return True if the this mapper inherits from the given mapper."""
m = self
while m and m is not other:
m = m.inherits
return bool(m)
def iterate_to_root(self):
m = self
while m:
yield m
m = m.inherits
@_memoized_configured_property
def self_and_descendants(self):
"""The collection including this mapper and all descendant mappers.
This includes not just the immediately inheriting mappers but
all their inheriting mappers as well.
"""
descendants = []
stack = deque([self])
while stack:
item = stack.popleft()
descendants.append(item)
stack.extend(item._inheriting_mappers)
return util.WeakSequence(descendants)
def polymorphic_iterator(self):
"""Iterate through the collection including this mapper and
all descendant mappers.
This includes not just the immediately inheriting mappers but
all their inheriting mappers as well.
To iterate through an entire hierarchy, use
``mapper.base_mapper.polymorphic_iterator()``.
"""
return iter(self.self_and_descendants)
def primary_mapper(self):
"""Return the primary mapper corresponding to this mapper's class key
(class)."""
return self.class_manager.mapper
@property
def primary_base_mapper(self):
return self.class_manager.mapper.base_mapper
def _result_has_identity_key(self, result, adapter=None):
pk_cols = self.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
for col in pk_cols:
if not result._has_key(col):
return False
else:
return True
def identity_key_from_row(self, row, identity_token=None, adapter=None):
"""Return an identity-map key for use in storing/retrieving an
item from the identity map.
:param row: A :class:`.RowProxy` instance. The columns which are
mapped by this :class:`_orm.Mapper` should be locatable in the row,
preferably via the :class:`_schema.Column`
object directly (as is the case
when a :func:`_expression.select` construct is executed),
or via string names of
the form ``<tablename>_<colname>``.
"""
pk_cols = self.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
return (
self._identity_class,
tuple(row[column] for column in pk_cols),
identity_token,
)
def identity_key_from_primary_key(self, primary_key, identity_token=None):
"""Return an identity-map key for use in storing/retrieving an
item from an identity map.
:param primary_key: A list of values indicating the identifier.
"""
return self._identity_class, tuple(primary_key), identity_token
def identity_key_from_instance(self, instance):
"""Return the identity key for the given instance, based on
its primary key attributes.
If the instance's state is expired, calling this method
will result in a database check to see if the object has been deleted.
If the row no longer exists,
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
This value is typically also found on the instance state under the
attribute name `key`.
"""
state = attributes.instance_state(instance)
return self._identity_key_from_state(state, attributes.PASSIVE_OFF)
def _identity_key_from_state(
self, state, passive=attributes.PASSIVE_RETURN_NEVER_SET
):
dict_ = state.dict
manager = state.manager
return (
self._identity_class,
tuple(
[
manager[prop.key].impl.get(state, dict_, passive)
for prop in self._identity_key_props
]
),
state.identity_token,
)
def primary_key_from_instance(self, instance):
"""Return the list of primary key values for the given
instance.
If the instance's state is expired, calling this method
will result in a database check to see if the object has been deleted.
If the row no longer exists,
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
"""
state = attributes.instance_state(instance)
identity_key = self._identity_key_from_state(
state, attributes.PASSIVE_OFF
)
return identity_key[1]
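# Editorial usage sketch (commented out; not part of this module), showing the
# identity-key helpers above; ``SomeMappedClass`` and ``obj`` are hypothetical.
#
#     from sqlalchemy import inspect
#
#     mapper = inspect(SomeMappedClass)
#     key = mapper.identity_key_from_instance(obj)
#     # typically (mapped class, tuple of primary key values, identity token)
#     pk_values = mapper.primary_key_from_instance(obj)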
@_memoized_configured_property
def _persistent_sortkey_fn(self):
key_fns = [col.type.sort_key_function for col in self.primary_key]
if set(key_fns).difference([None]):
def key(state):
return tuple(
key_fn(val) if key_fn is not None else val
for key_fn, val in zip(key_fns, state.key[1])
)
else:
def key(state):
return state.key[1]
return key
@_memoized_configured_property
def _identity_key_props(self):
return [self._columntoproperty[col] for col in self.primary_key]
@_memoized_configured_property
def _all_pk_props(self):
collection = set()
for table in self.tables:
collection.update(self._pks_by_table[table])
return collection
@_memoized_configured_property
def _should_undefer_in_wildcard(self):
cols = set(self.primary_key)
if self.polymorphic_on is not None:
cols.add(self.polymorphic_on)
return cols
@_memoized_configured_property
def _primary_key_propkeys(self):
return {prop.key for prop in self._all_pk_props}
def _get_state_attr_by_column(
self, state, dict_, column, passive=attributes.PASSIVE_RETURN_NEVER_SET
):
prop = self._columntoproperty[column]
return state.manager[prop.key].impl.get(state, dict_, passive=passive)
def _set_committed_state_attr_by_column(self, state, dict_, column, value):
prop = self._columntoproperty[column]
state.manager[prop.key].impl.set_committed_value(state, dict_, value)
def _set_state_attr_by_column(self, state, dict_, column, value):
prop = self._columntoproperty[column]
state.manager[prop.key].impl.set(state, dict_, value, None)
def _get_committed_attr_by_column(self, obj, column):
state = attributes.instance_state(obj)
dict_ = attributes.instance_dict(obj)
return self._get_committed_state_attr_by_column(
state, dict_, column, passive=attributes.PASSIVE_OFF
)
def _get_committed_state_attr_by_column(
self, state, dict_, column, passive=attributes.PASSIVE_RETURN_NEVER_SET
):
prop = self._columntoproperty[column]
return state.manager[prop.key].impl.get_committed_value(
state, dict_, passive=passive
)
def _optimized_get_statement(self, state, attribute_names):
"""assemble a WHERE clause which retrieves a given state by primary
key, using a minimized set of tables.
Applies to a joined-table inheritance mapper where the
requested attribute names are only present on joined tables,
not the base table. The WHERE clause attempts to include
only those tables to minimize joins.
"""
props = self._props
tables = set(
chain(
*[
sql_util.find_tables(c, check_columns=True)
for key in attribute_names
for c in props[key].columns
]
)
)
if self.base_mapper.local_table in tables:
return None
def visit_binary(binary):
leftcol = binary.left
rightcol = binary.right
if leftcol is None or rightcol is None:
return
if leftcol.table not in tables:
leftval = self._get_committed_state_attr_by_column(
state,
state.dict,
leftcol,
passive=attributes.PASSIVE_NO_INITIALIZE,
)
if leftval in orm_util._none_set:
raise _OptGetColumnsNotAvailable()
binary.left = sql.bindparam(
None, leftval, type_=binary.right.type
)
elif rightcol.table not in tables:
rightval = self._get_committed_state_attr_by_column(
state,
state.dict,
rightcol,
passive=attributes.PASSIVE_NO_INITIALIZE,
)
if rightval in orm_util._none_set:
raise _OptGetColumnsNotAvailable()
binary.right = sql.bindparam(
None, rightval, type_=binary.right.type
)
allconds = []
try:
start = False
for mapper in reversed(list(self.iterate_to_root())):
if mapper.local_table in tables:
start = True
elif not isinstance(
mapper.local_table, expression.TableClause
):
return None
if start and not mapper.single:
allconds.append(
visitors.cloned_traverse(
mapper.inherit_condition,
{},
{"binary": visit_binary},
)
)
except _OptGetColumnsNotAvailable:
return None
cond = sql.and_(*allconds)
cols = []
for key in attribute_names:
cols.extend(props[key].columns)
return sql.select(cols, cond, use_labels=True)
def _iterate_to_target_viawpoly(self, mapper):
if self.isa(mapper):
prev = self
for m in self.iterate_to_root():
yield m
if m is not prev and prev not in m._with_polymorphic_mappers:
break
prev = m
if m is mapper:
break
def _should_selectin_load(self, enabled_via_opt, polymorphic_from):
if not enabled_via_opt:
# common case, takes place for all polymorphic loads
mapper = polymorphic_from
for m in self._iterate_to_target_viawpoly(mapper):
if m.polymorphic_load == "selectin":
return m
else:
# uncommon case, selectin load options were used
enabled_via_opt = set(enabled_via_opt)
enabled_via_opt_mappers = {e.mapper: e for e in enabled_via_opt}
for entity in enabled_via_opt.union([polymorphic_from]):
mapper = entity.mapper
for m in self._iterate_to_target_viawpoly(mapper):
if (
m.polymorphic_load == "selectin"
or m in enabled_via_opt_mappers
):
return enabled_via_opt_mappers.get(m, m)
return None
@util.dependencies(
"sqlalchemy.ext.baked", "sqlalchemy.orm.strategy_options"
)
def _subclass_load_via_in(self, baked, strategy_options, entity):
"""Assemble a BakedQuery that can load the columns local to
this subclass as a SELECT with IN.
"""
assert self.inherits
polymorphic_prop = self._columntoproperty[self.polymorphic_on]
keep_props = set([polymorphic_prop] + self._identity_key_props)
disable_opt = strategy_options.Load(entity)
enable_opt = strategy_options.Load(entity)
for prop in self.attrs:
if prop.parent is self or prop in keep_props:
# "enable" options, to turn on the properties that we want to
# load by default (subject to options from the query)
enable_opt.set_generic_strategy(
(prop.key,), dict(prop.strategy_key)
)
else:
# "disable" options, to turn off the properties from the
# superclass that we *don't* want to load, applied after
# the options from the query to override them
disable_opt.set_generic_strategy(
(prop.key,), {"do_nothing": True}
)
if len(self.primary_key) > 1:
in_expr = sql.tuple_(*self.primary_key)
else:
in_expr = self.primary_key[0]
if entity.is_aliased_class:
assert entity.mapper is self
q = baked.BakedQuery(
self._compiled_cache,
lambda session: session.query(entity)
.select_entity_from(entity.selectable)
._adapt_all_clauses(),
(self,),
)
q.spoil()
else:
q = baked.BakedQuery(
self._compiled_cache,
lambda session: session.query(self),
(self,),
)
q += lambda q: q.filter(
in_expr.in_(sql.bindparam("primary_keys", expanding=True))
).order_by(*self.primary_key)
return q, enable_opt, disable_opt
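# Editorial configuration sketch (commented out; not part of this module): the
# machinery above backs ``polymorphic_load="selectin"``, which a subclass in a
# joined-inheritance hierarchy can opt into declaratively. ``Employee`` and
# ``Engineer`` are hypothetical classes, assuming the usual declarative imports.
#
#     class Engineer(Employee):
#         __tablename__ = "engineer"
#         id = Column(ForeignKey("employee.id"), primary_key=True)
#         __mapper_args__ = {
#             "polymorphic_identity": "engineer",
#             "polymorphic_load": "selectin",
#         }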
@_memoized_configured_property
def _subclass_load_via_in_mapper(self):
return self._subclass_load_via_in(self)
def cascade_iterator(self, type_, state, halt_on=None):
r"""Iterate each element and its mapper in an object graph,
for all relationships that meet the given cascade rule.
:param type\_:
The name of the cascade rule (i.e. ``"save-update"``, ``"delete"``,
etc.).
.. note:: the ``"all"`` cascade is not accepted here. For a generic
object traversal function, see :ref:`faq_walk_objects`.
:param state:
The lead InstanceState. Child items will be processed per
the relationships defined for this object's mapper.
:return: the method yields individual object instances.
.. seealso::
:ref:`unitofwork_cascades`
:ref:`faq_walk_objects` - illustrates a generic function to
traverse all objects without relying on cascades.
"""
visited_states = set()
prp, mpp = object(), object()
assert state.mapper.isa(self)
visitables = deque(
[(deque(state.mapper._props.values()), prp, state, state.dict)]
)
while visitables:
iterator, item_type, parent_state, parent_dict = visitables[-1]
if not iterator:
visitables.pop()
continue
if item_type is prp:
prop = iterator.popleft()
if type_ not in prop.cascade:
continue
queue = deque(
prop.cascade_iterator(
type_,
parent_state,
parent_dict,
visited_states,
halt_on,
)
)
if queue:
visitables.append((queue, mpp, None, None))
elif item_type is mpp:
(
instance,
instance_mapper,
corresponding_state,
corresponding_dict,
) = iterator.popleft()
yield (
instance,
instance_mapper,
corresponding_state,
corresponding_dict,
)
visitables.append(
(
deque(instance_mapper._props.values()),
prp,
corresponding_state,
corresponding_dict,
)
)
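# Editorial usage sketch (commented out; not part of this module), walking the
# objects reachable from a parent along "delete" cascades; ``parent_obj`` is a
# hypothetical persistent object.
#
#     from sqlalchemy import inspect
#
#     state = inspect(parent_obj)
#     for obj, obj_mapper, _, _ in state.mapper.cascade_iterator("delete", state):
#         print(obj_mapper.class_.__name__, obj)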
@_memoized_configured_property
def _compiled_cache(self):
return util.LRUCache(self._compiled_cache_size)
@_memoized_configured_property
def _sorted_tables(self):
table_to_mapper = {}
for mapper in self.base_mapper.self_and_descendants:
for t in mapper.tables:
table_to_mapper.setdefault(t, mapper)
extra_dependencies = []
for table, mapper in table_to_mapper.items():
super_ = mapper.inherits
if super_:
extra_dependencies.extend(
[(super_table, table) for super_table in super_.tables]
)
def skip(fk):
# attempt to skip dependencies that are not
# significant to the inheritance chain
# for two tables that are related by inheritance.
# while that dependency may be important, it's technically
# not what we mean to sort on here.
parent = table_to_mapper.get(fk.parent.table)
dep = table_to_mapper.get(fk.column.table)
if (
parent is not None
and dep is not None
and dep is not parent
and dep.inherit_condition is not None
):
cols = set(sql_util._find_columns(dep.inherit_condition))
if parent.inherit_condition is not None:
cols = cols.union(
sql_util._find_columns(parent.inherit_condition)
)
return fk.parent not in cols and fk.column not in cols
else:
return fk.parent not in cols
return False
sorted_ = sql_util.sort_tables(
table_to_mapper,
skip_fn=skip,
extra_dependencies=extra_dependencies,
)
ret = util.OrderedDict()
for t in sorted_:
ret[t] = table_to_mapper[t]
return ret
def _memo(self, key, callable_):
if key in self._memoized_values:
return self._memoized_values[key]
else:
self._memoized_values[key] = value = callable_()
return value
@util.memoized_property
def _table_to_equated(self):
"""memoized map of tables to collections of columns to be
synchronized upwards to the base mapper."""
result = util.defaultdict(list)
for table in self._sorted_tables:
cols = set(table.c)
for m in self.iterate_to_root():
if m._inherits_equated_pairs and cols.intersection(
util.reduce(
set.union,
[l.proxy_set for l, r in m._inherits_equated_pairs],
)
):
result[table].append((m, m._inherits_equated_pairs))
return result
class _OptGetColumnsNotAvailable(Exception):
pass
def configure_mappers():
"""Initialize the inter-mapper relationships of all mappers that
have been constructed thus far.
This function can be called any number of times, but in
most cases is invoked automatically, the first time mappings are used,
as well as whenever mappings are used and additional not-yet-configured
mappers have been constructed.
Points at which this occurs include when a mapped class is instantiated
into an instance, as well as when the :meth:`.Session.query` method
is used.
The :func:`.configure_mappers` function provides several event hooks
that can be used to augment its functionality. These methods include:
* :meth:`.MapperEvents.before_configured` - called once before
:func:`.configure_mappers` does any work; this can be used to establish
additional options, properties, or related mappings before the operation
proceeds.
* :meth:`.MapperEvents.mapper_configured` - called as each individual
:class:`_orm.Mapper` is configured within the process; will include all
mapper state except for backrefs set up by other mappers that are still
to be configured.
* :meth:`.MapperEvents.after_configured` - called once after
:func:`.configure_mappers` is complete; at this stage, all
:class:`_orm.Mapper` objects that are known to SQLAlchemy will be fully
configured. Note that the calling application may still have other
mappings that haven't been produced yet, such as if they are in modules
as yet unimported.
"""
if not Mapper._new_mappers:
return
_CONFIGURE_MUTEX.acquire()
try:
global _already_compiling
if _already_compiling:
return
_already_compiling = True
try:
# double-check inside mutex
if not Mapper._new_mappers:
return
has_skip = False
Mapper.dispatch._for_class(Mapper).before_configured()
# initialize properties on all mappers
# note that _mapper_registry is unordered, which
# may randomly conceal/reveal issues related to
# the order of mapper compilation
for mapper in list(_mapper_registry):
run_configure = None
for fn in mapper.dispatch.before_mapper_configured:
run_configure = fn(mapper, mapper.class_)
if run_configure is EXT_SKIP:
has_skip = True
break
if run_configure is EXT_SKIP:
continue
if getattr(mapper, "_configure_failed", False):
e = sa_exc.InvalidRequestError(
"One or more mappers failed to initialize - "
"can't proceed with initialization of other "
"mappers. Triggering mapper: '%s'. "
"Original exception was: %s"
% (mapper, mapper._configure_failed)
)
e._configure_failed = mapper._configure_failed
raise e
if not mapper.configured:
try:
mapper._post_configure_properties()
mapper._expire_memoizations()
mapper.dispatch.mapper_configured(
mapper, mapper.class_
)
except Exception:
exc = sys.exc_info()[1]
if not hasattr(exc, "_configure_failed"):
mapper._configure_failed = exc
raise
if not has_skip:
Mapper._new_mappers = False
finally:
_already_compiling = False
finally:
_CONFIGURE_MUTEX.release()
Mapper.dispatch._for_class(Mapper).after_configured()
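# Editorial usage sketch (commented out; not part of this module), attaching
# one of the event hooks listed in the docstring above; ``log`` is a
# hypothetical logger.
#
#     from sqlalchemy import event
#     from sqlalchemy.orm import mapper
#
#     @event.listens_for(mapper, "before_configured", once=True)
#     def _before_configured():
#         log.info("mapper configuration starting")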
def reconstructor(fn):
"""Decorate a method as the 'reconstructor' hook.
Designates a method as the "reconstructor", an ``__init__``-like
method that will be called by the ORM after the instance has been
loaded from the database or otherwise reconstituted.
The reconstructor will be invoked with no arguments. Scalar
(non-collection) database-mapped attributes of the instance will
be available for use within the function. Eagerly-loaded
collections are generally not yet available and will usually only
contain the first element. ORM state changes made to objects at
this stage will not be recorded for the next flush() operation, so
the activity within a reconstructor should be conservative.
.. seealso::
:ref:`mapping_constructors`
:meth:`.InstanceEvents.load`
"""
fn.__sa_reconstructor__ = True
return fn
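# Editorial usage sketch (commented out; not part of this module); ``Base`` is
# a hypothetical declarative base, assuming the usual declarative imports.
#
#     from sqlalchemy.orm import reconstructor
#
#     class User(Base):
#         __tablename__ = "user"
#         id = Column(Integer, primary_key=True)
#
#         @reconstructor
#         def init_on_load(self):
#             # invoked after the instance is loaded from the database
#             self._cache = {}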
def validates(*names, **kw):
r"""Decorate a method as a 'validator' for one or more named properties.
Designates a method as a validator, a method which receives the
name of the attribute as well as a value to be assigned, or in the
case of a collection, the value to be added to the collection.
The function can then raise validation exceptions to halt the
process from continuing (where Python's built-in ``ValueError``
and ``AssertionError`` exceptions are reasonable choices), or can
modify or replace the value before proceeding. The function should
otherwise return the given value.
Note that a validator for a collection **cannot** issue a load of that
collection within the validation routine - this usage raises
an assertion to avoid recursion overflows. This is a reentrant
condition which is not supported.
:param \*names: list of attribute names to be validated.
:param include_removes: if True, "remove" events will be
sent as well - the validation function must accept an additional
argument "is_remove" which will be a boolean.
:param include_backrefs: defaults to ``True``; if ``False``, the
validation function will not emit if the originator is an attribute
event related via a backref. This can be used for bi-directional
:func:`.validates` usage where only one validator should emit per
attribute operation.
.. versionadded:: 0.9.0
.. seealso::
:ref:`simple_validators` - usage examples for :func:`.validates`
"""
include_removes = kw.pop("include_removes", False)
include_backrefs = kw.pop("include_backrefs", True)
def wrap(fn):
fn.__sa_validators__ = names
fn.__sa_validation_opts__ = {
"include_removes": include_removes,
"include_backrefs": include_backrefs,
}
return fn
return wrap
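# Editorial usage sketch (commented out; not part of this module); ``Base`` is
# a hypothetical declarative base, assuming the usual declarative imports.
#
#     from sqlalchemy.orm import validates
#
#     class User(Base):
#         __tablename__ = "user"
#         id = Column(Integer, primary_key=True)
#         email = Column(String)
#
#         @validates("email")
#         def validate_email(self, key, value):
#             if "@" not in value:
#                 raise ValueError("invalid email address")
#             return value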
def _event_on_load(state, ctx):
instrumenting_mapper = state.manager.info[_INSTRUMENTOR]
if instrumenting_mapper._reconstructor:
instrumenting_mapper._reconstructor(state.obj())
def _event_on_first_init(manager, cls):
"""Initial mapper compilation trigger.
instrumentation calls this one when InstanceState
is first generated, and is needed for legacy mutable
attributes to work.
"""
instrumenting_mapper = manager.info.get(_INSTRUMENTOR)
if instrumenting_mapper:
if Mapper._new_mappers:
configure_mappers()
def _event_on_init(state, args, kwargs):
"""Run init_instance hooks.
This also includes mapper compilation, normally not needed
here but helps with some piecemeal configuration
scenarios (such as in the ORM tutorial).
"""
instrumenting_mapper = state.manager.info.get(_INSTRUMENTOR)
if instrumenting_mapper:
if Mapper._new_mappers:
configure_mappers()
if instrumenting_mapper._set_polymorphic_identity:
instrumenting_mapper._set_polymorphic_identity(state)
class _ColumnMapping(dict):
"""Error reporting helper for mapper._columntoproperty."""
__slots__ = ("mapper",)
def __init__(self, mapper):
self.mapper = mapper
def __missing__(self, column):
prop = self.mapper._props.get(column)
if prop:
raise orm_exc.UnmappedColumnError(
"Column '%s.%s' is not available, due to "
"conflicting property '%s':%r"
% (column.table.name, column.name, column.key, prop)
)
raise orm_exc.UnmappedColumnError(
"No column %s is configured on mapper %s..."
% (column, self.mapper)
)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/scoping.py
|
# orm/scoping.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import class_mapper
from . import exc as orm_exc
from .session import Session
from .. import exc as sa_exc
from ..util import ScopedRegistry
from ..util import ThreadLocalRegistry
from ..util import warn
__all__ = ["scoped_session"]
class scoped_session(object):
"""Provides scoped management of :class:`.Session` objects.
See :ref:`unitofwork_contextual` for a tutorial.
"""
session_factory = None
"""The `session_factory` provided to `__init__` is stored in this
attribute and may be accessed at a later time. This can be useful when
a new non-scoped :class:`.Session` or :class:`_engine.Connection` to the
database is needed."""
def __init__(self, session_factory, scopefunc=None):
"""Construct a new :class:`.scoped_session`.
:param session_factory: a factory to create new :class:`.Session`
instances. This is usually, but not necessarily, an instance
of :class:`.sessionmaker`.
:param scopefunc: optional function which defines
the current scope. If not passed, the :class:`.scoped_session`
object assumes "thread-local" scope, and will use
a Python ``threading.local()`` in order to maintain the current
:class:`.Session`. If passed, the function should return
a hashable token; this token will be used as the key in a
dictionary in order to store and retrieve the current
:class:`.Session`.
"""
self.session_factory = session_factory
if scopefunc:
self.registry = ScopedRegistry(session_factory, scopefunc)
else:
self.registry = ThreadLocalRegistry(session_factory)
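# Editorial usage sketch (commented out; not part of this module), showing the
# two construction styles described above; the database URL and the
# ``get_current_request_id`` scope function are hypothetical.
#
#     from sqlalchemy import create_engine
#     from sqlalchemy.orm import scoped_session, sessionmaker
#
#     factory = sessionmaker(bind=create_engine("sqlite://"))
#
#     # default thread-local scope
#     Session = scoped_session(factory)
#
#     # custom scope, e.g. one Session per web request
#     Session = scoped_session(factory, scopefunc=get_current_request_id)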
def __call__(self, **kw):
r"""Return the current :class:`.Session`, creating it
using the :attr:`.scoped_session.session_factory` if not present.
:param \**kw: Keyword arguments will be passed to the
:attr:`.scoped_session.session_factory` callable, if an existing
:class:`.Session` is not present. If the :class:`.Session` is present
and keyword arguments have been passed,
:exc:`~sqlalchemy.exc.InvalidRequestError` is raised.
"""
if kw:
if self.registry.has():
raise sa_exc.InvalidRequestError(
"Scoped session is already present; "
"no new arguments may be specified."
)
else:
sess = self.session_factory(**kw)
self.registry.set(sess)
return sess
else:
return self.registry()
def remove(self):
"""Dispose of the current :class:`.Session`, if present.
This will first call :meth:`.Session.close` method
on the current :class:`.Session`, which releases any existing
transactional/connection resources still being held; transactions
specifically are rolled back. The :class:`.Session` is then
discarded. Upon next usage within the same scope,
the :class:`.scoped_session` will produce a new
:class:`.Session` object.
"""
if self.registry.has():
self.registry().close()
self.registry.clear()
def configure(self, **kwargs):
"""reconfigure the :class:`.sessionmaker` used by this
:class:`.scoped_session`.
See :meth:`.sessionmaker.configure`.
"""
if self.registry.has():
warn(
"At least one scoped session is already present. "
" configure() can not affect sessions that have "
"already been created."
)
self.session_factory.configure(**kwargs)
def query_property(self, query_cls=None):
"""return a class property which produces a :class:`_query.Query`
object
against the class and the current :class:`.Session` when called.
e.g.::
Session = scoped_session(sessionmaker())
class MyClass(object):
query = Session.query_property()
# after mappers are defined
result = MyClass.query.filter(MyClass.name=='foo').all()
Produces instances of the session's configured query class by
default. To override and use a custom implementation, provide
a ``query_cls`` callable. The callable will be invoked with
the class's mapper as a positional argument and a session
keyword argument.
There is no limit to the number of query properties placed on
a class.
"""
class query(object):
def __get__(s, instance, owner):
try:
mapper = class_mapper(owner)
if mapper:
if query_cls:
# custom query class
return query_cls(mapper, session=self.registry())
else:
# session's configured query class
return self.registry().query(mapper)
except orm_exc.UnmappedClassError:
return None
return query()
ScopedSession = scoped_session
"""Old name for backwards compatibility."""
def instrument(name):
def do(self, *args, **kwargs):
return getattr(self.registry(), name)(*args, **kwargs)
return do
for meth in Session.public_methods:
setattr(scoped_session, meth, instrument(meth))
def makeprop(name):
def set_(self, attr):
setattr(self.registry(), name, attr)
def get(self):
return getattr(self.registry(), name)
return property(get, set_)
for prop in (
"bind",
"dirty",
"deleted",
"new",
"identity_map",
"is_active",
"autoflush",
"no_autoflush",
"info",
"autocommit",
):
setattr(scoped_session, prop, makeprop(prop))
def clslevel(name):
def do(cls, *args, **kwargs):
return getattr(Session, name)(*args, **kwargs)
return classmethod(do)
for prop in ("close_all", "object_session", "identity_key"):
setattr(scoped_session, prop, clslevel(prop))
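# Editorial usage note (commented out; not part of this module): the loops
# above proxy Session methods, properties and class-level methods onto
# scoped_session, so the registry can be used as if it were a Session;
# ``Session`` and ``obj`` are hypothetical.
#
#     Session.add(obj)      # forwarded to the current Session's add()
#     Session.commit()      # forwarded to the current Session's commit()
#     print(Session.dirty)  # proxied property, evaluated per scope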
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/session.py
|
# orm/session.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides the Session class and related utilities."""
import itertools
import sys
import weakref
from . import attributes
from . import exc
from . import identity
from . import loading
from . import persistence
from . import query
from . import state as statelib
from .base import _class_to_mapper
from .base import _none_set
from .base import _state_mapper
from .base import instance_str
from .base import object_mapper
from .base import object_state
from .base import state_str
from .deprecated_interfaces import SessionExtension
from .unitofwork import UOWTransaction
from .. import engine
from .. import exc as sa_exc
from .. import sql
from .. import util
from ..inspection import inspect
from ..sql import expression
from ..sql import util as sql_util
__all__ = ["Session", "SessionTransaction", "SessionExtension", "sessionmaker"]
_sessions = weakref.WeakValueDictionary()
"""Weak-referencing dictionary of :class:`.Session` objects.
"""
def _state_session(state):
"""Given an :class:`.InstanceState`, return the :class:`.Session`
associated, if any.
"""
if state.session_id:
try:
return _sessions[state.session_id]
except KeyError:
pass
return None
class _SessionClassMethods(object):
"""Class-level methods for :class:`.Session`, :class:`.sessionmaker`."""
@classmethod
@util.deprecated(
"1.3",
"The :meth:`.Session.close_all` method is deprecated and will be "
"removed in a future release. Please refer to "
":func:`.session.close_all_sessions`.",
)
def close_all(cls):
"""Close *all* sessions in memory."""
close_all_sessions()
@classmethod
@util.dependencies("sqlalchemy.orm.util")
def identity_key(cls, orm_util, *args, **kwargs):
"""Return an identity key.
This is an alias of :func:`.util.identity_key`.
"""
return orm_util.identity_key(*args, **kwargs)
@classmethod
def object_session(cls, instance):
"""Return the :class:`.Session` to which an object belongs.
This is an alias of :func:`.object_session`.
"""
return object_session(instance)
ACTIVE = util.symbol("ACTIVE")
PREPARED = util.symbol("PREPARED")
COMMITTED = util.symbol("COMMITTED")
DEACTIVE = util.symbol("DEACTIVE")
CLOSED = util.symbol("CLOSED")
class SessionTransaction(object):
"""A :class:`.Session`-level transaction.
:class:`.SessionTransaction` is a mostly behind-the-scenes object
not normally referenced directly by application code. It coordinates
among multiple :class:`_engine.Connection` objects, maintaining a database
transaction for each one individually, committing or rolling them
back all at once. It also provides optional two-phase commit behavior
which can augment this coordination operation.
The :attr:`.Session.transaction` attribute of :class:`.Session`
refers to the current :class:`.SessionTransaction` object in use, if any.
The :attr:`.SessionTransaction.parent` attribute refers to the parent
:class:`.SessionTransaction` in the stack of :class:`.SessionTransaction`
objects. If this attribute is ``None``, then this is the top of the stack.
If non-``None``, then this :class:`.SessionTransaction` refers either
to a so-called "subtransaction" or a "nested" transaction. A
"subtransaction" is a scoping concept that demarcates an inner portion
of the outermost "real" transaction. A nested transaction, which
is indicated when the :attr:`.SessionTransaction.nested`
attribute is also True, indicates that this :class:`.SessionTransaction`
corresponds to a SAVEPOINT.
**Life Cycle**
A :class:`.SessionTransaction` is associated with a :class:`.Session`
in its default mode of ``autocommit=False`` immediately, associated
with no database connections. As the :class:`.Session` is called upon
to emit SQL on behalf of various :class:`_engine.Engine` or
:class:`_engine.Connection`
objects, a corresponding :class:`_engine.Connection` and associated
:class:`.Transaction` is added to a collection within the
:class:`.SessionTransaction` object, becoming one of the
connection/transaction pairs maintained by the
:class:`.SessionTransaction`. The start of a :class:`.SessionTransaction`
can be tracked using the :meth:`.SessionEvents.after_transaction_create`
event.
The lifespan of the :class:`.SessionTransaction` ends when the
:meth:`.Session.commit`, :meth:`.Session.rollback` or
:meth:`.Session.close` methods are called. At this point, the
:class:`.SessionTransaction` removes its association with its parent
:class:`.Session`. A :class:`.Session` that is in ``autocommit=False``
mode will create a new :class:`.SessionTransaction` to replace it
immediately, whereas a :class:`.Session` that's in ``autocommit=True``
mode will remain without a :class:`.SessionTransaction` until the
:meth:`.Session.begin` method is called. The end of a
:class:`.SessionTransaction` can be tracked using the
:meth:`.SessionEvents.after_transaction_end` event.
**Nesting and Subtransactions**
Another detail of :class:`.SessionTransaction` behavior is that it is
capable of "nesting". This means that the :meth:`.Session.begin` method
can be called while an existing :class:`.SessionTransaction` is already
present, producing a new :class:`.SessionTransaction` that temporarily
replaces the parent :class:`.SessionTransaction`. When a
:class:`.SessionTransaction` is produced as nested, it assigns itself to
the :attr:`.Session.transaction` attribute, and it additionally will assign
the previous :class:`.SessionTransaction` to its :attr:`.SessionTransaction.parent`
attribute. The behavior is effectively a
stack, where :attr:`.Session.transaction` refers to the current head of
the stack, and the :attr:`.SessionTransaction.parent` attribute allows
traversal up the stack until :attr:`.SessionTransaction.parent` is
``None``, indicating the top of the stack.
When the scope of :class:`.SessionTransaction` is ended via
:meth:`.Session.commit` or :meth:`.Session.rollback`, it restores its
parent :class:`.SessionTransaction` back onto the
:attr:`.Session.transaction` attribute.
The purpose of this stack is to allow nesting of
:meth:`.Session.rollback` or :meth:`.Session.commit` calls in context
with various flavors of :meth:`.Session.begin`. This nesting behavior
applies to when :meth:`.Session.begin_nested` is used to emit a
SAVEPOINT transaction, and is also used to produce a so-called
"subtransaction" which allows a block of code to use a
begin/rollback/commit sequence regardless of whether or not its enclosing
code block has begun a transaction. The :meth:`.flush` method, whether
called explicitly or via autoflush, is the primary consumer of the
"subtransaction" feature, in that it wishes to guarantee that it works
within a transaction block regardless of whether or not the
:class:`.Session` is in transactional mode when the method is called.
Note that the flush process that occurs within the "autoflush" feature
as well as when the :meth:`.Session.flush` method is used **always**
creates a :class:`.SessionTransaction` object. This object is normally
a subtransaction, unless the :class:`.Session` is in autocommit mode
and no transaction exists at all, in which case it's the outermost
transaction. Any event-handling logic or other inspection logic
needs to take into account whether a :class:`.SessionTransaction`
is the outermost transaction, a subtransaction, or a "nested" / SAVEPOINT
transaction.
.. seealso::
:meth:`.Session.rollback`
:meth:`.Session.commit`
:meth:`.Session.begin`
:meth:`.Session.begin_nested`
:attr:`.Session.is_active`
:meth:`.SessionEvents.after_transaction_create`
:meth:`.SessionEvents.after_transaction_end`
:meth:`.SessionEvents.after_commit`
:meth:`.SessionEvents.after_rollback`
:meth:`.SessionEvents.after_soft_rollback`
"""
_rollback_exception = None
def __init__(self, session, parent=None, nested=False):
self.session = session
self._connections = {}
self._parent = parent
self.nested = nested
self._state = ACTIVE
if not parent and nested:
raise sa_exc.InvalidRequestError(
"Can't start a SAVEPOINT transaction when no existing "
"transaction is in progress"
)
if self.session._enable_transaction_accounting:
self._take_snapshot()
self.session.dispatch.after_transaction_create(self.session, self)
@property
def parent(self):
"""The parent :class:`.SessionTransaction` of this
:class:`.SessionTransaction`.
If this attribute is ``None``, indicates this
:class:`.SessionTransaction` is at the top of the stack, and
corresponds to a real "COMMIT"/"ROLLBACK"
block. If non-``None``, then this is either a "subtransaction"
or a "nested" / SAVEPOINT transaction. If the
:attr:`.SessionTransaction.nested` attribute is ``True``, then
this is a SAVEPOINT, and if ``False``, indicates this is a subtransaction.
.. versionadded:: 1.0.16 - use ._parent for previous versions
"""
return self._parent
nested = False
"""Indicates if this is a nested, or SAVEPOINT, transaction.
When :attr:`.SessionTransaction.nested` is True, it is expected
that :attr:`.SessionTransaction.parent` will be non-``None`` as well.
"""
@property
def is_active(self):
return self.session is not None and self._state is ACTIVE
def _assert_active(
self,
prepared_ok=False,
rollback_ok=False,
deactive_ok=False,
closed_msg="This transaction is closed",
):
if self._state is COMMITTED:
raise sa_exc.InvalidRequestError(
"This session is in 'committed' state; no further "
"SQL can be emitted within this transaction."
)
elif self._state is PREPARED:
if not prepared_ok:
raise sa_exc.InvalidRequestError(
"This session is in 'prepared' state; no further "
"SQL can be emitted within this transaction."
)
elif self._state is DEACTIVE:
if not deactive_ok and not rollback_ok:
if self._rollback_exception:
raise sa_exc.InvalidRequestError(
"This Session's transaction has been rolled back "
"due to a previous exception during flush."
" To begin a new transaction with this Session, "
"first issue Session.rollback()."
" Original exception was: %s"
% self._rollback_exception,
code="7s2a",
)
elif not deactive_ok:
raise sa_exc.InvalidRequestError(
"This session is in 'inactive' state, due to the "
"SQL transaction being rolled back; no further "
"SQL can be emitted within this transaction."
)
elif self._state is CLOSED:
raise sa_exc.ResourceClosedError(closed_msg)
@property
def _is_transaction_boundary(self):
return self.nested or not self._parent
def connection(self, bindkey, execution_options=None, **kwargs):
self._assert_active()
bind = self.session.get_bind(bindkey, **kwargs)
return self._connection_for_bind(bind, execution_options)
def _begin(self, nested=False):
self._assert_active()
return SessionTransaction(self.session, self, nested=nested)
def _iterate_self_and_parents(self, upto=None):
current = self
result = ()
while current:
result += (current,)
if current._parent is upto:
break
elif current._parent is None:
raise sa_exc.InvalidRequestError(
"Transaction %s is not on the active transaction list"
% (upto)
)
else:
current = current._parent
return result
def _take_snapshot(self):
if not self._is_transaction_boundary:
self._new = self._parent._new
self._deleted = self._parent._deleted
self._dirty = self._parent._dirty
self._key_switches = self._parent._key_switches
return
if not self.session._flushing:
self.session.flush()
self._new = weakref.WeakKeyDictionary()
self._deleted = weakref.WeakKeyDictionary()
self._dirty = weakref.WeakKeyDictionary()
self._key_switches = weakref.WeakKeyDictionary()
def _restore_snapshot(self, dirty_only=False):
"""Restore the restoration state taken before a transaction began.
Corresponds to a rollback.
"""
assert self._is_transaction_boundary
to_expunge = set(self._new).union(self.session._new)
self.session._expunge_states(to_expunge, to_transient=True)
for s, (oldkey, newkey) in self._key_switches.items():
# we probably can do this conditionally based on
# if we expunged or not, but safe_discard does that anyway
self.session.identity_map.safe_discard(s)
# restore the old key
s.key = oldkey
# now restore the object, but only if we didn't expunge
if s not in to_expunge:
self.session.identity_map.replace(s)
for s in set(self._deleted).union(self.session._deleted):
self.session._update_impl(s, revert_deletion=True)
assert not self.session._deleted
for s in self.session.identity_map.all_states():
if not dirty_only or s.modified or s in self._dirty:
s._expire(s.dict, self.session.identity_map._modified)
def _remove_snapshot(self):
"""Remove the restoration state taken before a transaction began.
Corresponds to a commit.
"""
assert self._is_transaction_boundary
if not self.nested and self.session.expire_on_commit:
for s in self.session.identity_map.all_states():
s._expire(s.dict, self.session.identity_map._modified)
statelib.InstanceState._detach_states(
list(self._deleted), self.session
)
self._deleted.clear()
elif self.nested:
self._parent._new.update(self._new)
self._parent._dirty.update(self._dirty)
self._parent._deleted.update(self._deleted)
self._parent._key_switches.update(self._key_switches)
def _connection_for_bind(self, bind, execution_options):
self._assert_active()
if bind in self._connections:
if execution_options:
util.warn(
"Connection is already established for the "
"given bind; execution_options ignored"
)
return self._connections[bind][0]
local_connect = False
if self._parent:
conn = self._parent._connection_for_bind(bind, execution_options)
if not self.nested:
return conn
else:
if isinstance(bind, engine.Connection):
conn = bind
if conn.engine in self._connections:
raise sa_exc.InvalidRequestError(
"Session already has a Connection associated for the "
"given Connection's Engine"
)
else:
conn = bind._contextual_connect()
local_connect = True
try:
if execution_options:
conn = conn.execution_options(**execution_options)
if self.session.twophase and self._parent is None:
transaction = conn.begin_twophase()
elif self.nested:
transaction = conn.begin_nested()
else:
transaction = conn.begin()
except:
# connection will not be associated with this Session;
# close it immediately so that it isn't closed under GC
if local_connect:
conn.close()
raise
else:
self._connections[conn] = self._connections[conn.engine] = (
conn,
transaction,
conn is not bind,
)
self.session.dispatch.after_begin(self.session, self, conn)
return conn
def prepare(self):
if self._parent is not None or not self.session.twophase:
raise sa_exc.InvalidRequestError(
"'twophase' mode not enabled, or not root transaction; "
"can't prepare."
)
self._prepare_impl()
def _prepare_impl(self):
self._assert_active()
if self._parent is None or self.nested:
self.session.dispatch.before_commit(self.session)
stx = self.session.transaction
if stx is not self:
for subtransaction in stx._iterate_self_and_parents(upto=self):
subtransaction.commit()
if not self.session._flushing:
for _flush_guard in range(100):
if self.session._is_clean():
break
self.session.flush()
else:
raise exc.FlushError(
"Over 100 subsequent flushes have occurred within "
"session.commit() - is an after_flush() hook "
"creating new objects?"
)
if self._parent is None and self.session.twophase:
try:
for t in set(self._connections.values()):
t[1].prepare()
except:
with util.safe_reraise():
self.rollback()
self._state = PREPARED
def commit(self):
self._assert_active(prepared_ok=True)
if self._state is not PREPARED:
self._prepare_impl()
if self._parent is None or self.nested:
for t in set(self._connections.values()):
t[1].commit()
self._state = COMMITTED
self.session.dispatch.after_commit(self.session)
if self.session._enable_transaction_accounting:
self._remove_snapshot()
self.close()
return self._parent
def rollback(self, _capture_exception=False):
self._assert_active(prepared_ok=True, rollback_ok=True)
stx = self.session.transaction
if stx is not self:
for subtransaction in stx._iterate_self_and_parents(upto=self):
subtransaction.close()
boundary = self
rollback_err = None
if self._state in (ACTIVE, PREPARED):
for transaction in self._iterate_self_and_parents():
if transaction._parent is None or transaction.nested:
try:
for t in set(transaction._connections.values()):
t[1].rollback()
transaction._state = DEACTIVE
self.session.dispatch.after_rollback(self.session)
except:
rollback_err = sys.exc_info()
finally:
transaction._state = DEACTIVE
if self.session._enable_transaction_accounting:
transaction._restore_snapshot(
dirty_only=transaction.nested
)
boundary = transaction
break
else:
transaction._state = DEACTIVE
sess = self.session
if (
not rollback_err
and sess._enable_transaction_accounting
and not sess._is_clean()
):
# if items were added, deleted, or mutated
# here, we need to re-restore the snapshot
util.warn(
"Session's state has been changed on "
"a non-active transaction - this state "
"will be discarded."
)
boundary._restore_snapshot(dirty_only=boundary.nested)
self.close()
if self._parent and _capture_exception:
self._parent._rollback_exception = sys.exc_info()[1]
if rollback_err:
util.raise_(rollback_err[1], with_traceback=rollback_err[2])
sess.dispatch.after_soft_rollback(sess, self)
return self._parent
def close(self, invalidate=False):
self.session.transaction = self._parent
if self._parent is None:
for connection, transaction, autoclose in set(
self._connections.values()
):
if invalidate:
connection.invalidate()
if autoclose:
connection.close()
else:
transaction.close()
self._state = CLOSED
self.session.dispatch.after_transaction_end(self.session, self)
if self._parent is None:
if not self.session.autocommit:
self.session.begin()
self.session = None
self._connections = None
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self._assert_active(deactive_ok=True, prepared_ok=True)
if self.session.transaction is None:
return
if type_ is None:
try:
self.commit()
except:
with util.safe_reraise():
self.rollback()
else:
self.rollback()
class Session(_SessionClassMethods):
"""Manages persistence operations for ORM-mapped objects.
The Session's usage paradigm is described at :doc:`/orm/session`.
"""
public_methods = (
"__contains__",
"__iter__",
"add",
"add_all",
"begin",
"begin_nested",
"close",
"commit",
"connection",
"delete",
"execute",
"expire",
"expire_all",
"expunge",
"expunge_all",
"flush",
"get_bind",
"is_modified",
"bulk_save_objects",
"bulk_insert_mappings",
"bulk_update_mappings",
"merge",
"query",
"refresh",
"rollback",
"scalar",
)
@util.deprecated_params(
weak_identity_map=(
"1.0",
"The :paramref:`.Session.weak_identity_map` parameter as well as "
"the strong-referencing identity map are deprecated, and will be "
"removed in a future release. For the use case where objects "
"present in a :class:`.Session` need to be automatically strong "
"referenced, see the recipe at "
":ref:`session_referencing_behavior` for an event-based approach "
"to maintaining strong identity references. ",
),
_enable_transaction_accounting=(
"0.7",
"The :paramref:`.Session._enable_transaction_accounting` "
"parameter is deprecated and will be removed in a future release.",
),
extension=(
"0.7",
":class:`.SessionExtension` is deprecated in favor of the "
":class:`.SessionEvents` listener interface. The "
":paramref:`.Session.extension` parameter will be "
"removed in a future release.",
),
)
def __init__(
self,
bind=None,
autoflush=True,
expire_on_commit=True,
_enable_transaction_accounting=True,
autocommit=False,
twophase=False,
weak_identity_map=None,
binds=None,
extension=None,
enable_baked_queries=True,
info=None,
query_cls=None,
):
r"""Construct a new Session.
See also the :class:`.sessionmaker` function which is used to
generate a :class:`.Session`-producing callable with a given
set of arguments.
:param autocommit:
.. warning::
The autocommit flag is **not for general use**, and if it is
used, queries should only be invoked within the span of a
:meth:`.Session.begin` / :meth:`.Session.commit` pair. Executing
queries outside of a demarcated transaction is a legacy mode
of usage, and can in some cases lead to concurrent connection
checkouts.
Defaults to ``False``. When ``True``, the
:class:`.Session` does not keep a persistent transaction running,
and will acquire connections from the engine on an as-needed basis,
returning them immediately after their use. Flushes will begin and
commit (or possibly rollback) their own transaction if no
transaction is present. When using this mode, the
:meth:`.Session.begin` method is used to explicitly start
transactions.
.. seealso::
:ref:`session_autocommit`
:param autoflush: When ``True``, all query operations will issue a
:meth:`~.Session.flush` call to this ``Session`` before proceeding.
This is a convenience feature so that :meth:`~.Session.flush` need
not be called repeatedly in order for database queries to retrieve
results. It's typical that ``autoflush`` is used in conjunction
with ``autocommit=False``. In this scenario, explicit calls to
:meth:`~.Session.flush` are rarely needed; you usually only need to
call :meth:`~.Session.commit` (which flushes) to finalize changes.
:param bind: An optional :class:`_engine.Engine` or
:class:`_engine.Connection` to
which this ``Session`` should be bound. When specified, all SQL
operations performed by this session will execute via this
connectable.
:param binds: A dictionary which may specify any number of
:class:`_engine.Engine` or :class:`_engine.Connection`
objects as the source of
connectivity for SQL operations on a per-entity basis. The keys
of the dictionary consist of any series of mapped classes,
arbitrary Python classes that are bases for mapped classes,
:class:`_schema.Table` objects and :class:`_orm.Mapper` objects.
The
values of the dictionary are then instances of
:class:`_engine.Engine`
or less commonly :class:`_engine.Connection` objects.
Operations which
proceed relative to a particular mapped class will consult this
dictionary for the closest matching entity in order to determine
which :class:`_engine.Engine` should be used for a particular SQL
operation. The complete heuristics for resolution are
described at :meth:`.Session.get_bind`. Usage looks like::
Session = sessionmaker(binds={
SomeMappedClass: create_engine('postgresql://engine1'),
SomeDeclarativeBase: create_engine('postgresql://engine2'),
some_mapper: create_engine('postgresql://engine3'),
some_table: create_engine('postgresql://engine4'),
})
.. seealso::
:ref:`session_partitioning`
:meth:`.Session.bind_mapper`
:meth:`.Session.bind_table`
:meth:`.Session.get_bind`
:param \class_: Specify an alternate class other than
``sqlalchemy.orm.session.Session`` which should be used by the
returned class. This is the only argument that is local to the
:class:`.sessionmaker` function, and is not sent directly to the
constructor for ``Session``.
:param enable_baked_queries: defaults to ``True``. A flag consumed
by the :mod:`sqlalchemy.ext.baked` extension to determine if
"baked queries" should be cached, as is the normal operation
of this extension. When set to ``False``, all caching is disabled,
including baked queries defined by the calling application as
well as those used internally. Setting this flag to ``False``
can significantly reduce memory use, however will also degrade
performance for those areas that make use of baked queries
(such as relationship loaders). Additionally, baked query
logic in the calling application or potentially within the ORM
that may be malfunctioning due to cache key collisions or similar
can be flagged by observing if this flag resolves the issue.
.. versionadded:: 1.2
:param _enable_transaction_accounting: A
legacy-only flag which when ``False`` disables *all* 0.5-style
object accounting on transaction boundaries.
:param expire_on_commit: Defaults to ``True``. When ``True``, all
instances will be fully expired after each :meth:`~.commit`,
so that all attribute/object access subsequent to a completed
transaction will load from the most recent database state.
:param extension: An optional
:class:`~.SessionExtension` instance, or a list
of such instances, which will receive pre- and post- commit and
flush events, as well as a post-rollback event.
:param info: optional dictionary of arbitrary data to be associated
with this :class:`.Session`. Is available via the
:attr:`.Session.info` attribute. Note the dictionary is copied at
construction time so that modifications to the per-
:class:`.Session` dictionary will be local to that
:class:`.Session`.
.. versionadded:: 0.9.0
:param query_cls: Class which should be used to create new Query
objects, as returned by the :meth:`~.Session.query` method.
Defaults to :class:`_query.Query`.
:param twophase: When ``True``, all transactions will be started as
a "two phase" transaction, i.e. using the "two phase" semantics
of the database in use along with an XID. During a
:meth:`~.commit`, after :meth:`~.flush` has been issued for all
attached databases, the :meth:`~.TwoPhaseTransaction.prepare`
method on each database's :class:`.TwoPhaseTransaction` will be
called. This allows each database to roll back the entire
transaction, before each transaction is committed.
:param weak_identity_map: Defaults to ``True`` - when set to
``False``, objects placed in the :class:`.Session` will be
strongly referenced until explicitly removed or the
:class:`.Session` is closed.
"""
if weak_identity_map in (True, None):
self._identity_cls = identity.WeakInstanceDict
else:
self._identity_cls = identity.StrongInstanceDict
self.identity_map = self._identity_cls()
self._new = {} # InstanceState->object, strong refs object
self._deleted = {} # same
self.bind = bind
self.__binds = {}
self._flushing = False
self._warn_on_events = False
self.transaction = None
self.hash_key = _new_sessionid()
self.autoflush = autoflush
self.autocommit = autocommit
self.expire_on_commit = expire_on_commit
self.enable_baked_queries = enable_baked_queries
self._enable_transaction_accounting = _enable_transaction_accounting
self.twophase = twophase
self._query_cls = query_cls if query_cls else query.Query
if info:
self.info.update(info)
if extension:
for ext in util.to_list(extension):
SessionExtension._adapt_listener(self, ext)
if binds is not None:
for key, bind in binds.items():
self._add_bind(key, bind)
if not self.autocommit:
self.begin()
_sessions[self.hash_key] = self
connection_callable = None
transaction = None
"""The current active or inactive :class:`.SessionTransaction`."""
@util.memoized_property
def info(self):
"""A user-modifiable dictionary.
The initial value of this dictionary can be populated using the
``info`` argument to the :class:`.Session` constructor or
:class:`.sessionmaker` constructor or factory methods. The dictionary
here is always local to this :class:`.Session` and can be modified
independently of all other :class:`.Session` objects.
.. versionadded:: 0.9.0
"""
return {}
def begin(self, subtransactions=False, nested=False):
"""Begin a transaction on this :class:`.Session`.
.. warning::
The :meth:`.Session.begin` method is part of a larger pattern
of use with the :class:`.Session` known as **autocommit mode**.
This is essentially a **legacy mode of use** and is
not necessary for new applications. The :class:`.Session`
normally handles the work of "begin" transparently, which in
turn relies upon the Python DBAPI to transparently "begin"
transactions; there is **no need to explicitly begin transactions**
when using modern :class:`.Session` programming patterns.
In its default mode of ``autocommit=False``, the
:class:`.Session` does all of its work within
the context of a transaction, so as soon as you call
:meth:`.Session.commit`, the next transaction is implicitly
started when the next database operation is invoked. See
:ref:`session_autocommit` for further background.
The method will raise an error if this :class:`.Session` is already
inside of a transaction, unless
:paramref:`~.Session.begin.subtransactions` or
:paramref:`~.Session.begin.nested` are specified. A "subtransaction"
is essentially a code embedding pattern that does not affect the
transactional state of the database connection unless a rollback is
emitted, in which case the whole transaction is rolled back. For
documentation on subtransactions, please see
:ref:`session_subtransactions`.
:param subtransactions: if True, indicates that this
:meth:`~.Session.begin` can create a "subtransaction".
:param nested: if True, begins a SAVEPOINT transaction and is
equivalent to calling :meth:`~.Session.begin_nested`. For
documentation on SAVEPOINT transactions, please see
:ref:`session_begin_nested`.
:return: the :class:`.SessionTransaction` object. Note that
:class:`.SessionTransaction`
acts as a Python context manager, allowing :meth:`.Session.begin`
to be used in a "with" block. See :ref:`session_autocommit` for
an example.
.. seealso::
:ref:`session_autocommit`
:meth:`.Session.begin_nested`
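For illustration only, a minimal sketch of the legacy autocommit
pattern that this method supports; the ``engine`` and mapped ``User``
class are assumptions for the example::

    session = Session(bind=engine, autocommit=True)

    with session.begin():
        session.add(User(name="u1"))
    # committed when the block exits normally,
    # rolled back if an exception propagates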
"""
if self.transaction is not None:
if subtransactions or nested:
self.transaction = self.transaction._begin(nested=nested)
else:
raise sa_exc.InvalidRequestError(
"A transaction is already begun. Use "
"subtransactions=True to allow subtransactions."
)
else:
self.transaction = SessionTransaction(self, nested=nested)
return self.transaction # needed for __enter__/__exit__ hook
def begin_nested(self):
"""Begin a "nested" transaction on this Session, e.g. SAVEPOINT.
The target database(s) and associated drivers must support SQL
SAVEPOINT for this method to function correctly.
For documentation on SAVEPOINT
transactions, please see :ref:`session_begin_nested`.
:return: the :class:`.SessionTransaction` object. Note that
:class:`.SessionTransaction` acts as a context manager, allowing
:meth:`.Session.begin_nested` to be used in a "with" block.
See :ref:`session_begin_nested` for a usage example.
.. seealso::
:ref:`session_begin_nested`
:ref:`pysqlite_serializable` - special workarounds required
with the SQLite driver in order for SAVEPOINT to work
correctly.
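For illustration, a minimal sketch of SAVEPOINT usage, assuming a
``session`` in the default ``autocommit=False`` mode and three pending
mapped instances ``u1``, ``u2`` and ``u3``::

    session.add(u1)
    session.add(u2)

    session.begin_nested()   # establish a SAVEPOINT
    session.add(u3)
    session.rollback()       # rolls back u3 only; u1 and u2 remain pending

    session.commit()         # commits u1 and u2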
"""
return self.begin(nested=True)
def rollback(self):
"""Rollback the current transaction in progress.
If no transaction is in progress, this method is a pass-through.
This method rolls back the current transaction or nested transaction
regardless of subtransactions being in effect. All subtransactions up
to the first real transaction are closed. Subtransactions occur when
:meth:`.begin` is called multiple times.
.. seealso::
:ref:`session_rollback`
"""
if self.transaction is None:
pass
else:
self.transaction.rollback()
def commit(self):
"""Flush pending changes and commit the current transaction.
If no transaction is in progress, this method raises an
:exc:`~sqlalchemy.exc.InvalidRequestError`.
By default, the :class:`.Session` also expires all database
loaded state on all ORM-managed attributes after transaction commit.
This is so that subsequent operations load the most recent
data from the database. This behavior can be disabled using
the ``expire_on_commit=False`` option to :class:`.sessionmaker` or
the :class:`.Session` constructor.
If a subtransaction is in effect (which occurs when begin() is called
multiple times), the subtransaction will be closed, and the next call
to ``commit()`` will operate on the enclosing transaction.
When using the :class:`.Session` in its default mode of
``autocommit=False``, a new transaction will
be begun immediately after the commit, but note that the newly begun
transaction does *not* use any connection resources until the first
SQL is actually emitted.
.. seealso::
:ref:`session_committing`
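For illustration, a minimal sketch of the typical pattern, assuming a
``session`` in the default ``autocommit=False`` mode and a mapped
instance ``some_object``::

    session.add(some_object)
    session.commit()      # flushes the pending INSERT, then commits

    # with the default expire_on_commit=True, attributes are refreshed
    # from the database on next access
    print(some_object.name)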
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.commit()
def prepare(self):
"""Prepare the current transaction in progress for two phase commit.
If no transaction is in progress, this method raises an
:exc:`~sqlalchemy.exc.InvalidRequestError`.
Only root transactions of two phase sessions can be prepared. If the
current transaction is not such, an
:exc:`~sqlalchemy.exc.InvalidRequestError` is raised.
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.prepare()
def connection(
self,
mapper=None,
clause=None,
bind=None,
close_with_result=False,
execution_options=None,
**kw
):
r"""Return a :class:`_engine.Connection` object corresponding to this
:class:`.Session` object's transactional state.
If this :class:`.Session` is configured with ``autocommit=False``,
either the :class:`_engine.Connection` corresponding to the current
transaction is returned, or if no transaction is in progress, a new
one is begun and the :class:`_engine.Connection`
returned (note that no
transactional state is established with the DBAPI until the first
SQL statement is emitted).
Alternatively, if this :class:`.Session` is configured with
``autocommit=True``, an ad-hoc :class:`_engine.Connection` is returned
using :meth:`_engine.Engine.connect` on the underlying
:class:`_engine.Engine`.
Ambiguity in multi-bind or unbound :class:`.Session` objects can be
resolved through any of the optional keyword arguments. This
ultimately makes use of the :meth:`.get_bind` method for resolution.
:param bind:
Optional :class:`_engine.Engine` to be used as the bind. If
this engine is already involved in an ongoing transaction,
that connection will be used. This argument takes precedence
over ``mapper``, ``clause``.
:param mapper:
Optional :func:`.mapper` mapped class, used to identify
the appropriate bind. This argument takes precedence over
``clause``.
:param clause:
A :class:`_expression.ClauseElement` (i.e.
:func:`_expression.select`,
:func:`_expression.text`,
etc.) which will be used to locate a bind, if a bind
cannot otherwise be identified.
:param close_with_result: Passed to :meth:`_engine.Engine.connect`,
indicating the :class:`_engine.Connection` should be considered
"single use", automatically closing when the first result set is
closed. This flag only has an effect if this :class:`.Session` is
configured with ``autocommit=True`` and does not already have a
transaction in progress.
:param execution_options: a dictionary of execution options that will
be passed to :meth:`_engine.Connection.execution_options`, **when the
connection is first procured only**. If the connection is already
present within the :class:`.Session`, a warning is emitted and
the arguments are ignored.
.. versionadded:: 0.9.9
.. seealso::
:ref:`session_transaction_isolation`
:param \**kw:
Additional keyword arguments are sent to :meth:`get_bind()`,
allowing additional arguments to be passed to custom
implementations of :meth:`get_bind`.
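For illustration, a minimal sketch of procuring the connection up
front in order to set a transaction isolation level, per
:ref:`session_transaction_isolation`; the engine and the option value
are examples only::

    session = Session(bind=engine)

    conn = session.connection(
        execution_options={"isolation_level": "SERIALIZABLE"}
    )
    # "conn" participates in the same transaction that subsequent
    # ORM operations on this Session will use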
"""
if bind is None:
bind = self.get_bind(mapper, clause=clause, **kw)
return self._connection_for_bind(
bind,
close_with_result=close_with_result,
execution_options=execution_options,
)
def _connection_for_bind(self, engine, execution_options=None, **kw):
if self.transaction is not None:
return self.transaction._connection_for_bind(
engine, execution_options
)
else:
conn = engine._contextual_connect(**kw)
if execution_options:
conn = conn.execution_options(**execution_options)
return conn
def execute(self, clause, params=None, mapper=None, bind=None, **kw):
r"""Execute a SQL expression construct or string statement within
the current transaction.
Returns a :class:`_engine.ResultProxy` representing
results of the statement execution, in the same manner as that of an
:class:`_engine.Engine` or
:class:`_engine.Connection`.
E.g.::
result = session.execute(
user_table.select().where(user_table.c.id == 5)
)
:meth:`~.Session.execute` accepts any executable clause construct,
such as :func:`_expression.select`,
:func:`_expression.insert`,
:func:`_expression.update`,
:func:`_expression.delete`, and
:func:`_expression.text`. Plain SQL strings can be passed
as well, which in the case of :meth:`.Session.execute` only
will be interpreted the same as if it were passed via a
:func:`_expression.text` construct. That is, the following usage::
result = session.execute(
"SELECT * FROM user WHERE id=:param",
{"param":5}
)
is equivalent to::
from sqlalchemy import text
result = session.execute(
text("SELECT * FROM user WHERE id=:param"),
{"param":5}
)
The second positional argument to :meth:`.Session.execute` is an
optional parameter set. Similar to that of
:meth:`_engine.Connection.execute`, whether this is passed as a single
dictionary, or a sequence of dictionaries, determines whether the DBAPI
cursor's ``execute()`` or ``executemany()`` is used to execute the
statement. An INSERT construct may be invoked for a single row::
result = session.execute(
users.insert(), {"id": 7, "name": "somename"})
or for multiple rows::
result = session.execute(users.insert(), [
{"id": 7, "name": "somename7"},
{"id": 8, "name": "somename8"},
{"id": 9, "name": "somename9"}
])
The statement is executed within the current transactional context of
this :class:`.Session`. The :class:`_engine.Connection`
which is used
to execute the statement can also be acquired directly by
calling the :meth:`.Session.connection` method. Both methods use
a rule-based resolution scheme in order to determine the
:class:`_engine.Connection`,
which in the average case is derived directly
from the "bind" of the :class:`.Session` itself, and in other cases
can be based on the :func:`.mapper`
and :class:`_schema.Table` objects passed to the method; see the
documentation for :meth:`.Session.get_bind` for a full description of
this scheme.
The :meth:`.Session.execute` method does *not* invoke autoflush.
The :class:`_engine.ResultProxy` returned by the
:meth:`.Session.execute`
method is returned with the "close_with_result" flag set to true;
the significance of this flag is that if this :class:`.Session` is
autocommitting and does not have a transaction-dedicated
:class:`_engine.Connection` available, a temporary
:class:`_engine.Connection` is
established for the statement execution, which is closed (meaning,
returned to the connection pool) when the :class:`_engine.ResultProxy`
has
consumed all available data. This applies *only* when the
:class:`.Session` is configured with autocommit=True and no
transaction has been started.
:param clause:
An executable statement (i.e. an :class:`.Executable` expression
such as :func:`_expression.select`) or string SQL statement
to be executed.
:param params:
Optional dictionary, or list of dictionaries, containing
bound parameter values. If a single dictionary, single-row
execution occurs; if a list of dictionaries, an
"executemany" will be invoked. The keys in each dictionary
must correspond to parameter names present in the statement.
:param mapper:
Optional :func:`.mapper` or mapped class, used to identify
the appropriate bind. This argument takes precedence over
``clause`` when locating a bind. See :meth:`.Session.get_bind`
for more details.
:param bind:
Optional :class:`_engine.Engine` to be used as the bind. If
this engine is already involved in an ongoing transaction,
that connection will be used. This argument takes
precedence over ``mapper`` and ``clause`` when locating
a bind.
:param \**kw:
Additional keyword arguments are sent to :meth:`.Session.get_bind()`
to allow extensibility of "bind" schemes.
.. seealso::
:ref:`sqlexpression_toplevel` - Tutorial on using Core SQL
constructs.
:ref:`connections_toplevel` - Further information on direct
statement execution.
:meth:`_engine.Connection.execute`
- core level statement execution
method, which is :meth:`.Session.execute` ultimately uses
in order to execute the statement.
"""
clause = expression._literal_as_text(
clause, allow_coercion_to_text=True
)
if bind is None:
bind = self.get_bind(mapper, clause=clause, **kw)
return self._connection_for_bind(bind, close_with_result=True).execute(
clause, params or {}
)
def scalar(self, clause, params=None, mapper=None, bind=None, **kw):
"""Like :meth:`~.Session.execute` but return a scalar result."""
return self.execute(
clause, params=params, mapper=mapper, bind=bind, **kw
).scalar()
def close(self):
"""Close this Session.
This clears all items and ends any transaction in progress.
If this session were created with ``autocommit=False``, a new
transaction is immediately begun. Note that this new transaction does
not use any connection resources until they are first needed.
"""
self._close_impl(invalidate=False)
def invalidate(self):
"""Close this Session, using connection invalidation.
This is a variant of :meth:`.Session.close` that will additionally
ensure that the :meth:`_engine.Connection.invalidate`
method will be called
on all :class:`_engine.Connection` objects. This can be called when
the database is known to be in a state where the connections are
no longer safe to be used.
E.g.::
try:
sess = Session()
sess.add(User())
sess.commit()
except gevent.Timeout:
sess.invalidate()
raise
except:
sess.rollback()
raise
This clears all items and ends any transaction in progress.
If this session were created with ``autocommit=False``, a new
transaction is immediately begun. Note that this new transaction does
not use any connection resources until they are first needed.
.. versionadded:: 0.9.9
"""
self._close_impl(invalidate=True)
def _close_impl(self, invalidate):
self.expunge_all()
if self.transaction is not None:
for transaction in self.transaction._iterate_self_and_parents():
transaction.close(invalidate)
def expunge_all(self):
"""Remove all object instances from this ``Session``.
This is equivalent to calling ``expunge(obj)`` on all objects in this
``Session``.
"""
all_states = self.identity_map.all_states() + list(self._new)
self.identity_map = self._identity_cls()
self._new = {}
self._deleted = {}
statelib.InstanceState._detach_states(all_states, self)
def _add_bind(self, key, bind):
try:
insp = inspect(key)
except sa_exc.NoInspectionAvailable as err:
if not isinstance(key, type):
util.raise_(
sa_exc.ArgumentError(
"Not an acceptable bind target: %s" % key
),
replace_context=err,
)
else:
self.__binds[key] = bind
else:
if insp.is_selectable:
self.__binds[insp] = bind
elif insp.is_mapper:
self.__binds[insp.class_] = bind
for selectable in insp._all_tables:
self.__binds[selectable] = bind
else:
raise sa_exc.ArgumentError(
"Not an acceptable bind target: %s" % key
)
def bind_mapper(self, mapper, bind):
"""Associate a :class:`_orm.Mapper` or arbitrary Python class with a
"bind", e.g. an :class:`_engine.Engine` or :class:`_engine.Connection`
.
The given entity is added to a lookup used by the
:meth:`.Session.get_bind` method.
:param mapper: a :class:`_orm.Mapper` object,
or an instance of a mapped
class, or any Python class that is the base of a set of mapped
classes.
:param bind: an :class:`_engine.Engine` or :class:`_engine.Connection`
object.
.. seealso::
:ref:`session_partitioning`
:paramref:`.Session.binds`
:meth:`.Session.bind_table`
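For illustration, a minimal sketch of vertical partitioning; the two
engines and the mapped classes ``User`` and ``Account`` are
assumptions for the example::

    session = Session()
    session.bind_mapper(User, users_engine)
    session.bind_mapper(Account, accounts_engine)

    # flushes and queries against User use users_engine;
    # those against Account use accounts_engine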
"""
self._add_bind(mapper, bind)
def bind_table(self, table, bind):
"""Associate a :class:`_schema.Table` with a "bind", e.g. an
:class:`_engine.Engine`
or :class:`_engine.Connection`.
The given :class:`_schema.Table` is added to a lookup used by the
:meth:`.Session.get_bind` method.
:param table: a :class:`_schema.Table` object,
which is typically the target
of an ORM mapping, or is present within a selectable that is
mapped.
:param bind: an :class:`_engine.Engine` or :class:`_engine.Connection`
object.
.. seealso::
:ref:`session_partitioning`
:paramref:`.Session.binds`
:meth:`.Session.bind_mapper`
"""
self._add_bind(table, bind)
def get_bind(self, mapper=None, clause=None):
"""Return a "bind" to which this :class:`.Session` is bound.
The "bind" is usually an instance of :class:`_engine.Engine`,
except in the case where the :class:`.Session` has been
explicitly bound directly to a :class:`_engine.Connection`.
For a multiply-bound or unbound :class:`.Session`, the
``mapper`` or ``clause`` arguments are used to determine the
appropriate bind to return.
Note that the "mapper" argument is usually present
when :meth:`.Session.get_bind` is called via an ORM
operation such as a :meth:`.Session.query`, each
individual INSERT/UPDATE/DELETE operation within a
:meth:`.Session.flush` call, etc.
The order of resolution is:
1. if mapper given and session.binds is present,
locate a bind based first on the mapper in use, then
on the mapped class in use, then on any base classes that are
present in the ``__mro__`` of the mapped class, from more specific
superclasses to more general.
2. if clause given and session.binds is present,
locate a bind based on :class:`_schema.Table` objects
found in the given clause present in session.binds.
3. if session.bind is present, return that.
4. if clause given, attempt to return a bind
linked to the :class:`_schema.MetaData` ultimately
associated with the clause.
5. if mapper given, attempt to return a bind
linked to the :class:`_schema.MetaData` ultimately
associated with the :class:`_schema.Table` or other
selectable to which the mapper is mapped.
6. No bind can be found, :exc:`~sqlalchemy.exc.UnboundExecutionError`
is raised.
Note that the :meth:`.Session.get_bind` method can be overridden on
a user-defined subclass of :class:`.Session` to provide any kind
of bind resolution scheme. See the example at
:ref:`session_custom_partitioning`.
:param mapper:
Optional :func:`.mapper` mapped class or instance of
:class:`_orm.Mapper`. The bind can be derived from a
:class:`_orm.Mapper`
first by consulting the "binds" map associated with this
:class:`.Session`, and secondly by consulting the
:class:`_schema.MetaData`
associated with the :class:`_schema.Table` to which the
:class:`_orm.Mapper`
is mapped for a bind.
:param clause:
A :class:`_expression.ClauseElement` (i.e.
:func:`_expression.select`,
:func:`_expression.text`,
etc.). If the ``mapper`` argument is not present or could not
produce a bind, the given expression construct will be searched
for a bound element, typically a :class:`_schema.Table`
associated with
bound :class:`_schema.MetaData`.
.. seealso::
:ref:`session_partitioning`
:paramref:`.Session.binds`
:meth:`.Session.bind_mapper`
:meth:`.Session.bind_table`
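For illustration, a minimal sketch of such an override that routes
flushes to a "leader" engine and other statements to a "replica"
engine; the engine names are assumptions for the example::

    class RoutingSession(Session):
        def get_bind(self, mapper=None, clause=None):
            if self._flushing:
                return leader_engine
            return replica_engine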
"""
if mapper is clause is None:
if self.bind:
return self.bind
else:
raise sa_exc.UnboundExecutionError(
"This session is not bound to a single Engine or "
"Connection, and no context was provided to locate "
"a binding."
)
if mapper is not None:
try:
mapper = inspect(mapper)
except sa_exc.NoInspectionAvailable as err:
if isinstance(mapper, type):
util.raise_(
exc.UnmappedClassError(mapper), replace_context=err,
)
else:
raise
if self.__binds:
if mapper:
for cls in mapper.class_.__mro__:
if cls in self.__binds:
return self.__binds[cls]
if clause is None:
clause = mapper.persist_selectable
if clause is not None:
for t in sql_util.find_tables(clause, include_crud=True):
if t in self.__binds:
return self.__binds[t]
if self.bind:
return self.bind
if isinstance(clause, sql.expression.ClauseElement) and clause.bind:
return clause.bind
if mapper and mapper.persist_selectable.bind:
return mapper.persist_selectable.bind
context = []
if mapper is not None:
context.append("mapper %s" % mapper)
if clause is not None:
context.append("SQL expression")
raise sa_exc.UnboundExecutionError(
"Could not locate a bind configured on %s or this Session"
% (", ".join(context))
)
def query(self, *entities, **kwargs):
"""Return a new :class:`_query.Query` object corresponding to this
:class:`.Session`."""
return self._query_cls(entities, self, **kwargs)
@property
@util.contextmanager
def no_autoflush(self):
"""Return a context manager that disables autoflush.
e.g.::
with session.no_autoflush:
some_object = SomeClass()
session.add(some_object)
# won't autoflush
some_object.related_thing = session.query(SomeRelated).first()
Operations that proceed within the ``with:`` block
will not be subject to flushes occurring upon query
access. This is useful when initializing a series
of objects which involve existing database queries,
where the uncompleted object should not yet be flushed.
"""
autoflush = self.autoflush
self.autoflush = False
try:
yield self
finally:
self.autoflush = autoflush
def _autoflush(self):
if self.autoflush and not self._flushing:
try:
self.flush()
except sa_exc.StatementError as e:
# note we are reraising StatementError as opposed to
# raising FlushError with "chaining" to remain compatible
# with code that catches StatementError, IntegrityError,
# etc.
e.add_detail(
"raised as a result of Query-invoked autoflush; "
"consider using a session.no_autoflush block if this "
"flush is occurring prematurely"
)
util.raise_(e, with_traceback=sys.exc_info()[2])
def refresh(
self,
instance,
attribute_names=None,
with_for_update=None,
lockmode=None,
):
"""Expire and refresh the attributes on the given instance.
A query will be issued to the database and all attributes will be
refreshed with their current database value.
Lazy-loaded relational attributes will remain lazily loaded, so that
the instance-wide refresh operation will be followed immediately by
the lazy load of that attribute.
Eagerly-loaded relational attributes will eagerly load within the
single refresh operation.
Note that a highly isolated transaction will return the same values as
were previously read in that same transaction, regardless of changes
in database state outside of that transaction - usage of
:meth:`~Session.refresh` usually only makes sense if non-ORM SQL
statements were emitted in the ongoing transaction, or if autocommit
mode is turned on.
:param attribute_names: optional. An iterable collection of
string attribute names indicating a subset of attributes to
be refreshed.
:param with_for_update: optional boolean ``True`` indicating FOR UPDATE
should be used, or may be a dictionary containing flags to
indicate a more specific set of FOR UPDATE flags for the SELECT;
flags should match the parameters of
:meth:`_query.Query.with_for_update`.
Supersedes the :paramref:`.Session.refresh.lockmode` parameter.
.. versionadded:: 1.2
:param lockmode: Passed to the :class:`~sqlalchemy.orm.query.Query`
as used by :meth:`~sqlalchemy.orm.query.Query.with_lockmode`.
Superseded by :paramref:`.Session.refresh.with_for_update`.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.expire_all`
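For illustration, a minimal sketch, assuming ``some_object`` is a
persistent instance with ``name`` and ``status`` attributes::

    # reload all attributes, emitting SELECT ... FOR UPDATE NOWAIT
    # on backends that support it
    session.refresh(some_object, with_for_update={"nowait": True})

    # reload only a subset of attributes
    session.refresh(some_object, attribute_names=["name", "status"])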
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
self._expire_state(state, attribute_names)
if with_for_update == {}:
raise sa_exc.ArgumentError(
"with_for_update should be the boolean value "
"True, or a dictionary with options. "
"A blank dictionary is ambiguous."
)
if lockmode:
with_for_update = query.LockmodeArg.parse_legacy_query(lockmode)
elif with_for_update is not None:
if with_for_update is True:
with_for_update = query.LockmodeArg()
elif with_for_update:
with_for_update = query.LockmodeArg(**with_for_update)
else:
with_for_update = None
if (
loading.load_on_ident(
self.query(object_mapper(instance)),
state.key,
refresh_state=state,
with_for_update=with_for_update,
only_load_props=attribute_names,
)
is None
):
raise sa_exc.InvalidRequestError(
"Could not refresh instance '%s'" % instance_str(instance)
)
def expire_all(self):
"""Expires all persistent instances within this Session.
When any attribute on a persistent instance is next accessed,
a query will be issued using the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire individual objects and individual attributes
on those objects, use :meth:`Session.expire`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire_all` should not be needed when
autocommit is ``False``, assuming the transaction is isolated.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.refresh`
"""
for state in self.identity_map.all_states():
state._expire(state.dict, self.identity_map._modified)
def expire(self, instance, attribute_names=None):
"""Expire the attributes on an instance.
Marks the attributes of an instance as out of date. When an expired
attribute is next accessed, a query will be issued to the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire all objects in the :class:`.Session` simultaneously,
use :meth:`Session.expire_all`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire` only makes sense for the specific
case that a non-ORM SQL statement was emitted in the current
transaction.
:param instance: The instance to be refreshed.
:param attribute_names: optional list of string attribute names
indicating a subset of attributes to be expired.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.refresh`
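For illustration, a minimal sketch, assuming ``obj`` is a persistent
instance with ``name`` and ``status`` attributes::

    session.expire(obj)                       # expire all loaded attributes
    session.expire(obj, ["name", "status"])   # or only a subset

    obj.name    # emits a SELECT to reload the expired attributes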
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
self._expire_state(state, attribute_names)
def _expire_state(self, state, attribute_names):
self._validate_persistent(state)
if attribute_names:
state._expire_attributes(state.dict, attribute_names)
else:
# pre-fetch the full cascade since the expire is going to
# remove associations
cascaded = list(
state.manager.mapper.cascade_iterator("refresh-expire", state)
)
self._conditional_expire(state)
for o, m, st_, dct_ in cascaded:
self._conditional_expire(st_)
def _conditional_expire(self, state):
"""Expire a state if persistent, else expunge if pending"""
if state.key:
state._expire(state.dict, self.identity_map._modified)
elif state in self._new:
self._new.pop(state)
state._detach(self)
@util.deprecated(
"0.7",
"The :meth:`.Session.prune` method is deprecated along with "
":paramref:`.Session.weak_identity_map`. This method will be "
"removed in a future release.",
)
def prune(self):
"""Remove unreferenced instances cached in the identity map.
Note that this method is only meaningful if "weak_identity_map" is set
to False. The default weak identity map is self-pruning.
Removes any object in this Session's identity map that is not
referenced in user code, modified, new or scheduled for deletion.
Returns the number of objects pruned.
"""
return self.identity_map.prune()
def expunge(self, instance):
"""Remove the `instance` from this ``Session``.
This will free all internal references to the instance. Cascading
will be applied according to the *expunge* cascade rule.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
if state.session_id is not self.hash_key:
raise sa_exc.InvalidRequestError(
"Instance %s is not present in this Session" % state_str(state)
)
cascaded = list(
state.manager.mapper.cascade_iterator("expunge", state)
)
self._expunge_states([state] + [st_ for o, m, st_, dct_ in cascaded])
def _expunge_states(self, states, to_transient=False):
for state in states:
if state in self._new:
self._new.pop(state)
elif self.identity_map.contains_state(state):
self.identity_map.safe_discard(state)
self._deleted.pop(state, None)
elif self.transaction:
# state is "detached" from being deleted, but still present
# in the transaction snapshot
self.transaction._deleted.pop(state, None)
statelib.InstanceState._detach_states(
states, self, to_transient=to_transient
)
def _register_persistent(self, states):
"""Register all persistent objects from a flush.
This is used both for pending objects moving to the persistent
state as well as already persistent objects.
"""
pending_to_persistent = self.dispatch.pending_to_persistent or None
for state in states:
mapper = _state_mapper(state)
# prevent against last minute dereferences of the object
obj = state.obj()
if obj is not None:
instance_key = mapper._identity_key_from_state(state)
if (
_none_set.intersection(instance_key[1])
and not mapper.allow_partial_pks
or _none_set.issuperset(instance_key[1])
):
raise exc.FlushError(
"Instance %s has a NULL identity key. If this is an "
"auto-generated value, check that the database table "
"allows generation of new primary key values, and "
"that the mapped Column object is configured to "
"expect these generated values. Ensure also that "
"this flush() is not occurring at an inappropriate "
"time, such as within a load() event."
% state_str(state)
)
if state.key is None:
state.key = instance_key
elif state.key != instance_key:
# primary key switch. use safe_discard() in case another
# state has already replaced this one in the identity
# map (see test/orm/test_naturalpks.py ReversePKsTest)
self.identity_map.safe_discard(state)
if state in self.transaction._key_switches:
orig_key = self.transaction._key_switches[state][0]
else:
orig_key = state.key
self.transaction._key_switches[state] = (
orig_key,
instance_key,
)
state.key = instance_key
# there can be an existing state in the identity map
# that is replaced when the primary keys of two instances
# are swapped; see test/orm/test_naturalpks.py -> test_reverse
old = self.identity_map.replace(state)
if (
old is not None
and mapper._identity_key_from_state(old) == instance_key
and old.obj() is not None
):
util.warn(
"Identity map already had an identity for %s, "
"replacing it with newly flushed object. Are there "
"load operations occurring inside of an event handler "
"within the flush?" % (instance_key,)
)
state._orphaned_outside_of_session = False
statelib.InstanceState._commit_all_states(
((state, state.dict) for state in states), self.identity_map
)
self._register_altered(states)
if pending_to_persistent is not None:
for state in states.intersection(self._new):
pending_to_persistent(self, state)
# remove from new last, might be the last strong ref
for state in set(states).intersection(self._new):
self._new.pop(state)
def _register_altered(self, states):
if self._enable_transaction_accounting and self.transaction:
for state in states:
if state in self._new:
self.transaction._new[state] = True
else:
self.transaction._dirty[state] = True
def _remove_newly_deleted(self, states):
persistent_to_deleted = self.dispatch.persistent_to_deleted or None
for state in states:
if self._enable_transaction_accounting and self.transaction:
self.transaction._deleted[state] = True
if persistent_to_deleted is not None:
# get a strong reference before we pop out of
# self._deleted
obj = state.obj() # noqa
self.identity_map.safe_discard(state)
self._deleted.pop(state, None)
state._deleted = True
# can't call state._detach() here, because this state
# is still in the transaction snapshot and needs to be
# tracked as part of that
if persistent_to_deleted is not None:
persistent_to_deleted(self, state)
def add(self, instance, _warn=True):
"""Place an object in the ``Session``.
Its state will be persisted to the database on the next flush
operation.
Repeated calls to ``add()`` will be ignored. The opposite of ``add()``
is ``expunge()``.
"""
if _warn and self._warn_on_events:
self._flush_warning("Session.add()")
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
self._save_or_update_state(state)
def add_all(self, instances):
"""Add the given collection of instances to this ``Session``."""
if self._warn_on_events:
self._flush_warning("Session.add_all()")
for instance in instances:
self.add(instance, _warn=False)
def _save_or_update_state(self, state):
state._orphaned_outside_of_session = False
self._save_or_update_impl(state)
mapper = _state_mapper(state)
for o, m, st_, dct_ in mapper.cascade_iterator(
"save-update", state, halt_on=self._contains_state
):
self._save_or_update_impl(st_)
def delete(self, instance):
"""Mark an instance as deleted.
The database delete operation occurs upon ``flush()``.
"""
if self._warn_on_events:
self._flush_warning("Session.delete()")
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
self._delete_impl(state, instance, head=True)
def _delete_impl(self, state, obj, head):
if state.key is None:
if head:
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persisted" % state_str(state)
)
else:
return
to_attach = self._before_attach(state, obj)
if state in self._deleted:
return
self.identity_map.add(state)
if to_attach:
self._after_attach(state, obj)
if head:
# grab the cascades before adding the item to the deleted list
# so that autoflush does not delete the item
# the strong reference to the instance itself is significant here
cascade_states = list(
state.manager.mapper.cascade_iterator("delete", state)
)
self._deleted[state] = obj
if head:
for o, m, st_, dct_ in cascade_states:
self._delete_impl(st_, o, False)
def merge(self, instance, load=True):
"""Copy the state of a given instance into a corresponding instance
within this :class:`.Session`.
:meth:`.Session.merge` examines the primary key attributes of the
source instance, and attempts to reconcile it with an instance of the
same primary key in the session. If not found locally, it attempts
to load the object from the database based on primary key, and if
none can be located, creates a new instance. The state of each
attribute on the source instance is then copied to the target
instance. The resulting target instance is then returned by the
method; the original source instance is left unmodified, and
un-associated with the :class:`.Session` if not already.
This operation cascades to associated instances if the association is
mapped with ``cascade="merge"``.
See :ref:`unitofwork_merging` for a detailed discussion of merging.
.. versionchanged:: 1.1 - :meth:`.Session.merge` will now reconcile
pending objects with overlapping primary keys in the same way
as persistent. See :ref:`change_3601` for discussion.
:param instance: Instance to be merged.
:param load: Boolean, when False, :meth:`.merge` switches into
a "high performance" mode which causes it to forego emitting history
events as well as all database access. This flag is used for
cases such as transferring graphs of objects into a :class:`.Session`
from a second level cache, or to transfer just-loaded objects
into the :class:`.Session` owned by a worker thread or process
without re-querying the database.
The ``load=False`` use case adds the caveat that the given
object has to be in a "clean" state, that is, has no pending changes
to be flushed - even if the incoming object is detached from any
:class:`.Session`. This is so that when
the merge operation populates local attributes and
cascades to related objects and
collections, the values can be "stamped" onto the
target object as is, without generating any history or attribute
events, and without the need to reconcile the incoming data with
any existing related objects or collections that might not
be loaded. The resulting objects from ``load=False`` are always
produced as "clean", so it is only appropriate that the given objects
should be "clean" as well, else this suggests a mis-use of the
method.
.. seealso::
:func:`.make_transient_to_detached` - provides for an alternative
means of "merging" a single object into the :class:`.Session`
"""
if self._warn_on_events:
self._flush_warning("Session.merge()")
_recursive = {}
_resolve_conflict_map = {}
if load:
# flush current contents if we expect to load data
self._autoflush()
object_mapper(instance) # verify mapped
autoflush = self.autoflush
try:
self.autoflush = False
return self._merge(
attributes.instance_state(instance),
attributes.instance_dict(instance),
load=load,
_recursive=_recursive,
_resolve_conflict_map=_resolve_conflict_map,
)
finally:
self.autoflush = autoflush
def _merge(
self,
state,
state_dict,
load=True,
_recursive=None,
_resolve_conflict_map=None,
):
mapper = _state_mapper(state)
if state in _recursive:
return _recursive[state]
new_instance = False
key = state.key
if key is None:
if state in self._new:
util.warn(
"Instance %s is already pending in this Session yet is "
"being merged again; this is probably not what you want "
"to do" % state_str(state)
)
if not load:
raise sa_exc.InvalidRequestError(
"merge() with load=False option does not support "
"objects transient (i.e. unpersisted) objects. flush() "
"all changes on mapped instances before merging with "
"load=False."
)
key = mapper._identity_key_from_state(state)
key_is_persistent = attributes.NEVER_SET not in key[1] and (
not _none_set.intersection(key[1])
or (
mapper.allow_partial_pks
and not _none_set.issuperset(key[1])
)
)
else:
key_is_persistent = True
if key in self.identity_map:
try:
merged = self.identity_map[key]
except KeyError:
# object was GC'ed right as we checked for it
merged = None
else:
merged = None
if merged is None:
if key_is_persistent and key in _resolve_conflict_map:
merged = _resolve_conflict_map[key]
elif not load:
if state.modified:
raise sa_exc.InvalidRequestError(
"merge() with load=False option does not support "
"objects marked as 'dirty'. flush() all changes on "
"mapped instances before merging with load=False."
)
merged = mapper.class_manager.new_instance()
merged_state = attributes.instance_state(merged)
merged_state.key = key
self._update_impl(merged_state)
new_instance = True
elif key_is_persistent:
merged = self.query(mapper.class_).get(key[1])
if merged is None:
merged = mapper.class_manager.new_instance()
merged_state = attributes.instance_state(merged)
merged_dict = attributes.instance_dict(merged)
new_instance = True
self._save_or_update_state(merged_state)
else:
merged_state = attributes.instance_state(merged)
merged_dict = attributes.instance_dict(merged)
_recursive[state] = merged
_resolve_conflict_map[key] = merged
# check that we didn't just pull the exact same
# state out.
if state is not merged_state:
# version check if applicable
if mapper.version_id_col is not None:
existing_version = mapper._get_state_attr_by_column(
state,
state_dict,
mapper.version_id_col,
passive=attributes.PASSIVE_NO_INITIALIZE,
)
merged_version = mapper._get_state_attr_by_column(
merged_state,
merged_dict,
mapper.version_id_col,
passive=attributes.PASSIVE_NO_INITIALIZE,
)
if (
existing_version is not attributes.PASSIVE_NO_RESULT
and merged_version is not attributes.PASSIVE_NO_RESULT
and existing_version != merged_version
):
raise exc.StaleDataError(
"Version id '%s' on merged state %s "
"does not match existing version '%s'. "
"Leave the version attribute unset when "
"merging to update the most recent version."
% (
existing_version,
state_str(merged_state),
merged_version,
)
)
merged_state.load_path = state.load_path
merged_state.load_options = state.load_options
# since we are copying load_options, we need to copy
# the callables_ that would have been generated by those
# load_options.
# assumes that the callables we put in state.callables_
# are not instance-specific (which they should not be)
merged_state._copy_callables(state)
for prop in mapper.iterate_properties:
prop.merge(
self,
state,
state_dict,
merged_state,
merged_dict,
load,
_recursive,
_resolve_conflict_map,
)
if not load:
# remove any history
merged_state._commit_all(merged_dict, self.identity_map)
if new_instance:
merged_state.manager.dispatch.load(merged_state, None)
return merged
def _validate_persistent(self, state):
if not self.identity_map.contains_state(state):
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persistent within this Session"
% state_str(state)
)
def _save_impl(self, state):
if state.key is not None:
raise sa_exc.InvalidRequestError(
"Object '%s' already has an identity - "
"it can't be registered as pending" % state_str(state)
)
obj = state.obj()
to_attach = self._before_attach(state, obj)
if state not in self._new:
self._new[state] = obj
state.insert_order = len(self._new)
if to_attach:
self._after_attach(state, obj)
def _update_impl(self, state, revert_deletion=False):
if state.key is None:
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persisted" % state_str(state)
)
if state._deleted:
if revert_deletion:
if not state._attached:
return
del state._deleted
else:
raise sa_exc.InvalidRequestError(
"Instance '%s' has been deleted. "
"Use the make_transient() "
"function to send this object back "
"to the transient state." % state_str(state)
)
obj = state.obj()
# check for late gc
if obj is None:
return
to_attach = self._before_attach(state, obj)
self._deleted.pop(state, None)
if revert_deletion:
self.identity_map.replace(state)
else:
self.identity_map.add(state)
if to_attach:
self._after_attach(state, obj)
elif revert_deletion:
self.dispatch.deleted_to_persistent(self, state)
def _save_or_update_impl(self, state):
if state.key is None:
self._save_impl(state)
else:
self._update_impl(state)
def enable_relationship_loading(self, obj):
"""Associate an object with this :class:`.Session` for related
object loading.
.. warning::
:meth:`.enable_relationship_loading` exists to serve special
use cases and is not recommended for general use.
Accesses of attributes mapped with :func:`_orm.relationship`
will attempt to load a value from the database using this
:class:`.Session` as the source of connectivity. The values
will be loaded based on foreign key and primary key values
present on this object - if not present, then those relationships
will be unavailable.
The object will be attached to this session, but will
**not** participate in any persistence operations; its state
for almost all purposes will remain either "transient" or
"detached", except for the case of relationship loading.
Also note that backrefs will often not work as expected.
Altering a relationship-bound attribute on the target object
may not fire off a backref event, if the effective value
is what was already loaded from a foreign-key-holding value.
The :meth:`.Session.enable_relationship_loading` method is
similar to the ``load_on_pending`` flag on :func:`_orm.relationship`.
Unlike that flag, :meth:`.Session.enable_relationship_loading` allows
an object to remain transient while still being able to load
related items.
To make a transient object associated with a :class:`.Session`
via :meth:`.Session.enable_relationship_loading` pending, add
it to the :class:`.Session` using :meth:`.Session.add` normally.
If the object instead represents an existing identity in the database,
it should be merged using :meth:`.Session.merge`.
:meth:`.Session.enable_relationship_loading` does not improve
behavior when the ORM is used normally - object references should be
constructed at the object level, not at the foreign key level, so
that they are present in an ordinary way before flush()
proceeds. This method is not intended for general use.
.. seealso::
``load_on_pending`` at :func:`_orm.relationship` - this flag
allows per-relationship loading of many-to-ones on items that
are pending.
:func:`.make_transient_to_detached` - allows for an object to
be added to a :class:`.Session` without SQL emitted, which then
will unexpire attributes on access.
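For illustration, a minimal sketch, assuming a mapped ``Address``
class with a many-to-one ``user`` relationship driven by a
``user_id`` foreign key column::

    address = Address(user_id=5)    # transient; not added to the Session
    session.enable_relationship_loading(address)

    address.user    # lazy loads the User with primary key 5, if present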
"""
state = attributes.instance_state(obj)
to_attach = self._before_attach(state, obj)
state._load_pending = True
if to_attach:
self._after_attach(state, obj)
def _before_attach(self, state, obj):
if state.session_id == self.hash_key:
return False
if state.session_id and state.session_id in _sessions:
raise sa_exc.InvalidRequestError(
"Object '%s' is already attached to session '%s' "
"(this is '%s')"
% (state_str(state), state.session_id, self.hash_key)
)
self.dispatch.before_attach(self, state)
return True
def _after_attach(self, state, obj):
state.session_id = self.hash_key
if state.modified and state._strong_obj is None:
state._strong_obj = obj
self.dispatch.after_attach(self, state)
if state.key:
self.dispatch.detached_to_persistent(self, state)
else:
self.dispatch.transient_to_pending(self, state)
def __contains__(self, instance):
"""Return True if the instance is associated with this session.
The instance may be pending or persistent within the Session for a
result of True.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
return self._contains_state(state)
def __iter__(self):
"""Iterate over all pending or persistent instances within this
Session.
"""
return iter(
list(self._new.values()) + list(self.identity_map.values())
)
def _contains_state(self, state):
return state in self._new or self.identity_map.contains_state(state)
def flush(self, objects=None):
"""Flush all the object changes to the database.
Writes out all pending object creations, deletions and modifications
to the database as INSERTs, DELETEs, UPDATEs, etc. Operations are
automatically ordered by the Session's unit of work dependency
solver.
Database operations will be issued in the current transactional
context and do not affect the state of the transaction, unless an
error occurs, in which case the entire transaction is rolled back.
You may flush() as often as you like within a transaction to move
changes from Python to the database's transaction buffer.
For ``autocommit`` Sessions with no active manual transaction, flush()
will create a transaction on the fly that surrounds the entire set of
operations into the flush.
:param objects: Optional; restricts the flush operation to operate
only on elements that are in the given collection.
This feature is for an extremely narrow set of use cases where
particular objects may need to be operated upon before the
full flush() occurs. It is not intended for general use.
"""
if self._flushing:
raise sa_exc.InvalidRequestError("Session is already flushing")
if self._is_clean():
return
try:
self._flushing = True
self._flush(objects)
finally:
self._flushing = False
def _flush_warning(self, method):
util.warn(
"Usage of the '%s' operation is not currently supported "
"within the execution stage of the flush process. "
"Results may not be consistent. Consider using alternative "
"event listeners or connection-level operations instead." % method
)
def _is_clean(self):
return (
not self.identity_map.check_modified()
and not self._deleted
and not self._new
)
def _flush(self, objects=None):
dirty = self._dirty_states
if not dirty and not self._deleted and not self._new:
self.identity_map._modified.clear()
return
flush_context = UOWTransaction(self)
if self.dispatch.before_flush:
self.dispatch.before_flush(self, flush_context, objects)
# re-establish "dirty states" in case the listeners
# added
dirty = self._dirty_states
deleted = set(self._deleted)
new = set(self._new)
dirty = set(dirty).difference(deleted)
# create the set of all objects we want to operate upon
if objects:
# specific list passed in
objset = set()
for o in objects:
try:
state = attributes.instance_state(o)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(o), replace_context=err,
)
objset.add(state)
else:
objset = None
# store objects whose fate has been decided
processed = set()
# put all saves/updates into the flush context. detect top-level
# orphans and throw them into deleted.
if objset:
proc = new.union(dirty).intersection(objset).difference(deleted)
else:
proc = new.union(dirty).difference(deleted)
for state in proc:
is_orphan = _state_mapper(state)._is_orphan(state)
is_persistent_orphan = is_orphan and state.has_identity
if (
is_orphan
and not is_persistent_orphan
and state._orphaned_outside_of_session
):
self._expunge_states([state])
else:
_reg = flush_context.register_object(
state, isdelete=is_persistent_orphan
)
assert _reg, "Failed to add object to the flush context!"
processed.add(state)
# put all remaining deletes into the flush context.
if objset:
proc = deleted.intersection(objset).difference(processed)
else:
proc = deleted.difference(processed)
for state in proc:
_reg = flush_context.register_object(state, isdelete=True)
assert _reg, "Failed to add object to the flush context!"
if not flush_context.has_work:
return
flush_context.transaction = transaction = self.begin(
subtransactions=True
)
try:
self._warn_on_events = True
try:
flush_context.execute()
finally:
self._warn_on_events = False
self.dispatch.after_flush(self, flush_context)
flush_context.finalize_flush_changes()
if not objects and self.identity_map._modified:
len_ = len(self.identity_map._modified)
statelib.InstanceState._commit_all_states(
[
(state, state.dict)
for state in self.identity_map._modified
],
instance_dict=self.identity_map,
)
util.warn(
"Attribute history events accumulated on %d "
"previously clean instances "
"within inner-flush event handlers have been "
"reset, and will not result in database updates. "
"Consider using set_committed_value() within "
"inner-flush event handlers to avoid this warning." % len_
)
# useful assertions:
# if not objects:
# assert not self.identity_map._modified
# else:
# assert self.identity_map._modified == \
# self.identity_map._modified.difference(objects)
self.dispatch.after_flush_postexec(self, flush_context)
transaction.commit()
except:
with util.safe_reraise():
transaction.rollback(_capture_exception=True)
def bulk_save_objects(
self,
objects,
return_defaults=False,
update_changed_only=True,
preserve_order=True,
):
"""Perform a bulk save of the given list of objects.
The bulk save feature allows mapped objects to be used as the
source of simple INSERT and UPDATE operations which can be more easily
grouped together into higher performing "executemany"
operations; the extraction of data from the objects is also performed
using a lower-latency process that ignores whether or not attributes
have actually been modified in the case of UPDATEs, and also ignores
SQL expressions.
The objects as given are not added to the session and no additional
state is established on them, unless the ``return_defaults`` flag
is also set, in which case primary key attributes and server-side
default values will be populated.
.. versionadded:: 1.0.0
.. warning::
The bulk save feature allows for a lower-latency INSERT/UPDATE
of rows at the expense of most other unit-of-work features.
Features such as object management, relationship handling,
and SQL clause support are **silently omitted** in favor of raw
INSERT/UPDATES of records.
**Please read the list of caveats at** :ref:`bulk_operations`
**before using this method, and fully test and confirm the
functionality of all code developed using these systems.**
:param objects: a sequence of mapped object instances. The mapped
objects are persisted as is, and are **not** associated with the
:class:`.Session` afterwards.
For each object, whether the object is sent as an INSERT or an
UPDATE is dependent on the same rules used by the :class:`.Session`
in traditional operation; if the object has the
:attr:`.InstanceState.key`
attribute set, then the object is assumed to be "detached" and
will result in an UPDATE. Otherwise, an INSERT is used.
In the case of an UPDATE, statements are grouped based on which
attributes have changed, and are thus to be the subject of each
SET clause. If ``update_changed_only`` is False, then all
attributes present within each object are applied to the UPDATE
statement, which may help in allowing the statements to be grouped
together into a larger executemany(), and will also reduce the
overhead of checking history on attributes.
:param return_defaults: when True, rows that are missing values which
generate defaults, namely integer primary key defaults and sequences,
will be inserted **one at a time**, so that the primary key value
is available. In particular this will allow joined-inheritance
and other multi-table mappings to insert correctly without the need
to provide primary key values ahead of time; however,
:paramref:`.Session.bulk_save_objects.return_defaults` **greatly
reduces the performance gains** of the method overall.
:param update_changed_only: when True, UPDATE statements are rendered
based on those attributes in each state that have logged changes.
When False, all attributes present are rendered into the SET clause
with the exception of primary key attributes.
:param preserve_order: when True, the order of inserts and updates
matches exactly the order in which the objects are given. When
False, common types of objects are grouped into inserts
and updates, to allow for more batching opportunities.
.. versionadded:: 1.3
.. seealso::
:ref:`bulk_operations`
:meth:`.Session.bulk_insert_mappings`
:meth:`.Session.bulk_update_mappings`
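For illustration, a minimal sketch, assuming a mapped ``User`` class::

    session.bulk_save_objects(
        [User(name="u%d" % i) for i in range(10000)]
    )
    session.commit()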
"""
def key(state):
return (state.mapper, state.key is not None)
obj_states = (attributes.instance_state(obj) for obj in objects)
if not preserve_order:
obj_states = sorted(obj_states, key=key)
for (mapper, isupdate), states in itertools.groupby(obj_states, key):
self._bulk_save_mappings(
mapper,
states,
isupdate,
True,
return_defaults,
update_changed_only,
False,
)
def bulk_insert_mappings(
self, mapper, mappings, return_defaults=False, render_nulls=False
):
"""Perform a bulk insert of the given list of mapping dictionaries.
The bulk insert feature allows plain Python dictionaries to be used as
the source of simple INSERT operations which can be more easily
grouped together into higher performing "executemany"
operations. Using dictionaries, there is no "history" or session
state management features in use, reducing latency when inserting
large numbers of simple rows.
The values within the dictionaries as given are typically passed
without modification into Core :meth:`_expression.Insert` constructs,
after
organizing the values within them across the tables to which
the given mapper is mapped.
.. versionadded:: 1.0.0
.. warning::
The bulk insert feature allows for a lower-latency INSERT
of rows at the expense of most other unit-of-work features.
Features such as object management, relationship handling,
and SQL clause support are **silently omitted** in favor of raw
INSERT of records.
**Please read the list of caveats at** :ref:`bulk_operations`
**before using this method, and fully test and confirm the
functionality of all code developed using these systems.**
:param mapper: a mapped class, or the actual :class:`_orm.Mapper`
object,
representing the single kind of object represented within the mapping
list.
:param mappings: a sequence of dictionaries, each one containing the
state of the mapped row to be inserted, in terms of the attribute
names on the mapped class. If the mapping refers to multiple tables,
such as a joined-inheritance mapping, each dictionary must contain all
keys to be populated into all tables.
:param return_defaults: when True, rows that are missing values which
generate defaults, namely integer primary key defaults and sequences,
will be inserted **one at a time**, so that the primary key value
is available. In particular this will allow joined-inheritance
and other multi-table mappings to insert correctly without the need
to provide primary
key values ahead of time; however,
:paramref:`.Session.bulk_insert_mappings.return_defaults`
**greatly reduces the performance gains** of the method overall.
If the rows
to be inserted only refer to a single table, then there is no
reason this flag should be set as the returned default information
is not used.
:param render_nulls: When True, a value of ``None`` will result
in a NULL value being included in the INSERT statement, rather
than the column being omitted from the INSERT. This allows all
the rows being INSERTed to have the identical set of columns which
allows the full set of rows to be batched to the DBAPI. Normally,
each column-set that contains a different combination of NULL values
than the previous row must omit a different series of columns from
the rendered INSERT statement, which means it must be emitted as a
separate statement. By passing this flag, the full set of rows
are guaranteed to be batchable into one batch; the cost however is
that server-side defaults which are invoked by an omitted column will
be skipped, so care must be taken to ensure that these are not
necessary.
.. warning::
When this flag is set, **server side default SQL values will
not be invoked** for those columns that are inserted as NULL;
the NULL value will be sent explicitly. Care must be taken
to ensure that no server-side default functions need to be
invoked for the operation as a whole.
.. versionadded:: 1.1
.. seealso::
:ref:`bulk_operations`
:meth:`.Session.bulk_save_objects`
:meth:`.Session.bulk_update_mappings`
"""
self._bulk_save_mappings(
mapper,
mappings,
False,
False,
return_defaults,
False,
render_nulls,
)
def bulk_update_mappings(self, mapper, mappings):
"""Perform a bulk update of the given list of mapping dictionaries.
The bulk update feature allows plain Python dictionaries to be used as
the source of simple UPDATE operations which can be more easily
grouped together into higher performing "executemany"
operations. Using dictionaries, there are no "history" or session
state management features in use, reducing latency when updating
large numbers of simple rows.
.. versionadded:: 1.0.0
.. warning::
The bulk update feature allows for a lower-latency UPDATE
of rows at the expense of most other unit-of-work features.
Features such as object management, relationship handling,
and SQL clause support are **silently omitted** in favor of raw
UPDATES of records.
**Please read the list of caveats at** :ref:`bulk_operations`
**before using this method, and fully test and confirm the
functionality of all code developed using these systems.**
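For illustration, a minimal sketch of usage (the ``User`` class and its
columns here are hypothetical); each dictionary must include the
primary key, which is applied to the WHERE clause::

    session.bulk_update_mappings(
        User,
        [
            {"id": 1, "name": "new name one"},
            {"id": 2, "name": "new name two"},
        ],
    )
    session.commit()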
:param mapper: a mapped class, or the actual :class:`_orm.Mapper`
object,
representing the single kind of object represented within the mapping
list.
:param mappings: a sequence of dictionaries, each one containing the
state of the mapped row to be updated, in terms of the attribute names
on the mapped class. If the mapping refers to multiple tables, such
as a joined-inheritance mapping, each dictionary may contain keys
corresponding to all tables. All those keys which are present and
are not part of the primary key are applied to the SET clause of the
UPDATE statement; the primary key values, which are required, are
applied to the WHERE clause.
.. seealso::
:ref:`bulk_operations`
:meth:`.Session.bulk_insert_mappings`
:meth:`.Session.bulk_save_objects`
"""
self._bulk_save_mappings(
mapper, mappings, True, False, False, False, False
)
def _bulk_save_mappings(
self,
mapper,
mappings,
isupdate,
isstates,
return_defaults,
update_changed_only,
render_nulls,
):
mapper = _class_to_mapper(mapper)
self._flushing = True
transaction = self.begin(subtransactions=True)
try:
if isupdate:
persistence._bulk_update(
mapper,
mappings,
transaction,
isstates,
update_changed_only,
)
else:
persistence._bulk_insert(
mapper,
mappings,
transaction,
isstates,
return_defaults,
render_nulls,
)
transaction.commit()
except:
with util.safe_reraise():
transaction.rollback(_capture_exception=True)
finally:
self._flushing = False
@util.deprecated_params(
passive=(
"0.8",
"The :paramref:`.Session.is_modified.passive` flag is deprecated "
"and will be removed in a future release. The flag is no longer "
"used and is ignored.",
)
)
def is_modified(self, instance, include_collections=True, passive=None):
r"""Return ``True`` if the given instance has locally
modified attributes.
This method retrieves the history for each instrumented
attribute on the instance and performs a comparison of the current
value to its previously committed value, if any.
It is in effect a more expensive and accurate
version of checking for the given instance in the
:attr:`.Session.dirty` collection; a full test for
each attribute's net "dirty" status is performed.
E.g.::
return session.is_modified(someobject)
A few caveats to this method apply:
* Instances present in the :attr:`.Session.dirty` collection may
report ``False`` when tested with this method. This is because
the object may have received change events via attribute mutation,
thus placing it in :attr:`.Session.dirty`, but ultimately the state
is the same as that loaded from the database, resulting in no net
change here.
* Scalar attributes may not have recorded the previously set
value when a new value was applied, if the attribute was not loaded,
or was expired, at the time the new value was received - in these
cases, the attribute is assumed to have a change, even if there is
ultimately no net change against its database value. SQLAlchemy in
most cases does not need the "old" value when a set event occurs, so
it skips the expense of a SQL call if the old value isn't present,
based on the assumption that an UPDATE of the scalar value is
usually needed, and in those few cases where it isn't, is less
expensive on average than issuing a defensive SELECT.
The "old" value is fetched unconditionally upon set only if the
attribute container has the ``active_history`` flag set to ``True``.
This flag is set typically for primary key attributes and scalar
object references that are not a simple many-to-one. To set this
flag for any arbitrary mapped column, use the ``active_history``
argument with :func:`.column_property`.
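For illustration, a hypothetical mapping that sets ``active_history``
on an ordinary mapped column so that the "old" value is loaded before
a set event::

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        name = column_property(
            Column(String(50)), active_history=True
        )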
:param instance: mapped instance to be tested for pending changes.
:param include_collections: Indicates if multivalued collections
should be included in the operation. Setting this to ``False`` is a
way to detect only local-column based properties (i.e. scalar columns
or many-to-one foreign keys) that would result in an UPDATE for this
instance upon flush.
:param passive: not used
"""
state = object_state(instance)
if not state.modified:
return False
dict_ = state.dict
for attr in state.manager.attributes:
if (
not include_collections
and hasattr(attr.impl, "get_collection")
) or not hasattr(attr.impl, "get_history"):
continue
(added, unchanged, deleted) = attr.impl.get_history(
state, dict_, passive=attributes.NO_CHANGE
)
if added or deleted:
return True
else:
return False
@property
def is_active(self):
"""True if this :class:`.Session` is in "transaction mode" and
is not in "partial rollback" state.
The :class:`.Session` in its default mode of ``autocommit=False``
is essentially always in "transaction mode", in that a
:class:`.SessionTransaction` is associated with it as soon as
it is instantiated. This :class:`.SessionTransaction` is immediately
replaced with a new one as soon as it is ended, due to a rollback,
commit, or close operation.
"Transaction mode" does *not* indicate whether
or not actual database connection resources are in use; the
:class:`.SessionTransaction` object coordinates among zero or more
actual database transactions, and starts out with none, accumulating
individual DBAPI connections as different data sources are used
within its scope. The best way to track when a particular
:class:`.Session` has actually begun to use DBAPI resources is to
implement a listener using the :meth:`.SessionEvents.after_begin`
method, which will deliver both the :class:`.Session` as well as the
target :class:`_engine.Connection` to a user-defined event listener.
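As a sketch (the listener body here is illustrative only)::

    from sqlalchemy import event

    @event.listens_for(Session, "after_begin")
    def receive_after_begin(session, transaction, connection):
        # a DBAPI-level transaction has now begun on "connection"
        ...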
The "partial rollback" state refers to when an "inner" transaction,
typically used during a flush, encounters an error and emits a
rollback of the DBAPI connection. At this point, the
:class:`.Session` is in "partial rollback" and waits for the user to
call :meth:`.Session.rollback`, in order to close out the
transaction stack. It is in this "partial rollback" period that the
:attr:`.is_active` flag returns False. After the call to
:meth:`.Session.rollback`, the :class:`.SessionTransaction` is
replaced with a new one and :attr:`.is_active` returns ``True`` again.
When a :class:`.Session` is used in ``autocommit=True`` mode, the
:class:`.SessionTransaction` is only instantiated within the scope
of a flush call, or when :meth:`.Session.begin` is called. So
:attr:`.is_active` will always be ``False`` outside of a flush or
:meth:`.Session.begin` block in this mode, and will be ``True``
within the :meth:`.Session.begin` block as long as it doesn't enter
"partial rollback" state.
From all the above, it follows that the only purpose of this flag is
for application frameworks that wish to detect if a "rollback" is
necessary within a generic error handling routine, for
:class:`.Session` objects that would otherwise be in
"partial rollback" mode. In a typical integration case, this is also
not necessary as it is standard practice to emit
:meth:`.Session.rollback` unconditionally within the outermost
exception catch.
To track the transactional state of a :class:`.Session` fully,
use event listeners, primarily the :meth:`.SessionEvents.after_begin`,
:meth:`.SessionEvents.after_commit`,
:meth:`.SessionEvents.after_rollback` and related events.
"""
return self.transaction and self.transaction.is_active
identity_map = None
"""A mapping of object identities to objects themselves.
Iterating through ``Session.identity_map.values()`` provides
access to the full set of persistent objects (i.e., those
that have row identity) currently in the session.
.. seealso::
:func:`.identity_key` - helper function to produce the keys used
in this dictionary.
"""
@property
def _dirty_states(self):
"""The set of all persistent states considered dirty.
This method returns all states that were modified including
those that were possibly deleted.
"""
return self.identity_map._dirty_states()
@property
def dirty(self):
"""The set of all persistent instances considered dirty.
E.g.::
some_mapped_object in session.dirty
Instances are considered dirty when they were modified but not
deleted.
Note that this 'dirty' calculation is 'optimistic'; most
attribute-setting or collection modification operations will
mark an instance as 'dirty' and place it in this set, even if
there is no net change to the attribute's value. At flush
time, the value of each attribute is compared to its
previously saved value, and if there's no net change, no SQL
operation will occur (this is a more expensive operation so
it's only done at flush time).
To check if an instance has actionable net changes to its
attributes, use the :meth:`.Session.is_modified` method.
"""
return util.IdentitySet(
[
state.obj()
for state in self._dirty_states
if state not in self._deleted
]
)
@property
def deleted(self):
"The set of all instances marked as 'deleted' within this ``Session``"
return util.IdentitySet(list(self._deleted.values()))
@property
def new(self):
"The set of all instances marked as 'new' within this ``Session``."
return util.IdentitySet(list(self._new.values()))
class sessionmaker(_SessionClassMethods):
"""A configurable :class:`.Session` factory.
The :class:`.sessionmaker` factory generates new
:class:`.Session` objects when called, creating them given
the configurational arguments established here.
e.g.::
# global scope
Session = sessionmaker(autoflush=False)
# later, in a local scope, create and use a session:
sess = Session()
Any keyword arguments sent to the constructor itself will override the
"configured" keywords::
Session = sessionmaker()
# bind an individual session to a connection
sess = Session(bind=connection)
The class also includes a method :meth:`.configure`, which can
be used to specify additional keyword arguments to the factory, which
will take effect for subsequent :class:`.Session` objects generated.
This is usually used to associate one or more :class:`_engine.Engine`
objects
with an existing :class:`.sessionmaker` factory before it is first
used::
# application starts
Session = sessionmaker()
# ... later
engine = create_engine('sqlite:///foo.db')
Session.configure(bind=engine)
sess = Session()
.. seealso::
:ref:`session_getting` - introductory text on creating
sessions using :class:`.sessionmaker`.
"""
def __init__(
self,
bind=None,
class_=Session,
autoflush=True,
autocommit=False,
expire_on_commit=True,
info=None,
**kw
):
r"""Construct a new :class:`.sessionmaker`.
All arguments here except for ``class_`` correspond to arguments
accepted by :class:`.Session` directly. See the
:meth:`.Session.__init__` docstring for more details on parameters.
:param bind: a :class:`_engine.Engine` or other :class:`.Connectable`
with
which newly created :class:`.Session` objects will be associated.
:param class\_: class to use in order to create new :class:`.Session`
objects. Defaults to :class:`.Session`.
:param autoflush: The autoflush setting to use with newly created
:class:`.Session` objects.
:param autocommit: The autocommit setting to use with newly created
:class:`.Session` objects.
:param expire_on_commit=True: the expire_on_commit setting to use
with newly created :class:`.Session` objects.
:param info: optional dictionary of information that will be available
via :attr:`.Session.info`. Note this dictionary is *updated*, not
replaced, when the ``info`` parameter is specified to the specific
:class:`.Session` construction operation.
.. versionadded:: 0.9.0
:param \**kw: all other keyword arguments are passed to the
constructor of newly created :class:`.Session` objects.
"""
kw["bind"] = bind
kw["autoflush"] = autoflush
kw["autocommit"] = autocommit
kw["expire_on_commit"] = expire_on_commit
if info is not None:
kw["info"] = info
self.kw = kw
# make our own subclass of the given class, so that
# events can be associated with it specifically.
self.class_ = type(class_.__name__, (class_,), {})
def __call__(self, **local_kw):
"""Produce a new :class:`.Session` object using the configuration
established in this :class:`.sessionmaker`.
In Python, the ``__call__`` method is invoked on an object when
it is "called" in the same way as a function::
Session = sessionmaker()
session = Session() # invokes sessionmaker.__call__()
"""
for k, v in self.kw.items():
if k == "info" and "info" in local_kw:
d = v.copy()
d.update(local_kw["info"])
local_kw["info"] = d
else:
local_kw.setdefault(k, v)
return self.class_(**local_kw)
def configure(self, **new_kw):
"""(Re)configure the arguments for this sessionmaker.
e.g.::
Session = sessionmaker()
Session.configure(bind=create_engine('sqlite://'))
"""
self.kw.update(new_kw)
def __repr__(self):
return "%s(class_=%r, %s)" % (
self.__class__.__name__,
self.class_.__name__,
", ".join("%s=%r" % (k, v) for k, v in self.kw.items()),
)
def close_all_sessions():
"""Close all sessions in memory.
This function consults a global registry of all :class:`.Session` objects
and calls :meth:`.Session.close` on them, which resets them to a clean
state.
This function is not for general use but may be useful for test suites
within the teardown scheme.
.. versionadded:: 1.3
"""
for sess in _sessions.values():
sess.close()
def make_transient(instance):
"""Alter the state of the given instance so that it is :term:`transient`.
.. note::
:func:`.make_transient` is a special-case function for
advanced use cases only.
The given mapped instance is assumed to be in the :term:`persistent` or
:term:`detached` state. The function will remove its association with any
:class:`.Session` as well as its :attr:`.InstanceState.identity`. The
effect is that the object will behave as though it were newly constructed,
except retaining any attribute / collection values that were loaded at the
time of the call. The :attr:`.InstanceState.deleted` flag is also reset
if this object had been deleted as a result of using
:meth:`.Session.delete`.
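For illustration, a hypothetical sketch (``SomeClass`` is an arbitrary
mapped class)::

    obj = session.query(SomeClass).get(1)
    make_transient(obj)
    # obj now behaves as though newly constructed; loaded attribute
    # values remain, but it has no identity and no Session association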
.. warning::
:func:`.make_transient` does **not** "unexpire" or otherwise eagerly
load ORM-mapped attributes that are not currently loaded at the time
the function is called. This includes attributes which:
* were expired via :meth:`.Session.expire`
* were expired as the natural effect of committing a session
transaction, e.g. :meth:`.Session.commit`
* are normally :term:`lazy loaded` but are not currently loaded
* are "deferred" via :ref:`deferred` and are not yet loaded
* were not present in the query which loaded this object, such as that
which is common in joined table inheritance and other scenarios.
After :func:`.make_transient` is called, unloaded attributes such
as those above will normally resolve to the value ``None`` when
accessed, or an empty collection for a collection-oriented attribute.
As the object is transient and un-associated with any database
identity, it will no longer retrieve these values.
.. seealso::
:func:`.make_transient_to_detached`
"""
state = attributes.instance_state(instance)
s = _state_session(state)
if s:
s._expunge_states([state])
# remove expired state
state.expired_attributes.clear()
# remove deferred callables
if state.callables:
del state.callables
if state.key:
del state.key
if state._deleted:
del state._deleted
def make_transient_to_detached(instance):
"""Make the given transient instance :term:`detached`.
.. note::
:func:`.make_transient_to_detached` is a special-case function for
advanced use cases only.
All attribute history on the given instance
will be reset as though the instance were freshly loaded
from a query. Missing attributes will be marked as expired.
The primary key attributes of the object, which are required, will be made
into the "key" of the instance.
The object can then be added to a session, or merged, possibly
with the ``load=False`` flag, at which point it will look
as if it were loaded that way, without emitting SQL.
This is a special use case function that differs from a normal
call to :meth:`.Session.merge` in that a given persistent state
can be manufactured without any SQL calls.
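For illustration, a hypothetical sketch (``SomeClass`` and the primary
key value are arbitrary)::

    obj = SomeClass()
    obj.id = 5  # populate primary key attributes
    make_transient_to_detached(obj)
    session.add(obj)
    # obj is now persistent, as if loaded, without any SQL emitted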
.. versionadded:: 0.9.5
.. seealso::
:func:`.make_transient`
:meth:`.Session.enable_relationship_loading`
"""
state = attributes.instance_state(instance)
if state.session_id or state.key:
raise sa_exc.InvalidRequestError("Given object must be transient")
state.key = state.mapper._identity_key_from_state(state)
if state._deleted:
del state._deleted
state._commit_all(state.dict)
state._expire_attributes(state.dict, state.unloaded_expirable)
def object_session(instance):
"""Return the :class:`.Session` to which the given instance belongs.
This is essentially the same as the :attr:`.InstanceState.session`
accessor. See that attribute for details.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
else:
return _state_session(state)
_new_sessionid = util.counter()
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/__init__.py
|
# orm/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Functional constructs for ORM configuration.
See the SQLAlchemy object relational tutorial and mapper configuration
documentation for an overview of how this module is used.
"""
from . import exc # noqa
from . import mapper as mapperlib # noqa
from . import strategy_options
from .deprecated_interfaces import AttributeExtension # noqa
from .deprecated_interfaces import MapperExtension # noqa
from .deprecated_interfaces import SessionExtension # noqa
from .descriptor_props import ComparableProperty # noqa
from .descriptor_props import CompositeProperty # noqa
from .descriptor_props import SynonymProperty # noqa
from .interfaces import EXT_CONTINUE # noqa
from .interfaces import EXT_SKIP # noqa
from .interfaces import EXT_STOP # noqa
from .interfaces import PropComparator # noqa
from .mapper import _mapper_registry
from .mapper import class_mapper # noqa
from .mapper import configure_mappers # noqa
from .mapper import Mapper # noqa
from .mapper import reconstructor # noqa
from .mapper import validates # noqa
from .properties import ColumnProperty # noqa
from .query import AliasOption # noqa
from .query import Bundle # noqa
from .query import Query # noqa
from .relationships import foreign # noqa
from .relationships import RelationshipProperty # noqa
from .relationships import remote # noqa
from .scoping import scoped_session # noqa
from .session import close_all_sessions # noqa
from .session import make_transient # noqa
from .session import make_transient_to_detached # noqa
from .session import object_session # noqa
from .session import Session # noqa
from .session import sessionmaker # noqa
from .strategy_options import Load # noqa
from .util import aliased # noqa
from .util import join # noqa
from .util import object_mapper # noqa
from .util import outerjoin # noqa
from .util import polymorphic_union # noqa
from .util import was_deleted # noqa
from .util import with_parent # noqa
from .util import with_polymorphic # noqa
from .. import sql as _sql
from .. import util as _sa_util
from ..util.langhelpers import public_factory
def create_session(bind=None, **kwargs):
r"""Create a new :class:`.Session`
with no automation enabled by default.
This function is used primarily for testing. The usual
route to :class:`.Session` creation is via its constructor
or the :func:`.sessionmaker` function.
:param bind: optional, a single Connectable to use for all
database access in the created
:class:`~sqlalchemy.orm.session.Session`.
:param \*\*kwargs: optional, passed through to the
:class:`.Session` constructor.
:returns: an :class:`~sqlalchemy.orm.session.Session` instance
The defaults of create_session() are the opposite of those of
:func:`sessionmaker`; ``autoflush`` and ``expire_on_commit`` are
False, and ``autocommit`` is True. In this sense the session acts
more like the "classic" SQLAlchemy 0.3 session with these defaults.
Usage::
>>> from sqlalchemy.orm import create_session
>>> session = create_session()
It is recommended to use :func:`sessionmaker` instead of
create_session().
"""
kwargs.setdefault("autoflush", False)
kwargs.setdefault("autocommit", True)
kwargs.setdefault("expire_on_commit", False)
return Session(bind=bind, **kwargs)
relationship = public_factory(RelationshipProperty, ".orm.relationship")
def relation(*arg, **kw):
"""A synonym for :func:`relationship`."""
return relationship(*arg, **kw)
def dynamic_loader(argument, **kw):
"""Construct a dynamically-loading mapper property.
This is essentially the same as
using the ``lazy='dynamic'`` argument with :func:`relationship`::
dynamic_loader(SomeClass)
# is the same as
relationship(SomeClass, lazy="dynamic")
See the section :ref:`dynamic_relationship` for more details
on dynamic loading.
"""
kw["lazy"] = "dynamic"
return relationship(argument, **kw)
column_property = public_factory(ColumnProperty, ".orm.column_property")
composite = public_factory(CompositeProperty, ".orm.composite")
def backref(name, **kwargs):
"""Create a back reference with explicit keyword arguments, which are the
same arguments one can send to :func:`relationship`.
Used with the ``backref`` keyword argument to :func:`relationship` in
place of a string argument, e.g.::
'items':relationship(
SomeItem, backref=backref('parent', lazy='subquery'))
.. seealso::
:ref:`relationships_backref`
"""
return (name, kwargs)
def deferred(*columns, **kw):
r"""Indicate a column-based mapped attribute that by default will
not load unless accessed.
:param \*columns: columns to be mapped. This is typically a single
:class:`_schema.Column` object,
however a collection is supported in order
to support multiple columns mapped under the same attribute.
:param \**kw: additional keyword arguments passed to
:class:`.ColumnProperty`.
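For illustration, a minimal declarative sketch (``Base``, ``Report``
and the column names are hypothetical)::

    class Report(Base):
        __tablename__ = 'report'
        id = Column(Integer, primary_key=True)
        summary = Column(String(200))
        body = deferred(Column(Text))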
.. seealso::
:ref:`deferred`
"""
return ColumnProperty(deferred=True, *columns, **kw)
def query_expression(default_expr=_sql.null()):
"""Indicate an attribute that populates from a query-time SQL expression.
:param default_expr: Optional SQL expression object that will be used in
all cases if not assigned later with :func:`_orm.with_expression`.
E.g.::
from sqlalchemy.sql import literal
class C(Base):
#...
my_expr = query_expression(literal(1))
.. versionadded:: 1.3.18
.. versionadded:: 1.2
.. seealso::
:ref:`mapper_querytime_expression`
"""
prop = ColumnProperty(default_expr)
prop.strategy_key = (("query_expression", True),)
return prop
mapper = public_factory(Mapper, ".orm.mapper")
synonym = public_factory(SynonymProperty, ".orm.synonym")
comparable_property = public_factory(
ComparableProperty, ".orm.comparable_property"
)
@_sa_util.deprecated(
"0.7",
message=":func:`.compile_mappers` is deprecated and will be removed "
"in a future release. Please use :func:`.configure_mappers`",
)
def compile_mappers():
"""Initialize the inter-mapper relationships of all mappers that have
been defined.
"""
configure_mappers()
def clear_mappers():
"""Remove all mappers from all classes.
This function removes all instrumentation from classes and disposes
of their associated mappers. Once called, the classes are unmapped
and can be later re-mapped with new mappers.
:func:`.clear_mappers` is *not* for normal use, as there is literally no
valid usage for it outside of very specific testing scenarios. Normally,
mappers are permanent structural components of user-defined classes, and
are never discarded independently of their class. If a mapped class
itself is garbage collected, its mapper is automatically disposed of as
well. As such, :func:`.clear_mappers` is only for usage in test suites
that re-use the same classes with different mappings, which is itself an
extremely rare use case - the only such use case is in fact SQLAlchemy's
own test suite, and possibly the test suites of other ORM extension
libraries which intend to test various combinations of mapper construction
upon a fixed set of classes.
"""
mapperlib._CONFIGURE_MUTEX.acquire()
try:
while _mapper_registry:
try:
# can't even reliably call list(weakdict) in jython
mapper, b = _mapper_registry.popitem()
mapper.dispose()
except KeyError:
pass
finally:
mapperlib._CONFIGURE_MUTEX.release()
joinedload = strategy_options.joinedload._unbound_fn
joinedload_all = strategy_options.joinedload._unbound_all_fn
contains_eager = strategy_options.contains_eager._unbound_fn
defer = strategy_options.defer._unbound_fn
undefer = strategy_options.undefer._unbound_fn
undefer_group = strategy_options.undefer_group._unbound_fn
with_expression = strategy_options.with_expression._unbound_fn
load_only = strategy_options.load_only._unbound_fn
lazyload = strategy_options.lazyload._unbound_fn
lazyload_all = strategy_options.lazyload_all._unbound_all_fn
subqueryload = strategy_options.subqueryload._unbound_fn
subqueryload_all = strategy_options.subqueryload_all._unbound_all_fn
selectinload = strategy_options.selectinload._unbound_fn
selectinload_all = strategy_options.selectinload_all._unbound_all_fn
immediateload = strategy_options.immediateload._unbound_fn
noload = strategy_options.noload._unbound_fn
raiseload = strategy_options.raiseload._unbound_fn
defaultload = strategy_options.defaultload._unbound_fn
selectin_polymorphic = strategy_options.selectin_polymorphic._unbound_fn
def eagerload(*args, **kwargs):
"""A synonym for :func:`joinedload()`."""
return joinedload(*args, **kwargs)
def eagerload_all(*args, **kwargs):
"""A synonym for :func:`joinedload_all()`"""
return joinedload_all(*args, **kwargs)
contains_alias = public_factory(AliasOption, ".orm.contains_alias")
def __go(lcls):
global __all__
from .. import util as sa_util # noqa
from . import dynamic # noqa
from . import events # noqa
from . import loading # noqa
import inspect as _inspect
__all__ = sorted(
name
for name, obj in lcls.items()
if not (name.startswith("_") or _inspect.ismodule(obj))
)
_sa_util.dependencies.resolve_all("sqlalchemy.orm")
_sa_util.dependencies.resolve_all("sqlalchemy.ext")
__go(locals())
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/dynamic.py
|
# orm/dynamic.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Dynamic collection API.
Dynamic collections act like Query() objects for read operations and support
basic add/delete mutation.
"""
from . import attributes
from . import exc as orm_exc
from . import interfaces
from . import object_mapper
from . import object_session
from . import properties
from . import strategies
from . import util as orm_util
from .query import Query
from .. import exc
from .. import log
from .. import util
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="dynamic")
class DynaLoader(strategies.AbstractRelationshipLoader):
def init_class_attribute(self, mapper):
self.is_class_level = True
if not self.uselist:
raise exc.InvalidRequestError(
"On relationship %s, 'dynamic' loaders cannot be used with "
"many-to-one/one-to-one relationships and/or "
"uselist=False." % self.parent_property
)
elif self.parent_property.direction not in (
interfaces.ONETOMANY,
interfaces.MANYTOMANY,
):
util.warn(
"On relationship %s, 'dynamic' loaders cannot be used with "
"many-to-one/one-to-one relationships and/or "
"uselist=False. This warning will be an exception in a "
"future release." % self.parent_property
)
strategies._register_attribute(
self.parent_property,
mapper,
useobject=True,
impl_class=DynamicAttributeImpl,
target_mapper=self.parent_property.mapper,
order_by=self.parent_property.order_by,
query_class=self.parent_property.query_class,
)
class DynamicAttributeImpl(attributes.AttributeImpl):
uses_objects = True
default_accepts_scalar_loader = False
supports_population = False
collection = False
dynamic = True
def __init__(
self,
class_,
key,
typecallable,
dispatch,
target_mapper,
order_by,
query_class=None,
**kw
):
super(DynamicAttributeImpl, self).__init__(
class_, key, typecallable, dispatch, **kw
)
self.target_mapper = target_mapper
self.order_by = order_by
if not query_class:
self.query_class = AppenderQuery
elif AppenderMixin in query_class.mro():
self.query_class = query_class
else:
self.query_class = mixin_user_query(query_class)
def get(self, state, dict_, passive=attributes.PASSIVE_OFF):
if not passive & attributes.SQL_OK:
return self._get_collection_history(
state, attributes.PASSIVE_NO_INITIALIZE
).added_items
else:
return self.query_class(self, state)
def get_collection(
self,
state,
dict_,
user_data=None,
passive=attributes.PASSIVE_NO_INITIALIZE,
):
if not passive & attributes.SQL_OK:
return self._get_collection_history(state, passive).added_items
else:
history = self._get_collection_history(state, passive)
return history.added_plus_unchanged
@util.memoized_property
def _append_token(self):
return attributes.Event(self, attributes.OP_APPEND)
@util.memoized_property
def _remove_token(self):
return attributes.Event(self, attributes.OP_REMOVE)
def fire_append_event(
self, state, dict_, value, initiator, collection_history=None
):
if collection_history is None:
collection_history = self._modified_event(state, dict_)
collection_history.add_added(value)
for fn in self.dispatch.append:
value = fn(state, value, initiator or self._append_token)
if self.trackparent and value is not None:
self.sethasparent(attributes.instance_state(value), state, True)
def fire_remove_event(
self, state, dict_, value, initiator, collection_history=None
):
if collection_history is None:
collection_history = self._modified_event(state, dict_)
collection_history.add_removed(value)
if self.trackparent and value is not None:
self.sethasparent(attributes.instance_state(value), state, False)
for fn in self.dispatch.remove:
fn(state, value, initiator or self._remove_token)
def _modified_event(self, state, dict_):
if self.key not in state.committed_state:
state.committed_state[self.key] = CollectionHistory(self, state)
state._modified_event(dict_, self, attributes.NEVER_SET)
# this is a hack to allow the fixtures.ComparableEntity fixture
# to work
dict_[self.key] = True
return state.committed_state[self.key]
def set(
self,
state,
dict_,
value,
initiator=None,
passive=attributes.PASSIVE_OFF,
check_old=None,
pop=False,
_adapt=True,
):
if initiator and initiator.parent_token is self.parent_token:
return
if pop and value is None:
return
iterable = value
new_values = list(iterable)
if state.has_identity:
old_collection = util.IdentitySet(self.get(state, dict_))
collection_history = self._modified_event(state, dict_)
if not state.has_identity:
old_collection = collection_history.added_items
else:
old_collection = old_collection.union(
collection_history.added_items
)
idset = util.IdentitySet
constants = old_collection.intersection(new_values)
additions = idset(new_values).difference(constants)
removals = old_collection.difference(constants)
for member in new_values:
if member in additions:
self.fire_append_event(
state,
dict_,
member,
None,
collection_history=collection_history,
)
for member in removals:
self.fire_remove_event(
state,
dict_,
member,
None,
collection_history=collection_history,
)
def delete(self, *args, **kwargs):
raise NotImplementedError()
def set_committed_value(self, state, dict_, value):
raise NotImplementedError(
"Dynamic attributes don't support " "collection population."
)
def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF):
c = self._get_collection_history(state, passive)
return c.as_history()
def get_all_pending(
self, state, dict_, passive=attributes.PASSIVE_NO_INITIALIZE
):
c = self._get_collection_history(state, passive)
return [(attributes.instance_state(x), x) for x in c.all_items]
def _get_collection_history(self, state, passive=attributes.PASSIVE_OFF):
if self.key in state.committed_state:
c = state.committed_state[self.key]
else:
c = CollectionHistory(self, state)
if state.has_identity and (passive & attributes.INIT_OK):
return CollectionHistory(self, state, apply_to=c)
else:
return c
def append(
self, state, dict_, value, initiator, passive=attributes.PASSIVE_OFF
):
if initiator is not self:
self.fire_append_event(state, dict_, value, initiator)
def remove(
self, state, dict_, value, initiator, passive=attributes.PASSIVE_OFF
):
if initiator is not self:
self.fire_remove_event(state, dict_, value, initiator)
def pop(
self, state, dict_, value, initiator, passive=attributes.PASSIVE_OFF
):
self.remove(state, dict_, value, initiator, passive=passive)
class AppenderMixin(object):
query_class = None
def __init__(self, attr, state):
super(AppenderMixin, self).__init__(attr.target_mapper, None)
self.instance = instance = state.obj()
self.attr = attr
mapper = object_mapper(instance)
prop = mapper._props[self.attr.key]
if prop.secondary is not None:
# this is a hack right now. The Query only knows how to
# make subsequent joins() without a given left-hand side
# from self._from_obj[0]. We need to ensure prop.secondary
# is in the FROM. So we purposely put the mapper selectable
# in _from_obj[0] to ensure a user-defined join() later on
# doesn't fail, and secondary is then in _from_obj[1].
self._from_obj = (prop.mapper.selectable, prop.secondary)
self._criterion = prop._with_parent(instance, alias_secondary=False)
if self.attr.order_by:
self._order_by = self.attr.order_by
def session(self):
sess = object_session(self.instance)
if (
sess is not None
and self.autoflush
and sess.autoflush
and self.instance in sess
):
sess.flush()
if not orm_util.has_identity(self.instance):
return None
else:
return sess
session = property(session, lambda s, x: None)
def __iter__(self):
sess = self.session
if sess is None:
return iter(
self.attr._get_collection_history(
attributes.instance_state(self.instance),
attributes.PASSIVE_NO_INITIALIZE,
).added_items
)
else:
return iter(self._clone(sess))
def __getitem__(self, index):
sess = self.session
if sess is None:
return self.attr._get_collection_history(
attributes.instance_state(self.instance),
attributes.PASSIVE_NO_INITIALIZE,
).indexed(index)
else:
return self._clone(sess).__getitem__(index)
def count(self):
sess = self.session
if sess is None:
return len(
self.attr._get_collection_history(
attributes.instance_state(self.instance),
attributes.PASSIVE_NO_INITIALIZE,
).added_items
)
else:
return self._clone(sess).count()
def _clone(self, sess=None):
# note we're returning an entirely new Query class instance
# here without any assignment capabilities; the class of this
# query is determined by the session.
instance = self.instance
if sess is None:
sess = object_session(instance)
if sess is None:
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session, and no "
"contextual session is established; lazy load operation "
"of attribute '%s' cannot proceed"
% (orm_util.instance_str(instance), self.attr.key)
)
if self.query_class:
query = self.query_class(self.attr.target_mapper, session=sess)
else:
query = sess.query(self.attr.target_mapper)
query._criterion = self._criterion
query._from_obj = self._from_obj
query._order_by = self._order_by
return query
def extend(self, iterator):
for item in iterator:
self.attr.append(
attributes.instance_state(self.instance),
attributes.instance_dict(self.instance),
item,
None,
)
def append(self, item):
self.attr.append(
attributes.instance_state(self.instance),
attributes.instance_dict(self.instance),
item,
None,
)
def remove(self, item):
self.attr.remove(
attributes.instance_state(self.instance),
attributes.instance_dict(self.instance),
item,
None,
)
class AppenderQuery(AppenderMixin, Query):
"""A dynamic query that supports basic collection storage operations."""
def mixin_user_query(cls):
"""Return a new class with AppenderQuery functionality layered over."""
name = "Appender" + cls.__name__
return type(name, (AppenderMixin, cls), {"query_class": cls})
class CollectionHistory(object):
"""Overrides AttributeHistory to receive append/remove events directly."""
def __init__(self, attr, state, apply_to=None):
if apply_to:
coll = AppenderQuery(attr, state).autoflush(False)
self.unchanged_items = util.OrderedIdentitySet(coll)
self.added_items = apply_to.added_items
self.deleted_items = apply_to.deleted_items
self._reconcile_collection = True
else:
self.deleted_items = util.OrderedIdentitySet()
self.added_items = util.OrderedIdentitySet()
self.unchanged_items = util.OrderedIdentitySet()
self._reconcile_collection = False
@property
def added_plus_unchanged(self):
return list(self.added_items.union(self.unchanged_items))
@property
def all_items(self):
return list(
self.added_items.union(self.unchanged_items).union(
self.deleted_items
)
)
def as_history(self):
if self._reconcile_collection:
added = self.added_items.difference(self.unchanged_items)
deleted = self.deleted_items.intersection(self.unchanged_items)
unchanged = self.unchanged_items.difference(deleted)
else:
added, unchanged, deleted = (
self.added_items,
self.unchanged_items,
self.deleted_items,
)
return attributes.History(list(added), list(unchanged), list(deleted))
def indexed(self, index):
return list(self.added_items)[index]
def add_added(self, value):
self.added_items.add(value)
def add_removed(self, value):
if value in self.added_items:
self.added_items.remove(value)
else:
self.deleted_items.add(value)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/exc.py
|
# orm/exc.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQLAlchemy ORM exceptions."""
from .. import exc as sa_exc
from .. import util
NO_STATE = (AttributeError, KeyError)
"""Exception types that may be raised by instrumentation implementations."""
class StaleDataError(sa_exc.SQLAlchemyError):
"""An operation encountered database state that is unaccounted for.
Conditions which cause this to happen include:
* A flush may have attempted to update or delete rows
and an unexpected number of rows were matched during
the UPDATE or DELETE statement. Note that when
version_id_col is used, rows in UPDATE or DELETE statements
are also matched against the current known version
identifier.
* A mapped object with version_id_col was refreshed,
and the version number coming back from the database does
not match that of the object itself.
* An object is detached from its parent object; however,
the object was previously attached to a different parent
identity which was garbage collected, and a decision
cannot be made as to whether the new parent was really the most
recent "parent".
"""
ConcurrentModificationError = StaleDataError
class FlushError(sa_exc.SQLAlchemyError):
"""A invalid condition was detected during flush()."""
class UnmappedError(sa_exc.InvalidRequestError):
"""Base for exceptions that involve expected mappings not present."""
class ObjectDereferencedError(sa_exc.SQLAlchemyError):
"""An operation cannot complete due to an object being garbage
collected.
"""
class DetachedInstanceError(sa_exc.SQLAlchemyError):
"""An attempt to access unloaded attributes on a
mapped instance that is detached."""
code = "bhk3"
class UnmappedInstanceError(UnmappedError):
"""An mapping operation was requested for an unknown instance."""
@util.dependencies("sqlalchemy.orm.base")
def __init__(self, base, obj, msg=None):
if not msg:
try:
base.class_mapper(type(obj))
name = _safe_cls_name(type(obj))
msg = (
"Class %r is mapped, but this instance lacks "
"instrumentation. This occurs when the instance "
"is created before sqlalchemy.orm.mapper(%s) "
"was called." % (name, name)
)
except UnmappedClassError:
msg = _default_unmapped(type(obj))
if isinstance(obj, type):
msg += (
"; was a class (%s) supplied where an instance was "
"required?" % _safe_cls_name(obj)
)
UnmappedError.__init__(self, msg)
def __reduce__(self):
return self.__class__, (None, self.args[0])
class UnmappedClassError(UnmappedError):
"""An mapping operation was requested for an unknown class."""
def __init__(self, cls, msg=None):
if not msg:
msg = _default_unmapped(cls)
UnmappedError.__init__(self, msg)
def __reduce__(self):
return self.__class__, (None, self.args[0])
class ObjectDeletedError(sa_exc.InvalidRequestError):
"""A refresh operation failed to retrieve the database
row corresponding to an object's known primary key identity.
A refresh operation proceeds when an expired attribute is
accessed on an object, or when :meth:`_query.Query.get` is
used to retrieve an object which is, upon retrieval, detected
as expired. A SELECT is emitted for the target row
based on primary key; if no row is returned, this
exception is raised.
The true meaning of this exception is simply that
no row exists for the primary key identifier associated
with a persistent object. The row may have been
deleted, or in some cases the primary key updated
to a new value, outside of the ORM's management of the target
object.
"""
@util.dependencies("sqlalchemy.orm.base")
def __init__(self, base, state, msg=None):
if not msg:
msg = (
"Instance '%s' has been deleted, or its "
"row is otherwise not present." % base.state_str(state)
)
sa_exc.InvalidRequestError.__init__(self, msg)
def __reduce__(self):
return self.__class__, (None, self.args[0])
class UnmappedColumnError(sa_exc.InvalidRequestError):
"""Mapping operation was requested on an unknown column."""
class NoResultFound(sa_exc.InvalidRequestError):
"""A database result was required but none was found."""
class MultipleResultsFound(sa_exc.InvalidRequestError):
"""A single database result was required but more than one were found."""
class LoaderStrategyException(sa_exc.InvalidRequestError):
"""A loader strategy for an attribute does not exist."""
def __init__(
self,
applied_to_property_type,
requesting_property,
applies_to,
actual_strategy_type,
strategy_key,
):
if actual_strategy_type is None:
sa_exc.InvalidRequestError.__init__(
self,
"Can't find strategy %s for %s"
% (strategy_key, requesting_property),
)
else:
sa_exc.InvalidRequestError.__init__(
self,
'Can\'t apply "%s" strategy to property "%s", '
'which is a "%s"; this loader strategy is intended '
'to be used with a "%s".'
% (
util.clsname_as_plain_name(actual_strategy_type),
requesting_property,
util.clsname_as_plain_name(applied_to_property_type),
util.clsname_as_plain_name(applies_to),
),
)
def _safe_cls_name(cls):
try:
cls_name = ".".join((cls.__module__, cls.__name__))
except AttributeError:
cls_name = getattr(cls, "__name__", None)
if cls_name is None:
cls_name = repr(cls)
return cls_name
@util.dependencies("sqlalchemy.orm.base")
def _default_unmapped(base, cls):
try:
mappers = base.manager_of_class(cls).mappers
except NO_STATE:
mappers = {}
except TypeError:
mappers = {}
name = _safe_cls_name(cls)
if not mappers:
return "Class '%s' is not mapped" % name
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/collections.py
|
# orm/collections.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for collections of mapped entities.
The collections package supplies the machinery used to inform the ORM of
collection membership changes. An instrumentation via decoration approach is
used, allowing arbitrary types (including built-ins) to be used as entity
collections without requiring inheritance from a base class.
Instrumentation decoration relays membership change events to the
:class:`.CollectionAttributeImpl` that is currently managing the collection.
The decorators observe function call arguments and return values, tracking
entities entering or leaving the collection. Two decorator approaches are
provided. One is a bundle of generic decorators that map function arguments
and return values to events::
from sqlalchemy.orm.collections import collection
class MyClass(object):
# ...
@collection.adds(1)
def store(self, item):
self.data.append(item)
@collection.removes_return()
def pop(self):
return self.data.pop()
The second approach is a bundle of targeted decorators that wrap appropriate
append and remove notifiers around the mutation methods present in the
standard Python ``list``, ``set`` and ``dict`` interfaces. These could be
specified in terms of generic decorator recipes, but are instead hand-tooled
for increased efficiency. The targeted decorators occasionally implement
adapter-like behavior, such as mapping bulk-set methods (``extend``,
``update``, ``__setslice__``, etc.) into the series of atomic mutation events
that the ORM requires.
The targeted decorators are used internally for automatic instrumentation of
entity collection classes. Every collection class goes through a
transformation process roughly like so:
1. If the class is a built-in, substitute a trivial sub-class
2. Is this class already instrumented?
3. Add in generic decorators
4. Sniff out the collection interface through duck-typing
5. Add targeted decoration to any undecorated interface method
This process modifies the class at runtime, decorating methods and adding some
bookkeeping properties. This isn't possible (or desirable) for built-in
classes like ``list``, so trivial sub-classes are substituted to hold
decoration::
class InstrumentedList(list):
pass
Collection classes can be specified in ``relationship(collection_class=)`` as
types or a function that returns an instance. Collection classes are
inspected and instrumented during the mapper compilation phase. The
collection_class callable will be executed once to produce a specimen
instance, and the type of that specimen will be instrumented. Functions that
return built-in types like ``lists`` will be adapted to produce instrumented
instances.
When extending a known type like ``list``, additional decorations are
generally not needed. Odds are, the extension method will delegate to a
method that's already instrumented. For example::
class QueueIsh(list):
def push(self, item):
self.append(item)
def shift(self):
return self.pop(0)
There's no need to decorate these methods. ``append`` and ``pop`` are already
instrumented as part of the ``list`` interface. Decorating them would fire
duplicate events, which should be avoided.
The targeted decoration tries not to rely on other methods in the underlying
collection class, but some are unavoidable. Many depend on 'read' methods
being present to properly instrument a 'write', for example, ``__setitem__``
needs ``__getitem__``. "Bulk" methods like ``update`` and ``extend`` may also
be reimplemented in terms of atomic appends and removes, so the ``extend``
decoration will actually perform many ``append`` operations and not call the
underlying method at all.
Tight control over bulk operation and the firing of events is also possible by
implementing the instrumentation internally in your methods. The basic
instrumentation package works under the general assumption that collection
mutation will not raise unusual exceptions. If you want to closely
orchestrate append and remove events with exception management, internal
instrumentation may be the answer. Within your method,
``collection_adapter(self)`` will retrieve an object that you can use for
explicit control over triggering append and remove events.
The owning object and :class:`.CollectionAttributeImpl` are also reachable
through the adapter, allowing for some very sophisticated behavior.
"""
import operator
import weakref
from sqlalchemy.util.compat import inspect_getfullargspec
from . import base
from .. import exc as sa_exc
from .. import util
from ..sql import expression
__all__ = [
"collection",
"collection_adapter",
"mapped_collection",
"column_mapped_collection",
"attribute_mapped_collection",
]
__instrumentation_mutex = util.threading.Lock()
class _PlainColumnGetter(object):
"""Plain column getter, stores collection of Column objects
directly.
Serializes to a :class:`._SerializableColumnGetterV2`
which has more expensive __call__() performance
and some rare caveats.
"""
def __init__(self, cols):
self.cols = cols
self.composite = len(cols) > 1
def __reduce__(self):
return _SerializableColumnGetterV2._reduce_from_cols(self.cols)
def _cols(self, mapper):
return self.cols
def __call__(self, value):
state = base.instance_state(value)
m = base._state_mapper(state)
key = [
m._get_state_attr_by_column(state, state.dict, col)
for col in self._cols(m)
]
if self.composite:
return tuple(key)
else:
return key[0]
class _SerializableColumnGetter(object):
"""Column-based getter used in version 0.7.6 only.
Remains here for pickle compatibility with 0.7.6.
"""
def __init__(self, colkeys):
self.colkeys = colkeys
self.composite = len(colkeys) > 1
def __reduce__(self):
return _SerializableColumnGetter, (self.colkeys,)
def __call__(self, value):
state = base.instance_state(value)
m = base._state_mapper(state)
key = [
m._get_state_attr_by_column(
state, state.dict, m.mapped_table.columns[k]
)
for k in self.colkeys
]
if self.composite:
return tuple(key)
else:
return key[0]
class _SerializableColumnGetterV2(_PlainColumnGetter):
"""Updated serializable getter which deals with
multi-table mapped classes.
Two extremely unusual cases are not supported.
Mappings which have tables across multiple metadata
objects, or which are mapped to non-Table selectables
linked across inheriting mappers may fail to function
here.
"""
def __init__(self, colkeys):
self.colkeys = colkeys
self.composite = len(colkeys) > 1
def __reduce__(self):
return self.__class__, (self.colkeys,)
@classmethod
def _reduce_from_cols(cls, cols):
def _table_key(c):
if not isinstance(c.table, expression.TableClause):
return None
else:
return c.table.key
colkeys = [(c.key, _table_key(c)) for c in cols]
return _SerializableColumnGetterV2, (colkeys,)
def _cols(self, mapper):
cols = []
metadata = getattr(mapper.local_table, "metadata", None)
for (ckey, tkey) in self.colkeys:
if tkey is None or metadata is None or tkey not in metadata:
cols.append(mapper.local_table.c[ckey])
else:
cols.append(metadata.tables[tkey].c[ckey])
return cols
def column_mapped_collection(mapping_spec):
"""A dictionary-based collection type with column-based keying.
Returns a :class:`.MappedCollection` factory with a keying function
generated from mapping_spec, which may be a Column or a sequence
of Columns.
The key value must be immutable for the lifetime of the object. You
can not, for example, map on foreign key values if those key values will
change during the session, i.e. from None to a database-assigned integer
after a session flush.
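For illustration, a hypothetical relationship keyed on a ``keyword``
column of a ``note`` table::

    notes = relationship(
        "Note",
        collection_class=column_mapped_collection(note_table.c.keyword),
    )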
"""
cols = [
expression._only_column_elements(q, "mapping_spec")
for q in util.to_list(mapping_spec)
]
keyfunc = _PlainColumnGetter(cols)
return lambda: MappedCollection(keyfunc)
class _SerializableAttrGetter(object):
def __init__(self, name):
self.name = name
self.getter = operator.attrgetter(name)
def __call__(self, target):
return self.getter(target)
def __reduce__(self):
return _SerializableAttrGetter, (self.name,)
def attribute_mapped_collection(attr_name):
"""A dictionary-based collection type with attribute-based keying.
Returns a :class:`.MappedCollection` factory with a keying based on the
'attr_name' attribute of entities in the collection, where ``attr_name``
is the string name of the attribute.
The key value must be immutable for the lifetime of the object. You
can not, for example, map on foreign key values if those key values will
change during the session, i.e. from None to a database-assigned integer
after a session flush.
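For illustration, a hypothetical relationship keyed on a ``keyword``
attribute of the related ``Note`` class::

    notes = relationship(
        "Note",
        collection_class=attribute_mapped_collection('keyword'),
    )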
"""
getter = _SerializableAttrGetter(attr_name)
return lambda: MappedCollection(getter)
def mapped_collection(keyfunc):
"""A dictionary-based collection type with arbitrary keying.
Returns a :class:`.MappedCollection` factory with a keying function
generated from keyfunc, a callable that takes an entity and returns a
key value.
The key value must be immutable for the lifetime of the object. You
can not, for example, map on foreign key values if those key values will
change during the session, i.e. from None to a database-assigned integer
after a session flush.
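For illustration, a hypothetical keying function using the first ten
characters of a related ``Note.text`` attribute::

    notes = relationship(
        "Note",
        collection_class=mapped_collection(lambda note: note.text[:10]),
    )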
"""
return lambda: MappedCollection(keyfunc)
class collection(object):
"""Decorators for entity collection classes.
The decorators fall into two groups: annotations and interception recipes.
The annotating decorators (appender, remover, iterator, linker, converter,
internally_instrumented) indicate the method's purpose and take no
arguments. They are not written with parens::
@collection.appender
def append(self, append): ...
The recipe decorators all require parens, even those that take no
arguments::
@collection.adds('entity')
def insert(self, position, entity): ...
@collection.removes_return()
def popitem(self): ...
"""
# Bundled as a class solely for ease of use: packaging, doc strings,
# importability.
@staticmethod
def appender(fn):
"""Tag the method as the collection appender.
The appender method is called with one positional argument: the value
to append. The method will be automatically decorated with 'adds(1)'
if not already decorated::
@collection.appender
def add(self, append): ...
# or, equivalently
@collection.appender
@collection.adds(1)
def add(self, append): ...
# for mapping type, an 'append' may kick out a previous value
# that occupies that slot. consider d['a'] = 'foo'- any previous
# value in d['a'] is discarded.
@collection.appender
@collection.replaces(1)
def add(self, entity):
key = some_key_func(entity)
previous = None
if key in self:
previous = self[key]
self[key] = entity
return previous
If the value to append is not allowed in the collection, you may
raise an exception. Something to remember is that the appender
will be called for each object mapped by a database query. If the
database contains rows that violate your collection semantics, you
will need to get creative to fix the problem, as access via the
collection will not work.
If the appender method is internally instrumented, you must also
receive the keyword argument '_sa_initiator' and ensure its
promulgation to collection events.
"""
fn._sa_instrument_role = "appender"
return fn
@staticmethod
def remover(fn):
"""Tag the method as the collection remover.
The remover method is called with one positional argument: the value
to remove. The method will be automatically decorated with
:meth:`removes_return` if not already decorated::
@collection.remover
def zap(self, entity): ...
# or, equivalently
@collection.remover
@collection.removes_return()
def zap(self, ): ...
If the value to remove is not present in the collection, you may
raise an exception or return None to ignore the error.
If the remove method is internally instrumented, you must also
receive the keyword argument '_sa_initiator' and ensure its
promulgation to collection events.
"""
fn._sa_instrument_role = "remover"
return fn
@staticmethod
def iterator(fn):
"""Tag the method as the collection remover.
The iterator method is called with no arguments. It is expected to
return an iterator over all collection members::
@collection.iterator
def __iter__(self): ...
"""
fn._sa_instrument_role = "iterator"
return fn
@staticmethod
def internally_instrumented(fn):
"""Tag the method as instrumented.
This tag will prevent any decoration from being applied to the
method. Use this if you are orchestrating your own calls to
:func:`.collection_adapter` in one of the basic SQLAlchemy
interface methods, or to prevent an automatic ABC method
decoration from wrapping your implementation::
# normally an 'extend' method on a list-like class would be
# automatically intercepted and re-implemented in terms of
# SQLAlchemy events and append(). your implementation will
# never be called, unless:
@collection.internally_instrumented
def extend(self, items): ...
"""
fn._sa_instrumented = True
return fn
@staticmethod
@util.deprecated(
"1.0",
"The :meth:`.collection.linker` handler is deprecated and will "
"be removed in a future release. Please refer to the "
":meth:`.AttributeEvents.init_collection` "
"and :meth:`.AttributeEvents.dispose_collection` event handlers. ",
)
def linker(fn):
"""Tag the method as a "linked to attribute" event handler.
This optional event handler will be called when the collection class
is linked to or unlinked from the InstrumentedAttribute. It is
invoked immediately after the '_sa_adapter' property is set on
the instance. A single argument is passed: the collection adapter
that has been linked, or None if unlinking.
"""
fn._sa_instrument_role = "linker"
return fn
link = linker
"""Synonym for :meth:`.collection.linker`.
.. deprecated:: 1.0 - :meth:`.collection.link` is deprecated and will be
removed in a future release.
"""
@staticmethod
@util.deprecated(
"1.3",
"The :meth:`.collection.converter` handler is deprecated and will "
"be removed in a future release. Please refer to the "
":class:`.AttributeEvents.bulk_replace` listener interface in "
"conjunction with the :func:`.event.listen` function.",
)
def converter(fn):
"""Tag the method as the collection converter.
This optional method will be called when a collection is being
replaced entirely, as in::
myobj.acollection = [newvalue1, newvalue2]
The converter method will receive the object being assigned and should
return an iterable of values suitable for use by the ``appender``
        method. A converter must not assign values or mutate the collection;
its sole job is to adapt the value the user provides into an iterable
of values for the ORM's use.
The default converter implementation will use duck-typing to do the
        conversion. A dict-like collection will be converted into an iterable
of dictionary values, and other types will simply be iterated::
@collection.converter
def convert(self, other): ...
If the duck-typing of the object does not match the type of this
collection, a TypeError is raised.
Supply an implementation of this method if you want to expand the
range of possible types that can be assigned in bulk or perform
validation on the values about to be assigned.
"""
fn._sa_instrument_role = "converter"
return fn
@staticmethod
def adds(arg):
"""Mark the method as adding an entity to the collection.
Adds "add to collection" handling to the method. The decorator
argument indicates which method argument holds the SQLAlchemy-relevant
value. Arguments can be specified positionally (i.e. integer) or by
name::
@collection.adds(1)
def push(self, item): ...
@collection.adds('entity')
def do_stuff(self, thing, entity=None): ...
"""
def decorator(fn):
fn._sa_instrument_before = ("fire_append_event", arg)
return fn
return decorator
@staticmethod
def replaces(arg):
"""Mark the method as replacing an entity in the collection.
Adds "add to collection" and "remove from collection" handling to
the method. The decorator argument indicates which method argument
        holds the SQLAlchemy-relevant value to be added, and the return
        value, if any, will be considered the value to remove.
Arguments can be specified positionally (i.e. integer) or by name::
@collection.replaces(2)
def __setitem__(self, index, item): ...
"""
def decorator(fn):
fn._sa_instrument_before = ("fire_append_event", arg)
fn._sa_instrument_after = "fire_remove_event"
return fn
return decorator
@staticmethod
def removes(arg):
"""Mark the method as removing an entity in the collection.
Adds "remove from collection" handling to the method. The decorator
argument indicates which method argument holds the SQLAlchemy-relevant
value to be removed. Arguments can be specified positionally (i.e.
integer) or by name::
@collection.removes(1)
def zap(self, item): ...
For methods where the value to remove is not known at call-time, use
collection.removes_return.
"""
def decorator(fn):
fn._sa_instrument_before = ("fire_remove_event", arg)
return fn
return decorator
@staticmethod
def removes_return():
"""Mark the method as removing an entity in the collection.
Adds "remove from collection" handling to the method. The return
value of the method, if any, is considered the value to remove. The
method arguments are not inspected::
@collection.removes_return()
def pop(self): ...
For methods where the value to remove is known at call-time, use
        collection.removes.
"""
def decorator(fn):
fn._sa_instrument_after = "fire_remove_event"
return fn
return decorator
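# A minimal sketch of a fully custom collection class wired up with the
# decorators above (illustrative only; the class and member names are
# hypothetical)::
#
#     class OrderedBag(object):
#         def __init__(self):
#             self._members = []
#
#         @collection.appender
#         def append(self, item):
#             self._members.append(item)
#
#         @collection.remover
#         def remove(self, item):
#             self._members.remove(item)
#
#         @collection.iterator
#         def __iter__(self):
#             return iter(self._members)
#
# appender, remover and iterator are the three roles required of any
# collection class; the remaining decorators are optional refinements.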
collection_adapter = operator.attrgetter("_sa_adapter")
"""Fetch the :class:`.CollectionAdapter` for a collection."""
class CollectionAdapter(object):
"""Bridges between the ORM and arbitrary Python collections.
Proxies base-level collection operations (append, remove, iterate)
to the underlying Python collection, and emits add/remove events for
entities entering or leaving the collection.
The ORM uses :class:`.CollectionAdapter` exclusively for interaction with
entity collections.
"""
__slots__ = (
"attr",
"_key",
"_data",
"owner_state",
"_converter",
"invalidated",
)
def __init__(self, attr, owner_state, data):
self.attr = attr
self._key = attr.key
self._data = weakref.ref(data)
self.owner_state = owner_state
data._sa_adapter = self
self._converter = data._sa_converter
self.invalidated = False
def _warn_invalidated(self):
util.warn("This collection has been invalidated.")
@property
def data(self):
"The entity collection being adapted."
return self._data()
@property
def _referenced_by_owner(self):
"""return True if the owner state still refers to this collection.
This will return False within a bulk replace operation,
where this collection is the one being replaced.
"""
return self.owner_state.dict[self._key] is self._data()
def bulk_appender(self):
return self._data()._sa_appender
def append_with_event(self, item, initiator=None):
"""Add an entity to the collection, firing mutation events."""
self._data()._sa_appender(item, _sa_initiator=initiator)
def append_without_event(self, item):
"""Add or restore an entity to the collection, firing no events."""
self._data()._sa_appender(item, _sa_initiator=False)
def append_multiple_without_event(self, items):
"""Add or restore an entity to the collection, firing no events."""
appender = self._data()._sa_appender
for item in items:
appender(item, _sa_initiator=False)
def bulk_remover(self):
return self._data()._sa_remover
def remove_with_event(self, item, initiator=None):
"""Remove an entity from the collection, firing mutation events."""
self._data()._sa_remover(item, _sa_initiator=initiator)
def remove_without_event(self, item):
"""Remove an entity from the collection, firing no events."""
self._data()._sa_remover(item, _sa_initiator=False)
def clear_with_event(self, initiator=None):
"""Empty the collection, firing a mutation event for each entity."""
remover = self._data()._sa_remover
for item in list(self):
remover(item, _sa_initiator=initiator)
def clear_without_event(self):
"""Empty the collection, firing no events."""
remover = self._data()._sa_remover
for item in list(self):
remover(item, _sa_initiator=False)
def __iter__(self):
"""Iterate over entities in the collection."""
return iter(self._data()._sa_iterator())
def __len__(self):
"""Count entities in the collection."""
return len(list(self._data()._sa_iterator()))
def __bool__(self):
return True
__nonzero__ = __bool__
def fire_append_event(self, item, initiator=None):
"""Notify that a entity has entered the collection.
Initiator is a token owned by the InstrumentedAttribute that
initiated the membership mutation, and should be left as None
unless you are passing along an initiator value from a chained
operation.
"""
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
return self.attr.fire_append_event(
self.owner_state, self.owner_state.dict, item, initiator
)
else:
return item
def fire_remove_event(self, item, initiator=None):
"""Notify that a entity has been removed from the collection.
Initiator is the InstrumentedAttribute that initiated the membership
mutation, and should be left as None unless you are passing along
an initiator value from a chained operation.
"""
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
self.attr.fire_remove_event(
self.owner_state, self.owner_state.dict, item, initiator
)
def fire_pre_remove_event(self, initiator=None):
"""Notify that an entity is about to be removed from the collection.
Only called if the entity cannot be removed after calling
fire_remove_event().
"""
if self.invalidated:
self._warn_invalidated()
self.attr.fire_pre_remove_event(
self.owner_state, self.owner_state.dict, initiator=initiator
)
def __getstate__(self):
return {
"key": self._key,
"owner_state": self.owner_state,
"owner_cls": self.owner_state.class_,
"data": self.data,
"invalidated": self.invalidated,
}
def __setstate__(self, d):
self._key = d["key"]
self.owner_state = d["owner_state"]
self._data = weakref.ref(d["data"])
self._converter = d["data"]._sa_converter
d["data"]._sa_adapter = self
self.invalidated = d["invalidated"]
self.attr = getattr(d["owner_cls"], self._key).impl
def bulk_replace(values, existing_adapter, new_adapter, initiator=None):
"""Load a new collection, firing events based on prior like membership.
Appends instances in ``values`` onto the ``new_adapter``. Events will be
fired for any instance not present in the ``existing_adapter``. Any
instances in ``existing_adapter`` not present in ``values`` will have
remove events fired upon them.
:param values: An iterable of collection member instances
:param existing_adapter: A :class:`.CollectionAdapter` of
instances to be replaced
:param new_adapter: An empty :class:`.CollectionAdapter`
to load with ``values``
"""
assert isinstance(values, list)
idset = util.IdentitySet
existing_idset = idset(existing_adapter or ())
constants = existing_idset.intersection(values or ())
additions = idset(values or ()).difference(constants)
removals = existing_idset.difference(constants)
appender = new_adapter.bulk_appender()
for member in values or ():
if member in additions:
appender(member, _sa_initiator=initiator)
elif member in constants:
appender(member, _sa_initiator=False)
if existing_adapter:
for member in removals:
existing_adapter.fire_remove_event(member, initiator=initiator)
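# Rough illustration of the event semantics above (hypothetical members):
# if the existing collection holds [a, b] and ``values`` is [b, c], then
# b is re-appended without firing an event (it is a "constant"), c is
# appended with an append event, and a remove event is fired for a.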
def prepare_instrumentation(factory):
"""Prepare a callable for future use as a collection class factory.
Given a collection class factory (either a type or no-arg callable),
return another factory that will produce compatible instances when
called.
This function is responsible for converting collection_class=list
into the run-time behavior of collection_class=InstrumentedList.
"""
# Convert a builtin to 'Instrumented*'
if factory in __canned_instrumentation:
factory = __canned_instrumentation[factory]
# Create a specimen
cls = type(factory())
# Did factory callable return a builtin?
if cls in __canned_instrumentation:
# Wrap it so that it returns our 'Instrumented*'
factory = __converting_factory(cls, factory)
cls = factory()
# Instrument the class if needed.
if __instrumentation_mutex.acquire():
try:
if getattr(cls, "_sa_instrumented", None) != id(cls):
_instrument_class(cls)
finally:
__instrumentation_mutex.release()
return factory
def __converting_factory(specimen_cls, original_factory):
"""Return a wrapper that converts a "canned" collection like
set, dict, list into the Instrumented* version.
"""
instrumented_cls = __canned_instrumentation[specimen_cls]
def wrapper():
collection = original_factory()
return instrumented_cls(collection)
# often flawed but better than nothing
wrapper.__name__ = "%sWrapper" % original_factory.__name__
wrapper.__doc__ = original_factory.__doc__
return wrapper
def _instrument_class(cls):
"""Modify methods in a class and install instrumentation."""
# In the normal call flow, a request for any of the 3 basic collection
# types is transformed into one of our trivial subclasses
# (e.g. InstrumentedList). Catch anything else that sneaks in here...
if cls.__module__ == "__builtin__":
raise sa_exc.ArgumentError(
"Can not instrument a built-in type. Use a "
"subclass, even a trivial one."
)
roles, methods = _locate_roles_and_methods(cls)
_setup_canned_roles(cls, roles, methods)
_assert_required_roles(cls, roles, methods)
_set_collection_attributes(cls, roles, methods)
def _locate_roles_and_methods(cls):
"""search for _sa_instrument_role-decorated methods in
method resolution order, assign to roles.
"""
roles = {}
methods = {}
for supercls in cls.__mro__:
for name, method in vars(supercls).items():
if not util.callable(method):
continue
# note role declarations
if hasattr(method, "_sa_instrument_role"):
role = method._sa_instrument_role
assert role in (
"appender",
"remover",
"iterator",
"linker",
"converter",
)
roles.setdefault(role, name)
# transfer instrumentation requests from decorated function
# to the combined queue
before, after = None, None
if hasattr(method, "_sa_instrument_before"):
op, argument = method._sa_instrument_before
assert op in ("fire_append_event", "fire_remove_event")
before = op, argument
if hasattr(method, "_sa_instrument_after"):
op = method._sa_instrument_after
assert op in ("fire_append_event", "fire_remove_event")
after = op
if before:
methods[name] = before + (after,)
elif after:
methods[name] = None, None, after
return roles, methods
def _setup_canned_roles(cls, roles, methods):
"""see if this class has "canned" roles based on a known
collection type (dict, set, list). Apply those roles
as needed to the "roles" dictionary, and also
prepare "decorator" methods
"""
collection_type = util.duck_type_collection(cls)
if collection_type in __interfaces:
canned_roles, decorators = __interfaces[collection_type]
for role, name in canned_roles.items():
roles.setdefault(role, name)
# apply ABC auto-decoration to methods that need it
for method, decorator in decorators.items():
fn = getattr(cls, method, None)
if (
fn
and method not in methods
and not hasattr(fn, "_sa_instrumented")
):
setattr(cls, method, decorator(fn))
def _assert_required_roles(cls, roles, methods):
"""ensure all roles are present, and apply implicit instrumentation if
needed
"""
if "appender" not in roles or not hasattr(cls, roles["appender"]):
raise sa_exc.ArgumentError(
"Type %s must elect an appender method to be "
"a collection class" % cls.__name__
)
elif roles["appender"] not in methods and not hasattr(
getattr(cls, roles["appender"]), "_sa_instrumented"
):
methods[roles["appender"]] = ("fire_append_event", 1, None)
if "remover" not in roles or not hasattr(cls, roles["remover"]):
raise sa_exc.ArgumentError(
"Type %s must elect a remover method to be "
"a collection class" % cls.__name__
)
elif roles["remover"] not in methods and not hasattr(
getattr(cls, roles["remover"]), "_sa_instrumented"
):
methods[roles["remover"]] = ("fire_remove_event", 1, None)
if "iterator" not in roles or not hasattr(cls, roles["iterator"]):
raise sa_exc.ArgumentError(
"Type %s must elect an iterator method to be "
"a collection class" % cls.__name__
)
def _set_collection_attributes(cls, roles, methods):
"""apply ad-hoc instrumentation from decorators, class-level defaults
and implicit role declarations
"""
for method_name, (before, argument, after) in methods.items():
setattr(
cls,
method_name,
_instrument_membership_mutator(
getattr(cls, method_name), before, argument, after
),
)
# intern the role map
for role, method_name in roles.items():
setattr(cls, "_sa_%s" % role, getattr(cls, method_name))
cls._sa_adapter = None
if not hasattr(cls, "_sa_converter"):
cls._sa_converter = None
cls._sa_instrumented = id(cls)
def _instrument_membership_mutator(method, before, argument, after):
"""Route method args and/or return value through the collection
adapter."""
# This isn't smart enough to handle @adds(1) for 'def fn(self, (a, b))'
if before:
fn_args = list(
util.flatten_iterator(inspect_getfullargspec(method)[0])
)
if isinstance(argument, int):
pos_arg = argument
named_arg = len(fn_args) > argument and fn_args[argument] or None
else:
if argument in fn_args:
pos_arg = fn_args.index(argument)
else:
pos_arg = None
named_arg = argument
del fn_args
def wrapper(*args, **kw):
if before:
if pos_arg is None:
if named_arg not in kw:
raise sa_exc.ArgumentError(
"Missing argument %s" % argument
)
value = kw[named_arg]
else:
if len(args) > pos_arg:
value = args[pos_arg]
elif named_arg in kw:
value = kw[named_arg]
else:
raise sa_exc.ArgumentError(
"Missing argument %s" % argument
)
initiator = kw.pop("_sa_initiator", None)
if initiator is False:
executor = None
else:
executor = args[0]._sa_adapter
if before and executor:
getattr(executor, before)(value, initiator)
if not after or not executor:
return method(*args, **kw)
else:
res = method(*args, **kw)
if res is not None:
getattr(executor, after)(res, initiator)
return res
wrapper._sa_instrumented = True
if hasattr(method, "_sa_instrument_role"):
wrapper._sa_instrument_role = method._sa_instrument_role
wrapper.__name__ = method.__name__
wrapper.__doc__ = method.__doc__
return wrapper
def __set(collection, item, _sa_initiator=None):
"""Run set events.
This event always occurs before the collection is actually mutated.
"""
if _sa_initiator is not False:
executor = collection._sa_adapter
if executor:
item = executor.fire_append_event(item, _sa_initiator)
return item
def __del(collection, item, _sa_initiator=None):
"""Run del events.
This event occurs before the collection is actually mutated, *except*
in the case of a pop operation, in which case it occurs afterwards.
For pop operations, the __before_pop hook is called before the
operation occurs.
"""
if _sa_initiator is not False:
executor = collection._sa_adapter
if executor:
executor.fire_remove_event(item, _sa_initiator)
def __before_pop(collection, _sa_initiator=None):
"""An event which occurs on a before a pop() operation occurs."""
executor = collection._sa_adapter
if executor:
executor.fire_pre_remove_event(_sa_initiator)
def _list_decorators():
"""Tailored instrumentation wrappers for any list-like class."""
def _tidy(fn):
fn._sa_instrumented = True
fn.__doc__ = getattr(list, fn.__name__).__doc__
def append(fn):
def append(self, item, _sa_initiator=None):
item = __set(self, item, _sa_initiator)
fn(self, item)
_tidy(append)
return append
def remove(fn):
def remove(self, value, _sa_initiator=None):
__del(self, value, _sa_initiator)
# testlib.pragma exempt:__eq__
fn(self, value)
_tidy(remove)
return remove
def insert(fn):
def insert(self, index, value):
value = __set(self, value)
fn(self, index, value)
_tidy(insert)
return insert
def __setitem__(fn):
def __setitem__(self, index, value):
if not isinstance(index, slice):
existing = self[index]
if existing is not None:
__del(self, existing)
value = __set(self, value)
fn(self, index, value)
else:
# slice assignment requires __delitem__, insert, __len__
step = index.step or 1
start = index.start or 0
if start < 0:
start += len(self)
if index.stop is not None:
stop = index.stop
else:
stop = len(self)
if stop < 0:
stop += len(self)
if step == 1:
if value is self:
return
for i in range(start, stop, step):
if len(self) > start:
del self[start]
for i, item in enumerate(value):
self.insert(i + start, item)
else:
rng = list(range(start, stop, step))
if len(value) != len(rng):
raise ValueError(
"attempt to assign sequence of size %s to "
"extended slice of size %s"
% (len(value), len(rng))
)
for i, item in zip(rng, value):
self.__setitem__(i, item)
_tidy(__setitem__)
return __setitem__
def __delitem__(fn):
def __delitem__(self, index):
if not isinstance(index, slice):
item = self[index]
__del(self, item)
fn(self, index)
else:
# slice deletion requires __getslice__ and a slice-groking
# __getitem__ for stepped deletion
# note: not breaking this into atomic dels
for item in self[index]:
__del(self, item)
fn(self, index)
_tidy(__delitem__)
return __delitem__
if util.py2k:
def __setslice__(fn):
def __setslice__(self, start, end, values):
for value in self[start:end]:
__del(self, value)
values = [__set(self, value) for value in values]
fn(self, start, end, values)
_tidy(__setslice__)
return __setslice__
def __delslice__(fn):
def __delslice__(self, start, end):
for value in self[start:end]:
__del(self, value)
fn(self, start, end)
_tidy(__delslice__)
return __delslice__
def extend(fn):
def extend(self, iterable):
for value in iterable:
self.append(value)
_tidy(extend)
return extend
def __iadd__(fn):
def __iadd__(self, iterable):
# list.__iadd__ takes any iterable and seems to let TypeError
# raise as-is instead of returning NotImplemented
for value in iterable:
self.append(value)
return self
_tidy(__iadd__)
return __iadd__
def pop(fn):
def pop(self, index=-1):
__before_pop(self)
item = fn(self, index)
__del(self, item)
return item
_tidy(pop)
return pop
if not util.py2k:
def clear(fn):
def clear(self, index=-1):
for item in self:
__del(self, item)
fn(self)
_tidy(clear)
return clear
# __imul__ : not wrapping this. all members of the collection are already
# present, so no need to fire appends... wrapping it with an explicit
# decorator is still possible, so events on *= can be had if they're
# desired. hard to imagine a use case for __imul__, though.
l = locals().copy()
l.pop("_tidy")
return l
def _dict_decorators():
"""Tailored instrumentation wrappers for any dict-like mapping class."""
def _tidy(fn):
fn._sa_instrumented = True
fn.__doc__ = getattr(dict, fn.__name__).__doc__
Unspecified = util.symbol("Unspecified")
def __setitem__(fn):
def __setitem__(self, key, value, _sa_initiator=None):
if key in self:
__del(self, self[key], _sa_initiator)
value = __set(self, value, _sa_initiator)
fn(self, key, value)
_tidy(__setitem__)
return __setitem__
def __delitem__(fn):
def __delitem__(self, key, _sa_initiator=None):
if key in self:
__del(self, self[key], _sa_initiator)
fn(self, key)
_tidy(__delitem__)
return __delitem__
def clear(fn):
def clear(self):
for key in self:
__del(self, self[key])
fn(self)
_tidy(clear)
return clear
def pop(fn):
def pop(self, key, default=Unspecified):
__before_pop(self)
_to_del = key in self
if default is Unspecified:
item = fn(self, key)
else:
item = fn(self, key, default)
if _to_del:
__del(self, item)
return item
_tidy(pop)
return pop
def popitem(fn):
def popitem(self):
__before_pop(self)
item = fn(self)
__del(self, item[1])
return item
_tidy(popitem)
return popitem
def setdefault(fn):
def setdefault(self, key, default=None):
if key not in self:
self.__setitem__(key, default)
return default
else:
return self.__getitem__(key)
_tidy(setdefault)
return setdefault
def update(fn):
def update(self, __other=Unspecified, **kw):
if __other is not Unspecified:
if hasattr(__other, "keys"):
for key in list(__other):
if key not in self or self[key] is not __other[key]:
self[key] = __other[key]
else:
for key, value in __other:
if key not in self or self[key] is not value:
self[key] = value
for key in kw:
if key not in self or self[key] is not kw[key]:
self[key] = kw[key]
_tidy(update)
return update
l = locals().copy()
l.pop("_tidy")
l.pop("Unspecified")
return l
_set_binop_bases = (set, frozenset)
def _set_binops_check_strict(self, obj):
"""Allow only set, frozenset and self.__class__-derived
objects in binops."""
return isinstance(obj, _set_binop_bases + (self.__class__,))
def _set_binops_check_loose(self, obj):
"""Allow anything set-like to participate in set binops."""
return (
isinstance(obj, _set_binop_bases + (self.__class__,))
or util.duck_type_collection(obj) == set
)
def _set_decorators():
"""Tailored instrumentation wrappers for any set-like class."""
def _tidy(fn):
fn._sa_instrumented = True
fn.__doc__ = getattr(set, fn.__name__).__doc__
Unspecified = util.symbol("Unspecified")
def add(fn):
def add(self, value, _sa_initiator=None):
if value not in self:
value = __set(self, value, _sa_initiator)
# testlib.pragma exempt:__hash__
fn(self, value)
_tidy(add)
return add
def discard(fn):
def discard(self, value, _sa_initiator=None):
# testlib.pragma exempt:__hash__
if value in self:
__del(self, value, _sa_initiator)
# testlib.pragma exempt:__hash__
fn(self, value)
_tidy(discard)
return discard
def remove(fn):
def remove(self, value, _sa_initiator=None):
# testlib.pragma exempt:__hash__
if value in self:
__del(self, value, _sa_initiator)
# testlib.pragma exempt:__hash__
fn(self, value)
_tidy(remove)
return remove
def pop(fn):
def pop(self):
__before_pop(self)
item = fn(self)
# for set in particular, we have no way to access the item
# that will be popped before pop is called.
__del(self, item)
return item
_tidy(pop)
return pop
def clear(fn):
def clear(self):
for item in list(self):
self.remove(item)
_tidy(clear)
return clear
def update(fn):
def update(self, value):
for item in value:
self.add(item)
_tidy(update)
return update
def __ior__(fn):
def __ior__(self, value):
if not _set_binops_check_strict(self, value):
return NotImplemented
for item in value:
self.add(item)
return self
_tidy(__ior__)
return __ior__
def difference_update(fn):
def difference_update(self, value):
for item in value:
self.discard(item)
_tidy(difference_update)
return difference_update
def __isub__(fn):
def __isub__(self, value):
if not _set_binops_check_strict(self, value):
return NotImplemented
for item in value:
self.discard(item)
return self
_tidy(__isub__)
return __isub__
def intersection_update(fn):
def intersection_update(self, other):
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
_tidy(intersection_update)
return intersection_update
def __iand__(fn):
def __iand__(self, other):
if not _set_binops_check_strict(self, other):
return NotImplemented
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
return self
_tidy(__iand__)
return __iand__
def symmetric_difference_update(fn):
def symmetric_difference_update(self, other):
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
_tidy(symmetric_difference_update)
return symmetric_difference_update
def __ixor__(fn):
def __ixor__(self, other):
if not _set_binops_check_strict(self, other):
return NotImplemented
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
return self
_tidy(__ixor__)
return __ixor__
l = locals().copy()
l.pop("_tidy")
l.pop("Unspecified")
return l
class InstrumentedList(list):
"""An instrumented version of the built-in list."""
class InstrumentedSet(set):
"""An instrumented version of the built-in set."""
class InstrumentedDict(dict):
"""An instrumented version of the built-in dict."""
__canned_instrumentation = {
list: InstrumentedList,
set: InstrumentedSet,
dict: InstrumentedDict,
}
__interfaces = {
list: (
{"appender": "append", "remover": "remove", "iterator": "__iter__"},
_list_decorators(),
),
set: (
{"appender": "add", "remover": "remove", "iterator": "__iter__"},
_set_decorators(),
),
# decorators are required for dicts and object collections.
dict: ({"iterator": "values"}, _dict_decorators())
if util.py3k
else ({"iterator": "itervalues"}, _dict_decorators()),
}
class MappedCollection(dict):
"""A basic dictionary-based collection class.
Extends dict with the minimal bag semantics that collection
classes require. ``set`` and ``remove`` are implemented in terms
of a keying function: any callable that takes an object and
returns an object for use as a dictionary key.
"""
def __init__(self, keyfunc):
"""Create a new collection with keying provided by keyfunc.
keyfunc may be any callable that takes an object and returns an object
for use as a dictionary key.
The keyfunc will be called every time the ORM needs to add a member by
value-only (such as when loading instances from the database) or
        remove a member. The usual cautions about dictionary keying apply -
``keyfunc(object)`` should return the same output for the life of the
collection. Keying based on mutable properties can result in
unreachable instances "lost" in the collection.
"""
self.keyfunc = keyfunc
@collection.appender
@collection.internally_instrumented
def set(self, value, _sa_initiator=None):
"""Add an item by value, consulting the keyfunc for the key."""
key = self.keyfunc(value)
self.__setitem__(key, value, _sa_initiator)
@collection.remover
@collection.internally_instrumented
def remove(self, value, _sa_initiator=None):
"""Remove an item by value, consulting the keyfunc for the key."""
key = self.keyfunc(value)
# Let self[key] raise if key is not in this collection
# testlib.pragma exempt:__ne__
if self[key] != value:
raise sa_exc.InvalidRequestError(
"Can not remove '%s': collection holds '%s' for key '%s'. "
"Possible cause: is the MappedCollection key function "
"based on mutable properties or properties that only obtain "
"values after flush?" % (value, self[key], key)
)
self.__delitem__(key, _sa_initiator)
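# Illustrative sketch (hypothetical model names): MappedCollection can also be
# subclassed directly when a fixed keying scheme is desired::
#
#     class KeywordMap(MappedCollection):
#         def __init__(self):
#             MappedCollection.__init__(
#                 self, keyfunc=lambda note: note.keyword
#             )
#
#     notes = relationship("Note", collection_class=KeywordMap)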
# ensure instrumentation is associated with
# these built-in classes; if a user-defined class
# subclasses these and uses @internally_instrumented,
# the superclass is otherwise not instrumented.
# see [ticket:2406].
_instrument_class(MappedCollection)
_instrument_class(InstrumentedList)
_instrument_class(InstrumentedSet)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/dependency.py
|
# orm/dependency.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Relationship dependencies.
"""
from . import attributes
from . import exc
from . import sync
from . import unitofwork
from . import util as mapperutil
from .interfaces import MANYTOMANY
from .interfaces import MANYTOONE
from .interfaces import ONETOMANY
from .. import exc as sa_exc
from .. import sql
from .. import util
class DependencyProcessor(object):
def __init__(self, prop):
self.prop = prop
self.cascade = prop.cascade
self.mapper = prop.mapper
self.parent = prop.parent
self.secondary = prop.secondary
self.direction = prop.direction
self.post_update = prop.post_update
self.passive_deletes = prop.passive_deletes
self.passive_updates = prop.passive_updates
self.enable_typechecks = prop.enable_typechecks
if self.passive_deletes:
self._passive_delete_flag = attributes.PASSIVE_NO_INITIALIZE
else:
self._passive_delete_flag = attributes.PASSIVE_OFF
if self.passive_updates:
self._passive_update_flag = attributes.PASSIVE_NO_INITIALIZE
else:
self._passive_update_flag = attributes.PASSIVE_OFF
self.key = prop.key
if not self.prop.synchronize_pairs:
raise sa_exc.ArgumentError(
"Can't build a DependencyProcessor for relationship %s. "
"No target attributes to populate between parent and "
"child are present" % self.prop
)
@classmethod
def from_relationship(cls, prop):
return _direction_to_processor[prop.direction](prop)
def hasparent(self, state):
"""return True if the given object instance has a parent,
according to the ``InstrumentedAttribute`` handled by this
``DependencyProcessor``.
"""
return self.parent.class_manager.get_impl(self.key).hasparent(state)
def per_property_preprocessors(self, uow):
"""establish actions and dependencies related to a flush.
These actions will operate on all relevant states in
the aggregate.
"""
uow.register_preprocessor(self, True)
def per_property_flush_actions(self, uow):
after_save = unitofwork.ProcessAll(uow, self, False, True)
before_delete = unitofwork.ProcessAll(uow, self, True, True)
parent_saves = unitofwork.SaveUpdateAll(
uow, self.parent.primary_base_mapper
)
child_saves = unitofwork.SaveUpdateAll(
uow, self.mapper.primary_base_mapper
)
parent_deletes = unitofwork.DeleteAll(
uow, self.parent.primary_base_mapper
)
child_deletes = unitofwork.DeleteAll(
uow, self.mapper.primary_base_mapper
)
self.per_property_dependencies(
uow,
parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete,
)
def per_state_flush_actions(self, uow, states, isdelete):
"""establish actions and dependencies related to a flush.
These actions will operate on all relevant states
individually. This occurs only if there are cycles
in the 'aggregated' version of events.
"""
child_base_mapper = self.mapper.primary_base_mapper
child_saves = unitofwork.SaveUpdateAll(uow, child_base_mapper)
child_deletes = unitofwork.DeleteAll(uow, child_base_mapper)
# locate and disable the aggregate processors
# for this dependency
if isdelete:
before_delete = unitofwork.ProcessAll(uow, self, True, True)
before_delete.disabled = True
else:
after_save = unitofwork.ProcessAll(uow, self, False, True)
after_save.disabled = True
# check if the "child" side is part of the cycle
if child_saves not in uow.cycles:
# based on the current dependencies we use, the saves/
# deletes should always be in the 'cycles' collection
# together. if this changes, we will have to break up
# this method a bit more.
assert child_deletes not in uow.cycles
# child side is not part of the cycle, so we will link per-state
# actions to the aggregate "saves", "deletes" actions
child_actions = [(child_saves, False), (child_deletes, True)]
child_in_cycles = False
else:
child_in_cycles = True
# check if the "parent" side is part of the cycle
if not isdelete:
parent_saves = unitofwork.SaveUpdateAll(
uow, self.parent.base_mapper
)
parent_deletes = before_delete = None
if parent_saves in uow.cycles:
parent_in_cycles = True
else:
parent_deletes = unitofwork.DeleteAll(uow, self.parent.base_mapper)
parent_saves = after_save = None
if parent_deletes in uow.cycles:
parent_in_cycles = True
        # now create actions / dependencies for each state.
for state in states:
# detect if there's anything changed or loaded
# by a preprocessor on this state/attribute. In the
# case of deletes we may try to load missing items here as well.
sum_ = state.manager[self.key].impl.get_all_pending(
state,
state.dict,
self._passive_delete_flag
if isdelete
else attributes.PASSIVE_NO_INITIALIZE,
)
if not sum_:
continue
if isdelete:
before_delete = unitofwork.ProcessState(uow, self, True, state)
if parent_in_cycles:
parent_deletes = unitofwork.DeleteState(uow, state)
else:
after_save = unitofwork.ProcessState(uow, self, False, state)
if parent_in_cycles:
parent_saves = unitofwork.SaveUpdateState(uow, state)
if child_in_cycles:
child_actions = []
for child_state, child in sum_:
if child_state not in uow.states:
child_action = (None, None)
else:
(deleted, listonly) = uow.states[child_state]
if deleted:
child_action = (
unitofwork.DeleteState(uow, child_state),
True,
)
else:
child_action = (
unitofwork.SaveUpdateState(uow, child_state),
False,
)
child_actions.append(child_action)
# establish dependencies between our possibly per-state
# parent action and our possibly per-state child action.
for child_action, childisdelete in child_actions:
self.per_state_dependencies(
uow,
parent_saves,
parent_deletes,
child_action,
after_save,
before_delete,
isdelete,
childisdelete,
)
def presort_deletes(self, uowcommit, states):
return False
def presort_saves(self, uowcommit, states):
return False
def process_deletes(self, uowcommit, states):
pass
def process_saves(self, uowcommit, states):
pass
def prop_has_changes(self, uowcommit, states, isdelete):
if not isdelete or self.passive_deletes:
passive = attributes.PASSIVE_NO_INITIALIZE
elif self.direction is MANYTOONE:
passive = attributes.PASSIVE_NO_FETCH_RELATED
else:
passive = attributes.PASSIVE_OFF
for s in states:
# TODO: add a high speed method
# to InstanceState which returns: attribute
# has a non-None value, or had one
history = uowcommit.get_attribute_history(s, self.key, passive)
if history and not history.empty():
return True
else:
return (
states
and not self.prop._is_self_referential
and self.mapper in uowcommit.mappers
)
def _verify_canload(self, state):
if self.prop.uselist and state is None:
raise exc.FlushError(
"Can't flush None value found in "
"collection %s" % (self.prop,)
)
elif state is not None and not self.mapper._canload(
state, allow_subtypes=not self.enable_typechecks
):
if self.mapper._canload(state, allow_subtypes=True):
raise exc.FlushError(
"Attempting to flush an item of type "
"%(x)s as a member of collection "
'"%(y)s". Expected an object of type '
"%(z)s or a polymorphic subclass of "
"this type. If %(x)s is a subclass of "
'%(z)s, configure mapper "%(zm)s" to '
"load this subtype polymorphically, or "
"set enable_typechecks=False to allow "
"any subtype to be accepted for flush. "
% {
"x": state.class_,
"y": self.prop,
"z": self.mapper.class_,
"zm": self.mapper,
}
)
else:
raise exc.FlushError(
"Attempting to flush an item of type "
"%(x)s as a member of collection "
'"%(y)s". Expected an object of type '
"%(z)s or a polymorphic subclass of "
"this type."
% {
"x": state.class_,
"y": self.prop,
"z": self.mapper.class_,
}
)
def _synchronize(self, state, child, associationrow, clearkeys, uowcommit):
raise NotImplementedError()
def _get_reversed_processed_set(self, uow):
if not self.prop._reverse_property:
return None
process_key = tuple(
sorted([self.key] + [p.key for p in self.prop._reverse_property])
)
return uow.memo(("reverse_key", process_key), set)
def _post_update(self, state, uowcommit, related, is_m2o_delete=False):
for x in related:
if not is_m2o_delete or x is not None:
uowcommit.register_post_update(
state, [r for l, r in self.prop.synchronize_pairs]
)
break
def _pks_changed(self, uowcommit, state):
raise NotImplementedError()
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.prop)
class OneToManyDP(DependencyProcessor):
def per_property_dependencies(
self,
uow,
parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete,
):
if self.post_update:
child_post_updates = unitofwork.PostUpdateAll(
uow, self.mapper.primary_base_mapper, False
)
child_pre_updates = unitofwork.PostUpdateAll(
uow, self.mapper.primary_base_mapper, True
)
uow.dependencies.update(
[
(child_saves, after_save),
(parent_saves, after_save),
(after_save, child_post_updates),
(before_delete, child_pre_updates),
(child_pre_updates, parent_deletes),
(child_pre_updates, child_deletes),
]
)
else:
uow.dependencies.update(
[
(parent_saves, after_save),
(after_save, child_saves),
(after_save, child_deletes),
(child_saves, parent_deletes),
(child_deletes, parent_deletes),
(before_delete, child_saves),
(before_delete, child_deletes),
]
)
def per_state_dependencies(
self,
uow,
save_parent,
delete_parent,
child_action,
after_save,
before_delete,
isdelete,
childisdelete,
):
if self.post_update:
child_post_updates = unitofwork.PostUpdateAll(
uow, self.mapper.primary_base_mapper, False
)
child_pre_updates = unitofwork.PostUpdateAll(
uow, self.mapper.primary_base_mapper, True
)
# TODO: this whole block is not covered
# by any tests
if not isdelete:
if childisdelete:
uow.dependencies.update(
[
(child_action, after_save),
(after_save, child_post_updates),
]
)
else:
uow.dependencies.update(
[
(save_parent, after_save),
(child_action, after_save),
(after_save, child_post_updates),
]
)
else:
if childisdelete:
uow.dependencies.update(
[
(before_delete, child_pre_updates),
(child_pre_updates, delete_parent),
]
)
else:
uow.dependencies.update(
[
(before_delete, child_pre_updates),
(child_pre_updates, delete_parent),
]
)
elif not isdelete:
uow.dependencies.update(
[
(save_parent, after_save),
(after_save, child_action),
(save_parent, child_action),
]
)
else:
uow.dependencies.update(
[(before_delete, child_action), (child_action, delete_parent)]
)
def presort_deletes(self, uowcommit, states):
        # head object is being deleted, and we manage its list of
        # child objects; the child objects have to have their
        # foreign key to the parent set to NULL
should_null_fks = (
not self.cascade.delete and not self.passive_deletes == "all"
)
for state in states:
history = uowcommit.get_attribute_history(
state, self.key, self._passive_delete_flag
)
if history:
for child in history.deleted:
if child is not None and self.hasparent(child) is False:
if self.cascade.delete_orphan:
uowcommit.register_object(child, isdelete=True)
else:
uowcommit.register_object(child)
if should_null_fks:
for child in history.unchanged:
if child is not None:
uowcommit.register_object(
child, operation="delete", prop=self.prop
)
def presort_saves(self, uowcommit, states):
children_added = uowcommit.memo(("children_added", self), set)
should_null_fks = (
not self.cascade.delete_orphan
and not self.passive_deletes == "all"
)
for state in states:
pks_changed = self._pks_changed(uowcommit, state)
if not pks_changed or self.passive_updates:
passive = attributes.PASSIVE_NO_INITIALIZE
else:
passive = attributes.PASSIVE_OFF
history = uowcommit.get_attribute_history(state, self.key, passive)
if history:
for child in history.added:
if child is not None:
uowcommit.register_object(
child,
cancel_delete=True,
operation="add",
prop=self.prop,
)
children_added.update(history.added)
for child in history.deleted:
if not self.cascade.delete_orphan:
if should_null_fks:
uowcommit.register_object(
child,
isdelete=False,
operation="delete",
prop=self.prop,
)
elif self.hasparent(child) is False:
uowcommit.register_object(
child,
isdelete=True,
operation="delete",
prop=self.prop,
)
for c, m, st_, dct_ in self.mapper.cascade_iterator(
"delete", child
):
uowcommit.register_object(st_, isdelete=True)
if pks_changed:
if history:
for child in history.unchanged:
if child is not None:
uowcommit.register_object(
child,
False,
self.passive_updates,
operation="pk change",
prop=self.prop,
)
def process_deletes(self, uowcommit, states):
        # head object is being deleted, and we manage its list of
        # child objects; the child objects have to have their foreign
        # key to the parent set to NULL. this phase can be called
        # safely for any cascade but is unnecessary if delete cascade
        # is on.
if self.post_update or not self.passive_deletes == "all":
children_added = uowcommit.memo(("children_added", self), set)
for state in states:
history = uowcommit.get_attribute_history(
state, self.key, self._passive_delete_flag
)
if history:
for child in history.deleted:
if (
child is not None
and self.hasparent(child) is False
):
self._synchronize(
state, child, None, True, uowcommit, False
)
if self.post_update and child:
self._post_update(child, uowcommit, [state])
if self.post_update or not self.cascade.delete:
for child in set(history.unchanged).difference(
children_added
):
if child is not None:
self._synchronize(
state, child, None, True, uowcommit, False
)
if self.post_update and child:
self._post_update(
child, uowcommit, [state]
)
# technically, we can even remove each child from the
# collection here too. but this would be a somewhat
# inconsistent behavior since it wouldn't happen
# if the old parent wasn't deleted but child was moved.
def process_saves(self, uowcommit, states):
should_null_fks = (
not self.cascade.delete_orphan
and not self.passive_deletes == "all"
)
for state in states:
history = uowcommit.get_attribute_history(
state, self.key, attributes.PASSIVE_NO_INITIALIZE
)
if history:
for child in history.added:
self._synchronize(
state, child, None, False, uowcommit, False
)
if child is not None and self.post_update:
self._post_update(child, uowcommit, [state])
for child in history.deleted:
if (
should_null_fks
and not self.cascade.delete_orphan
and not self.hasparent(child)
):
self._synchronize(
state, child, None, True, uowcommit, False
)
if self._pks_changed(uowcommit, state):
for child in history.unchanged:
self._synchronize(
state, child, None, False, uowcommit, True
)
def _synchronize(
self, state, child, associationrow, clearkeys, uowcommit, pks_changed
):
source = state
dest = child
self._verify_canload(child)
if dest is None or (
not self.post_update and uowcommit.is_deleted(dest)
):
return
if clearkeys:
sync.clear(dest, self.mapper, self.prop.synchronize_pairs)
else:
sync.populate(
source,
self.parent,
dest,
self.mapper,
self.prop.synchronize_pairs,
uowcommit,
self.passive_updates and pks_changed,
)
def _pks_changed(self, uowcommit, state):
return sync.source_modified(
uowcommit, state, self.parent, self.prop.synchronize_pairs
)
class ManyToOneDP(DependencyProcessor):
def __init__(self, prop):
DependencyProcessor.__init__(self, prop)
for mapper in self.mapper.self_and_descendants:
mapper._dependency_processors.append(DetectKeySwitch(prop))
def per_property_dependencies(
self,
uow,
parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete,
):
if self.post_update:
parent_post_updates = unitofwork.PostUpdateAll(
uow, self.parent.primary_base_mapper, False
)
parent_pre_updates = unitofwork.PostUpdateAll(
uow, self.parent.primary_base_mapper, True
)
uow.dependencies.update(
[
(child_saves, after_save),
(parent_saves, after_save),
(after_save, parent_post_updates),
(after_save, parent_pre_updates),
(before_delete, parent_pre_updates),
(parent_pre_updates, child_deletes),
(parent_pre_updates, parent_deletes),
]
)
else:
uow.dependencies.update(
[
(child_saves, after_save),
(after_save, parent_saves),
(parent_saves, child_deletes),
(parent_deletes, child_deletes),
]
)
def per_state_dependencies(
self,
uow,
save_parent,
delete_parent,
child_action,
after_save,
before_delete,
isdelete,
childisdelete,
):
if self.post_update:
if not isdelete:
parent_post_updates = unitofwork.PostUpdateAll(
uow, self.parent.primary_base_mapper, False
)
if childisdelete:
uow.dependencies.update(
[
(after_save, parent_post_updates),
(parent_post_updates, child_action),
]
)
else:
uow.dependencies.update(
[
(save_parent, after_save),
(child_action, after_save),
(after_save, parent_post_updates),
]
)
else:
parent_pre_updates = unitofwork.PostUpdateAll(
uow, self.parent.primary_base_mapper, True
)
uow.dependencies.update(
[
(before_delete, parent_pre_updates),
(parent_pre_updates, delete_parent),
(parent_pre_updates, child_action),
]
)
elif not isdelete:
if not childisdelete:
uow.dependencies.update(
[(child_action, after_save), (after_save, save_parent)]
)
else:
uow.dependencies.update([(after_save, save_parent)])
else:
if childisdelete:
uow.dependencies.update([(delete_parent, child_action)])
def presort_deletes(self, uowcommit, states):
if self.cascade.delete or self.cascade.delete_orphan:
for state in states:
history = uowcommit.get_attribute_history(
state, self.key, self._passive_delete_flag
)
if history:
if self.cascade.delete_orphan:
todelete = history.sum()
else:
todelete = history.non_deleted()
for child in todelete:
if child is None:
continue
uowcommit.register_object(
child,
isdelete=True,
operation="delete",
prop=self.prop,
)
t = self.mapper.cascade_iterator("delete", child)
for c, m, st_, dct_ in t:
uowcommit.register_object(st_, isdelete=True)
def presort_saves(self, uowcommit, states):
for state in states:
uowcommit.register_object(state, operation="add", prop=self.prop)
if self.cascade.delete_orphan:
history = uowcommit.get_attribute_history(
state, self.key, self._passive_delete_flag
)
if history:
for child in history.deleted:
if self.hasparent(child) is False:
uowcommit.register_object(
child,
isdelete=True,
operation="delete",
prop=self.prop,
)
t = self.mapper.cascade_iterator("delete", child)
for c, m, st_, dct_ in t:
uowcommit.register_object(st_, isdelete=True)
def process_deletes(self, uowcommit, states):
if (
self.post_update
and not self.cascade.delete_orphan
and not self.passive_deletes == "all"
):
# post_update means we have to update our
# row to not reference the child object
# before we can DELETE the row
for state in states:
self._synchronize(state, None, None, True, uowcommit)
if state and self.post_update:
history = uowcommit.get_attribute_history(
state, self.key, self._passive_delete_flag
)
if history:
self._post_update(
state, uowcommit, history.sum(), is_m2o_delete=True
)
def process_saves(self, uowcommit, states):
for state in states:
history = uowcommit.get_attribute_history(
state, self.key, attributes.PASSIVE_NO_INITIALIZE
)
if history:
if history.added:
for child in history.added:
self._synchronize(
state, child, None, False, uowcommit, "add"
)
elif history.deleted:
self._synchronize(
state, None, None, True, uowcommit, "delete"
)
if self.post_update:
self._post_update(state, uowcommit, history.sum())
def _synchronize(
self,
state,
child,
associationrow,
clearkeys,
uowcommit,
operation=None,
):
if state is None or (
not self.post_update and uowcommit.is_deleted(state)
):
return
if (
operation is not None
and child is not None
and not uowcommit.session._contains_state(child)
):
util.warn(
"Object of type %s not in session, %s "
"operation along '%s' won't proceed"
% (mapperutil.state_class_str(child), operation, self.prop)
)
return
if clearkeys or child is None:
sync.clear(state, self.parent, self.prop.synchronize_pairs)
else:
self._verify_canload(child)
sync.populate(
child,
self.mapper,
state,
self.parent,
self.prop.synchronize_pairs,
uowcommit,
False,
)
class DetectKeySwitch(DependencyProcessor):
"""For many-to-one relationships with no one-to-many backref,
searches for parents through the unit of work when a primary
key has changed and updates them.
Theoretically, this approach could be expanded to support transparent
deletion of objects referenced via many-to-one as well, although
the current attribute system doesn't do enough bookkeeping for this
to be efficient.
"""
def per_property_preprocessors(self, uow):
if self.prop._reverse_property:
if self.passive_updates:
return
else:
if False in (
prop.passive_updates
for prop in self.prop._reverse_property
):
return
uow.register_preprocessor(self, False)
def per_property_flush_actions(self, uow):
parent_saves = unitofwork.SaveUpdateAll(uow, self.parent.base_mapper)
after_save = unitofwork.ProcessAll(uow, self, False, False)
uow.dependencies.update([(parent_saves, after_save)])
def per_state_flush_actions(self, uow, states, isdelete):
pass
def presort_deletes(self, uowcommit, states):
pass
def presort_saves(self, uow, states):
if not self.passive_updates:
# for non-passive updates, register in the preprocess stage
# so that mapper save_obj() gets a hold of changes
self._process_key_switches(states, uow)
def prop_has_changes(self, uow, states, isdelete):
if not isdelete and self.passive_updates:
d = self._key_switchers(uow, states)
return bool(d)
return False
def process_deletes(self, uowcommit, states):
assert False
def process_saves(self, uowcommit, states):
# for passive updates, register objects in the process stage
# so that we avoid ManyToOneDP's registering the object without
        # the listonly flag in its own preprocess stage (results in UPDATE
        # statements being emitted)
assert self.passive_updates
self._process_key_switches(states, uowcommit)
def _key_switchers(self, uow, states):
switched, notswitched = uow.memo(
("pk_switchers", self), lambda: (set(), set())
)
allstates = switched.union(notswitched)
for s in states:
if s not in allstates:
if self._pks_changed(uow, s):
switched.add(s)
else:
notswitched.add(s)
return switched
def _process_key_switches(self, deplist, uowcommit):
switchers = self._key_switchers(uowcommit, deplist)
if switchers:
# if primary key values have actually changed somewhere, perform
# a linear search through the UOW in search of a parent.
for state in uowcommit.session.identity_map.all_states():
if not issubclass(state.class_, self.parent.class_):
continue
dict_ = state.dict
related = state.get_impl(self.key).get(
state, dict_, passive=self._passive_update_flag
)
if (
related is not attributes.PASSIVE_NO_RESULT
and related is not None
):
if self.prop.uselist:
if not related:
continue
related_obj = related[0]
else:
related_obj = related
related_state = attributes.instance_state(related_obj)
if related_state in switchers:
uowcommit.register_object(
state, False, self.passive_updates
)
sync.populate(
related_state,
self.mapper,
state,
self.parent,
self.prop.synchronize_pairs,
uowcommit,
self.passive_updates,
)
def _pks_changed(self, uowcommit, state):
return bool(state.key) and sync.source_modified(
uowcommit, state, self.mapper, self.prop.synchronize_pairs
)
class ManyToManyDP(DependencyProcessor):
def per_property_dependencies(
self,
uow,
parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete,
):
uow.dependencies.update(
[
(parent_saves, after_save),
(child_saves, after_save),
(after_save, child_deletes),
# a rowswitch on the parent from deleted to saved
# can make this one occur, as the "save" may remove
# an element from the
# "deleted" list before we have a chance to
# process its child rows
(before_delete, parent_saves),
(before_delete, parent_deletes),
(before_delete, child_deletes),
(before_delete, child_saves),
]
)
def per_state_dependencies(
self,
uow,
save_parent,
delete_parent,
child_action,
after_save,
before_delete,
isdelete,
childisdelete,
):
if not isdelete:
if childisdelete:
uow.dependencies.update(
[(save_parent, after_save), (after_save, child_action)]
)
else:
uow.dependencies.update(
[(save_parent, after_save), (child_action, after_save)]
)
else:
uow.dependencies.update(
[(before_delete, child_action), (before_delete, delete_parent)]
)
def presort_deletes(self, uowcommit, states):
# TODO: no tests fail if this whole
# thing is removed !!!!
if not self.passive_deletes:
# if no passive deletes, load history on
# the collection, so that prop_has_changes()
# returns True
for state in states:
uowcommit.get_attribute_history(
state, self.key, self._passive_delete_flag
)
def presort_saves(self, uowcommit, states):
if not self.passive_updates:
# if no passive updates, load history on
# each collection where parent has changed PK,
# so that prop_has_changes() returns True
for state in states:
if self._pks_changed(uowcommit, state):
history = uowcommit.get_attribute_history(
state, self.key, attributes.PASSIVE_OFF
)
if not self.cascade.delete_orphan:
return
# check for child items removed from the collection
# if delete_orphan check is turned on.
for state in states:
history = uowcommit.get_attribute_history(
state, self.key, attributes.PASSIVE_NO_INITIALIZE
)
if history:
for child in history.deleted:
if self.hasparent(child) is False:
uowcommit.register_object(
child,
isdelete=True,
operation="delete",
prop=self.prop,
)
for c, m, st_, dct_ in self.mapper.cascade_iterator(
"delete", child
):
uowcommit.register_object(st_, isdelete=True)
def process_deletes(self, uowcommit, states):
secondary_delete = []
secondary_insert = []
secondary_update = []
processed = self._get_reversed_processed_set(uowcommit)
tmp = set()
for state in states:
# this history should be cached already, as
            # we loaded it in presort_deletes
history = uowcommit.get_attribute_history(
state, self.key, self._passive_delete_flag
)
if history:
for child in history.non_added():
if child is None or (
processed is not None and (state, child) in processed
):
continue
associationrow = {}
if not self._synchronize(
state,
child,
associationrow,
False,
uowcommit,
"delete",
):
continue
secondary_delete.append(associationrow)
tmp.update((c, state) for c in history.non_added())
if processed is not None:
processed.update(tmp)
self._run_crud(
uowcommit, secondary_insert, secondary_update, secondary_delete
)
def process_saves(self, uowcommit, states):
secondary_delete = []
secondary_insert = []
secondary_update = []
processed = self._get_reversed_processed_set(uowcommit)
tmp = set()
for state in states:
need_cascade_pks = not self.passive_updates and self._pks_changed(
uowcommit, state
)
if need_cascade_pks:
passive = attributes.PASSIVE_OFF
else:
passive = attributes.PASSIVE_NO_INITIALIZE
history = uowcommit.get_attribute_history(state, self.key, passive)
if history:
for child in history.added:
if processed is not None and (state, child) in processed:
continue
associationrow = {}
if not self._synchronize(
state, child, associationrow, False, uowcommit, "add"
):
continue
secondary_insert.append(associationrow)
for child in history.deleted:
if processed is not None and (state, child) in processed:
continue
associationrow = {}
if not self._synchronize(
state,
child,
associationrow,
False,
uowcommit,
"delete",
):
continue
secondary_delete.append(associationrow)
tmp.update((c, state) for c in history.added + history.deleted)
if need_cascade_pks:
for child in history.unchanged:
associationrow = {}
sync.update(
state,
self.parent,
associationrow,
"old_",
self.prop.synchronize_pairs,
)
sync.update(
child,
self.mapper,
associationrow,
"old_",
self.prop.secondary_synchronize_pairs,
)
secondary_update.append(associationrow)
if processed is not None:
processed.update(tmp)
self._run_crud(
uowcommit, secondary_insert, secondary_update, secondary_delete
)
def _run_crud(
self, uowcommit, secondary_insert, secondary_update, secondary_delete
):
connection = uowcommit.transaction.connection(self.mapper)
if secondary_delete:
associationrow = secondary_delete[0]
statement = self.secondary.delete(
sql.and_(
*[
c == sql.bindparam(c.key, type_=c.type)
for c in self.secondary.c
if c.key in associationrow
]
)
)
result = connection.execute(statement, secondary_delete)
if (
result.supports_sane_multi_rowcount()
) and result.rowcount != len(secondary_delete):
raise exc.StaleDataError(
"DELETE statement on table '%s' expected to delete "
"%d row(s); Only %d were matched."
% (
self.secondary.description,
len(secondary_delete),
result.rowcount,
)
)
if secondary_update:
associationrow = secondary_update[0]
statement = self.secondary.update(
sql.and_(
*[
c == sql.bindparam("old_" + c.key, type_=c.type)
for c in self.secondary.c
if c.key in associationrow
]
)
)
result = connection.execute(statement, secondary_update)
if (
result.supports_sane_multi_rowcount()
) and result.rowcount != len(secondary_update):
raise exc.StaleDataError(
"UPDATE statement on table '%s' expected to update "
"%d row(s); Only %d were matched."
% (
self.secondary.description,
len(secondary_update),
result.rowcount,
)
)
if secondary_insert:
statement = self.secondary.insert()
connection.execute(statement, secondary_insert)
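# Illustrative sketch: the DML batches assembled above operate against the
# "secondary" table of a many-to-many relationship().  A typical mapping that
# exercises this path, using hypothetical table/class names, might look like:
#
#     association = Table(
#         "association", Base.metadata,
#         Column("left_id", Integer, ForeignKey("left.id"), primary_key=True),
#         Column("right_id", Integer, ForeignKey("right.id"), primary_key=True),
#     )
#
#     class Parent(Base):
#         __tablename__ = "left"
#         id = Column(Integer, primary_key=True)
#         children = relationship("Child", secondary=association)
#
#     class Child(Base):
#         __tablename__ = "right"
#         id = Column(Integer, primary_key=True)
#
# Rows added to or removed from Parent.children become the executemany
# parameter sets passed to connection.execute() here; a rowcount mismatch on
# DELETE/UPDATE raises the StaleDataError seen above.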
def _synchronize(
self, state, child, associationrow, clearkeys, uowcommit, operation
):
# this checks for None if uselist=True
self._verify_canload(child)
# but if uselist=False we get here. If child is None,
# no association row can be generated, so return.
if child is None:
return False
if child is not None and not uowcommit.session._contains_state(child):
if not child.deleted:
util.warn(
"Object of type %s not in session, %s "
"operation along '%s' won't proceed"
% (mapperutil.state_class_str(child), operation, self.prop)
)
return False
sync.populate_dict(
state, self.parent, associationrow, self.prop.synchronize_pairs
)
sync.populate_dict(
child,
self.mapper,
associationrow,
self.prop.secondary_synchronize_pairs,
)
return True
def _pks_changed(self, uowcommit, state):
return sync.source_modified(
uowcommit, state, self.parent, self.prop.synchronize_pairs
)
_direction_to_processor = {
ONETOMANY: OneToManyDP,
MANYTOONE: ManyToOneDP,
MANYTOMANY: ManyToManyDP,
}
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/deprecated_interfaces.py
|
# orm/deprecated_interfaces.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .interfaces import EXT_CONTINUE
from .. import event
from .. import util
@util.langhelpers.dependency_for("sqlalchemy.orm.interfaces")
class MapperExtension(object):
"""Base implementation for :class:`_orm.Mapper` event hooks.
.. deprecated:: 0.7
:class:`.MapperExtension` is deprecated and will be removed in a future
release. Please refer to :func:`.event.listen` in conjunction with
the :class:`.MapperEvents` listener interface.
New extension classes subclass :class:`.MapperExtension` and are specified
using the ``extension`` mapper() argument, which is a single
:class:`.MapperExtension` or a list of such::
from sqlalchemy.orm.interfaces import MapperExtension
class MyExtension(MapperExtension):
def before_insert(self, mapper, connection, instance):
print "instance %s before insert !" % instance
m = mapper(User, users_table, extension=MyExtension())
A single mapper can maintain a chain of ``MapperExtension``
objects. When a particular mapping event occurs, the
corresponding method on each ``MapperExtension`` is invoked
serially, and each method has the ability to halt the chain
from proceeding further::
m = mapper(User, users_table, extension=[ext1, ext2, ext3])
Each ``MapperExtension`` method returns the symbol
EXT_CONTINUE by default. This symbol generally means "move
to the next ``MapperExtension`` for processing". For methods
that return objects like translated rows or new object
instances, EXT_CONTINUE means the result of the method
should be ignored. In some cases it's required for a
default mapper activity to be performed, such as adding a
new instance to a result list.
The symbol EXT_STOP indicates, within a chain
of ``MapperExtension`` objects, that the chain will be stopped
when this symbol is returned. Like EXT_CONTINUE, it also
indicates in some cases that a default
mapper activity will not be performed.
"""
@classmethod
def _adapt_instrument_class(cls, self, listener):
cls._adapt_listener_methods(self, listener, ("instrument_class",))
@classmethod
def _adapt_listener(cls, self, listener):
cls._adapt_listener_methods(
self,
listener,
(
"init_instance",
"init_failed",
"reconstruct_instance",
"before_insert",
"after_insert",
"before_update",
"after_update",
"before_delete",
"after_delete",
),
)
@classmethod
def _adapt_listener_methods(cls, self, listener, methods):
for meth in methods:
me_meth = getattr(MapperExtension, meth)
ls_meth = getattr(listener, meth)
if not util.methods_equivalent(me_meth, ls_meth):
util.warn_deprecated(
"MapperExtension.%s is deprecated. The "
"MapperExtension class will be removed in a future "
"release. Please transition to the @event interface, "
"using @event.listens_for(mapped_class, '%s')."
% (meth, meth)
)
if meth == "reconstruct_instance":
def go(ls_meth):
def reconstruct(instance, ctx):
ls_meth(self, instance)
return reconstruct
event.listen(
self.class_manager,
"load",
go(ls_meth),
raw=False,
propagate=True,
)
elif meth == "init_instance":
def go(ls_meth):
def init_instance(instance, args, kwargs):
ls_meth(
self,
self.class_,
self.class_manager.original_init,
instance,
args,
kwargs,
)
return init_instance
event.listen(
self.class_manager,
"init",
go(ls_meth),
raw=False,
propagate=True,
)
elif meth == "init_failed":
def go(ls_meth):
def init_failed(instance, args, kwargs):
util.warn_exception(
ls_meth,
self,
self.class_,
self.class_manager.original_init,
instance,
args,
kwargs,
)
return init_failed
event.listen(
self.class_manager,
"init_failure",
go(ls_meth),
raw=False,
propagate=True,
)
else:
event.listen(
self,
"%s" % meth,
ls_meth,
raw=False,
retval=True,
propagate=True,
)
def instrument_class(self, mapper, class_):
"""Receive a class when the mapper is first constructed, and has
applied instrumentation to the mapped class.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def init_instance(self, mapper, class_, oldinit, instance, args, kwargs):
"""Receive an instance when its constructor is called.
This method is only called during a userland construction of
an object. It is not called when an object is loaded from the
database.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def init_failed(self, mapper, class_, oldinit, instance, args, kwargs):
"""Receive an instance when its constructor has been called,
and raised an exception.
This method is only called during a userland construction of
an object. It is not called when an object is loaded from the
database.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def reconstruct_instance(self, mapper, instance):
"""Receive an object instance after it has been created via
``__new__``, and after initial attribute population has
occurred.
This typically occurs when the instance is created based on
incoming result rows, and is only called once for that
instance's lifetime.
Note that during a result-row load, this method is called upon
the first row received for this instance. Note that some
attributes and collections may or may not be loaded or even
initialized, depending on what's present in the result rows.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def before_insert(self, mapper, connection, instance):
"""Receive an object instance before that instance is inserted
into its table.
This is a good place to set up primary key values and such
that aren't handled otherwise.
Column-based attributes can be modified within this method
which will result in the new value being inserted. However
*no* changes to the overall flush plan can be made, and
manipulation of the ``Session`` will not have the desired effect.
To manipulate the ``Session`` within an extension, use
``SessionExtension``.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def after_insert(self, mapper, connection, instance):
"""Receive an object instance after that instance is inserted.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def before_update(self, mapper, connection, instance):
"""Receive an object instance before that instance is updated.
Note that this method is called for all instances that are marked as
"dirty", even those which have no net changes to their column-based
attributes. An object is marked as dirty when any of its column-based
attributes have a "set attribute" operation called or when any of its
collections are modified. If, at update time, no column-based
attributes have any net changes, no UPDATE statement will be issued.
This means that an instance being sent to before_update is *not* a
guarantee that an UPDATE statement will be issued (although you can
affect the outcome here).
To detect if the column-based attributes on the object have net
changes, and will therefore generate an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
Column-based attributes can be modified within this method
which will result in the new value being updated. However
*no* changes to the overall flush plan can be made, and
manipulation of the ``Session`` will not have the desired effect.
To manipulate the ``Session`` within an extension, use
``SessionExtension``.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def after_update(self, mapper, connection, instance):
"""Receive an object instance after that instance is updated.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def before_delete(self, mapper, connection, instance):
"""Receive an object instance before that instance is deleted.
Note that *no* changes to the overall flush plan can be made
here; and manipulation of the ``Session`` will not have the
desired effect. To manipulate the ``Session`` within an
extension, use ``SessionExtension``.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def after_delete(self, mapper, connection, instance):
"""Receive an object instance after that instance is deleted.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
@util.langhelpers.dependency_for("sqlalchemy.orm.interfaces")
class SessionExtension(object):
"""Base implementation for :class:`.Session` event hooks.
.. deprecated:: 0.7
:class:`.SessionExtension` is deprecated and will be removed in a future
release. Please refer to :func:`.event.listen` in conjunction with
the :class:`.SessionEvents` listener interface.
Subclasses may be installed into a :class:`.Session` (or
:class:`.sessionmaker`) using the ``extension`` keyword
argument::
from sqlalchemy.orm.interfaces import SessionExtension
class MySessionExtension(SessionExtension):
def before_commit(self, session):
print "before commit!"
Session = sessionmaker(extension=MySessionExtension())
The same :class:`.SessionExtension` instance can be used
with any number of sessions.
"""
@classmethod
def _adapt_listener(cls, self, listener):
for meth in [
"before_commit",
"after_commit",
"after_rollback",
"before_flush",
"after_flush",
"after_flush_postexec",
"after_begin",
"after_attach",
"after_bulk_update",
"after_bulk_delete",
]:
me_meth = getattr(SessionExtension, meth)
ls_meth = getattr(listener, meth)
if not util.methods_equivalent(me_meth, ls_meth):
util.warn_deprecated(
"SessionExtension.%s is deprecated. The "
"SessionExtension class will be removed in a future "
"release. Please transition to the @event interface, "
"using @event.listens_for(Session, '%s')." % (meth, meth)
)
event.listen(self, meth, getattr(listener, meth))
def before_commit(self, session):
"""Execute right before commit is called.
Note that this may not be per-flush if a longer running
transaction is ongoing."""
def after_commit(self, session):
"""Execute after a commit has occurred.
Note that this may not be per-flush if a longer running
transaction is ongoing."""
def after_rollback(self, session):
"""Execute after a rollback has occurred.
Note that this may not be per-flush if a longer running
transaction is ongoing."""
def before_flush(self, session, flush_context, instances):
"""Execute before flush process has started.
`instances` is an optional list of objects which were passed to
the ``flush()`` method. """
def after_flush(self, session, flush_context):
"""Execute after flush has completed, but before commit has been
called.
Note that the session's state is still in pre-flush, i.e. 'new',
'dirty', and 'deleted' lists still show pre-flush state as well
as the history settings on instance attributes."""
def after_flush_postexec(self, session, flush_context):
"""Execute after flush has completed, and after the post-exec
state occurs.
This will be when the 'new', 'dirty', and 'deleted' lists are in
their final state. An actual commit() may or may not have
occurred, depending on whether or not the flush started its own
transaction or participated in a larger transaction. """
def after_begin(self, session, transaction, connection):
"""Execute after a transaction is begun on a connection
`transaction` is the SessionTransaction. This method is called
after an engine level transaction is begun on a connection. """
def after_attach(self, session, instance):
"""Execute after an instance is attached to a session.
This is called after an add, delete or merge. """
def after_bulk_update(self, session, query, query_context, result):
"""Execute after a bulk update operation to the session.
This is called after a session.query(...).update()
`query` is the query object that this update operation was
called on. `query_context` was the query context object.
`result` is the result object returned from the bulk operation.
"""
def after_bulk_delete(self, session, query, query_context, result):
"""Execute after a bulk delete operation to the session.
This is called after a session.query(...).delete()
`query` is the query object that this delete operation was
called on. `query_context` was the query context object.
`result` is the result object returned from the bulk operation.
"""
@util.langhelpers.dependency_for("sqlalchemy.orm.interfaces")
class AttributeExtension(object):
"""Base implementation for :class:`.AttributeImpl` event hooks, events
that fire upon attribute mutations in user code.
.. deprecated:: 0.7
:class:`.AttributeExtension` is deprecated and will be removed in a
future release. Please refer to :func:`.event.listen` in conjunction
with the :class:`.AttributeEvents` listener interface.
:class:`.AttributeExtension` is used to listen for set,
remove, and append events on individual mapped attributes.
It is established on an individual mapped attribute using
the `extension` argument, available on
:func:`.column_property`, :func:`_orm.relationship`, and
others::
from sqlalchemy.orm.interfaces import AttributeExtension
from sqlalchemy.orm import mapper, relationship, column_property
class MyAttrExt(AttributeExtension):
def append(self, state, value, initiator):
print "append event !"
return value
def set(self, state, value, oldvalue, initiator):
print "set event !"
return value
mapper(SomeClass, sometable, properties={
'foo':column_property(sometable.c.foo, extension=MyAttrExt()),
'bar':relationship(Bar, extension=MyAttrExt())
})
Note that the :class:`.AttributeExtension` methods
:meth:`~.AttributeExtension.append` and
:meth:`~.AttributeExtension.set` need to return the
``value`` parameter. The returned value is used as the
effective value, and allows the extension to change what is
ultimately persisted.
AttributeExtension is assembled within the descriptors associated
with a mapped class.
"""
active_history = True
"""indicates that the set() method would like to receive the 'old' value,
even if it means firing lazy callables.
Note that ``active_history`` can also be set directly via
:func:`.column_property` and :func:`_orm.relationship`.
"""
@classmethod
def _adapt_listener(cls, self, listener):
for meth in ["append", "remove", "set"]:
me_meth = getattr(AttributeExtension, meth)
ls_meth = getattr(listener, meth)
if not util.methods_equivalent(me_meth, ls_meth):
util.warn_deprecated(
"AttributeExtension.%s is deprecated. The "
"AttributeExtension class will be removed in a future "
"release. Please transition to the @event interface, "
"using @event.listens_for(Class.attribute, '%s')."
% (meth, meth)
)
event.listen(
self,
"append",
listener.append,
active_history=listener.active_history,
raw=True,
retval=True,
)
event.listen(
self,
"remove",
listener.remove,
active_history=listener.active_history,
raw=True,
retval=True,
)
event.listen(
self,
"set",
listener.set,
active_history=listener.active_history,
raw=True,
retval=True,
)
def append(self, state, value, initiator):
"""Receive a collection append event.
The returned value will be used as the actual value to be
appended.
"""
return value
def remove(self, state, value, initiator):
"""Receive a remove event.
No return value is defined.
"""
pass
def set(self, state, value, oldvalue, initiator):
"""Receive a set event.
The returned value will be used as the actual value to be
set.
"""
return value
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/descriptor_props.py
|
# orm/descriptor_props.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Descriptor properties are more "auxiliary" properties
that exist as configurational elements, but don't participate
as actively in the load/persist ORM loop.
"""
from . import attributes
from . import properties
from . import query
from .interfaces import MapperProperty
from .interfaces import PropComparator
from .util import _none_set
from .. import event
from .. import exc as sa_exc
from .. import schema
from .. import sql
from .. import util
from ..sql import expression
class DescriptorProperty(MapperProperty):
""":class:`.MapperProperty` which proxies access to a
user-defined descriptor."""
doc = None
uses_objects = False
def instrument_class(self, mapper):
prop = self
class _ProxyImpl(object):
accepts_scalar_loader = False
expire_missing = True
collection = False
@property
def uses_objects(self):
return prop.uses_objects
def __init__(self, key):
self.key = key
if hasattr(prop, "get_history"):
def get_history(
self, state, dict_, passive=attributes.PASSIVE_OFF
):
return prop.get_history(state, dict_, passive)
if self.descriptor is None:
desc = getattr(mapper.class_, self.key, None)
if mapper._is_userland_descriptor(desc):
self.descriptor = desc
if self.descriptor is None:
def fset(obj, value):
setattr(obj, self.name, value)
def fdel(obj):
delattr(obj, self.name)
def fget(obj):
return getattr(obj, self.name)
self.descriptor = property(fget=fget, fset=fset, fdel=fdel)
proxy_attr = attributes.create_proxied_attribute(self.descriptor)(
self.parent.class_,
self.key,
self.descriptor,
lambda: self._comparator_factory(mapper),
doc=self.doc,
original_property=self,
)
proxy_attr.impl = _ProxyImpl(self.key)
mapper.class_manager.instrument_attribute(self.key, proxy_attr)
@util.langhelpers.dependency_for("sqlalchemy.orm.properties", add_to_all=True)
class CompositeProperty(DescriptorProperty):
"""Defines a "composite" mapped attribute, representing a collection
of columns as one attribute.
:class:`.CompositeProperty` is constructed using the :func:`.composite`
function.
.. seealso::
:ref:`mapper_composite`
"""
@util.deprecated_params(
extension=(
"0.7",
":class:`.AttributeExtension` is deprecated in favor of the "
":class:`.AttributeEvents` listener interface. The "
":paramref:`.composite.extension` parameter will be "
"removed in a future release.",
)
)
def __init__(self, class_, *attrs, **kwargs):
r"""Return a composite column-based property for use with a Mapper.
See the mapping documentation section :ref:`mapper_composite` for a
full usage example.
The :class:`.MapperProperty` returned by :func:`.composite`
is the :class:`.CompositeProperty`.
:param class\_:
The "composite type" class, or any classmethod or callable which
will produce a new instance of the composite object given the
column values in order.
:param \*cols:
List of Column objects to be mapped.
:param active_history=False:
When ``True``, indicates that the "previous" value for a
scalar attribute should be loaded when replaced, if not
already loaded. See the same flag on :func:`.column_property`.
:param group:
A group name for this property when marked as deferred.
:param deferred:
When True, the column property is "deferred", meaning that it does
not load immediately, and is instead loaded when the attribute is
first accessed on an instance. See also
:func:`~sqlalchemy.orm.deferred`.
:param comparator_factory: a class which extends
:class:`.CompositeProperty.Comparator` which provides custom SQL
clause generation for comparison operations.
:param doc:
optional string that will be applied as the doc on the
class-bound descriptor.
:param info: Optional data dictionary which will be populated into the
:attr:`.MapperProperty.info` attribute of this object.
:param extension:
an :class:`.AttributeExtension` instance,
or list of extensions, which will be prepended to the list of
attribute listeners for the resulting descriptor placed on the
class.
"""
super(CompositeProperty, self).__init__()
self.attrs = attrs
self.composite_class = class_
self.active_history = kwargs.get("active_history", False)
self.deferred = kwargs.get("deferred", False)
self.group = kwargs.get("group", None)
self.comparator_factory = kwargs.pop(
"comparator_factory", self.__class__.Comparator
)
if "info" in kwargs:
self.info = kwargs.pop("info")
util.set_creation_order(self)
self._create_descriptor()
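# Illustrative sketch of a composite value class consumed by this property,
# using the hypothetical Point/Vertex names from the mapper_composite
# documentation:
#
#     class Point(object):
#         def __init__(self, x, y):
#             self.x = x
#             self.y = y
#
#         def __composite_values__(self):
#             # called by CompositeProperty to obtain the column values
#             return self.x, self.y
#
#         def __eq__(self, other):
#             return (
#                 isinstance(other, Point)
#                 and other.x == self.x
#                 and other.y == self.y
#             )
#
#     class Vertex(Base):
#         __tablename__ = "vertices"
#         id = Column(Integer, primary_key=True)
#         x1 = Column(Integer)
#         y1 = Column(Integer)
#         start = composite(Point, x1, y1)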
def instrument_class(self, mapper):
super(CompositeProperty, self).instrument_class(mapper)
self._setup_event_handlers()
def do_init(self):
"""Initialization which occurs after the :class:`.CompositeProperty`
has been associated with its parent mapper.
"""
self._setup_arguments_on_columns()
def _create_descriptor(self):
"""Create the Python descriptor that will serve as
the access point on instances of the mapped class.
"""
def fget(instance):
dict_ = attributes.instance_dict(instance)
state = attributes.instance_state(instance)
if self.key not in dict_:
# key not present. Iterate through related
# attributes, retrieve their values. This
# ensures they all load.
values = [
getattr(instance, key) for key in self._attribute_keys
]
# current expected behavior here is that the composite is
# created on access if the object is persistent or if
# col attributes have non-None values. This would be better
# if the composite were created unconditionally,
# but that would be a behavioral change.
if self.key not in dict_ and (
state.key is not None or not _none_set.issuperset(values)
):
dict_[self.key] = self.composite_class(*values)
state.manager.dispatch.refresh(state, None, [self.key])
return dict_.get(self.key, None)
def fset(instance, value):
dict_ = attributes.instance_dict(instance)
state = attributes.instance_state(instance)
attr = state.manager[self.key]
previous = dict_.get(self.key, attributes.NO_VALUE)
for fn in attr.dispatch.set:
value = fn(state, value, previous, attr.impl)
dict_[self.key] = value
if value is None:
for key in self._attribute_keys:
setattr(instance, key, None)
else:
for key, value in zip(
self._attribute_keys, value.__composite_values__()
):
setattr(instance, key, value)
def fdel(instance):
state = attributes.instance_state(instance)
dict_ = attributes.instance_dict(instance)
previous = dict_.pop(self.key, attributes.NO_VALUE)
attr = state.manager[self.key]
attr.dispatch.remove(state, previous, attr.impl)
for key in self._attribute_keys:
setattr(instance, key, None)
self.descriptor = property(fget, fset, fdel)
@util.memoized_property
def _comparable_elements(self):
return [getattr(self.parent.class_, prop.key) for prop in self.props]
@util.memoized_property
def props(self):
props = []
for attr in self.attrs:
if isinstance(attr, str):
prop = self.parent.get_property(attr, _configure_mappers=False)
elif isinstance(attr, schema.Column):
prop = self.parent._columntoproperty[attr]
elif isinstance(attr, attributes.InstrumentedAttribute):
prop = attr.property
else:
raise sa_exc.ArgumentError(
"Composite expects Column objects or mapped "
"attributes/attribute names as arguments, got: %r"
% (attr,)
)
props.append(prop)
return props
@property
def columns(self):
return [a for a in self.attrs if isinstance(a, schema.Column)]
def _setup_arguments_on_columns(self):
"""Propagate configuration arguments made on this composite
to the target columns, for those that apply.
"""
for prop in self.props:
prop.active_history = self.active_history
if self.deferred:
prop.deferred = self.deferred
prop.strategy_key = (("deferred", True), ("instrument", True))
prop.group = self.group
def _setup_event_handlers(self):
"""Establish events that populate/expire the composite attribute."""
def load_handler(state, *args):
_load_refresh_handler(state, args, is_refresh=False)
def refresh_handler(state, *args):
_load_refresh_handler(state, args, is_refresh=True)
def _load_refresh_handler(state, args, is_refresh):
dict_ = state.dict
if not is_refresh and self.key in dict_:
return
# if column elements aren't loaded, skip.
# __get__() will initiate a load for those
# columns
for k in self._attribute_keys:
if k not in dict_:
return
dict_[self.key] = self.composite_class(
*[state.dict[key] for key in self._attribute_keys]
)
def expire_handler(state, keys):
if keys is None or set(self._attribute_keys).intersection(keys):
state.dict.pop(self.key, None)
def insert_update_handler(mapper, connection, state):
"""After an insert or update, some columns may be expired due
to server side defaults, or re-populated due to client side
defaults. Pop out the composite value here so that it
recreates.
"""
state.dict.pop(self.key, None)
event.listen(
self.parent, "after_insert", insert_update_handler, raw=True
)
event.listen(
self.parent, "after_update", insert_update_handler, raw=True
)
event.listen(
self.parent, "load", load_handler, raw=True, propagate=True
)
event.listen(
self.parent, "refresh", refresh_handler, raw=True, propagate=True
)
event.listen(
self.parent, "expire", expire_handler, raw=True, propagate=True
)
# TODO: need a deserialize hook here
@util.memoized_property
def _attribute_keys(self):
return [prop.key for prop in self.props]
def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF):
"""Provided for userland code that uses attributes.get_history()."""
added = []
deleted = []
has_history = False
for prop in self.props:
key = prop.key
hist = state.manager[key].impl.get_history(state, dict_)
if hist.has_changes():
has_history = True
non_deleted = hist.non_deleted()
if non_deleted:
added.extend(non_deleted)
else:
added.append(None)
if hist.deleted:
deleted.extend(hist.deleted)
else:
deleted.append(None)
if has_history:
return attributes.History(
[self.composite_class(*added)],
(),
[self.composite_class(*deleted)],
)
else:
return attributes.History((), [self.composite_class(*added)], ())
def _comparator_factory(self, mapper):
return self.comparator_factory(self, mapper)
class CompositeBundle(query.Bundle):
def __init__(self, property_, expr):
self.property = property_
super(CompositeProperty.CompositeBundle, self).__init__(
property_.key, *expr
)
def create_row_processor(self, query, procs, labels):
def proc(row):
return self.property.composite_class(
*[proc(row) for proc in procs]
)
return proc
class Comparator(PropComparator):
"""Produce boolean, comparison, and other operators for
:class:`.CompositeProperty` attributes.
See the example in :ref:`composite_operations` for an overview
of usage, as well as the documentation for :class:`.PropComparator`.
.. seealso::
:class:`.PropComparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
__hash__ = None
@property
def clauses(self):
return self.__clause_element__()
def __clause_element__(self):
return expression.ClauseList(
group=False, *self._comparable_elements
)
def _query_clause_element(self):
return CompositeProperty.CompositeBundle(
self.prop, self.__clause_element__()
)
def _bulk_update_tuples(self, value):
if value is None:
values = [None for key in self.prop._attribute_keys]
elif isinstance(value, self.prop.composite_class):
values = value.__composite_values__()
else:
raise sa_exc.ArgumentError(
"Can't UPDATE composite attribute %s to %r"
% (self.prop, value)
)
return zip(self._comparable_elements, values)
@util.memoized_property
def _comparable_elements(self):
if self._adapt_to_entity:
return [
getattr(self._adapt_to_entity.entity, prop.key)
for prop in self.prop._comparable_elements
]
else:
return self.prop._comparable_elements
def __eq__(self, other):
if other is None:
values = [None] * len(self.prop._comparable_elements)
else:
values = other.__composite_values__()
comparisons = [
a == b for a, b in zip(self.prop._comparable_elements, values)
]
if self._adapt_to_entity:
comparisons = [self.adapter(x) for x in comparisons]
return sql.and_(*comparisons)
def __ne__(self, other):
return sql.not_(self.__eq__(other))
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
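# Illustrative sketch of what the Comparator above produces, continuing the
# hypothetical Point/Vertex mapping: comparing the composite to a value
# expands into an AND of the individual column comparisons, e.g.
#
#     session.query(Vertex).filter(Vertex.start == Point(3, 4))
#
# renders roughly:
#
#     WHERE vertices.x1 = :x1_1 AND vertices.y1 = :y1_1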
@util.langhelpers.dependency_for("sqlalchemy.orm.properties", add_to_all=True)
class ConcreteInheritedProperty(DescriptorProperty):
"""A 'do nothing' :class:`.MapperProperty` that disables
an attribute on a concrete subclass that is only present
on the inherited mapper, not the concrete classes' mapper.
Cases where this occurs include:
* When the superclass mapper is mapped against a
"polymorphic union", which includes all attributes from
all subclasses.
* When a relationship() is configured on an inherited mapper,
but not on the subclass mapper. Concrete mappers require
that relationship() is configured explicitly on each
subclass.
"""
def _comparator_factory(self, mapper):
comparator_callable = None
for m in self.parent.iterate_to_root():
p = m._props[self.key]
if not isinstance(p, ConcreteInheritedProperty):
comparator_callable = p.comparator_factory
break
return comparator_callable
def __init__(self):
super(ConcreteInheritedProperty, self).__init__()
def warn():
raise AttributeError(
"Concrete %s does not implement "
"attribute %r at the instance level. Add "
"this property explicitly to %s."
% (self.parent, self.key, self.parent)
)
class NoninheritedConcreteProp(object):
def __set__(s, obj, value):
warn()
def __delete__(s, obj):
warn()
def __get__(s, obj, owner):
if obj is None:
return self.descriptor
warn()
self.descriptor = NoninheritedConcreteProp()
@util.langhelpers.dependency_for("sqlalchemy.orm.properties", add_to_all=True)
class SynonymProperty(DescriptorProperty):
def __init__(
self,
name,
map_column=None,
descriptor=None,
comparator_factory=None,
doc=None,
info=None,
):
"""Denote an attribute name as a synonym to a mapped property,
in that the attribute will mirror the value and expression behavior
of another attribute.
e.g.::
class MyClass(Base):
__tablename__ = 'my_table'
id = Column(Integer, primary_key=True)
job_status = Column(String(50))
status = synonym("job_status")
:param name: the name of the existing mapped property. This
can refer to the string name ORM-mapped attribute
configured on the class, including column-bound attributes
and relationships.
:param descriptor: a Python :term:`descriptor` that will be used
as a getter (and potentially a setter) when this attribute is
accessed at the instance level.
:param map_column: **For classical mappings and mappings against
an existing Table object only**. if ``True``, the :func:`.synonym`
construct will locate the :class:`_schema.Column`
object upon the mapped
table that would normally be associated with the attribute name of
this synonym, and produce a new :class:`.ColumnProperty` that instead
maps this :class:`_schema.Column`
to the alternate name given as the "name"
argument of the synonym; in this way, the usual step of redefining
the mapping of the :class:`_schema.Column`
to be under a different name is
unnecessary. This is usually intended to be used when a
:class:`_schema.Column`
is to be replaced with an attribute that also uses a
descriptor, that is, in conjunction with the
:paramref:`.synonym.descriptor` parameter::
my_table = Table(
"my_table", metadata,
Column('id', Integer, primary_key=True),
Column('job_status', String(50))
)
class MyClass(object):
@property
def _job_status_descriptor(self):
return "Status: %s" % self._job_status
mapper(
MyClass, my_table, properties={
"job_status": synonym(
"_job_status", map_column=True,
descriptor=MyClass._job_status_descriptor)
}
)
Above, the attribute named ``_job_status`` is automatically
mapped to the ``job_status`` column::
>>> j1 = MyClass()
>>> j1._job_status = "employed"
>>> j1.job_status
Status: employed
When using Declarative, in order to provide a descriptor in
conjunction with a synonym, use the
:func:`sqlalchemy.ext.declarative.synonym_for` helper. However,
note that the :ref:`hybrid properties <mapper_hybrids>` feature
should usually be preferred, particularly when redefining attribute
behavior.
:param info: Optional data dictionary which will be populated into the
:attr:`.InspectionAttr.info` attribute of this object.
.. versionadded:: 1.0.0
:param comparator_factory: A subclass of :class:`.PropComparator`
that will provide custom comparison behavior at the SQL expression
level.
.. note::
For the use case of providing an attribute which redefines both
Python-level and SQL-expression level behavior of an attribute,
please refer to the Hybrid attribute introduced at
:ref:`mapper_hybrids` for a more effective technique.
.. seealso::
:ref:`synonyms` - Overview of synonyms
:func:`.synonym_for` - a helper oriented towards Declarative
:ref:`mapper_hybrids` - The Hybrid Attribute extension provides an
updated approach to augmenting attribute behavior more flexibly
than can be achieved with synonyms.
"""
super(SynonymProperty, self).__init__()
self.name = name
self.map_column = map_column
self.descriptor = descriptor
self.comparator_factory = comparator_factory
self.doc = doc or (descriptor and descriptor.__doc__) or None
if info:
self.info = info
util.set_creation_order(self)
@property
def uses_objects(self):
return getattr(self.parent.class_, self.name).impl.uses_objects
# TODO: when initialized, check _proxied_property,
# emit a warning if its not a column-based property
@util.memoized_property
def _proxied_property(self):
attr = getattr(self.parent.class_, self.name)
if not hasattr(attr, "property") or not isinstance(
attr.property, MapperProperty
):
raise sa_exc.InvalidRequestError(
"""synonym() attribute "%s.%s" only supports """
"""ORM mapped attributes, got %r"""
% (self.parent.class_.__name__, self.name, attr)
)
return attr.property
def _comparator_factory(self, mapper):
prop = self._proxied_property
if self.comparator_factory:
comp = self.comparator_factory(prop, mapper)
else:
comp = prop.comparator_factory(prop, mapper)
return comp
def get_history(self, *arg, **kw):
attr = getattr(self.parent.class_, self.name)
return attr.impl.get_history(*arg, **kw)
def set_parent(self, parent, init):
if self.map_column:
# implement the 'map_column' option.
if self.key not in parent.persist_selectable.c:
raise sa_exc.ArgumentError(
"Can't compile synonym '%s': no column on table "
"'%s' named '%s'"
% (
self.name,
parent.persist_selectable.description,
self.key,
)
)
elif (
parent.persist_selectable.c[self.key]
in parent._columntoproperty
and parent._columntoproperty[
parent.persist_selectable.c[self.key]
].key
== self.name
):
raise sa_exc.ArgumentError(
"Can't call map_column=True for synonym %r=%r, "
"a ColumnProperty already exists keyed to the name "
"%r for column %r"
% (self.key, self.name, self.name, self.key)
)
p = properties.ColumnProperty(
parent.persist_selectable.c[self.key]
)
parent._configure_property(self.name, p, init=init, setparent=True)
p._mapped_by_synonym = self.key
self.parent = parent
@util.langhelpers.dependency_for("sqlalchemy.orm.properties", add_to_all=True)
@util.deprecated_cls(
"0.7",
":func:`.comparable_property` is deprecated and will be removed in a "
"future release. Please refer to the :mod:`~sqlalchemy.ext.hybrid` "
"extension.",
)
class ComparableProperty(DescriptorProperty):
"""Instruments a Python property for use in query expressions."""
def __init__(
self, comparator_factory, descriptor=None, doc=None, info=None
):
"""Provides a method of applying a :class:`.PropComparator`
to any Python descriptor attribute.
Allows any Python descriptor to behave like a SQL-enabled
attribute when used at the class level in queries, allowing
redefinition of expression operator behavior.
In the example below we redefine :meth:`.PropComparator.operate`
to wrap both sides of an expression in ``func.lower()`` to produce
case-insensitive comparison::
from sqlalchemy.orm import comparable_property
from sqlalchemy.orm.interfaces import PropComparator
from sqlalchemy.sql import func
from sqlalchemy import Integer, String, Column
from sqlalchemy.ext.declarative import declarative_base
class CaseInsensitiveComparator(PropComparator):
def __clause_element__(self):
return self.prop
def operate(self, op, other):
return op(
func.lower(self.__clause_element__()),
func.lower(other)
)
Base = declarative_base()
class SearchWord(Base):
__tablename__ = 'search_word'
id = Column(Integer, primary_key=True)
word = Column(String)
word_insensitive = comparable_property(lambda prop, mapper:
CaseInsensitiveComparator(
mapper.c.word, mapper)
)
A mapping like the above allows the ``word_insensitive`` attribute
to render an expression like::
>>> print(SearchWord.word_insensitive == "Trucks")
lower(search_word.word) = lower(:lower_1)
:param comparator_factory:
A PropComparator subclass or factory that defines operator behavior
for this property.
:param descriptor:
Optional when used in a ``properties={}`` declaration. The Python
descriptor or property to layer comparison behavior on top of.
The like-named descriptor will be automatically retrieved from the
mapped class if left blank in a ``properties`` declaration.
:param info: Optional data dictionary which will be populated into the
:attr:`.InspectionAttr.info` attribute of this object.
.. versionadded:: 1.0.0
"""
super(ComparableProperty, self).__init__()
self.descriptor = descriptor
self.comparator_factory = comparator_factory
self.doc = doc or (descriptor and descriptor.__doc__) or None
if info:
self.info = info
util.set_creation_order(self)
def _comparator_factory(self, mapper):
return self.comparator_factory(self, mapper)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/identity.py
|
# orm/identity.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import weakref
from . import attributes
from . import util as orm_util
from .. import exc as sa_exc
from .. import util
class IdentityMap(object):
def __init__(self):
self._dict = {}
self._modified = set()
self._wr = weakref.ref(self)
def keys(self):
return self._dict.keys()
def replace(self, state):
raise NotImplementedError()
def add(self, state):
raise NotImplementedError()
def _add_unpresent(self, state, key):
"""optional inlined form of add() which can assume item isn't present
in the map"""
self.add(state)
def update(self, dict_):
raise NotImplementedError("IdentityMap uses add() to insert data")
def clear(self):
raise NotImplementedError("IdentityMap uses remove() to remove data")
def _manage_incoming_state(self, state):
state._instance_dict = self._wr
if state.modified:
self._modified.add(state)
def _manage_removed_state(self, state):
del state._instance_dict
if state.modified:
self._modified.discard(state)
def _dirty_states(self):
return self._modified
def check_modified(self):
"""return True if any InstanceStates present have been marked
as 'modified'.
"""
return bool(self._modified)
def has_key(self, key):
return key in self
def popitem(self):
raise NotImplementedError("IdentityMap uses remove() to remove data")
def pop(self, key, *args):
raise NotImplementedError("IdentityMap uses remove() to remove data")
def setdefault(self, key, default=None):
raise NotImplementedError("IdentityMap uses add() to insert data")
def __len__(self):
return len(self._dict)
def copy(self):
raise NotImplementedError()
def __setitem__(self, key, value):
raise NotImplementedError("IdentityMap uses add() to insert data")
def __delitem__(self, key):
raise NotImplementedError("IdentityMap uses remove() to remove data")
class WeakInstanceDict(IdentityMap):
def __getitem__(self, key):
state = self._dict[key]
o = state.obj()
if o is None:
raise KeyError(key)
return o
def __contains__(self, key):
try:
if key in self._dict:
state = self._dict[key]
o = state.obj()
else:
return False
except KeyError:
return False
else:
return o is not None
def contains_state(self, state):
if state.key in self._dict:
try:
return self._dict[state.key] is state
except KeyError:
return False
else:
return False
def replace(self, state):
if state.key in self._dict:
try:
existing = self._dict[state.key]
except KeyError:
# catch gc removed the key after we just checked for it
pass
else:
if existing is not state:
self._manage_removed_state(existing)
else:
return None
else:
existing = None
self._dict[state.key] = state
self._manage_incoming_state(state)
return existing
def add(self, state):
key = state.key
# inline of self.__contains__
if key in self._dict:
try:
existing_state = self._dict[key]
except KeyError:
# catch gc removed the key after we just checked for it
pass
else:
if existing_state is not state:
o = existing_state.obj()
if o is not None:
raise sa_exc.InvalidRequestError(
"Can't attach instance "
"%s; another instance with key %s is already "
"present in this session."
% (orm_util.state_str(state), state.key)
)
else:
return False
self._dict[key] = state
self._manage_incoming_state(state)
return True
def _add_unpresent(self, state, key):
# inlined form of add() called by loading.py
self._dict[key] = state
state._instance_dict = self._wr
def get(self, key, default=None):
if key not in self._dict:
return default
try:
state = self._dict[key]
except KeyError:
# catch gc removed the key after we just checked for it
return default
else:
o = state.obj()
if o is None:
return default
return o
def items(self):
values = self.all_states()
result = []
for state in values:
value = state.obj()
if value is not None:
result.append((state.key, value))
return result
def values(self):
values = self.all_states()
result = []
for state in values:
value = state.obj()
if value is not None:
result.append(value)
return result
def __iter__(self):
return iter(self.keys())
if util.py2k:
def iteritems(self):
return iter(self.items())
def itervalues(self):
return iter(self.values())
def all_states(self):
if util.py2k:
return self._dict.values()
else:
return list(self._dict.values())
def _fast_discard(self, state):
# used by InstanceState for state being
# GC'ed, inlines _manage_removed_state
try:
st = self._dict[state.key]
except KeyError:
# catch gc removed the key after we just checked for it
pass
else:
if st is state:
self._dict.pop(state.key, None)
def discard(self, state):
self.safe_discard(state)
def safe_discard(self, state):
if state.key in self._dict:
try:
st = self._dict[state.key]
except KeyError:
# catch gc removed the key after we just checked for it
pass
else:
if st is state:
self._dict.pop(state.key, None)
self._manage_removed_state(state)
def prune(self):
return 0
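# Illustrative sketch of the identity map in use: within one Session, looking
# up the same primary key returns the same object, because the
# WeakInstanceDict above is keyed on the identity key.  Assuming a
# hypothetical mapped class ``User``:
#
#     u1 = session.query(User).get(5)   # emits SQL, object enters the map
#     u2 = session.query(User).get(5)   # served from the identity map
#     assert u1 is u2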
class StrongInstanceDict(IdentityMap):
"""A 'strong-referencing' version of the identity map.
.. deprecated:: 1.1
The strong
reference identity map is legacy. See the
recipe at :ref:`session_referencing_behavior` for
an event-based approach to maintaining strong identity
references.
"""
if util.py2k:
def itervalues(self):
return self._dict.itervalues()
def iteritems(self):
return self._dict.iteritems()
def __iter__(self):
return iter(self._dict)
def __getitem__(self, key):
return self._dict[key]
def __contains__(self, key):
return key in self._dict
def get(self, key, default=None):
return self._dict.get(key, default)
def values(self):
return self._dict.values()
def items(self):
return self._dict.items()
def all_states(self):
return [attributes.instance_state(o) for o in self.values()]
def contains_state(self, state):
return (
state.key in self
and attributes.instance_state(self[state.key]) is state
)
def replace(self, state):
if state.key in self._dict:
existing = self._dict[state.key]
existing = attributes.instance_state(existing)
if existing is not state:
self._manage_removed_state(existing)
else:
return
else:
existing = None
self._dict[state.key] = state.obj()
self._manage_incoming_state(state)
return existing
def add(self, state):
if state.key in self:
if attributes.instance_state(self._dict[state.key]) is not state:
raise sa_exc.InvalidRequestError(
"Can't attach instance "
"%s; another instance with key %s is already "
"present in this session."
% (orm_util.state_str(state), state.key)
)
return False
else:
self._dict[state.key] = state.obj()
self._manage_incoming_state(state)
return True
def _add_unpresent(self, state, key):
# inlined form of add() called by loading.py
self._dict[key] = state.obj()
state._instance_dict = self._wr
def _fast_discard(self, state):
# used by InstanceState for state being
# GC'ed, inlines _manage_removed_state
try:
obj = self._dict[state.key]
except KeyError:
# catch gc removed the key after we just checked for it
pass
else:
if attributes.instance_state(obj) is state:
self._dict.pop(state.key, None)
def discard(self, state):
self.safe_discard(state)
def safe_discard(self, state):
if state.key in self._dict:
obj = self._dict[state.key]
st = attributes.instance_state(obj)
if st is state:
self._dict.pop(state.key, None)
self._manage_removed_state(state)
def prune(self):
"""prune unreferenced, non-dirty states."""
ref_count = len(self)
dirty = [s.obj() for s in self.all_states() if s.modified]
# work around http://bugs.python.org/issue6149
keepers = weakref.WeakValueDictionary()
keepers.update(self)
self._dict.clear()
self._dict.update(keepers)
self.modified = bool(dirty)
return ref_count - len(self)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/evaluator.py
|
# orm/evaluator.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import operator
from .. import inspect
from .. import util
from ..sql import operators
class UnevaluatableError(Exception):
pass
_straight_ops = set(
getattr(operators, op)
for op in (
"add",
"mul",
"sub",
"div",
"mod",
"truediv",
"lt",
"le",
"ne",
"gt",
"ge",
"eq",
)
)
_notimplemented_ops = set(
getattr(operators, op)
for op in (
"like_op",
"notlike_op",
"ilike_op",
"notilike_op",
"between_op",
"in_op",
"notin_op",
"endswith_op",
"concat_op",
)
)
class EvaluatorCompiler(object):
def __init__(self, target_cls=None):
self.target_cls = target_cls
def process(self, clause):
meth = getattr(self, "visit_%s" % clause.__visit_name__, None)
if not meth:
raise UnevaluatableError(
"Cannot evaluate %s" % type(clause).__name__
)
return meth(clause)
def visit_grouping(self, clause):
return self.process(clause.element)
def visit_null(self, clause):
return lambda obj: None
def visit_false(self, clause):
return lambda obj: False
def visit_true(self, clause):
return lambda obj: True
def visit_column(self, clause):
if "parentmapper" in clause._annotations:
parentmapper = clause._annotations["parentmapper"]
if self.target_cls and not issubclass(
self.target_cls, parentmapper.class_
):
raise UnevaluatableError(
"Can't evaluate criteria against alternate class %s"
% parentmapper.class_
)
key = parentmapper._columntoproperty[clause].key
else:
key = clause.key
if (
self.target_cls
and key in inspect(self.target_cls).column_attrs
):
util.warn(
"Evaluating non-mapped column expression '%s' onto "
"ORM instances; this is a deprecated use case. Please "
"make use of the actual mapped columns in ORM-evaluated "
"UPDATE / DELETE expressions." % clause
)
else:
raise UnevaluatableError("Cannot evaluate column: %s" % clause)
get_corresponding_attr = operator.attrgetter(key)
return lambda obj: get_corresponding_attr(obj)
def visit_clauselist(self, clause):
evaluators = list(map(self.process, clause.clauses))
if clause.operator is operators.or_:
def evaluate(obj):
has_null = False
for sub_evaluate in evaluators:
value = sub_evaluate(obj)
if value:
return True
has_null = has_null or value is None
if has_null:
return None
return False
elif clause.operator is operators.and_:
def evaluate(obj):
for sub_evaluate in evaluators:
value = sub_evaluate(obj)
if not value:
if value is None:
return None
return False
return True
else:
raise UnevaluatableError(
"Cannot evaluate clauselist with operator %s" % clause.operator
)
return evaluate
def visit_binary(self, clause):
eval_left, eval_right = list(
map(self.process, [clause.left, clause.right])
)
operator = clause.operator
if operator is operators.is_:
def evaluate(obj):
return eval_left(obj) == eval_right(obj)
elif operator is operators.isnot:
def evaluate(obj):
return eval_left(obj) != eval_right(obj)
elif operator in _straight_ops:
def evaluate(obj):
left_val = eval_left(obj)
right_val = eval_right(obj)
if left_val is None or right_val is None:
return None
return operator(left_val, right_val)
else:
raise UnevaluatableError(
"Cannot evaluate %s with operator %s"
% (type(clause).__name__, clause.operator)
)
return evaluate
def visit_unary(self, clause):
eval_inner = self.process(clause.element)
if clause.operator is operators.inv:
def evaluate(obj):
value = eval_inner(obj)
if value is None:
return None
return not value
return evaluate
raise UnevaluatableError(
"Cannot evaluate %s with operator %s"
% (type(clause).__name__, clause.operator)
)
def visit_bindparam(self, clause):
if clause.callable:
val = clause.callable()
else:
val = clause.value
return lambda obj: val
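# Illustrative sketch of where this evaluator is used: a bulk UPDATE with
# synchronize_session="evaluate" compiles the WHERE criteria via
# EvaluatorCompiler and applies it in Python to objects already present in
# the Session, so their attributes can be refreshed without re-SELECTing.
# Assuming a hypothetical mapped class ``User``:
#
#     session.query(User).filter(User.name == "old name").update(
#         {"name": "new name"}, synchronize_session="evaluate"
#     )
#
# Criteria built from unsupported operators (LIKE, IN, BETWEEN, etc.) raise
# UnevaluatableError and typically require synchronize_session="fetch".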
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/unitofwork.py
|
# orm/unitofwork.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The internals for the unit of work system.
The session's flush() process passes objects to a contextual object
here, which assembles flush tasks based on mappers and their properties,
organizes them in order of dependency, and executes.
"""
from . import attributes
from . import exc as orm_exc
from . import persistence
from . import util as orm_util
from .. import event
from .. import util
from ..util import topological
def track_cascade_events(descriptor, prop):
"""Establish event listeners on object attributes which handle
cascade-on-set/append.
"""
key = prop.key
def append(state, item, initiator):
# process "save_update" cascade rules for when
# an instance is appended to the list of another instance
if item is None:
return
sess = state.session
if sess:
if sess._warn_on_events:
sess._flush_warning("collection append")
prop = state.manager.mapper._props[key]
item_state = attributes.instance_state(item)
if (
prop._cascade.save_update
and (prop.cascade_backrefs or key == initiator.key)
and not sess._contains_state(item_state)
):
sess._save_or_update_state(item_state)
return item
def remove(state, item, initiator):
if item is None:
return
sess = state.session
prop = state.manager.mapper._props[key]
if sess and sess._warn_on_events:
sess._flush_warning(
"collection remove"
if prop.uselist
else "related attribute delete"
)
if (
item is not None
and item is not attributes.NEVER_SET
and item is not attributes.PASSIVE_NO_RESULT
and prop._cascade.delete_orphan
):
# expunge pending orphans
item_state = attributes.instance_state(item)
if prop.mapper._is_orphan(item_state):
if sess and item_state in sess._new:
sess.expunge(item)
else:
# the related item may or may not itself be in a
# Session, however the parent for which we are catching
# the event is not in a session, so memoize this on the
# item
item_state._orphaned_outside_of_session = True
def set_(state, newvalue, oldvalue, initiator):
# process "save_update" cascade rules for when an instance
# is attached to another instance
if oldvalue is newvalue:
return newvalue
sess = state.session
if sess:
if sess._warn_on_events:
sess._flush_warning("related attribute set")
prop = state.manager.mapper._props[key]
if newvalue is not None:
newvalue_state = attributes.instance_state(newvalue)
if (
prop._cascade.save_update
and (prop.cascade_backrefs or key == initiator.key)
and not sess._contains_state(newvalue_state)
):
sess._save_or_update_state(newvalue_state)
if (
oldvalue is not None
and oldvalue is not attributes.NEVER_SET
and oldvalue is not attributes.PASSIVE_NO_RESULT
and prop._cascade.delete_orphan
):
# possible to reach here with attributes.NEVER_SET ?
oldvalue_state = attributes.instance_state(oldvalue)
if oldvalue_state in sess._new and prop.mapper._is_orphan(
oldvalue_state
):
sess.expunge(oldvalue)
return newvalue
event.listen(descriptor, "append", append, raw=True, retval=True)
event.listen(descriptor, "remove", remove, raw=True, retval=True)
event.listen(descriptor, "set", set_, raw=True, retval=True)
class UOWTransaction(object):
def __init__(self, session):
self.session = session
# dictionary used by external actors to
# store arbitrary state information.
self.attributes = {}
# dictionary of mappers to sets of
# DependencyProcessors, which are also
# set to be part of the sorted flush actions,
# which have that mapper as a parent.
self.deps = util.defaultdict(set)
# dictionary of mappers to sets of InstanceState
# items pending for flush which have that mapper
# as a parent.
self.mappers = util.defaultdict(set)
# a dictionary of Preprocess objects, which gather
# additional states impacted by the flush
# and determine if a flush action is needed
self.presort_actions = {}
# dictionary of PostSortRec objects, each
# one issues work during the flush within
# a certain ordering.
self.postsort_actions = {}
# a set of 2-tuples, each containing two
# PostSortRec objects where the second
# is dependent on the first being executed
# first
self.dependencies = set()
# dictionary of InstanceState-> (isdelete, listonly)
# tuples, indicating if this state is to be deleted
# or inserted/updated, or just refreshed
self.states = {}
# tracks InstanceStates which will be receiving
# a "post update" call. Keys are mappers,
# values are a set of states and a set of the
# columns which should be included in the update.
self.post_update_states = util.defaultdict(lambda: (set(), set()))
@property
def has_work(self):
return bool(self.states)
def was_already_deleted(self, state):
"""return true if the given state is expired and was deleted
previously.
"""
if state.expired:
try:
state._load_expired(state, attributes.PASSIVE_OFF)
except orm_exc.ObjectDeletedError:
self.session._remove_newly_deleted([state])
return True
return False
def is_deleted(self, state):
"""return true if the given state is marked as deleted
within this uowtransaction."""
return state in self.states and self.states[state][0]
def memo(self, key, callable_):
if key in self.attributes:
return self.attributes[key]
else:
self.attributes[key] = ret = callable_()
return ret
def remove_state_actions(self, state):
"""remove pending actions for a state from the uowtransaction."""
isdelete = self.states[state][0]
self.states[state] = (isdelete, True)
def get_attribute_history(
self, state, key, passive=attributes.PASSIVE_NO_INITIALIZE
):
"""facade to attributes.get_state_history(), including
caching of results."""
hashkey = ("history", state, key)
# cache the objects, not the states; the strong reference here
# prevents newly loaded objects from being dereferenced during the
# flush process
if hashkey in self.attributes:
history, state_history, cached_passive = self.attributes[hashkey]
# if the cached lookup was "passive" and now
# we want non-passive, do a non-passive lookup and re-cache
if (
not cached_passive & attributes.SQL_OK
and passive & attributes.SQL_OK
):
impl = state.manager[key].impl
history = impl.get_history(
state,
state.dict,
attributes.PASSIVE_OFF | attributes.LOAD_AGAINST_COMMITTED,
)
if history and impl.uses_objects:
state_history = history.as_state()
else:
state_history = history
self.attributes[hashkey] = (history, state_history, passive)
else:
impl = state.manager[key].impl
# TODO: store the history as (state, object) tuples
# so we don't have to keep converting here
history = impl.get_history(
state, state.dict, passive | attributes.LOAD_AGAINST_COMMITTED
)
if history and impl.uses_objects:
state_history = history.as_state()
else:
state_history = history
self.attributes[hashkey] = (history, state_history, passive)
return state_history
def has_dep(self, processor):
return (processor, True) in self.presort_actions
def register_preprocessor(self, processor, fromparent):
key = (processor, fromparent)
if key not in self.presort_actions:
self.presort_actions[key] = Preprocess(processor, fromparent)
def register_object(
self,
state,
isdelete=False,
listonly=False,
cancel_delete=False,
operation=None,
prop=None,
):
if not self.session._contains_state(state):
# this condition is normal when objects are registered
# as part of a relationship cascade operation. it should
# not occur for the top-level register from Session.flush().
if not state.deleted and operation is not None:
util.warn(
"Object of type %s not in session, %s operation "
"along '%s' will not proceed"
% (orm_util.state_class_str(state), operation, prop)
)
return False
if state not in self.states:
mapper = state.manager.mapper
if mapper not in self.mappers:
self._per_mapper_flush_actions(mapper)
self.mappers[mapper].add(state)
self.states[state] = (isdelete, listonly)
else:
if not listonly and (isdelete or cancel_delete):
self.states[state] = (isdelete, False)
return True
def register_post_update(self, state, post_update_cols):
mapper = state.manager.mapper.base_mapper
states, cols = self.post_update_states[mapper]
states.add(state)
cols.update(post_update_cols)
def _per_mapper_flush_actions(self, mapper):
saves = SaveUpdateAll(self, mapper.base_mapper)
deletes = DeleteAll(self, mapper.base_mapper)
self.dependencies.add((saves, deletes))
for dep in mapper._dependency_processors:
dep.per_property_preprocessors(self)
for prop in mapper.relationships:
if prop.viewonly:
continue
dep = prop._dependency_processor
dep.per_property_preprocessors(self)
@util.memoized_property
def _mapper_for_dep(self):
"""return a dynamic mapping of (Mapper, DependencyProcessor) to
True or False, indicating if the DependencyProcessor operates
on objects of that Mapper.
The result is stored in the dictionary persistently once
calculated.
"""
return util.PopulateDict(
lambda tup: tup[0]._props.get(tup[1].key) is tup[1].prop
)
def filter_states_for_dep(self, dep, states):
"""Filter the given list of InstanceStates to those relevant to the
given DependencyProcessor.
"""
mapper_for_dep = self._mapper_for_dep
return [s for s in states if mapper_for_dep[(s.manager.mapper, dep)]]
def states_for_mapper_hierarchy(self, mapper, isdelete, listonly):
checktup = (isdelete, listonly)
for mapper in mapper.base_mapper.self_and_descendants:
for state in self.mappers[mapper]:
if self.states[state] == checktup:
yield state
def _generate_actions(self):
"""Generate the full, unsorted collection of PostSortRecs as
well as dependency pairs for this UOWTransaction.
"""
# execute presort_actions, until all states
# have been processed. a presort_action might
# add new states to the uow.
while True:
ret = False
for action in list(self.presort_actions.values()):
if action.execute(self):
ret = True
if not ret:
break
# see if the graph of mapper dependencies has cycles.
self.cycles = cycles = topological.find_cycles(
self.dependencies, list(self.postsort_actions.values())
)
if cycles:
# if yes, break the per-mapper actions into
# per-state actions
convert = dict(
(rec, set(rec.per_state_flush_actions(self))) for rec in cycles
)
# rewrite the existing dependencies to point to
# the per-state actions for those per-mapper actions
# that were broken up.
for edge in list(self.dependencies):
if (
None in edge
or edge[0].disabled
or edge[1].disabled
or cycles.issuperset(edge)
):
self.dependencies.remove(edge)
elif edge[0] in cycles:
self.dependencies.remove(edge)
for dep in convert[edge[0]]:
self.dependencies.add((dep, edge[1]))
elif edge[1] in cycles:
self.dependencies.remove(edge)
for dep in convert[edge[1]]:
self.dependencies.add((edge[0], dep))
return set(
[a for a in self.postsort_actions.values() if not a.disabled]
).difference(cycles)
def execute(self):
postsort_actions = self._generate_actions()
# sort = topological.sort(self.dependencies, postsort_actions)
# print "--------------"
# print "\ndependencies:", self.dependencies
# print "\ncycles:", self.cycles
# print "\nsort:", list(sort)
# print "\nCOUNT OF POSTSORT ACTIONS", len(postsort_actions)
# execute
if self.cycles:
for set_ in topological.sort_as_subsets(
self.dependencies, postsort_actions
):
while set_:
n = set_.pop()
n.execute_aggregate(self, set_)
else:
for rec in topological.sort(self.dependencies, postsort_actions):
rec.execute(self)
def finalize_flush_changes(self):
"""mark processed objects as clean / deleted after a successful
flush().
this method is called within the flush() method after the
execute() method has succeeded and the transaction has been committed.
"""
if not self.states:
return
states = set(self.states)
isdel = set(
s for (s, (isdelete, listonly)) in self.states.items() if isdelete
)
other = states.difference(isdel)
if isdel:
self.session._remove_newly_deleted(isdel)
if other:
self.session._register_persistent(other)
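# Rough orientation sketch (internal API, roughly what Session._flush() does)
# of how a flush drives a UOWTransaction:
#
#     uow = UOWTransaction(session)
#     for state in states_to_flush:              # hypothetical collection
#         uow.register_object(state, isdelete=...)
#     if uow.has_work:
#         uow.execute()                  # presort, topological sort, persist
#         uow.finalize_flush_changes()   # mark states clean or deleted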
class IterateMappersMixin(object):
def _mappers(self, uow):
if self.fromparent:
return iter(
m
for m in self.dependency_processor.parent.self_and_descendants
if uow._mapper_for_dep[(m, self.dependency_processor)]
)
else:
return self.dependency_processor.mapper.self_and_descendants
class Preprocess(IterateMappersMixin):
__slots__ = (
"dependency_processor",
"fromparent",
"processed",
"setup_flush_actions",
)
def __init__(self, dependency_processor, fromparent):
self.dependency_processor = dependency_processor
self.fromparent = fromparent
self.processed = set()
self.setup_flush_actions = False
def execute(self, uow):
delete_states = set()
save_states = set()
for mapper in self._mappers(uow):
for state in uow.mappers[mapper].difference(self.processed):
(isdelete, listonly) = uow.states[state]
if not listonly:
if isdelete:
delete_states.add(state)
else:
save_states.add(state)
if delete_states:
self.dependency_processor.presort_deletes(uow, delete_states)
self.processed.update(delete_states)
if save_states:
self.dependency_processor.presort_saves(uow, save_states)
self.processed.update(save_states)
if delete_states or save_states:
if not self.setup_flush_actions and (
self.dependency_processor.prop_has_changes(
uow, delete_states, True
)
or self.dependency_processor.prop_has_changes(
uow, save_states, False
)
):
self.dependency_processor.per_property_flush_actions(uow)
self.setup_flush_actions = True
return True
else:
return False
class PostSortRec(object):
__slots__ = ("disabled",)
def __new__(cls, uow, *args):
key = (cls,) + args
if key in uow.postsort_actions:
return uow.postsort_actions[key]
else:
uow.postsort_actions[key] = ret = object.__new__(cls)
ret.disabled = False
return ret
def execute_aggregate(self, uow, recs):
self.execute(uow)
class ProcessAll(IterateMappersMixin, PostSortRec):
__slots__ = "dependency_processor", "isdelete", "fromparent"
def __init__(self, uow, dependency_processor, isdelete, fromparent):
self.dependency_processor = dependency_processor
self.isdelete = isdelete
self.fromparent = fromparent
uow.deps[dependency_processor.parent.base_mapper].add(
dependency_processor
)
def execute(self, uow):
states = self._elements(uow)
if self.isdelete:
self.dependency_processor.process_deletes(uow, states)
else:
self.dependency_processor.process_saves(uow, states)
def per_state_flush_actions(self, uow):
# this is handled by SaveUpdateAll and DeleteAll,
# since a ProcessAll should unconditionally be pulled
# into per-state if either the parent/child mappers
# are part of a cycle
return iter([])
def __repr__(self):
return "%s(%s, isdelete=%s)" % (
self.__class__.__name__,
self.dependency_processor,
self.isdelete,
)
def _elements(self, uow):
for mapper in self._mappers(uow):
for state in uow.mappers[mapper]:
(isdelete, listonly) = uow.states[state]
if isdelete == self.isdelete and not listonly:
yield state
class PostUpdateAll(PostSortRec):
__slots__ = "mapper", "isdelete"
def __init__(self, uow, mapper, isdelete):
self.mapper = mapper
self.isdelete = isdelete
def execute(self, uow):
states, cols = uow.post_update_states[self.mapper]
states = [s for s in states if uow.states[s][0] == self.isdelete]
persistence.post_update(self.mapper, states, uow, cols)
class SaveUpdateAll(PostSortRec):
__slots__ = ("mapper",)
def __init__(self, uow, mapper):
self.mapper = mapper
assert mapper is mapper.base_mapper
def execute(self, uow):
persistence.save_obj(
self.mapper,
uow.states_for_mapper_hierarchy(self.mapper, False, False),
uow,
)
def per_state_flush_actions(self, uow):
states = list(
uow.states_for_mapper_hierarchy(self.mapper, False, False)
)
base_mapper = self.mapper.base_mapper
delete_all = DeleteAll(uow, base_mapper)
for state in states:
# keep saves before deletes -
# this ensures 'row switch' operations work
action = SaveUpdateState(uow, state)
uow.dependencies.add((action, delete_all))
yield action
for dep in uow.deps[self.mapper]:
states_for_prop = uow.filter_states_for_dep(dep, states)
dep.per_state_flush_actions(uow, states_for_prop, False)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.mapper)
class DeleteAll(PostSortRec):
__slots__ = ("mapper",)
def __init__(self, uow, mapper):
self.mapper = mapper
assert mapper is mapper.base_mapper
def execute(self, uow):
persistence.delete_obj(
self.mapper,
uow.states_for_mapper_hierarchy(self.mapper, True, False),
uow,
)
def per_state_flush_actions(self, uow):
states = list(
uow.states_for_mapper_hierarchy(self.mapper, True, False)
)
base_mapper = self.mapper.base_mapper
save_all = SaveUpdateAll(uow, base_mapper)
for state in states:
# keep saves before deletes -
# this ensures 'row switch' operations work
action = DeleteState(uow, state)
uow.dependencies.add((save_all, action))
yield action
for dep in uow.deps[self.mapper]:
states_for_prop = uow.filter_states_for_dep(dep, states)
dep.per_state_flush_actions(uow, states_for_prop, True)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.mapper)
class ProcessState(PostSortRec):
__slots__ = "dependency_processor", "isdelete", "state"
def __init__(self, uow, dependency_processor, isdelete, state):
self.dependency_processor = dependency_processor
self.isdelete = isdelete
self.state = state
def execute_aggregate(self, uow, recs):
cls_ = self.__class__
dependency_processor = self.dependency_processor
isdelete = self.isdelete
our_recs = [
r
for r in recs
if r.__class__ is cls_
and r.dependency_processor is dependency_processor
and r.isdelete is isdelete
]
recs.difference_update(our_recs)
states = [self.state] + [r.state for r in our_recs]
if isdelete:
dependency_processor.process_deletes(uow, states)
else:
dependency_processor.process_saves(uow, states)
def __repr__(self):
return "%s(%s, %s, delete=%s)" % (
self.__class__.__name__,
self.dependency_processor,
orm_util.state_str(self.state),
self.isdelete,
)
class SaveUpdateState(PostSortRec):
__slots__ = "state", "mapper"
def __init__(self, uow, state):
self.state = state
self.mapper = state.mapper.base_mapper
def execute_aggregate(self, uow, recs):
cls_ = self.__class__
mapper = self.mapper
our_recs = [
r for r in recs if r.__class__ is cls_ and r.mapper is mapper
]
recs.difference_update(our_recs)
persistence.save_obj(
mapper, [self.state] + [r.state for r in our_recs], uow
)
def __repr__(self):
return "%s(%s)" % (
self.__class__.__name__,
orm_util.state_str(self.state),
)
class DeleteState(PostSortRec):
__slots__ = "state", "mapper"
def __init__(self, uow, state):
self.state = state
self.mapper = state.mapper.base_mapper
def execute_aggregate(self, uow, recs):
cls_ = self.__class__
mapper = self.mapper
our_recs = [
r for r in recs if r.__class__ is cls_ and r.mapper is mapper
]
recs.difference_update(our_recs)
states = [self.state] + [r.state for r in our_recs]
persistence.delete_obj(
mapper, [s for s in states if uow.states[s][0]], uow
)
def __repr__(self):
return "%s(%s)" % (
self.__class__.__name__,
orm_util.state_str(self.state),
)
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/base.py
# orm/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Constants and rudimental functions used throughout the ORM.
"""
import operator
from . import exc
from .. import exc as sa_exc
from .. import inspection
from .. import util
from ..sql import expression
PASSIVE_NO_RESULT = util.symbol(
"PASSIVE_NO_RESULT",
"""Symbol returned by a loader callable or other attribute/history
retrieval operation when a value could not be determined, based
on loader callable flags.
""",
)
PASSIVE_CLASS_MISMATCH = util.symbol(
"PASSIVE_CLASS_MISMATCH",
"""Symbol indicating that an object is locally present for a given
primary key identity but it is not of the requested class. The
return value is therefore None and no SQL should be emitted.""",
)
ATTR_WAS_SET = util.symbol(
"ATTR_WAS_SET",
"""Symbol returned by a loader callable to indicate the
retrieved value, or values, were assigned to their attributes
on the target object.
""",
)
ATTR_EMPTY = util.symbol(
"ATTR_EMPTY",
"""Symbol used internally to indicate an attribute had no callable.""",
)
NO_VALUE = util.symbol(
"NO_VALUE",
"""Symbol which may be placed as the 'previous' value of an attribute,
indicating no value was loaded for an attribute when it was modified,
and flags indicated we were not to load it.
""",
)
NEVER_SET = util.symbol(
"NEVER_SET",
"""Symbol which may be placed as the 'previous' value of an attribute
indicating that the attribute had not been assigned to previously.
""",
)
NO_CHANGE = util.symbol(
"NO_CHANGE",
"""No callables or SQL should be emitted on attribute access
and no state should change
""",
canonical=0,
)
CALLABLES_OK = util.symbol(
"CALLABLES_OK",
"""Loader callables can be fired off if a value
is not present.
""",
canonical=1,
)
SQL_OK = util.symbol(
"SQL_OK",
"""Loader callables can emit SQL at least on scalar value attributes.""",
canonical=2,
)
RELATED_OBJECT_OK = util.symbol(
"RELATED_OBJECT_OK",
"""Callables can use SQL to load related objects as well
as scalar value attributes.
""",
canonical=4,
)
INIT_OK = util.symbol(
"INIT_OK",
"""Attributes should be initialized with a blank
value (None or an empty collection) upon get, if no other
value can be obtained.
""",
canonical=8,
)
NON_PERSISTENT_OK = util.symbol(
"NON_PERSISTENT_OK",
"""Callables can be emitted if the parent is not persistent.""",
canonical=16,
)
LOAD_AGAINST_COMMITTED = util.symbol(
"LOAD_AGAINST_COMMITTED",
"""Callables should use committed values as primary/foreign keys during a
load.
""",
canonical=32,
)
NO_AUTOFLUSH = util.symbol(
"NO_AUTOFLUSH",
"""Loader callables should disable autoflush.""",
canonical=64,
)
NO_RAISE = util.symbol(
"NO_RAISE",
"""Loader callables should not raise any assertions""",
canonical=128,
)
# pre-packaged sets of flags used as inputs
PASSIVE_OFF = util.symbol(
"PASSIVE_OFF",
"Callables can be emitted in all cases.",
canonical=(
RELATED_OBJECT_OK | NON_PERSISTENT_OK | INIT_OK | CALLABLES_OK | SQL_OK
),
)
PASSIVE_RETURN_NEVER_SET = util.symbol(
"PASSIVE_RETURN_NEVER_SET",
"""PASSIVE_OFF ^ INIT_OK""",
canonical=PASSIVE_OFF ^ INIT_OK,
)
PASSIVE_NO_INITIALIZE = util.symbol(
"PASSIVE_NO_INITIALIZE",
"PASSIVE_RETURN_NEVER_SET ^ CALLABLES_OK",
canonical=PASSIVE_RETURN_NEVER_SET ^ CALLABLES_OK,
)
PASSIVE_NO_FETCH = util.symbol(
"PASSIVE_NO_FETCH", "PASSIVE_OFF ^ SQL_OK", canonical=PASSIVE_OFF ^ SQL_OK
)
PASSIVE_NO_FETCH_RELATED = util.symbol(
"PASSIVE_NO_FETCH_RELATED",
"PASSIVE_OFF ^ RELATED_OBJECT_OK",
canonical=PASSIVE_OFF ^ RELATED_OBJECT_OK,
)
PASSIVE_ONLY_PERSISTENT = util.symbol(
"PASSIVE_ONLY_PERSISTENT",
"PASSIVE_OFF ^ NON_PERSISTENT_OK",
canonical=PASSIVE_OFF ^ NON_PERSISTENT_OK,
)
DEFAULT_MANAGER_ATTR = "_sa_class_manager"
DEFAULT_STATE_ATTR = "_sa_instance_state"
_INSTRUMENTOR = ("mapper", "instrumentor")
EXT_CONTINUE = util.symbol("EXT_CONTINUE")
EXT_STOP = util.symbol("EXT_STOP")
EXT_SKIP = util.symbol("EXT_SKIP")
ONETOMANY = util.symbol(
"ONETOMANY",
"""Indicates the one-to-many direction for a :func:`_orm.relationship`.
This symbol is typically used by the internals but may be exposed within
certain API features.
""",
)
MANYTOONE = util.symbol(
"MANYTOONE",
"""Indicates the many-to-one direction for a :func:`_orm.relationship`.
This symbol is typically used by the internals but may be exposed within
certain API features.
""",
)
MANYTOMANY = util.symbol(
"MANYTOMANY",
"""Indicates the many-to-many direction for a :func:`_orm.relationship`.
This symbol is typically used by the internals but may be exposed within
certain API features.
""",
)
NOT_EXTENSION = util.symbol(
"NOT_EXTENSION",
"""Symbol indicating an :class:`InspectionAttr` that's
not part of sqlalchemy.ext.
Is assigned to the :attr:`.InspectionAttr.extension_type`
attribute.
""",
)
_never_set = frozenset([NEVER_SET])
_none_set = frozenset([None, NEVER_SET, PASSIVE_NO_RESULT])
_SET_DEFERRED_EXPIRED = util.symbol("SET_DEFERRED_EXPIRED")
_DEFER_FOR_STATE = util.symbol("DEFER_FOR_STATE")
def _generative(*assertions):
"""Mark a method as generative, e.g. method-chained."""
@util.decorator
def generate(fn, *args, **kw):
self = args[0]._clone()
for assertion in assertions:
assertion(self, fn.__name__)
fn(self, *args[1:], **kw)
return self
return generate
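# Minimal usage sketch (hypothetical class) for the decorator above: the
# wrapped method receives a clone of ``self``, mutates it in place, and the
# clone is returned, which is what makes calls chainable:
#
#     class Thing(object):
#         def _clone(self):
#             copy = Thing()
#             copy.__dict__.update(self.__dict__)
#             return copy
#
#         @_generative()
#         def limit(self, value):
#             self._limit = value        # mutates the clone, not the original
#
#     t2 = Thing().limit(5)              # original left untouched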
# these can be replaced by sqlalchemy.ext.instrumentation
# if augmented class instrumentation is enabled.
def manager_of_class(cls):
return cls.__dict__.get(DEFAULT_MANAGER_ATTR, None)
instance_state = operator.attrgetter(DEFAULT_STATE_ATTR)
instance_dict = operator.attrgetter("__dict__")
def instance_str(instance):
"""Return a string describing an instance."""
return state_str(instance_state(instance))
def state_str(state):
"""Return a string describing an instance via its InstanceState."""
if state is None:
return "None"
else:
return "<%s at 0x%x>" % (state.class_.__name__, id(state.obj()))
def state_class_str(state):
"""Return a string describing an instance's class via its
InstanceState.
"""
if state is None:
return "None"
else:
return "<%s>" % (state.class_.__name__,)
def attribute_str(instance, attribute):
return instance_str(instance) + "." + attribute
def state_attribute_str(state, attribute):
return state_str(state) + "." + attribute
def object_mapper(instance):
"""Given an object, return the primary Mapper associated with the object
instance.
Raises :class:`sqlalchemy.orm.exc.UnmappedInstanceError`
if no mapping is configured.
This function is available via the inspection system as::
inspect(instance).mapper
Using the inspection system will raise
:class:`sqlalchemy.exc.NoInspectionAvailable` if the instance is
not part of a mapping.
"""
return object_state(instance).mapper
def object_state(instance):
"""Given an object, return the :class:`.InstanceState`
associated with the object.
Raises :class:`sqlalchemy.orm.exc.UnmappedInstanceError`
if no mapping is configured.
Equivalent functionality is available via the :func:`_sa.inspect`
function as::
inspect(instance)
Using the inspection system will raise
:class:`sqlalchemy.exc.NoInspectionAvailable` if the instance is
not part of a mapping.
"""
state = _inspect_mapped_object(instance)
if state is None:
raise exc.UnmappedInstanceError(instance)
else:
return state
@inspection._inspects(object)
def _inspect_mapped_object(instance):
try:
return instance_state(instance)
# TODO: what's the py-2/3 syntax to catch two
# different kinds of exceptions at once?
except exc.UnmappedClassError:
return None
except exc.NO_STATE:
return None
def _class_to_mapper(class_or_mapper):
insp = inspection.inspect(class_or_mapper, False)
if insp is not None:
return insp.mapper
else:
raise exc.UnmappedClassError(class_or_mapper)
def _mapper_or_none(entity):
"""Return the :class:`_orm.Mapper` for the given class or None if the
class is not mapped.
"""
insp = inspection.inspect(entity, False)
if insp is not None:
return insp.mapper
else:
return None
def _is_mapped_class(entity):
"""Return True if the given object is a mapped class,
:class:`_orm.Mapper`, or :class:`.AliasedClass`.
"""
insp = inspection.inspect(entity, False)
return (
insp is not None
and not insp.is_clause_element
and (insp.is_mapper or insp.is_aliased_class)
)
def _attr_as_key(attr):
if hasattr(attr, "key"):
return attr.key
else:
return expression._column_as_key(attr)
def _orm_columns(entity):
insp = inspection.inspect(entity, False)
if hasattr(insp, "selectable") and hasattr(insp.selectable, "c"):
return [c for c in insp.selectable.c]
else:
return [entity]
def _is_aliased_class(entity):
insp = inspection.inspect(entity, False)
return insp is not None and getattr(insp, "is_aliased_class", False)
def _entity_descriptor(entity, key):
"""Return a class attribute given an entity and string name.
May return :class:`.InstrumentedAttribute` or user-defined
attribute.
"""
insp = inspection.inspect(entity)
if insp.is_selectable:
description = entity
entity = insp.c
elif insp.is_aliased_class:
entity = insp.entity
description = entity
elif hasattr(insp, "mapper"):
description = entity = insp.mapper.class_
else:
description = entity
try:
return getattr(entity, key)
except AttributeError as err:
util.raise_(
sa_exc.InvalidRequestError(
"Entity '%s' has no property '%s'" % (description, key)
),
replace_context=err,
)
_state_mapper = util.dottedgetter("manager.mapper")
@inspection._inspects(type)
def _inspect_mapped_class(class_, configure=False):
try:
class_manager = manager_of_class(class_)
if not class_manager.is_mapped:
return None
mapper = class_manager.mapper
except exc.NO_STATE:
return None
else:
if configure and mapper._new_mappers:
mapper._configure_all()
return mapper
def class_mapper(class_, configure=True):
"""Given a class, return the primary :class:`_orm.Mapper` associated
with the key.
Raises :exc:`.UnmappedClassError` if no mapping is configured
on the given class, or :exc:`.ArgumentError` if a non-class
object is passed.
Equivalent functionality is available via the :func:`_sa.inspect`
function as::
inspect(some_mapped_class)
Using the inspection system will raise
:class:`sqlalchemy.exc.NoInspectionAvailable` if the class is not mapped.
"""
mapper = _inspect_mapped_class(class_, configure=configure)
if mapper is None:
if not isinstance(class_, type):
raise sa_exc.ArgumentError(
"Class object expected, got '%r'." % (class_,)
)
raise exc.UnmappedClassError(class_)
else:
return mapper
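# Usage sketch (``User`` is a hypothetical mapped class):
#
#     from sqlalchemy import inspect
#     mapper = class_mapper(User)      # raises UnmappedClassError if unmapped
#     assert inspect(User) is mapper   # the inspection system returns the same Mapper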
class InspectionAttr(object):
"""A base class applied to all ORM objects that can be returned
by the :func:`_sa.inspect` function.
The attributes defined here allow the usage of simple boolean
checks to test basic facts about the object returned.
While the boolean checks here are basically the same as using
the Python isinstance() function, the flags here can be used without
the need to import all of these classes, and also such that
the SQLAlchemy class system can change while leaving the flags
here intact for forwards-compatibility.
"""
__slots__ = ()
is_selectable = False
"""Return True if this object is an instance of """
""":class:`expression.Selectable`."""
is_aliased_class = False
"""True if this object is an instance of :class:`.AliasedClass`."""
is_instance = False
"""True if this object is an instance of :class:`.InstanceState`."""
is_mapper = False
"""True if this object is an instance of :class:`_orm.Mapper`."""
is_property = False
"""True if this object is an instance of :class:`.MapperProperty`."""
is_attribute = False
"""True if this object is a Python :term:`descriptor`.
This can refer to one of many types. Usually a
:class:`.QueryableAttribute` which handles attributes events on behalf
of a :class:`.MapperProperty`. But can also be an extension type
such as :class:`.AssociationProxy` or :class:`.hybrid_property`.
The :attr:`.InspectionAttr.extension_type` will refer to a constant
identifying the specific subtype.
.. seealso::
:attr:`_orm.Mapper.all_orm_descriptors`
"""
_is_internal_proxy = False
"""True if this object is an internal proxy object.
.. versionadded:: 1.2.12
"""
is_clause_element = False
"""True if this object is an instance of """
""":class:`_expression.ClauseElement`."""
extension_type = NOT_EXTENSION
"""The extension type, if any.
Defaults to :data:`.interfaces.NOT_EXTENSION`
.. seealso::
:data:`.HYBRID_METHOD`
:data:`.HYBRID_PROPERTY`
:data:`.ASSOCIATION_PROXY`
"""
class InspectionAttrInfo(InspectionAttr):
"""Adds the ``.info`` attribute to :class:`.InspectionAttr`.
The rationale for :class:`.InspectionAttr` vs. :class:`.InspectionAttrInfo`
is that the former is compatible as a mixin for classes that specify
``__slots__``; this is essentially an implementation artifact.
"""
@util.memoized_property
def info(self):
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.InspectionAttr`.
The dictionary is generated when first accessed. Alternatively,
it can be specified as a constructor argument to the
:func:`.column_property`, :func:`_orm.relationship`, or
:func:`.composite`
functions.
.. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also
available on extension types via the
:attr:`.InspectionAttrInfo.info` attribute, so that it can apply
to a wider variety of ORM and extension constructs.
.. seealso::
:attr:`.QueryableAttribute.info`
:attr:`.SchemaItem.info`
"""
return {}
class _MappedAttribute(object):
"""Mixin for attributes which should be replaced by mapper-assigned
attributes.
"""
__slots__ = ()
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/relationships.py
# orm/relationships.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Heuristics related to join conditions as used in
:func:`_orm.relationship`.
Provides the :class:`.JoinCondition` object, which encapsulates
SQL annotation and aliasing behavior focused on the `primaryjoin`
and `secondaryjoin` aspects of :func:`_orm.relationship`.
"""
from __future__ import absolute_import
import collections
import weakref
from . import attributes
from . import dependency
from . import mapper as mapperlib
from .base import state_str
from .interfaces import MANYTOMANY
from .interfaces import MANYTOONE
from .interfaces import ONETOMANY
from .interfaces import PropComparator
from .interfaces import StrategizedProperty
from .util import _orm_annotate
from .util import _orm_deannotate
from .util import CascadeOptions
from .. import exc as sa_exc
from .. import log
from .. import schema
from .. import sql
from .. import util
from ..inspection import inspect
from ..sql import expression
from ..sql import operators
from ..sql import visitors
from ..sql.util import _deep_deannotate
from ..sql.util import _shallow_annotate
from ..sql.util import adapt_criterion_to_null
from ..sql.util import ClauseAdapter
from ..sql.util import join_condition
from ..sql.util import selectables_overlap
from ..sql.util import visit_binary_product
def remote(expr):
"""Annotate a portion of a primaryjoin expression
with a 'remote' annotation.
See the section :ref:`relationship_custom_foreign` for a
description of use.
.. seealso::
:ref:`relationship_custom_foreign`
:func:`.foreign`
"""
return _annotate_columns(
expression._clause_element_as_expr(expr), {"remote": True}
)
def foreign(expr):
"""Annotate a portion of a primaryjoin expression
with a 'foreign' annotation.
See the section :ref:`relationship_custom_foreign` for a
description of use.
.. seealso::
:ref:`relationship_custom_foreign`
:func:`.remote`
"""
return _annotate_columns(
expression._clause_element_as_expr(expr), {"foreign": True}
)
@log.class_logger
@util.langhelpers.dependency_for("sqlalchemy.orm.properties", add_to_all=True)
class RelationshipProperty(StrategizedProperty):
"""Describes an object property that holds a single item or list
of items that correspond to a related database table.
Public constructor is the :func:`_orm.relationship` function.
.. seealso::
:ref:`relationship_config_toplevel`
"""
strategy_wildcard_key = "relationship"
_persistence_only = dict(
passive_deletes=False,
passive_updates=True,
enable_typechecks=True,
active_history=False,
cascade_backrefs=True,
)
_dependency_processor = None
@util.deprecated_params(
extension=(
"0.7",
":class:`.AttributeExtension` is deprecated in favor of the "
":class:`.AttributeEvents` listener interface. The "
":paramref:`_orm.relationship.extension` parameter will be "
"removed in a future release.",
)
)
def __init__(
self,
argument,
secondary=None,
primaryjoin=None,
secondaryjoin=None,
foreign_keys=None,
uselist=None,
order_by=False,
backref=None,
back_populates=None,
post_update=False,
cascade=False,
extension=None,
viewonly=False,
lazy="select",
collection_class=None,
passive_deletes=_persistence_only["passive_deletes"],
passive_updates=_persistence_only["passive_updates"],
remote_side=None,
enable_typechecks=_persistence_only["enable_typechecks"],
join_depth=None,
comparator_factory=None,
single_parent=False,
innerjoin=False,
distinct_target_key=None,
doc=None,
active_history=_persistence_only["active_history"],
cascade_backrefs=_persistence_only["cascade_backrefs"],
load_on_pending=False,
bake_queries=True,
_local_remote_pairs=None,
query_class=None,
info=None,
omit_join=None,
sync_backref=None,
):
"""Provide a relationship between two mapped classes.
This corresponds to a parent-child or associative table relationship.
The constructed class is an instance of
:class:`.RelationshipProperty`.
A typical :func:`_orm.relationship`, used in a classical mapping::
mapper(Parent, properties={
'children': relationship(Child)
})
Some arguments accepted by :func:`_orm.relationship`
optionally accept a
callable function, which when called produces the desired value.
The callable is invoked by the parent :class:`_orm.Mapper` at "mapper
initialization" time, which happens only when mappers are first used,
and is assumed to be after all mappings have been constructed. This
can be used to resolve order-of-declaration and other dependency
issues, such as if ``Child`` is declared below ``Parent`` in the same
file::
mapper(Parent, properties={
"children":relationship(lambda: Child,
order_by=lambda: Child.id)
})
When using the :ref:`declarative_toplevel` extension, the Declarative
initializer allows string arguments to be passed to
:func:`_orm.relationship`. These string arguments are converted into
callables that evaluate the string as Python code, using the
Declarative class-registry as a namespace. This allows the lookup of
related classes to be automatic via their string name, and removes the
need for related classes to be imported into the local module space
before the dependent classes have been declared. It is still required
that the modules in which these related classes appear are imported
anywhere in the application at some point before the related mappings
are actually used, else a lookup error will be raised when the
:func:`_orm.relationship`
attempts to resolve the string reference to the
related class. An example of a string- resolved class is as
follows::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
children = relationship("Child", order_by="Child.id")
.. seealso::
:ref:`relationship_config_toplevel` - Full introductory and
reference documentation for :func:`_orm.relationship`.
:ref:`orm_tutorial_relationship` - ORM tutorial introduction.
:param argument:
A mapped class, or actual :class:`_orm.Mapper` instance,
representing
the target of the relationship.
:paramref:`_orm.relationship.argument`
may also be passed as a callable
function which is evaluated at mapper initialization time, and may
be passed as a string name when using Declarative.
.. warning:: Prior to SQLAlchemy 1.3.16, this value is interpreted
using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
.. versionchanged:: 1.3.16
The string evaluation of the main "argument" no longer accepts an
open ended Python expression, instead only accepting a string
class name or dotted package-qualified name.
.. seealso::
:ref:`declarative_configuring_relationships` - further detail
on relationship configuration when using Declarative.
:param secondary:
For a many-to-many relationship, specifies the intermediary
table, and is typically an instance of :class:`_schema.Table`.
In less common circumstances, the argument may also be specified
as an :class:`_expression.Alias` construct, or even a
:class:`_expression.Join` construct.
:paramref:`_orm.relationship.secondary` may
also be passed as a callable function which is evaluated at
mapper initialization time. When using Declarative, it may also
be a string argument noting the name of a :class:`_schema.Table`
that is
present in the :class:`_schema.MetaData`
collection associated with the
parent-mapped :class:`_schema.Table`.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
The :paramref:`_orm.relationship.secondary` keyword argument is
typically applied in the case where the intermediary
:class:`_schema.Table`
is not otherwise expressed in any direct class mapping. If the
"secondary" table is also explicitly mapped elsewhere (e.g. as in
:ref:`association_pattern`), one should consider applying the
:paramref:`_orm.relationship.viewonly` flag so that this
:func:`_orm.relationship`
is not used for persistence operations which
may conflict with those of the association object pattern.
.. seealso::
:ref:`relationships_many_to_many` - Reference example of "many
to many".
:ref:`orm_tutorial_many_to_many` - ORM tutorial introduction to
many-to-many relationships.
:ref:`self_referential_many_to_many` - Specifics on using
many-to-many in a self-referential case.
:ref:`declarative_many_to_many` - Additional options when using
Declarative.
:ref:`association_pattern` - an alternative to
:paramref:`_orm.relationship.secondary`
when composing association
table relationships, allowing additional attributes to be
specified on the association table.
:ref:`composite_secondary_join` - a lesser-used pattern which
in some cases can enable complex :func:`_orm.relationship` SQL
conditions to be used.
.. versionadded:: 0.9.2 :paramref:`_orm.relationship.secondary`
works
more effectively when referring to a :class:`_expression.Join`
instance.
:param active_history=False:
When ``True``, indicates that the "previous" value for a
many-to-one reference should be loaded when replaced, if
not already loaded. Normally, history tracking logic for
simple many-to-ones only needs to be aware of the "new"
value in order to perform a flush. This flag is available
for applications that make use of
:func:`.attributes.get_history` which also need to know
the "previous" value of the attribute.
:param backref:
Indicates the string name of a property to be placed on the related
mapper's class that will handle this relationship in the other
direction. The other property will be created automatically
when the mappers are configured. Can also be passed as a
:func:`.backref` object to control the configuration of the
new relationship.
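For example, a minimal one-to-many sketch (``Parent`` / ``Child`` are the
hypothetical classes used in the examples above)::

    children = relationship("Child", backref="parent")

which also configures a ``Child.parent`` many-to-one attribute
automatically.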
.. seealso::
:ref:`relationships_backref` - Introductory documentation and
examples.
:paramref:`_orm.relationship.back_populates` - alternative form
of backref specification.
:func:`.backref` - allows control over :func:`_orm.relationship`
configuration when using :paramref:`_orm.relationship.backref`.
:param back_populates:
Takes a string name and has the same meaning as
:paramref:`_orm.relationship.backref`, except the complementing
property is **not** created automatically, and instead must be
configured explicitly on the other mapper. The complementing
property should also indicate
:paramref:`_orm.relationship.back_populates` to this relationship to
ensure proper functioning.
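For example, a minimal sketch with both directions configured explicitly
(hypothetical ``Parent`` / ``Child`` mapping)::

    # on Parent
    children = relationship("Child", back_populates="parent")

    # on Child
    parent = relationship("Parent", back_populates="children")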
.. seealso::
:ref:`relationships_backref` - Introductory documentation and
examples.
:paramref:`_orm.relationship.backref` - alternative form
of backref specification.
:param bake_queries=True:
Use the :class:`.BakedQuery` cache to cache the construction of SQL
used in lazy loads. True by default. Set to False if the
join condition of the relationship has unusual features that
might not respond well to statement caching.
.. versionchanged:: 1.2
"Baked" loading is the default implementation for the "select",
a.k.a. "lazy" loading strategy for relationships.
.. versionadded:: 1.0.0
.. seealso::
:ref:`baked_toplevel`
:param cascade:
A comma-separated list of cascade rules which determines how
Session operations should be "cascaded" from parent to child.
This defaults to ``False``, which means the default cascade
should be used - this default cascade is ``"save-update, merge"``.
The available cascades are ``save-update``, ``merge``,
``expunge``, ``delete``, ``delete-orphan``, and ``refresh-expire``.
An additional option, ``all`` indicates shorthand for
``"save-update, merge, refresh-expire,
expunge, delete"``, and is often used as in ``"all, delete-orphan"``
to indicate that related objects should follow along with the
parent object in all cases, and be deleted when de-associated.
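For example, a minimal sketch of that common configuration::

    children = relationship("Child", cascade="all, delete-orphan")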
.. seealso::
:ref:`unitofwork_cascades` - Full detail on each of the available
cascade options.
:ref:`tutorial_delete_cascade` - Tutorial example describing
a delete cascade.
:param cascade_backrefs=True:
A boolean value indicating if the ``save-update`` cascade should
operate along an assignment event intercepted by a backref.
When set to ``False``, the attribute managed by this relationship
will not cascade an incoming transient object into the session of a
persistent parent, if the event is received via backref.
.. seealso::
:ref:`backref_cascade` - Full discussion and examples on how
the :paramref:`_orm.relationship.cascade_backrefs` option is used.
:param collection_class:
A class or callable that returns a new list-holding object, which will
be used in place of a plain list for storing elements.
.. seealso::
:ref:`custom_collections` - Introductory documentation and
examples.
:param comparator_factory:
A class which extends :class:`.RelationshipProperty.Comparator`
which provides custom SQL clause generation for comparison
operations.
.. seealso::
:class:`.PropComparator` - some detail on redefining comparators
at this level.
:ref:`custom_comparators` - Brief intro to this feature.
:param distinct_target_key=None:
Indicate if a "subquery" eager load should apply the DISTINCT
keyword to the innermost SELECT statement. When left as ``None``,
the DISTINCT keyword will be applied in those cases when the target
columns do not comprise the full primary key of the target table.
When set to ``True``, the DISTINCT keyword is applied to the
innermost SELECT unconditionally.
It may be desirable to set this flag to False when the DISTINCT is
degrading the performance of the innermost subquery more than the
duplicate innermost rows themselves would.
.. versionchanged:: 0.9.0 -
:paramref:`_orm.relationship.distinct_target_key` now defaults to
``None``, so that the feature enables itself automatically for
those cases where the innermost query targets a non-unique
key.
.. seealso::
:ref:`loading_toplevel` - includes an introduction to subquery
eager loading.
:param doc:
Docstring which will be applied to the resulting descriptor.
:param extension:
an :class:`.AttributeExtension` instance, or list of extensions,
which will be prepended to the list of attribute listeners for
the resulting descriptor placed on the class.
:param foreign_keys:
A list of columns which are to be used as "foreign key"
columns, or columns which refer to the value in a remote
column, within the context of this :func:`_orm.relationship`
object's :paramref:`_orm.relationship.primaryjoin` condition.
That is, if the :paramref:`_orm.relationship.primaryjoin`
condition of this :func:`_orm.relationship` is ``a.id ==
b.a_id``, and the values in ``b.a_id`` are required to be
present in ``a.id``, then the "foreign key" column of this
:func:`_orm.relationship` is ``b.a_id``.
In normal cases, the :paramref:`_orm.relationship.foreign_keys`
parameter is **not required.** :func:`_orm.relationship` will
automatically determine which columns in the
:paramref:`_orm.relationship.primaryjoin` condition are to be
considered "foreign key" columns based on those
:class:`_schema.Column` objects that specify
:class:`_schema.ForeignKey`,
or are otherwise listed as referencing columns in a
:class:`_schema.ForeignKeyConstraint` construct.
:paramref:`_orm.relationship.foreign_keys` is only needed when:
1. There is more than one way to construct a join from the local
table to the remote table, as there are multiple foreign key
references present. Setting ``foreign_keys`` will limit the
:func:`_orm.relationship`
to consider just those columns specified
here as "foreign".
2. The :class:`_schema.Table` being mapped does not actually have
:class:`_schema.ForeignKey` or
:class:`_schema.ForeignKeyConstraint`
constructs present, often because the table
was reflected from a database that does not support foreign key
reflection (MySQL MyISAM).
3. The :paramref:`_orm.relationship.primaryjoin`
argument is used to
construct a non-standard join condition, which makes use of
columns or expressions that do not normally refer to their
"parent" column, such as a join condition expressed by a
complex comparison using a SQL function.
The :func:`_orm.relationship` construct will raise informative
error messages that suggest the use of the
:paramref:`_orm.relationship.foreign_keys` parameter when
presented with an ambiguous condition. In typical cases,
if :func:`_orm.relationship` doesn't raise any exceptions, the
:paramref:`_orm.relationship.foreign_keys` parameter is usually
not needed.
:paramref:`_orm.relationship.foreign_keys` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
.. seealso::
:ref:`relationship_foreign_keys`
:ref:`relationship_custom_foreign`
:func:`.foreign` - allows direct annotation of the "foreign"
columns within a :paramref:`_orm.relationship.primaryjoin`
condition.
:param info: Optional data dictionary which will be populated into the
:attr:`.MapperProperty.info` attribute of this object.
:param innerjoin=False:
When ``True``, joined eager loads will use an inner join to join
against related tables instead of an outer join. The purpose
of this option is generally one of performance, as inner joins
generally perform better than outer joins.
This flag can be set to ``True`` when the relationship references an
object via many-to-one using local foreign keys that are not
nullable, or when the reference is one-to-one or a collection that
is guaranteed to have at least one entry.
The option supports the same "nested" and "unnested" options as
that of :paramref:`_orm.joinedload.innerjoin`. See that flag
for details on nested / unnested behaviors.
.. seealso::
:paramref:`_orm.joinedload.innerjoin` - the option as specified by
loader option, including detail on nesting behavior.
:ref:`what_kind_of_loading` - Discussion of some details of
various loader options.
:param join_depth:
When non-``None``, an integer value indicating how many levels
deep "eager" loaders should join on a self-referring or cyclical
relationship. The number counts how many times the same Mapper
shall be present in the loading condition along a particular join
branch. When left at its default of ``None``, eager loaders
will stop chaining when they encounter the same target mapper
which is already higher up in the chain. This option applies
both to joined- and subquery- eager loaders.
.. seealso::
:ref:`self_referential_eager_loading` - Introductory documentation
and examples.
:param lazy='select': specifies
How the related items should be loaded. Default value is
``select``. Values include:
* ``select`` - items should be loaded lazily when the property is
first accessed, using a separate SELECT statement, or identity map
fetch for simple many-to-one references.
* ``immediate`` - items should be loaded as the parents are loaded,
using a separate SELECT statement, or identity map fetch for
simple many-to-one references.
* ``joined`` - items should be loaded "eagerly" in the same query as
that of the parent, using a JOIN or LEFT OUTER JOIN. Whether
the join is "outer" or not is determined by the
:paramref:`_orm.relationship.innerjoin` parameter.
* ``subquery`` - items should be loaded "eagerly" as the parents are
loaded, using one additional SQL statement, which issues a JOIN to
a subquery of the original statement, for each collection
requested.
* ``selectin`` - items should be loaded "eagerly" as the parents
are loaded, using one or more additional SQL statements, which
issues a JOIN to the immediate parent object, specifying primary
key identifiers using an IN clause.
.. versionadded:: 1.2
* ``noload`` - no loading should occur at any time. This is to
support "write-only" attributes, or attributes which are
populated in some manner specific to the application.
* ``raise`` - lazy loading is disallowed; accessing
the attribute, if its value were not already loaded via eager
loading, will raise an :exc:`~sqlalchemy.exc.InvalidRequestError`.
This strategy can be used when objects are to be detached from
their attached :class:`.Session` after they are loaded.
.. versionadded:: 1.1
* ``raise_on_sql`` - lazy loading that emits SQL is disallowed;
accessing the attribute, if its value were not already loaded via
eager loading, will raise an
:exc:`~sqlalchemy.exc.InvalidRequestError`, **if the lazy load
needs to emit SQL**. If the lazy load can pull the related value
from the identity map or determine that it should be None, the
value is loaded. This strategy can be used when objects will
remain associated with the attached :class:`.Session`, however
additional SELECT statements should be blocked.
.. versionadded:: 1.1
* ``dynamic`` - the attribute will return a pre-configured
:class:`_query.Query` object for all read
operations, onto which further filtering operations can be
applied before iterating the results. See
the section :ref:`dynamic_relationship` for more details.
* True - a synonym for 'select'
* False - a synonym for 'joined'
* None - a synonym for 'noload'
.. seealso::
:doc:`/orm/loading_relationships` - Full documentation on
relationship loader configuration.
:ref:`dynamic_relationship` - detail on the ``dynamic`` option.
:ref:`collections_noload_raiseload` - notes on "noload" and "raise"
:param load_on_pending=False:
Indicates loading behavior for transient or pending parent objects.
When set to ``True``, causes the lazy-loader to
issue a query for a parent object that is not persistent, meaning it
has never been flushed. This may take effect for a pending object
when autoflush is disabled, or for a transient object that has been
"attached" to a :class:`.Session` but is not part of its pending
collection.
The :paramref:`_orm.relationship.load_on_pending`
flag does not improve
behavior when the ORM is used normally - object references should be
constructed at the object level, not at the foreign key level, so
that they are present in an ordinary way before a flush proceeds.
This flag is not intended for general use.
.. seealso::
:meth:`.Session.enable_relationship_loading` - this method
establishes "load on pending" behavior for the whole object, and
also allows loading on objects that remain transient or
detached.
:param order_by:
Indicates the ordering that should be applied when loading these
items. :paramref:`_orm.relationship.order_by`
is expected to refer to
one of the :class:`_schema.Column`
objects to which the target class is
mapped, or the attribute itself bound to the target class which
refers to the column.
:paramref:`_orm.relationship.order_by`
may also be passed as a callable
function which is evaluated at mapper initialization time, and may
be passed as a Python-evaluable string when using Declarative.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
:param passive_deletes=False:
Indicates loading behavior during delete operations.
A value of True indicates that unloaded child items should not
be loaded during a delete operation on the parent. Normally,
when a parent item is deleted, all child items are loaded so
that they can either be marked as deleted, or have their
foreign key to the parent set to NULL. Marking this flag as
True usually implies an ON DELETE <CASCADE|SET NULL> rule is in
place which will handle updating/deleting child rows on the
database side.
Additionally, setting the flag to the string value 'all' will
disable the "nulling out" of the child foreign keys, when the parent
object is deleted and there is no delete or delete-orphan cascade
enabled. This is typically used when a triggering or error raise
scenario is in place on the database side. Note that the foreign
key attributes on in-session child objects will not be changed after
a flush occurs so this is a very special use-case setting.
Additionally, the "nulling out" will still occur if the child
object is de-associated with the parent.
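For example, a minimal sketch assuming the database enforces
``ON DELETE CASCADE`` on the child table's foreign key::

    children = relationship(
        "Child", cascade="all, delete-orphan", passive_deletes=True)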
.. seealso::
:ref:`passive_deletes` - Introductory documentation
and examples.
:param passive_updates=True:
Indicates the persistence behavior to take when a referenced
primary key value changes in place, indicating that the referencing
foreign key columns will also need their value changed.
When True, it is assumed that ``ON UPDATE CASCADE`` is configured on
the foreign key in the database, and that the database will
handle propagation of an UPDATE from a source column to
dependent rows. When False, the SQLAlchemy
:func:`_orm.relationship`
construct will attempt to emit its own UPDATE statements to
modify related targets. However note that SQLAlchemy **cannot**
emit an UPDATE for more than one level of cascade. Also,
setting this flag to False is not compatible in the case where
the database is in fact enforcing referential integrity, unless
those constraints are explicitly "deferred", if the target backend
supports it.
It is highly advised that an application which is employing
mutable primary keys keeps ``passive_updates`` set to True,
and instead uses the referential integrity features of the database
itself in order to handle the change efficiently and fully.
.. seealso::
:ref:`passive_updates` - Introductory documentation and
examples.
:paramref:`.mapper.passive_updates` - a similar flag which
takes effect for joined-table inheritance mappings.
:param post_update:
This indicates that the relationship should be handled by a
second UPDATE statement after an INSERT or before a
DELETE. Currently, it will also issue an UPDATE after the
instance itself has been UPDATEd, although this technically should
be improved.
dependencies between two individual rows (i.e. each row
references the other), where it would otherwise be impossible to
INSERT or DELETE both rows fully since one row exists before the
other. Use this flag when a particular mapping arrangement will
incur two rows that are dependent on each other, such as a table
that has a one-to-many relationship to a set of child rows, and
also has a column that references a single child row within that
list (i.e. both tables contain a foreign key to each other). If
a flush operation returns an error that a "cyclical
dependency" was detected, this is a cue that you might want to
use :paramref:`_orm.relationship.post_update` to "break" the cycle.
.. seealso::
:ref:`post_update` - Introductory documentation and examples.
:param primaryjoin:
A SQL expression that will be used as the primary
join of the child object against the parent object, or in a
many-to-many relationship the join of the parent object to the
association table. By default, this value is computed based on the
foreign key relationships of the parent and child tables (or
association table).
:paramref:`_orm.relationship.primaryjoin` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
.. seealso::
:ref:`relationship_primaryjoin`
:param remote_side:
Used for self-referential relationships, indicates the column or
list of columns that form the "remote side" of the relationship.
:paramref:`_orm.relationship.remote_side` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
.. seealso::
:ref:`self_referential` - in-depth explanation of how
:paramref:`_orm.relationship.remote_side`
is used to configure self-referential relationships.
:func:`.remote` - an annotation function that accomplishes the
same purpose as :paramref:`_orm.relationship.remote_side`,
typically
when a custom :paramref:`_orm.relationship.primaryjoin` condition
is used.
:param query_class:
A :class:`_query.Query`
subclass that will be used as the base of the
"appender query" returned by a "dynamic" relationship, that
is, a relationship that specifies ``lazy="dynamic"`` or was
otherwise constructed using the :func:`_orm.dynamic_loader`
function.
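As a hypothetical sketch (names are illustrative only), a custom
:class:`_query.Query` subclass can add methods to the dynamic
collection::

    from sqlalchemy.orm import Query

    class FilteredQuery(Query):
        def undeleted(self):
            # assumes the related class has a "deleted" column
            return self.filter_by(deleted=False)

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)

        addresses = relationship(
            "Address", lazy="dynamic", query_class=FilteredQuery)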
.. seealso::
:ref:`dynamic_relationship` - Introduction to "dynamic"
relationship loaders.
:param secondaryjoin:
A SQL expression that will be used as the join of
an association table to the child object. By default, this value is
computed based on the foreign key relationships of the association
and child tables.
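As a hypothetical many-to-many sketch (names are illustrative only,
usual declarative imports assumed), both join conditions may be
spelled out explicitly::

    association = Table(
        'association', Base.metadata,
        Column('left_id', Integer, ForeignKey('left.id')),
        Column('right_id', Integer, ForeignKey('right.id')))

    class Parent(Base):
        __tablename__ = 'left'
        id = Column(Integer, primary_key=True)

        children = relationship(
            "Child",
            secondary=association,
            primaryjoin="Parent.id==association.c.left_id",
            secondaryjoin="Child.id==association.c.right_id")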
:paramref:`_orm.relationship.secondaryjoin` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
.. seealso::
:ref:`relationship_primaryjoin`
:param single_parent:
When True, installs a validator which will prevent objects
from being associated with more than one parent at a time.
This is used for many-to-one or many-to-many relationships that
should be treated either as one-to-one or one-to-many. Its usage
is optional, except for :func:`_orm.relationship` constructs which
are many-to-one or many-to-many and also
specify the ``delete-orphan`` cascade option. The
:func:`_orm.relationship` construct itself will raise an error
instructing when this option is required.
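As a hypothetical sketch (names are illustrative only), a
many-to-one that also wants ``delete-orphan`` cascade::

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        child_id = Column(Integer, ForeignKey('child.id'))

        # delete-orphan on a many-to-one requires single_parent=True
        child = relationship(
            "Child",
            single_parent=True,
            cascade="all, delete-orphan")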
.. seealso::
:ref:`unitofwork_cascades` - includes detail on when the
:paramref:`_orm.relationship.single_parent`
flag may be appropriate.
:param uselist:
A boolean that indicates if this property should be loaded as a
list or a scalar. In most cases, this value is determined
automatically by :func:`_orm.relationship` at mapper configuration
time, based on the type and direction
of the relationship - one to many forms a list, many to one
forms a scalar, many to many is a list. If a scalar is desired
where normally a list would be present, such as a bi-directional
one-to-one relationship, set :paramref:`_orm.relationship.uselist`
to
False.
The :paramref:`_orm.relationship.uselist`
flag is also available on an
existing :func:`_orm.relationship`
construct as a read-only attribute,
which can be used to determine if this :func:`_orm.relationship`
deals
with collections or scalar attributes::
>>> User.addresses.property.uselist
True
.. seealso::
:ref:`relationships_one_to_one` - Introduction to the "one to
one" relationship pattern, which is typically when the
:paramref:`_orm.relationship.uselist` flag is needed.
:param viewonly=False:
When set to ``True``, the relationship is used only for loading
objects, and not for any persistence operation. A
:func:`_orm.relationship` which specifies
:paramref:`_orm.relationship.viewonly` can work
with a wider range of SQL operations within the
:paramref:`_orm.relationship.primaryjoin` condition, including
operations that feature the use of a variety of comparison operators
as well as SQL functions such as :func:`_expression.cast`. The
:paramref:`_orm.relationship.viewonly`
flag is also of general use when defining any kind of
:func:`_orm.relationship` that doesn't represent
the full set of related objects, to prevent modifications of the
collection from resulting in persistence operations.
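As a hypothetical sketch (names are illustrative only), a read-only
view of a subset of related rows::

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)

        # loads only "open" orders; never flushes changes
        open_orders = relationship(
            "Order",
            primaryjoin="and_(User.id==Order.user_id, "
                        "Order.status=='open')",
            viewonly=True)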
When using the :paramref:`_orm.relationship.viewonly` flag in
conjunction with backrefs, the
:paramref:`_orm.relationship.sync_backref` should be set to False;
this indicates that the backref should not actually populate this
relationship with data when changes occur on the other side; as this
is a viewonly relationship, it cannot accommodate changes in state
correctly as these will not be persisted.
.. versionadded:: 1.3.17 - the
:paramref:`_orm.relationship.sync_backref`
flag set to False is required when using viewonly in conjunction
with backrefs. A warning is emitted when this flag is not set.
.. seealso::
:paramref:`_orm.relationship.sync_backref`
:param sync_backref:
A boolean that enables the events used to synchronize the in-Python
attributes when this relationship is target of either
:paramref:`_orm.relationship.backref` or
:paramref:`_orm.relationship.back_populates`.
Defaults to ``None``, which indicates that an automatic value should
be selected based on the value of the
:paramref:`_orm.relationship.viewonly` flag. When left at its
default, changes in state for writable relationships will be
back-populated normally. For viewonly relationships, a warning is
emitted unless the flag is set to ``False``.
.. versionadded:: 1.3.17
.. seealso::
:paramref:`_orm.relationship.viewonly`
:param omit_join:
Allows manual control over the "selectin" automatic join
optimization. Set to ``False`` to disable the "omit join" feature
added in SQLAlchemy 1.3; or leave as ``None`` to leave automatic
optimization in place.
.. note:: This flag may only be set to ``False``. It is not
necessary to set it to ``True`` as the "omit_join" optimization is
automatically detected; if it is not detected, then the
optimization is not supported.
.. versionchanged:: 1.3.11 setting ``omit_join`` to True will now
emit a warning as this was not the intended use of this flag.
.. versionadded:: 1.3
"""
super(RelationshipProperty, self).__init__()
self.uselist = uselist
self.argument = argument
self.secondary = secondary
self.primaryjoin = primaryjoin
self.secondaryjoin = secondaryjoin
self.post_update = post_update
self.direction = None
self.viewonly = viewonly
if viewonly:
self._warn_for_persistence_only_flags(
passive_deletes=passive_deletes,
passive_updates=passive_updates,
enable_typechecks=enable_typechecks,
active_history=active_history,
cascade_backrefs=cascade_backrefs,
)
if viewonly and sync_backref:
raise sa_exc.ArgumentError(
"sync_backref and viewonly cannot both be True"
)
self.sync_backref = sync_backref
self.lazy = lazy
self.single_parent = single_parent
self._user_defined_foreign_keys = foreign_keys
self.collection_class = collection_class
self.passive_deletes = passive_deletes
self.cascade_backrefs = cascade_backrefs
self.passive_updates = passive_updates
self.remote_side = remote_side
self.enable_typechecks = enable_typechecks
self.query_class = query_class
self.innerjoin = innerjoin
self.distinct_target_key = distinct_target_key
self.doc = doc
self.active_history = active_history
self.join_depth = join_depth
if omit_join:
util.warn(
"setting omit_join to True is not supported; selectin "
"loading of this relationship may not work correctly if this "
"flag is set explicitly. omit_join optimization is "
"automatically detected for conditions under which it is "
"supported."
)
self.omit_join = omit_join
self.local_remote_pairs = _local_remote_pairs
self.extension = extension
self.bake_queries = bake_queries
self.load_on_pending = load_on_pending
self.comparator_factory = (
comparator_factory or RelationshipProperty.Comparator
)
self.comparator = self.comparator_factory(self, None)
util.set_creation_order(self)
if info is not None:
self.info = info
self.strategy_key = (("lazy", self.lazy),)
self._reverse_property = set()
if cascade is not False:
self.cascade = cascade
else:
self._set_cascade("save-update, merge", warn=False)
self.order_by = order_by
self.back_populates = back_populates
if self.back_populates:
if backref:
raise sa_exc.ArgumentError(
"backref and back_populates keyword arguments "
"are mutually exclusive"
)
self.backref = None
else:
self.backref = backref
def _warn_for_persistence_only_flags(self, **kw):
for k, v in kw.items():
if v != self._persistence_only[k]:
# we are warning here rather than warn deprecated as this is a
# configuration mistake, and Python shows regular warnings more
# aggressively than deprecation warnings by default. Unlike the
# case of setting viewonly with cascade, the settings being
# warned about here are not actively doing the wrong thing
# against viewonly=True, so it is not as urgent to have these
# raise an error.
util.warn(
"Setting %s on relationship() while also "
"setting viewonly=True does not make sense, as a "
"viewonly=True relationship does not perform persistence "
"operations. This configuration may raise an error "
"in a future release." % (k,)
)
def instrument_class(self, mapper):
attributes.register_descriptor(
mapper.class_,
self.key,
comparator=self.comparator_factory(self, mapper),
parententity=mapper,
doc=self.doc,
)
class Comparator(PropComparator):
"""Produce boolean, comparison, and other operators for
:class:`.RelationshipProperty` attributes.
See the documentation for :class:`.PropComparator` for a brief
overview of ORM level operator definition.
.. seealso::
:class:`.PropComparator`
:class:`.ColumnProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
_of_type = None
def __init__(
self, prop, parentmapper, adapt_to_entity=None, of_type=None
):
"""Construction of :class:`.RelationshipProperty.Comparator`
is internal to the ORM's attribute mechanics.
"""
self.prop = prop
self._parententity = parentmapper
self._adapt_to_entity = adapt_to_entity
if of_type:
self._of_type = of_type
def adapt_to_entity(self, adapt_to_entity):
return self.__class__(
self.property,
self._parententity,
adapt_to_entity=adapt_to_entity,
of_type=self._of_type,
)
@util.memoized_property
def entity(self):
"""The target entity referred to by this
:class:`.RelationshipProperty.Comparator`.
This is either a :class:`_orm.Mapper` or :class:`.AliasedInsp`
object.
This is the "target" or "remote" side of the
:func:`_orm.relationship`.
"""
return self.property.entity
@util.memoized_property
def mapper(self):
"""The target :class:`_orm.Mapper` referred to by this
:class:`.RelationshipProperty.Comparator`.
This is the "target" or "remote" side of the
:func:`_orm.relationship`.
"""
return self.property.mapper
@util.memoized_property
def _parententity(self):
return self.property.parent
def _source_selectable(self):
if self._adapt_to_entity:
return self._adapt_to_entity.selectable
else:
return self.property.parent._with_polymorphic_selectable
def __clause_element__(self):
adapt_from = self._source_selectable()
if self._of_type:
of_type_mapper = inspect(self._of_type).mapper
else:
of_type_mapper = None
(
pj,
sj,
source,
dest,
secondary,
target_adapter,
) = self.property._create_joins(
source_selectable=adapt_from,
source_polymorphic=True,
of_type_mapper=of_type_mapper,
alias_secondary=True,
)
if sj is not None:
return pj & sj
else:
return pj
def of_type(self, cls):
r"""Redefine this object in terms of a polymorphic subclass.
See :meth:`.PropComparator.of_type` for an example.
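As a brief hypothetical sketch (names are illustrative only)::

    session.query(Company).join(
        Company.employees.of_type(Engineer)
    ).filter(Engineer.primary_language == 'python')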
"""
return RelationshipProperty.Comparator(
self.property,
self._parententity,
adapt_to_entity=self._adapt_to_entity,
of_type=cls,
)
def in_(self, other):
"""Produce an IN clause - this is not implemented
for :func:`_orm.relationship`-based attributes at this time.
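As a hypothetical sketch of the workaround for a simple many-to-one
(names are illustrative only), compare the foreign key column
directly::

    session.query(Address).filter(
        Address.user_id.in_([user1.id, user2.id]))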
"""
raise NotImplementedError(
"in_() not yet supported for "
"relationships. For a simple "
"many-to-one, use in_() against "
"the set of foreign key values."
)
__hash__ = None
def __eq__(self, other):
"""Implement the ``==`` operator.
In a many-to-one context, such as::
MyClass.some_prop == <some object>
this will typically produce a
clause such as::
mytable.related_id == <some id>
Where ``<some id>`` is the primary key of the given
object.
The ``==`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use :meth:`~.RelationshipProperty.Comparator.contains`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.RelationshipProperty.Comparator.has` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce a NOT EXISTS clause.
"""
if isinstance(other, (util.NoneType, expression.Null)):
if self.property.direction in [ONETOMANY, MANYTOMANY]:
return ~self._criterion_exists()
else:
return _orm_annotate(
self.property._optimized_compare(
None, adapt_source=self.adapter
)
)
elif self.property.uselist:
raise sa_exc.InvalidRequestError(
"Can't compare a collection to an object or collection; "
"use contains() to test for membership."
)
else:
return _orm_annotate(
self.property._optimized_compare(
other, adapt_source=self.adapter
)
)
def _criterion_exists(self, criterion=None, **kwargs):
if getattr(self, "_of_type", None):
info = inspect(self._of_type)
target_mapper, to_selectable, is_aliased_class = (
info.mapper,
info.selectable,
info.is_aliased_class,
)
if self.property._is_self_referential and not is_aliased_class:
to_selectable = to_selectable.alias()
single_crit = target_mapper._single_table_criterion
if single_crit is not None:
if criterion is not None:
criterion = single_crit & criterion
else:
criterion = single_crit
else:
is_aliased_class = False
to_selectable = None
if self.adapter:
source_selectable = self._source_selectable()
else:
source_selectable = None
(
pj,
sj,
source,
dest,
secondary,
target_adapter,
) = self.property._create_joins(
dest_polymorphic=True,
dest_selectable=to_selectable,
source_selectable=source_selectable,
)
for k in kwargs:
crit = getattr(self.property.mapper.class_, k) == kwargs[k]
if criterion is None:
criterion = crit
else:
criterion = criterion & crit
# annotate the *local* side of the join condition, in the case
# of pj + sj this is the full primaryjoin, in the case of just
# pj it's the local side of the primaryjoin.
if sj is not None:
j = _orm_annotate(pj) & sj
else:
j = _orm_annotate(pj, exclude=self.property.remote_side)
if (
criterion is not None
and target_adapter
and not is_aliased_class
):
# limit this adapter to annotated only?
criterion = target_adapter.traverse(criterion)
# only have the "joined left side" of what we
# return be subject to Query adaption. The right
# side of it is used for an exists() subquery and
# should not correlate or otherwise reach out
# to anything in the enclosing query.
if criterion is not None:
criterion = criterion._annotate(
{"no_replacement_traverse": True}
)
crit = j & sql.True_._ifnone(criterion)
if secondary is not None:
ex = sql.exists(
[1], crit, from_obj=[dest, secondary]
).correlate_except(dest, secondary)
else:
ex = sql.exists([1], crit, from_obj=dest).correlate_except(
dest
)
return ex
def any(self, criterion=None, **kwargs):
"""Produce an expression that tests a collection against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.any(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id
AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.any` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.any` is particularly
useful for testing for empty collections::
session.query(MyClass).filter(
~MyClass.somereference.any()
)
will produce::
SELECT * FROM my_table WHERE
NOT EXISTS (SELECT 1 FROM related WHERE
related.my_id=my_table.id)
:meth:`~.RelationshipProperty.Comparator.any` is only
valid for collections, i.e. a :func:`_orm.relationship`
that has ``uselist=True``. For scalar references,
use :meth:`~.RelationshipProperty.Comparator.has`.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
"'any()' not implemented for scalar "
"attributes. Use has()."
)
return self._criterion_exists(criterion, **kwargs)
def has(self, criterion=None, **kwargs):
"""Produce an expression that tests a scalar reference against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.has(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE
related.id==my_table.related_id AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.has` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.has` is only
valid for scalar references, i.e. a :func:`_orm.relationship`
that has ``uselist=False``. For collection references,
use :meth:`~.RelationshipProperty.Comparator.any`.
"""
if self.property.uselist:
raise sa_exc.InvalidRequestError(
"'has()' not implemented for collections. " "Use any()."
)
return self._criterion_exists(criterion, **kwargs)
def contains(self, other, **kwargs):
"""Return a simple expression that tests a collection for
containment of a particular item.
:meth:`~.RelationshipProperty.Comparator.contains` is
only valid for a collection, i.e. a
:func:`_orm.relationship` that implements
one-to-many or many-to-many with ``uselist=True``.
When used in a simple one-to-many context, an
expression like::
MyClass.contains(other)
Produces a clause like::
mytable.id == <some id>
Where ``<some id>`` is the value of the foreign key
attribute on ``other`` which refers to the primary
key of its parent object. From this it follows that
:meth:`~.RelationshipProperty.Comparator.contains` is
very useful when used with simple one-to-many
operations.
For many-to-many operations, the behavior of
:meth:`~.RelationshipProperty.Comparator.contains`
has more caveats. The association table will be
rendered in the statement, producing an "implicit"
join, that is, includes multiple tables in the FROM
clause which are equated in the WHERE clause::
query(MyClass).filter(MyClass.contains(other))
Produces a query like::
SELECT * FROM my_table, my_association_table AS
my_association_table_1 WHERE
my_table.id = my_association_table_1.parent_id
AND my_association_table_1.child_id = <some id>
Where ``<some id>`` would be the primary key of
``other``. From the above, it is clear that
:meth:`~.RelationshipProperty.Comparator.contains`
will **not** work with many-to-many collections when
used in queries that move beyond simple AND
conjunctions, such as multiple
:meth:`~.RelationshipProperty.Comparator.contains`
expressions joined by OR. In such cases subqueries or
explicit "outer joins" will need to be used instead.
See :meth:`~.RelationshipProperty.Comparator.any` for
a less-performant alternative using EXISTS, or refer
to :meth:`_query.Query.outerjoin`
as well as :ref:`ormtutorial_joins`
for more details on constructing outer joins.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
"'contains' not implemented for scalar "
"attributes. Use =="
)
clause = self.property._optimized_compare(
other, adapt_source=self.adapter
)
if self.property.secondaryjoin is not None:
clause.negation_clause = self.__negated_contains_or_equals(
other
)
return clause
def __negated_contains_or_equals(self, other):
if self.property.direction == MANYTOONE:
state = attributes.instance_state(other)
def state_bindparam(x, state, col):
dict_ = state.dict
return sql.bindparam(
x,
unique=True,
callable_=self.property._get_attr_w_warn_on_none(
self.property.mapper, state, dict_, col
),
)
def adapt(col):
if self.adapter:
return self.adapter(col)
else:
return col
if self.property._use_get:
return sql.and_(
*[
sql.or_(
adapt(x)
!= state_bindparam(adapt(x), state, y),
adapt(x) == None,
)
for (x, y) in self.property.local_remote_pairs
]
)
criterion = sql.and_(
*[
x == y
for (x, y) in zip(
self.property.mapper.primary_key,
self.property.mapper.primary_key_from_instance(other),
)
]
)
return ~self._criterion_exists(criterion)
def __ne__(self, other):
"""Implement the ``!=`` operator.
In a many-to-one context, such as::
MyClass.some_prop != <some object>
This will typically produce a clause such as::
mytable.related_id != <some id>
Where ``<some id>`` is the primary key of the
given object.
The ``!=`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use
:meth:`~.RelationshipProperty.Comparator.contains`
in conjunction with :func:`_expression.not_`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.RelationshipProperty.Comparator.has` in
conjunction with :func:`_expression.not_` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce an EXISTS clause.
"""
if isinstance(other, (util.NoneType, expression.Null)):
if self.property.direction == MANYTOONE:
return _orm_annotate(
~self.property._optimized_compare(
None, adapt_source=self.adapter
)
)
else:
return self._criterion_exists()
elif self.property.uselist:
raise sa_exc.InvalidRequestError(
"Can't compare a collection"
" to an object or collection; use "
"contains() to test for membership."
)
else:
return _orm_annotate(self.__negated_contains_or_equals(other))
@util.memoized_property
def property(self):
if mapperlib.Mapper._new_mappers:
mapperlib.Mapper._configure_all()
return self.prop
def _with_parent(self, instance, alias_secondary=True, from_entity=None):
assert instance is not None
adapt_source = None
if from_entity is not None:
insp = inspect(from_entity)
if insp.is_aliased_class:
adapt_source = insp._adapter.adapt_clause
return self._optimized_compare(
instance,
value_is_parent=True,
adapt_source=adapt_source,
alias_secondary=alias_secondary,
)
def _optimized_compare(
self,
state,
value_is_parent=False,
adapt_source=None,
alias_secondary=True,
):
if state is not None:
try:
state = inspect(state)
except sa_exc.NoInspectionAvailable:
state = None
if state is None or not getattr(state, "is_instance", False):
raise sa_exc.ArgumentError(
"Mapped instance expected for relationship "
"comparison to object. Classes, queries and other "
"SQL elements are not accepted in this context; for "
"comparison with a subquery, "
"use %s.has(**criteria)." % self
)
reverse_direction = not value_is_parent
if state is None:
return self._lazy_none_clause(
reverse_direction, adapt_source=adapt_source
)
if not reverse_direction:
criterion, bind_to_col = (
self._lazy_strategy._lazywhere,
self._lazy_strategy._bind_to_col,
)
else:
criterion, bind_to_col = (
self._lazy_strategy._rev_lazywhere,
self._lazy_strategy._rev_bind_to_col,
)
if reverse_direction:
mapper = self.mapper
else:
mapper = self.parent
dict_ = attributes.instance_dict(state.obj())
def visit_bindparam(bindparam):
if bindparam._identifying_key in bind_to_col:
bindparam.callable = self._get_attr_w_warn_on_none(
mapper,
state,
dict_,
bind_to_col[bindparam._identifying_key],
)
if self.secondary is not None and alias_secondary:
criterion = ClauseAdapter(self.secondary.alias()).traverse(
criterion
)
criterion = visitors.cloned_traverse(
criterion, {}, {"bindparam": visit_bindparam}
)
if adapt_source:
criterion = adapt_source(criterion)
return criterion
def _get_attr_w_warn_on_none(self, mapper, state, dict_, column):
"""Create the callable that is used in a many-to-one expression.
E.g.::
u1 = s.query(User).get(5)
expr = Address.user == u1
Above, the SQL should be "address.user_id = 5". The callable
returned by this method produces the value "5" based on the identity
of ``u1``.
"""
# in this callable, we're trying to thread the needle through
# a wide variety of scenarios, including:
#
# * the object hasn't been flushed yet and there's no value for
# the attribute as of yet
#
# * the object hasn't been flushed yet but it has a user-defined
# value
#
# * the object has a value but it's expired and not locally present
#
# * the object has a value but it's expired and not locally present,
# and the object is also detached
#
# * The object hadn't been flushed yet, there was no value, but
# later, the object has been expired and detached, and *now*
# they're trying to evaluate it
#
# * the object had a value, but it was changed to a new value, and
# then expired
#
# * the object had a value, but it was changed to a new value, and
# then expired, then the object was detached
#
# * the object has a user-set value, but it's None and we don't do
# the comparison correctly for that so warn
#
prop = mapper.get_property_by_column(column)
# by invoking this method, InstanceState will track the last known
# value for this key each time the attribute is to be expired.
# this feature was added explicitly for use in this method.
state._track_last_known_value(prop.key)
def _go():
last_known = to_return = state._last_known_values[prop.key]
existing_is_available = last_known is not attributes.NO_VALUE
# we support that the value may have changed. so here we
# try to get the most recent value including re-fetching.
# only if we can't get a value now due to detachment do we return
# the last known value
current_value = mapper._get_state_attr_by_column(
state,
dict_,
column,
passive=attributes.PASSIVE_OFF
if state.persistent
else attributes.PASSIVE_NO_FETCH ^ attributes.INIT_OK,
)
if current_value is attributes.NEVER_SET:
if not existing_is_available:
raise sa_exc.InvalidRequestError(
"Can't resolve value for column %s on object "
"%s; no value has been set for this column"
% (column, state_str(state))
)
elif current_value is attributes.PASSIVE_NO_RESULT:
if not existing_is_available:
raise sa_exc.InvalidRequestError(
"Can't resolve value for column %s on object "
"%s; the object is detached and the value was "
"expired" % (column, state_str(state))
)
else:
to_return = current_value
if to_return is None:
util.warn(
"Got None for value of column %s; this is unsupported "
"for a relationship comparison and will not "
"currently produce an IS comparison "
"(but may in a future release)" % column
)
return to_return
return _go
def _lazy_none_clause(self, reverse_direction=False, adapt_source=None):
if not reverse_direction:
criterion, bind_to_col = (
self._lazy_strategy._lazywhere,
self._lazy_strategy._bind_to_col,
)
else:
criterion, bind_to_col = (
self._lazy_strategy._rev_lazywhere,
self._lazy_strategy._rev_bind_to_col,
)
criterion = adapt_criterion_to_null(criterion, bind_to_col)
if adapt_source:
criterion = adapt_source(criterion)
return criterion
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
def merge(
self,
session,
source_state,
source_dict,
dest_state,
dest_dict,
load,
_recursive,
_resolve_conflict_map,
):
if load:
for r in self._reverse_property:
if (source_state, r) in _recursive:
return
if "merge" not in self._cascade:
return
if self.key not in source_dict:
return
if self.uselist:
instances = source_state.get_impl(self.key).get(
source_state, source_dict
)
if hasattr(instances, "_sa_adapter"):
# convert collections to adapters to get a true iterator
instances = instances._sa_adapter
if load:
# for a full merge, pre-load the destination collection,
# so that individual _merge of each item pulls from identity
# map for those already present.
# also assumes CollectionAttributeImpl behavior of loading
# "old" list in any case
dest_state.get_impl(self.key).get(dest_state, dest_dict)
dest_list = []
for current in instances:
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
obj = session._merge(
current_state,
current_dict,
load=load,
_recursive=_recursive,
_resolve_conflict_map=_resolve_conflict_map,
)
if obj is not None:
dest_list.append(obj)
if not load:
coll = attributes.init_state_collection(
dest_state, dest_dict, self.key
)
for c in dest_list:
coll.append_without_event(c)
else:
dest_state.get_impl(self.key).set(
dest_state, dest_dict, dest_list, _adapt=False
)
else:
current = source_dict[self.key]
if current is not None:
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
obj = session._merge(
current_state,
current_dict,
load=load,
_recursive=_recursive,
_resolve_conflict_map=_resolve_conflict_map,
)
else:
obj = None
if not load:
dest_dict[self.key] = obj
else:
dest_state.get_impl(self.key).set(
dest_state, dest_dict, obj, None
)
def _value_as_iterable(
self, state, dict_, key, passive=attributes.PASSIVE_OFF
):
"""Return a list of tuples (state, obj) for the given
key.
returns an empty list if the value is None/empty/PASSIVE_NO_RESULT
"""
impl = state.manager[key].impl
x = impl.get(state, dict_, passive=passive)
if x is attributes.PASSIVE_NO_RESULT or x is None:
return []
elif hasattr(impl, "get_collection"):
return [
(attributes.instance_state(o), o)
for o in impl.get_collection(state, dict_, x, passive=passive)
]
else:
return [(attributes.instance_state(x), x)]
def cascade_iterator(
self, type_, state, dict_, visited_states, halt_on=None
):
# assert type_ in self._cascade
# only actively lazy load on the 'delete' cascade
if type_ != "delete" or self.passive_deletes:
passive = attributes.PASSIVE_NO_INITIALIZE
else:
passive = attributes.PASSIVE_OFF
if type_ == "save-update":
tuples = state.manager[self.key].impl.get_all_pending(state, dict_)
else:
tuples = self._value_as_iterable(
state, dict_, self.key, passive=passive
)
skip_pending = (
type_ == "refresh-expire" and "delete-orphan" not in self._cascade
)
for instance_state, c in tuples:
if instance_state in visited_states:
continue
if c is None:
# would like to emit a warning here, but
# would not be consistent with collection.append(None)'s
# current behavior of silently skipping.
# see [ticket:2229]
continue
instance_dict = attributes.instance_dict(c)
if halt_on and halt_on(instance_state):
continue
if skip_pending and not instance_state.key:
continue
instance_mapper = instance_state.manager.mapper
if not instance_mapper.isa(self.mapper.class_manager.mapper):
raise AssertionError(
"Attribute '%s' on class '%s' "
"doesn't handle objects "
"of type '%s'"
% (self.key, self.parent.class_, c.__class__)
)
visited_states.add(instance_state)
yield c, instance_mapper, instance_state, instance_dict
@property
def _effective_sync_backref(self):
return self.sync_backref is not False
@staticmethod
def _check_sync_backref(rel_a, rel_b):
if rel_a.viewonly and rel_b.sync_backref:
raise sa_exc.InvalidRequestError(
"Relationship %s cannot specify sync_backref=True since %s "
"includes viewonly=True." % (rel_b, rel_a)
)
if rel_a.viewonly and rel_b.sync_backref is not False:
util.warn_limited(
"Setting backref / back_populates on relationship %s to refer "
"to viewonly relationship %s should include "
"sync_backref=False set on the %s relationship. ",
(rel_b, rel_a, rel_b),
)
def _add_reverse_property(self, key):
other = self.mapper.get_property(key, _configure_mappers=False)
# viewonly and sync_backref cases
# 1. self.viewonly==True and other.sync_backref==True -> error
# 2. self.viewonly==True and other.viewonly==False and
# other.sync_backref==None -> warn sync_backref=False, set to False
self._check_sync_backref(self, other)
# 3. other.viewonly==True and self.sync_backref==True -> error
# 4. other.viewonly==True and self.viewonly==False and
# self.sync_backref==None -> warn sync_backref=False, set to False
self._check_sync_backref(other, self)
self._reverse_property.add(other)
other._reverse_property.add(self)
if not other.mapper.common_parent(self.parent):
raise sa_exc.ArgumentError(
"reverse_property %r on "
"relationship %s references relationship %s, which "
"does not reference mapper %s"
% (key, self, other, self.parent)
)
if (
self.direction in (ONETOMANY, MANYTOONE)
and self.direction == other.direction
):
raise sa_exc.ArgumentError(
"%s and back-reference %s are "
"both of the same direction %r. Did you mean to "
"set remote_side on the many-to-one side ?"
% (other, self, self.direction)
)
@util.memoized_property
def entity(self): # type: () -> Union[AliasedInsp, Mapper]
"""Return the target mapped entity, which is an inspect() of the
class or aliased class that is referred towards.
"""
if util.callable(self.argument) and not isinstance(
self.argument, (type, mapperlib.Mapper)
):
argument = self.argument()
else:
argument = self.argument
if isinstance(argument, type):
return mapperlib.class_mapper(argument, configure=False)
try:
entity = inspect(argument)
except sa_exc.NoInspectionAvailable:
pass
else:
if hasattr(entity, "mapper"):
return entity
raise sa_exc.ArgumentError(
"relationship '%s' expects "
"a class or a mapper argument (received: %s)"
% (self.key, type(argument))
)
@util.memoized_property
def mapper(self):
"""Return the targeted :class:`_orm.Mapper` for this
:class:`.RelationshipProperty`.
This is a lazy-initializing static attribute.
"""
return self.entity.mapper
def do_init(self):
self._check_conflicts()
self._process_dependent_arguments()
self._setup_join_conditions()
self._check_cascade_settings(self._cascade)
self._post_init()
self._generate_backref()
self._join_condition._warn_for_conflicting_sync_targets()
super(RelationshipProperty, self).do_init()
self._lazy_strategy = self._get_strategy((("lazy", "select"),))
def _process_dependent_arguments(self):
"""Convert incoming configuration arguments to their
proper form.
Callables are resolved, ORM annotations removed.
"""
# accept callables for other attributes which may require
# deferred initialization. This technique is used
# by declarative "string configs" and some recipes.
for attr in (
"order_by",
"primaryjoin",
"secondaryjoin",
"secondary",
"_user_defined_foreign_keys",
"remote_side",
):
attr_value = getattr(self, attr)
if util.callable(attr_value):
setattr(self, attr, attr_value())
# remove "annotations" which are present if mapped class
# descriptors are used to create the join expression.
for attr in "primaryjoin", "secondaryjoin":
val = getattr(self, attr)
if val is not None:
setattr(
self,
attr,
_orm_deannotate(
expression._only_column_elements(val, attr)
),
)
# ensure expressions in self.order_by, foreign_keys,
# remote_side are all columns, not strings.
if self.order_by is not False and self.order_by is not None:
self.order_by = [
expression._only_column_elements(x, "order_by")
for x in util.to_list(self.order_by)
]
self._user_defined_foreign_keys = util.column_set(
expression._only_column_elements(x, "foreign_keys")
for x in util.to_column_set(self._user_defined_foreign_keys)
)
self.remote_side = util.column_set(
expression._only_column_elements(x, "remote_side")
for x in util.to_column_set(self.remote_side)
)
self.target = self.entity.persist_selectable
def _setup_join_conditions(self):
self._join_condition = jc = JoinCondition(
parent_persist_selectable=self.parent.persist_selectable,
child_persist_selectable=self.entity.persist_selectable,
parent_local_selectable=self.parent.local_table,
child_local_selectable=self.entity.local_table,
primaryjoin=self.primaryjoin,
secondary=self.secondary,
secondaryjoin=self.secondaryjoin,
parent_equivalents=self.parent._equivalent_columns,
child_equivalents=self.mapper._equivalent_columns,
consider_as_foreign_keys=self._user_defined_foreign_keys,
local_remote_pairs=self.local_remote_pairs,
remote_side=self.remote_side,
self_referential=self._is_self_referential,
prop=self,
support_sync=not self.viewonly,
can_be_synced_fn=self._columns_are_mapped,
)
self.primaryjoin = jc.primaryjoin
self.secondaryjoin = jc.secondaryjoin
self.direction = jc.direction
self.local_remote_pairs = jc.local_remote_pairs
self.remote_side = jc.remote_columns
self.local_columns = jc.local_columns
self.synchronize_pairs = jc.synchronize_pairs
self._calculated_foreign_keys = jc.foreign_key_columns
self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs
def _check_conflicts(self):
"""Test that this relationship is legal, warn about
inheritance conflicts."""
if self.parent.non_primary and not mapperlib.class_mapper(
self.parent.class_, configure=False
).has_property(self.key):
raise sa_exc.ArgumentError(
"Attempting to assign a new "
"relationship '%s' to a non-primary mapper on "
"class '%s'. New relationships can only be added "
"to the primary mapper, i.e. the very first mapper "
"created for class '%s' "
% (
self.key,
self.parent.class_.__name__,
self.parent.class_.__name__,
)
)
@property
def cascade(self):
"""Return the current cascade setting for this
:class:`.RelationshipProperty`.
"""
return self._cascade
@cascade.setter
def cascade(self, cascade):
self._set_cascade(cascade)
def _set_cascade(self, cascade, warn=True):
cascade = CascadeOptions(cascade)
if warn and self.viewonly:
non_viewonly = set(cascade).difference(
CascadeOptions._viewonly_cascades
)
if non_viewonly:
# we are warning here rather than warn deprecated as this
# setting actively does the wrong thing and Python shows
# regular warnings more aggressively than deprecation warnings
# by default. There's no other guard against setting active
# persistence cascades under viewonly=True so this will raise
# in 1.4.
util.warn(
'Cascade settings "%s" should not be combined with a '
"viewonly=True relationship. This configuration will "
"raise an error in version 1.4. Note that in versions "
"prior to 1.4, "
"these cascade settings may still produce a mutating "
"effect even though this relationship is marked as "
"viewonly=True." % (", ".join(sorted(non_viewonly)))
)
if "mapper" in self.__dict__:
self._check_cascade_settings(cascade)
self._cascade = cascade
if self._dependency_processor:
self._dependency_processor.cascade = cascade
def _check_cascade_settings(self, cascade):
if (
cascade.delete_orphan
and not self.single_parent
and (self.direction is MANYTOMANY or self.direction is MANYTOONE)
):
raise sa_exc.ArgumentError(
"For %(direction)s relationship %(rel)s, delete-orphan "
"cascade is normally "
'configured only on the "one" side of a one-to-many '
"relationship, "
'and not on the "many" side of a many-to-one or many-to-many '
"relationship. "
"To force this relationship to allow a particular "
'"%(relatedcls)s" object to be referred towards by only '
'a single "%(clsname)s" object at a time via the '
"%(rel)s relationship, which "
"would allow "
"delete-orphan cascade to take place in this direction, set "
"the single_parent=True flag."
% {
"rel": self,
"direction": "many-to-one"
if self.direction is MANYTOONE
else "many-to-many",
"clsname": self.parent.class_.__name__,
"relatedcls": self.mapper.class_.__name__,
},
code="bbf0",
)
if self.direction is MANYTOONE and self.passive_deletes:
util.warn(
"On %s, 'passive_deletes' is normally configured "
"on one-to-many, one-to-one, many-to-many "
"relationships only." % self
)
if self.passive_deletes == "all" and (
"delete" in cascade or "delete-orphan" in cascade
):
raise sa_exc.ArgumentError(
"On %s, can't set passive_deletes='all' in conjunction "
"with 'delete' or 'delete-orphan' cascade" % self
)
if cascade.delete_orphan:
self.mapper.primary_mapper()._delete_orphans.append(
(self.key, self.parent.class_)
)
def _persists_for(self, mapper):
"""Return True if this property will persist values on behalf
of the given mapper.
"""
return (
self.key in mapper.relationships
and mapper.relationships[self.key] is self
)
def _columns_are_mapped(self, *cols):
"""Return True if all columns in the given collection are
mapped by the tables referenced by this :class:`.Relationship`.
"""
for c in cols:
if (
self.secondary is not None
and self.secondary.c.contains_column(c)
):
continue
if not self.parent.persist_selectable.c.contains_column(
c
) and not self.target.c.contains_column(c):
return False
return True
def _generate_backref(self):
"""Interpret the 'backref' instruction to create a
:func:`_orm.relationship` complementary to this one."""
if self.parent.non_primary:
return
if self.backref is not None and not self.back_populates:
if isinstance(self.backref, util.string_types):
backref_key, kwargs = self.backref, {}
else:
backref_key, kwargs = self.backref
mapper = self.mapper.primary_mapper()
if not mapper.concrete:
check = set(mapper.iterate_to_root()).union(
mapper.self_and_descendants
)
for m in check:
if m.has_property(backref_key) and not m.concrete:
raise sa_exc.ArgumentError(
"Error creating backref "
"'%s' on relationship '%s': property of that "
"name exists on mapper '%s'"
% (backref_key, self, m)
)
# determine primaryjoin/secondaryjoin for the
# backref. Use the one we had, so that
# a custom join doesn't have to be specified in
# both directions.
if self.secondary is not None:
# for many to many, just switch primaryjoin/
# secondaryjoin. use the annotated
# pj/sj on the _join_condition.
pj = kwargs.pop(
"primaryjoin",
self._join_condition.secondaryjoin_minus_local,
)
sj = kwargs.pop(
"secondaryjoin",
self._join_condition.primaryjoin_minus_local,
)
else:
pj = kwargs.pop(
"primaryjoin",
self._join_condition.primaryjoin_reverse_remote,
)
sj = kwargs.pop("secondaryjoin", None)
if sj:
raise sa_exc.InvalidRequestError(
"Can't assign 'secondaryjoin' on a backref "
"against a non-secondary relationship."
)
foreign_keys = kwargs.pop(
"foreign_keys", self._user_defined_foreign_keys
)
parent = self.parent.primary_mapper()
kwargs.setdefault("viewonly", self.viewonly)
kwargs.setdefault("post_update", self.post_update)
kwargs.setdefault("passive_updates", self.passive_updates)
kwargs.setdefault("sync_backref", self.sync_backref)
self.back_populates = backref_key
relationship = RelationshipProperty(
parent,
self.secondary,
pj,
sj,
foreign_keys=foreign_keys,
back_populates=self.key,
**kwargs
)
mapper._configure_property(backref_key, relationship)
if self.back_populates:
self._add_reverse_property(self.back_populates)
def _post_init(self):
if self.uselist is None:
self.uselist = self.direction is not MANYTOONE
if not self.viewonly:
self._dependency_processor = (
dependency.DependencyProcessor.from_relationship
)(self)
@util.memoized_property
def _use_get(self):
"""memoize the 'use_get' attribute of this RelationshipLoader's
lazyloader."""
strategy = self._lazy_strategy
return strategy.use_get
@util.memoized_property
def _is_self_referential(self):
return self.mapper.common_parent(self.parent)
def _create_joins(
self,
source_polymorphic=False,
source_selectable=None,
dest_polymorphic=False,
dest_selectable=None,
of_type_mapper=None,
alias_secondary=False,
):
aliased = False
if alias_secondary and self.secondary is not None:
aliased = True
if source_selectable is None:
if source_polymorphic and self.parent.with_polymorphic:
source_selectable = self.parent._with_polymorphic_selectable
if dest_selectable is None:
dest_selectable = self.entity.selectable
if dest_polymorphic and self.mapper.with_polymorphic:
aliased = True
if self._is_self_referential and source_selectable is None:
dest_selectable = dest_selectable.alias()
aliased = True
elif (
dest_selectable is not self.mapper._with_polymorphic_selectable
or self.mapper.with_polymorphic
):
aliased = True
dest_mapper = of_type_mapper or self.mapper
single_crit = dest_mapper._single_table_criterion
aliased = aliased or (
source_selectable is not None
and (
source_selectable
is not self.parent._with_polymorphic_selectable
or source_selectable._is_from_container # e.g an alias
)
)
(
primaryjoin,
secondaryjoin,
secondary,
target_adapter,
dest_selectable,
) = self._join_condition.join_targets(
source_selectable, dest_selectable, aliased, single_crit
)
if source_selectable is None:
source_selectable = self.parent.local_table
if dest_selectable is None:
dest_selectable = self.entity.local_table
return (
primaryjoin,
secondaryjoin,
source_selectable,
dest_selectable,
secondary,
target_adapter,
)
def _annotate_columns(element, annotations):
def clone(elem):
if isinstance(elem, expression.ColumnClause):
elem = elem._annotate(annotations.copy())
elem._copy_internals(clone=clone)
return elem
if element is not None:
element = clone(element)
clone = None # remove gc cycles
return element
class JoinCondition(object):
def __init__(
self,
parent_persist_selectable,
child_persist_selectable,
parent_local_selectable,
child_local_selectable,
primaryjoin=None,
secondary=None,
secondaryjoin=None,
parent_equivalents=None,
child_equivalents=None,
consider_as_foreign_keys=None,
local_remote_pairs=None,
remote_side=None,
self_referential=False,
prop=None,
support_sync=True,
can_be_synced_fn=lambda *c: True,
):
self.parent_persist_selectable = parent_persist_selectable
self.parent_local_selectable = parent_local_selectable
self.child_persist_selectable = child_persist_selectable
self.child_local_selectable = child_local_selectable
self.parent_equivalents = parent_equivalents
self.child_equivalents = child_equivalents
self.primaryjoin = primaryjoin
self.secondaryjoin = secondaryjoin
self.secondary = secondary
self.consider_as_foreign_keys = consider_as_foreign_keys
self._local_remote_pairs = local_remote_pairs
self._remote_side = remote_side
self.prop = prop
self.self_referential = self_referential
self.support_sync = support_sync
self.can_be_synced_fn = can_be_synced_fn
self._determine_joins()
self._sanitize_joins()
self._annotate_fks()
self._annotate_remote()
self._annotate_local()
self._annotate_parentmapper()
self._setup_pairs()
self._check_foreign_cols(self.primaryjoin, True)
if self.secondaryjoin is not None:
self._check_foreign_cols(self.secondaryjoin, False)
self._determine_direction()
self._check_remote_side()
self._log_joins()
def _log_joins(self):
if self.prop is None:
return
log = self.prop.logger
log.info("%s setup primary join %s", self.prop, self.primaryjoin)
log.info("%s setup secondary join %s", self.prop, self.secondaryjoin)
log.info(
"%s synchronize pairs [%s]",
self.prop,
",".join(
"(%s => %s)" % (l, r) for (l, r) in self.synchronize_pairs
),
)
log.info(
"%s secondary synchronize pairs [%s]",
self.prop,
",".join(
"(%s => %s)" % (l, r)
for (l, r) in self.secondary_synchronize_pairs or []
),
)
log.info(
"%s local/remote pairs [%s]",
self.prop,
",".join(
"(%s / %s)" % (l, r) for (l, r) in self.local_remote_pairs
),
)
log.info(
"%s remote columns [%s]",
self.prop,
",".join("%s" % col for col in self.remote_columns),
)
log.info(
"%s local columns [%s]",
self.prop,
",".join("%s" % col for col in self.local_columns),
)
log.info("%s relationship direction %s", self.prop, self.direction)
def _sanitize_joins(self):
"""remove the parententity annotation from our join conditions which
can leak in here based on some declarative patterns and maybe others.
We'd want to remove "parentmapper" also, but apparently there's
an exotic use case in _join_fixture_inh_selfref_w_entity
that relies upon it being present, see :ticket:`3364`.
"""
self.primaryjoin = _deep_deannotate(
self.primaryjoin, values=("parententity",)
)
if self.secondaryjoin is not None:
self.secondaryjoin = _deep_deannotate(
self.secondaryjoin, values=("parententity",)
)
def _determine_joins(self):
"""Determine the 'primaryjoin' and 'secondaryjoin' attributes,
if not passed to the constructor already.
This is based on analysis of the foreign key relationships
between the parent and target mapped selectables.
"""
if self.secondaryjoin is not None and self.secondary is None:
raise sa_exc.ArgumentError(
"Property %s specified with secondary "
"join condition but "
"no secondary argument" % self.prop
)
# find a join between the given mapper's mapped table and
# the given table. will try the mapper's local table first
# for more specificity, then if not found will try the more
# general mapped table, which in the case of inheritance is
# a join.
try:
consider_as_foreign_keys = self.consider_as_foreign_keys or None
if self.secondary is not None:
if self.secondaryjoin is None:
self.secondaryjoin = join_condition(
self.child_persist_selectable,
self.secondary,
a_subset=self.child_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys,
)
if self.primaryjoin is None:
self.primaryjoin = join_condition(
self.parent_persist_selectable,
self.secondary,
a_subset=self.parent_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys,
)
else:
if self.primaryjoin is None:
self.primaryjoin = join_condition(
self.parent_persist_selectable,
self.child_persist_selectable,
a_subset=self.parent_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys,
)
except sa_exc.NoForeignKeysError as nfe:
if self.secondary is not None:
util.raise_(
sa_exc.NoForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are no foreign keys "
"linking these tables via secondary table '%s'. "
"Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or "
"specify 'primaryjoin' and 'secondaryjoin' "
"expressions." % (self.prop, self.secondary)
),
from_=nfe,
)
else:
util.raise_(
sa_exc.NoForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are no foreign keys "
"linking these tables. "
"Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or "
"specify a 'primaryjoin' expression." % self.prop
),
from_=nfe,
)
except sa_exc.AmbiguousForeignKeysError as afe:
if self.secondary is not None:
util.raise_(
sa_exc.AmbiguousForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are multiple foreign key "
"paths linking the tables via secondary table '%s'. "
"Specify the 'foreign_keys' "
"argument, providing a list of those columns which "
"should be counted as containing a foreign key "
"reference from the secondary table to each of the "
"parent and child tables."
% (self.prop, self.secondary)
),
from_=afe,
)
else:
util.raise_(
sa_exc.AmbiguousForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are multiple foreign key "
"paths linking the tables. Specify the "
"'foreign_keys' argument, providing a list of those "
"columns which should be counted as containing a "
"foreign key reference to the parent table."
% self.prop
),
from_=afe,
)
@property
def primaryjoin_minus_local(self):
return _deep_deannotate(self.primaryjoin, values=("local", "remote"))
@property
def secondaryjoin_minus_local(self):
return _deep_deannotate(self.secondaryjoin, values=("local", "remote"))
@util.memoized_property
def primaryjoin_reverse_remote(self):
"""Return the primaryjoin condition suitable for the
"reverse" direction.
If the primaryjoin was delivered here with pre-existing
"remote" annotations, the local/remote annotations
are reversed. Otherwise, the local/remote annotations
are removed.
"""
if self._has_remote_annotations:
def replace(element):
if "remote" in element._annotations:
v = element._annotations.copy()
del v["remote"]
v["local"] = True
return element._with_annotations(v)
elif "local" in element._annotations:
v = element._annotations.copy()
del v["local"]
v["remote"] = True
return element._with_annotations(v)
return visitors.replacement_traverse(self.primaryjoin, {}, replace)
else:
if self._has_foreign_annotations:
# TODO: coverage
return _deep_deannotate(
self.primaryjoin, values=("local", "remote")
)
else:
return _deep_deannotate(self.primaryjoin)
def _has_annotation(self, clause, annotation):
for col in visitors.iterate(clause, {}):
if annotation in col._annotations:
return True
else:
return False
@util.memoized_property
def _has_foreign_annotations(self):
return self._has_annotation(self.primaryjoin, "foreign")
@util.memoized_property
def _has_remote_annotations(self):
return self._has_annotation(self.primaryjoin, "remote")
def _annotate_fks(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'foreign' annotations marking columns
considered as foreign.
"""
if self._has_foreign_annotations:
return
if self.consider_as_foreign_keys:
self._annotate_from_fk_list()
else:
self._annotate_present_fks()
def _annotate_from_fk_list(self):
def check_fk(col):
if col in self.consider_as_foreign_keys:
return col._annotate({"foreign": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, check_fk
)
if self.secondaryjoin is not None:
self.secondaryjoin = visitors.replacement_traverse(
self.secondaryjoin, {}, check_fk
)
def _annotate_present_fks(self):
if self.secondary is not None:
secondarycols = util.column_set(self.secondary.c)
else:
secondarycols = set()
def is_foreign(a, b):
if isinstance(a, schema.Column) and isinstance(b, schema.Column):
if a.references(b):
return a
elif b.references(a):
return b
if secondarycols:
if a in secondarycols and b not in secondarycols:
return a
elif b in secondarycols and a not in secondarycols:
return b
def visit_binary(binary):
if not isinstance(
binary.left, sql.ColumnElement
) or not isinstance(binary.right, sql.ColumnElement):
return
if (
"foreign" not in binary.left._annotations
and "foreign" not in binary.right._annotations
):
col = is_foreign(binary.left, binary.right)
if col is not None:
if col.compare(binary.left):
binary.left = binary.left._annotate({"foreign": True})
elif col.compare(binary.right):
binary.right = binary.right._annotate(
{"foreign": True}
)
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {}, {"binary": visit_binary}
)
if self.secondaryjoin is not None:
self.secondaryjoin = visitors.cloned_traverse(
self.secondaryjoin, {}, {"binary": visit_binary}
)
def _refers_to_parent_table(self):
"""Return True if the join condition contains column
comparisons where both columns are in both tables.
"""
pt = self.parent_persist_selectable
mt = self.child_persist_selectable
result = [False]
def visit_binary(binary):
c, f = binary.left, binary.right
if (
isinstance(c, expression.ColumnClause)
and isinstance(f, expression.ColumnClause)
and pt.is_derived_from(c.table)
and pt.is_derived_from(f.table)
and mt.is_derived_from(c.table)
and mt.is_derived_from(f.table)
):
result[0] = True
visitors.traverse(self.primaryjoin, {}, {"binary": visit_binary})
return result[0]
def _tables_overlap(self):
"""Return True if parent/child tables have some overlap."""
return selectables_overlap(
self.parent_persist_selectable, self.child_persist_selectable
)
def _annotate_remote(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'remote' annotations marking columns
considered as part of the 'remote' side.
"""
if self._has_remote_annotations:
return
if self.secondary is not None:
self._annotate_remote_secondary()
elif self._local_remote_pairs or self._remote_side:
self._annotate_remote_from_args()
elif self._refers_to_parent_table():
self._annotate_selfref(
lambda col: "foreign" in col._annotations, False
)
elif self._tables_overlap():
self._annotate_remote_with_overlap()
else:
self._annotate_remote_distinct_selectables()
def _annotate_remote_secondary(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when 'secondary' is present.
"""
def repl(element):
if self.secondary.c.contains_column(element):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl
)
self.secondaryjoin = visitors.replacement_traverse(
self.secondaryjoin, {}, repl
)
def _annotate_selfref(self, fn, remote_side_given):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the relationship is detected as self-referential.
"""
def visit_binary(binary):
equated = binary.left.compare(binary.right)
if isinstance(binary.left, expression.ColumnClause) and isinstance(
binary.right, expression.ColumnClause
):
# assume one to many - FKs are "remote"
if fn(binary.left):
binary.left = binary.left._annotate({"remote": True})
if fn(binary.right) and not equated:
binary.right = binary.right._annotate({"remote": True})
elif not remote_side_given:
self._warn_non_column_elements()
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {}, {"binary": visit_binary}
)
def _annotate_remote_from_args(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the 'remote_side' or '_local_remote_pairs'
arguments are used.
"""
if self._local_remote_pairs:
if self._remote_side:
raise sa_exc.ArgumentError(
"remote_side argument is redundant "
"against more detailed _local_remote_side "
"argument."
)
remote_side = [r for (l, r) in self._local_remote_pairs]
else:
remote_side = self._remote_side
if self._refers_to_parent_table():
self._annotate_selfref(lambda col: col in remote_side, True)
else:
def repl(element):
if element in remote_side:
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl
)
def _annotate_remote_with_overlap(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables have some set of
tables in common, though the relationship is not fully
self-referential.
"""
def visit_binary(binary):
binary.left, binary.right = proc_left_right(
binary.left, binary.right
)
binary.right, binary.left = proc_left_right(
binary.right, binary.left
)
check_entities = (
self.prop is not None and self.prop.mapper is not self.prop.parent
)
def proc_left_right(left, right):
if isinstance(left, expression.ColumnClause) and isinstance(
right, expression.ColumnClause
):
if self.child_persist_selectable.c.contains_column(
right
) and self.parent_persist_selectable.c.contains_column(left):
right = right._annotate({"remote": True})
elif (
check_entities
and right._annotations.get("parentmapper") is self.prop.mapper
):
right = right._annotate({"remote": True})
elif (
check_entities
and left._annotations.get("parentmapper") is self.prop.mapper
):
left = left._annotate({"remote": True})
else:
self._warn_non_column_elements()
return left, right
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {}, {"binary": visit_binary}
)
def _annotate_remote_distinct_selectables(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables are entirely
separate.
"""
def repl(element):
if self.child_persist_selectable.c.contains_column(element) and (
not self.parent_local_selectable.c.contains_column(element)
or self.child_local_selectable.c.contains_column(element)
):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl
)
def _warn_non_column_elements(self):
util.warn(
"Non-simple column elements in primary "
"join condition for property %s - consider using "
"remote() annotations to mark the remote side." % self.prop
)
def _annotate_local(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'local' annotations.
This annotates all column elements found
simultaneously in the parent table
and the join condition that don't have a
'remote' annotation, whether set up by
_annotate_remote() or user-defined.
"""
if self._has_annotation(self.primaryjoin, "local"):
return
if self._local_remote_pairs:
local_side = util.column_set(
[l for (l, r) in self._local_remote_pairs]
)
else:
local_side = util.column_set(self.parent_persist_selectable.c)
def locals_(elem):
if "remote" not in elem._annotations and elem in local_side:
return elem._annotate({"local": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, locals_
)
def _annotate_parentmapper(self):
if self.prop is None:
return
def parentmappers_(elem):
if "remote" in elem._annotations:
return elem._annotate({"parentmapper": self.prop.mapper})
elif "local" in elem._annotations:
return elem._annotate({"parentmapper": self.prop.parent})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, parentmappers_
)
def _check_remote_side(self):
if not self.local_remote_pairs:
raise sa_exc.ArgumentError(
"Relationship %s could "
"not determine any unambiguous local/remote column "
"pairs based on join condition and remote_side "
"arguments. "
"Consider using the remote() annotation to "
"accurately mark those elements of the join "
"condition that are on the remote side of "
"the relationship." % (self.prop,)
)
def _check_foreign_cols(self, join_condition, primary):
"""Check the foreign key columns collected and emit error
messages."""
can_sync = False
foreign_cols = self._gather_columns_with_annotation(
join_condition, "foreign"
)
has_foreign = bool(foreign_cols)
if primary:
can_sync = bool(self.synchronize_pairs)
else:
can_sync = bool(self.secondary_synchronize_pairs)
if (
self.support_sync
and can_sync
or (not self.support_sync and has_foreign)
):
return
# from here below is just determining the best error message
# to report. Check for a join condition using any operator
# (not just ==), perhaps they need to turn on "viewonly=True".
if self.support_sync and has_foreign and not can_sync:
err = (
"Could not locate any simple equality expressions "
"involving locally mapped foreign key columns for "
"%s join condition "
"'%s' on relationship %s."
% (
primary and "primary" or "secondary",
join_condition,
self.prop,
)
)
err += (
" Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or are "
"annotated in the join condition with the foreign() "
"annotation. To allow comparison operators other than "
"'==', the relationship can be marked as viewonly=True."
)
raise sa_exc.ArgumentError(err)
else:
err = (
"Could not locate any relevant foreign key columns "
"for %s join condition '%s' on relationship %s."
% (
primary and "primary" or "secondary",
join_condition,
self.prop,
)
)
err += (
" Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or are "
"annotated in the join condition with the foreign() "
"annotation."
)
raise sa_exc.ArgumentError(err)
def _determine_direction(self):
"""Determine if this relationship is one to many, many to one,
many to many.
"""
if self.secondaryjoin is not None:
self.direction = MANYTOMANY
else:
parentcols = util.column_set(self.parent_persist_selectable.c)
targetcols = util.column_set(self.child_persist_selectable.c)
# fk collection which suggests ONETOMANY.
onetomany_fk = targetcols.intersection(self.foreign_key_columns)
# fk collection which suggests MANYTOONE.
manytoone_fk = parentcols.intersection(self.foreign_key_columns)
if onetomany_fk and manytoone_fk:
# fks on both sides. test for overlap of local/remote
# with foreign key.
# we will gather columns directly from their annotations
# without deannotating, so that we can distinguish on a column
# that refers to itself.
# 1. columns that are both remote and FK suggest
# onetomany.
onetomany_local = self._gather_columns_with_annotation(
self.primaryjoin, "remote", "foreign"
)
# 2. columns that are FK but are not remote (e.g. local)
# suggest manytoone.
manytoone_local = set(
[
c
for c in self._gather_columns_with_annotation(
self.primaryjoin, "foreign"
)
if "remote" not in c._annotations
]
)
# 3. if both collections are present, remove columns that
# refer to themselves. This is for the case of
# and_(Me.id == Me.remote_id, Me.version == Me.version)
if onetomany_local and manytoone_local:
self_equated = self.remote_columns.intersection(
self.local_columns
)
onetomany_local = onetomany_local.difference(self_equated)
manytoone_local = manytoone_local.difference(self_equated)
# at this point, if only one or the other collection is
# present, we know the direction, otherwise it's still
# ambiguous.
if onetomany_local and not manytoone_local:
self.direction = ONETOMANY
elif manytoone_local and not onetomany_local:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError(
"Can't determine relationship"
" direction for relationship '%s' - foreign "
"key columns within the join condition are present "
"in both the parent and the child's mapped tables. "
"Ensure that only those columns referring "
"to a parent column are marked as foreign, "
"either via the foreign() annotation or "
"via the foreign_keys argument." % self.prop
)
elif onetomany_fk:
self.direction = ONETOMANY
elif manytoone_fk:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError(
"Can't determine relationship "
"direction for relationship '%s' - foreign "
"key columns are present in neither the parent "
"nor the child's mapped tables" % self.prop
)
def _deannotate_pairs(self, collection):
"""provide deannotation for the various lists of
pairs, so that using them in hashes doesn't incur
high-overhead __eq__() comparisons against
the original mapped columns.
"""
return [(x._deannotate(), y._deannotate()) for x, y in collection]
def _setup_pairs(self):
sync_pairs = []
lrp = util.OrderedSet([])
secondary_sync_pairs = []
def go(joincond, collection):
def visit_binary(binary, left, right):
if (
"remote" in right._annotations
and "remote" not in left._annotations
and self.can_be_synced_fn(left)
):
lrp.add((left, right))
elif (
"remote" in left._annotations
and "remote" not in right._annotations
and self.can_be_synced_fn(right)
):
lrp.add((right, left))
if binary.operator is operators.eq and self.can_be_synced_fn(
left, right
):
if "foreign" in right._annotations:
collection.append((left, right))
elif "foreign" in left._annotations:
collection.append((right, left))
visit_binary_product(visit_binary, joincond)
for joincond, collection in [
(self.primaryjoin, sync_pairs),
(self.secondaryjoin, secondary_sync_pairs),
]:
if joincond is None:
continue
go(joincond, collection)
self.local_remote_pairs = self._deannotate_pairs(lrp)
self.synchronize_pairs = self._deannotate_pairs(sync_pairs)
self.secondary_synchronize_pairs = self._deannotate_pairs(
secondary_sync_pairs
)
_track_overlapping_sync_targets = weakref.WeakKeyDictionary()
def _warn_for_conflicting_sync_targets(self):
if not self.support_sync:
return
# we would like to detect if we are synchronizing any column
# pairs in conflict with another relationship that wishes to sync
# an entirely different column to the same target. This is a
# very rare edge case so we will try to minimize the memory/overhead
# impact of this check
for from_, to_ in [
(from_, to_) for (from_, to_) in self.synchronize_pairs
] + [
(from_, to_) for (from_, to_) in self.secondary_synchronize_pairs
]:
# save ourselves a ton of memory and overhead by only
# considering columns that are subject to overlapping
# FK constraints at the core level. This condition can arise
# if multiple relationships overlap foreign() directly, but
# we're going to assume it's typically a ForeignKeyConstraint-
# level configuration that benefits from this warning.
if len(to_.foreign_keys) < 2:
continue
if to_ not in self._track_overlapping_sync_targets:
self._track_overlapping_sync_targets[
to_
] = weakref.WeakKeyDictionary({self.prop: from_})
else:
other_props = []
prop_to_from = self._track_overlapping_sync_targets[to_]
for pr, fr_ in prop_to_from.items():
if (
pr.mapper in mapperlib._mapper_registry
and (
self.prop._persists_for(pr.parent)
or pr._persists_for(self.prop.parent)
)
and fr_ is not from_
and pr not in self.prop._reverse_property
):
other_props.append((pr, fr_))
if other_props:
util.warn(
"relationship '%s' will copy column %s to column %s, "
"which conflicts with relationship(s): %s. "
"Consider applying "
"viewonly=True to read-only relationships, or provide "
"a primaryjoin condition marking writable columns "
"with the foreign() annotation."
% (
self.prop,
from_,
to_,
", ".join(
"'%s' (copies %s to %s)" % (pr, fr_, to_)
for (pr, fr_) in other_props
),
)
)
self._track_overlapping_sync_targets[to_][self.prop] = from_
@util.memoized_property
def remote_columns(self):
return self._gather_join_annotations("remote")
@util.memoized_property
def local_columns(self):
return self._gather_join_annotations("local")
@util.memoized_property
def foreign_key_columns(self):
return self._gather_join_annotations("foreign")
def _gather_join_annotations(self, annotation):
s = set(
self._gather_columns_with_annotation(self.primaryjoin, annotation)
)
if self.secondaryjoin is not None:
s.update(
self._gather_columns_with_annotation(
self.secondaryjoin, annotation
)
)
return {x._deannotate() for x in s}
def _gather_columns_with_annotation(self, clause, *annotation):
annotation = set(annotation)
return set(
[
col
for col in visitors.iterate(clause, {})
if annotation.issubset(col._annotations)
]
)
def join_targets(
self, source_selectable, dest_selectable, aliased, single_crit=None
):
"""Given a source and destination selectable, create a
join between them.
This takes into account aliasing the join clause
to reference the appropriate corresponding columns
in the target objects, as well as the extra child
criterion, equivalent column sets, etc.
"""
# place a barrier on the destination such that
# replacement traversals won't ever dig into it.
# its internal structure remains fixed
# regardless of context.
dest_selectable = _shallow_annotate(
dest_selectable, {"no_replacement_traverse": True}
)
primaryjoin, secondaryjoin, secondary = (
self.primaryjoin,
self.secondaryjoin,
self.secondary,
)
# adjust the join condition for single table inheritance,
# in the case that the join is to a subclass
# this is analogous to the
# "_adjust_for_single_table_inheritance()" method in Query.
if single_crit is not None:
if secondaryjoin is not None:
secondaryjoin = secondaryjoin & single_crit
else:
primaryjoin = primaryjoin & single_crit
if aliased:
if secondary is not None:
secondary = secondary.alias(flat=True)
primary_aliasizer = ClauseAdapter(
secondary, exclude_fn=_ColInAnnotations("local")
)
secondary_aliasizer = ClauseAdapter(
dest_selectable, equivalents=self.child_equivalents
).chain(primary_aliasizer)
if source_selectable is not None:
primary_aliasizer = ClauseAdapter(
secondary, exclude_fn=_ColInAnnotations("local")
).chain(
ClauseAdapter(
source_selectable,
equivalents=self.parent_equivalents,
)
)
secondaryjoin = secondary_aliasizer.traverse(secondaryjoin)
else:
primary_aliasizer = ClauseAdapter(
dest_selectable,
exclude_fn=_ColInAnnotations("local"),
equivalents=self.child_equivalents,
)
if source_selectable is not None:
primary_aliasizer.chain(
ClauseAdapter(
source_selectable,
exclude_fn=_ColInAnnotations("remote"),
equivalents=self.parent_equivalents,
)
)
secondary_aliasizer = None
primaryjoin = primary_aliasizer.traverse(primaryjoin)
target_adapter = secondary_aliasizer or primary_aliasizer
target_adapter.exclude_fn = None
else:
target_adapter = None
return (
primaryjoin,
secondaryjoin,
secondary,
target_adapter,
dest_selectable,
)
def create_lazy_clause(self, reverse_direction=False):
binds = util.column_dict()
equated_columns = util.column_dict()
has_secondary = self.secondaryjoin is not None
if has_secondary:
lookup = collections.defaultdict(list)
for l, r in self.local_remote_pairs:
lookup[l].append((l, r))
equated_columns[r] = l
elif not reverse_direction:
for l, r in self.local_remote_pairs:
equated_columns[r] = l
else:
for l, r in self.local_remote_pairs:
equated_columns[l] = r
def col_to_bind(col):
if (
(not reverse_direction and "local" in col._annotations)
or reverse_direction
and (
(has_secondary and col in lookup)
or (not has_secondary and "remote" in col._annotations)
)
):
if col not in binds:
binds[col] = sql.bindparam(
None, None, type_=col.type, unique=True
)
return binds[col]
return None
lazywhere = self.primaryjoin
if self.secondaryjoin is None or not reverse_direction:
lazywhere = visitors.replacement_traverse(
lazywhere, {}, col_to_bind
)
if self.secondaryjoin is not None:
secondaryjoin = self.secondaryjoin
if reverse_direction:
secondaryjoin = visitors.replacement_traverse(
secondaryjoin, {}, col_to_bind
)
lazywhere = sql.and_(lazywhere, secondaryjoin)
bind_to_col = {binds[col].key: col for col in binds}
return lazywhere, bind_to_col, equated_columns
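# Editor's note: an illustrative aside, not part of the original source.
# For a simple one-to-many primaryjoin such as users.id == addresses.user_id,
# create_lazy_clause() replaces the "local" column (users.id) with an
# anonymous bound parameter, yielding roughly ":param_1 = addresses.user_id";
# bind_to_col then maps that bind parameter's key back to users.id so the
# lazy loader can populate it from the parent row at load time.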
class _ColInAnnotations(object):
"""Seralizable equivalent to:
lambda c: "name" in c._annotations
"""
def __init__(self, name):
self.name = name
def __call__(self, c):
return self.name in c._annotations
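# Editor's note: the sketch below is an illustrative addition, not part of
# the original SQLAlchemy module.  It shows the foreign()/remote()
# annotations that the error messages above direct users toward, using an
# invented self-referential model whose referring column has no ForeignKey.
def _example_foreign_remote_annotations():
    from sqlalchemy import Column, Integer
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import foreign, relationship, remote

    Base = declarative_base()

    class Node(Base):
        __tablename__ = "node"
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer)

        # parent_id carries no ForeignKey, so the join condition is
        # annotated explicitly: parent_id is the "foreign" column and the
        # parent's primary key is the "remote" side of this many-to-one.
        parent = relationship(
            "Node", primaryjoin=remote(id) == foreign(parent_id)
        )

    return Node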
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/orm/state.py
|
# orm/state.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines instrumentation of instances.
This module is usually not directly visible to user applications, but
defines a large part of the ORM's interactivity.
"""
import weakref
from . import base
from . import exc as orm_exc
from . import interfaces
from .base import ATTR_WAS_SET
from .base import INIT_OK
from .base import NEVER_SET
from .base import NO_VALUE
from .base import PASSIVE_NO_INITIALIZE
from .base import PASSIVE_NO_RESULT
from .base import PASSIVE_OFF
from .base import SQL_OK
from .path_registry import PathRegistry
from .. import exc as sa_exc
from .. import inspection
from .. import util
@inspection._self_inspects
class InstanceState(interfaces.InspectionAttrInfo):
"""tracks state information at the instance level.
The :class:`.InstanceState` is a key object used by the
SQLAlchemy ORM in order to track the state of an object;
it is created the moment an object is instantiated, typically
as a result of :term:`instrumentation` which SQLAlchemy applies
to the ``__init__()`` method of the class.
:class:`.InstanceState` is also a semi-public object,
available for runtime inspection as to the state of a
mapped instance, including information such as its current
status within a particular :class:`.Session` and details
about data on individual attributes. The public API
for acquiring a :class:`.InstanceState` object
is the :func:`_sa.inspect` system::
>>> from sqlalchemy import inspect
>>> insp = inspect(some_mapped_object)
.. seealso::
:ref:`core_inspection_toplevel`
"""
session_id = None
key = None
runid = None
load_options = util.EMPTY_SET
load_path = PathRegistry.root
insert_order = None
_strong_obj = None
modified = False
expired = False
_deleted = False
_load_pending = False
_orphaned_outside_of_session = False
is_instance = True
identity_token = None
_last_known_values = ()
callables = ()
"""A namespace where a per-state loader callable can be associated.
In SQLAlchemy 1.0, this is only used for lazy loaders / deferred
loaders that were set up via query option.
Previously, callables was used also to indicate expired attributes
by storing a link to the InstanceState itself in this dictionary.
This role is now handled by the expired_attributes set.
"""
def __init__(self, obj, manager):
self.class_ = obj.__class__
self.manager = manager
self.obj = weakref.ref(obj, self._cleanup)
self.committed_state = {}
self.expired_attributes = set()
expired_attributes = None
"""The set of keys which are 'expired' to be loaded by
the manager's deferred scalar loader, assuming no pending
changes.
see also the ``unmodified`` collection which is intersected
against this set when a refresh operation occurs."""
@util.memoized_property
def attrs(self):
"""Return a namespace representing each attribute on
the mapped object, including its current value
and history.
The returned object is an instance of :class:`.AttributeState`.
This object allows inspection of the current data
within an attribute as well as attribute history
since the last flush.
"""
return util.ImmutableProperties(
dict((key, AttributeState(self, key)) for key in self.manager)
)
@property
def transient(self):
"""Return true if the object is :term:`transient`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is None and not self._attached
@property
def pending(self):
"""Return true if the object is :term:`pending`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is None and self._attached
@property
def deleted(self):
"""Return true if the object is :term:`deleted`.
An object that is in the deleted state is guaranteed to
not be within the :attr:`.Session.identity_map` of its parent
:class:`.Session`; however if the session's transaction is rolled
back, the object will be restored to the persistent state and
the identity map.
.. note::
The :attr:`.InstanceState.deleted` attribute refers to a specific
state of the object that occurs between the "persistent" and
"detached" states; once the object is :term:`detached`, the
:attr:`.InstanceState.deleted` attribute **no longer returns
True**; in order to detect that a state was deleted, regardless
of whether or not the object is associated with a
:class:`.Session`, use the :attr:`.InstanceState.was_deleted`
accessor.
.. versionadded:: 1.1
.. seealso::
:ref:`session_object_states`
"""
return self.key is not None and self._attached and self._deleted
@property
def was_deleted(self):
"""Return True if this object is or was previously in the
"deleted" state and has not been reverted to persistent.
This flag returns True once the object was deleted in flush.
When the object is expunged from the session either explicitly
or via transaction commit and enters the "detached" state,
this flag will continue to report True.
.. versionadded:: 1.1 - added a local method form of
:func:`.orm.util.was_deleted`.
.. seealso::
:attr:`.InstanceState.deleted` - refers to the "deleted" state
:func:`.orm.util.was_deleted` - standalone function
:ref:`session_object_states`
"""
return self._deleted
@property
def persistent(self):
"""Return true if the object is :term:`persistent`.
An object that is in the persistent state is guaranteed to
be within the :attr:`.Session.identity_map` of its parent
:class:`.Session`.
.. versionchanged:: 1.1 The :attr:`.InstanceState.persistent`
accessor no longer returns True for an object that was
"deleted" within a flush; use the :attr:`.InstanceState.deleted`
accessor to detect this state. This allows the "persistent"
state to guarantee membership in the identity map.
.. seealso::
:ref:`session_object_states`
"""
return self.key is not None and self._attached and not self._deleted
@property
def detached(self):
"""Return true if the object is :term:`detached`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is not None and not self._attached
@property
@util.dependencies("sqlalchemy.orm.session")
def _attached(self, sessionlib):
return (
self.session_id is not None
and self.session_id in sessionlib._sessions
)
def _track_last_known_value(self, key):
"""Track the last known value of a particular key after expiration
operations.
.. versionadded:: 1.3
"""
if key not in self._last_known_values:
self._last_known_values = dict(self._last_known_values)
self._last_known_values[key] = NO_VALUE
@property
@util.dependencies("sqlalchemy.orm.session")
def session(self, sessionlib):
"""Return the owning :class:`.Session` for this instance,
or ``None`` if none available.
Note that the result here can in some cases be *different*
from that of ``obj in session``; an object that's been deleted
will report as not ``in session``; however, if the transaction is
still in progress, this attribute will still refer to that session.
Only when the transaction is completed does the object become
fully detached under normal circumstances.
"""
return sessionlib._state_session(self)
@property
def object(self):
"""Return the mapped object represented by this
:class:`.InstanceState`."""
return self.obj()
@property
def identity(self):
"""Return the mapped identity of the mapped object.
This is the primary key identity as persisted by the ORM
which can always be passed directly to
:meth:`_query.Query.get`.
Returns ``None`` if the object has no primary key identity.
.. note::
An object which is :term:`transient` or :term:`pending`
does **not** have a mapped identity until it is flushed,
even if its attributes include primary key values.
"""
if self.key is None:
return None
else:
return self.key[1]
@property
def identity_key(self):
"""Return the identity key for the mapped object.
This is the key used to locate the object within
the :attr:`.Session.identity_map` mapping. It contains
the identity as returned by :attr:`.identity` within it.
"""
# TODO: just change .key to .identity_key across
# the board ? probably
return self.key
@util.memoized_property
def parents(self):
return {}
@util.memoized_property
def _pending_mutations(self):
return {}
@util.memoized_property
def mapper(self):
"""Return the :class:`_orm.Mapper` used for this mapped object."""
return self.manager.mapper
@property
def has_identity(self):
"""Return ``True`` if this object has an identity key.
This should always have the same value as the
expression ``state.persistent or state.detached``.
"""
return bool(self.key)
@classmethod
def _detach_states(self, states, session, to_transient=False):
persistent_to_detached = (
session.dispatch.persistent_to_detached or None
)
deleted_to_detached = session.dispatch.deleted_to_detached or None
pending_to_transient = session.dispatch.pending_to_transient or None
persistent_to_transient = (
session.dispatch.persistent_to_transient or None
)
for state in states:
deleted = state._deleted
pending = state.key is None
persistent = not pending and not deleted
state.session_id = None
if to_transient and state.key:
del state.key
if persistent:
if to_transient:
if persistent_to_transient is not None:
persistent_to_transient(session, state)
elif persistent_to_detached is not None:
persistent_to_detached(session, state)
elif deleted and deleted_to_detached is not None:
deleted_to_detached(session, state)
elif pending and pending_to_transient is not None:
pending_to_transient(session, state)
state._strong_obj = None
def _detach(self, session=None):
if session:
InstanceState._detach_states([self], session)
else:
self.session_id = self._strong_obj = None
def _dispose(self):
self._detach()
del self.obj
def _cleanup(self, ref):
"""Weakref callback cleanup.
This callable cleans out the state when it is being garbage
collected.
this _cleanup **assumes** that there are no strong refs to us!
Will not work otherwise!
"""
# Python builtins become undefined during interpreter shutdown.
# Guard against exceptions during this phase, as the method cannot
# proceed in any case if builtins have been undefined.
if dict is None:
return
instance_dict = self._instance_dict()
if instance_dict is not None:
instance_dict._fast_discard(self)
del self._instance_dict
# we can't possibly be in instance_dict._modified
# b.c. this is weakref cleanup only, that set
# is strong referencing!
# assert self not in instance_dict._modified
self.session_id = self._strong_obj = None
del self.obj
def obj(self):
return None
@property
def dict(self):
"""Return the instance dict used by the object.
Under normal circumstances, this is always synonymous
with the ``__dict__`` attribute of the mapped object,
unless an alternative instrumentation system has been
configured.
In the case that the actual object has been garbage
collected, this accessor returns a blank dictionary.
"""
o = self.obj()
if o is not None:
return base.instance_dict(o)
else:
return {}
def _initialize_instance(*mixed, **kwargs):
self, instance, args = mixed[0], mixed[1], mixed[2:] # noqa
manager = self.manager
manager.dispatch.init(self, args, kwargs)
try:
return manager.original_init(*mixed[1:], **kwargs)
except:
with util.safe_reraise():
manager.dispatch.init_failure(self, args, kwargs)
def get_history(self, key, passive):
return self.manager[key].impl.get_history(self, self.dict, passive)
def get_impl(self, key):
return self.manager[key].impl
def _get_pending_mutation(self, key):
if key not in self._pending_mutations:
self._pending_mutations[key] = PendingCollection()
return self._pending_mutations[key]
def __getstate__(self):
state_dict = {"instance": self.obj()}
state_dict.update(
(k, self.__dict__[k])
for k in (
"committed_state",
"_pending_mutations",
"modified",
"expired",
"callables",
"key",
"parents",
"load_options",
"class_",
"expired_attributes",
"info",
)
if k in self.__dict__
)
if self.load_path:
state_dict["load_path"] = self.load_path.serialize()
state_dict["manager"] = self.manager._serialize(self, state_dict)
return state_dict
def __setstate__(self, state_dict):
inst = state_dict["instance"]
if inst is not None:
self.obj = weakref.ref(inst, self._cleanup)
self.class_ = inst.__class__
else:
# None being possible here is generally new as of 0.7.4
# due to storage of state in "parents". "class_" is
# also new.
self.obj = None
self.class_ = state_dict["class_"]
self.committed_state = state_dict.get("committed_state", {})
self._pending_mutations = state_dict.get("_pending_mutations", {})
self.parents = state_dict.get("parents", {})
self.modified = state_dict.get("modified", False)
self.expired = state_dict.get("expired", False)
if "info" in state_dict:
self.info.update(state_dict["info"])
if "callables" in state_dict:
self.callables = state_dict["callables"]
try:
self.expired_attributes = state_dict["expired_attributes"]
except KeyError:
self.expired_attributes = set()
# 0.9 and earlier compat
for k in list(self.callables):
if self.callables[k] is self:
self.expired_attributes.add(k)
del self.callables[k]
else:
if "expired_attributes" in state_dict:
self.expired_attributes = state_dict["expired_attributes"]
else:
self.expired_attributes = set()
self.__dict__.update(
[
(k, state_dict[k])
for k in ("key", "load_options")
if k in state_dict
]
)
if self.key:
try:
self.identity_token = self.key[2]
except IndexError:
# 1.1 and earlier compat before identity_token
assert len(self.key) == 2
self.key = self.key + (None,)
self.identity_token = None
if "load_path" in state_dict:
self.load_path = PathRegistry.deserialize(state_dict["load_path"])
state_dict["manager"](self, inst, state_dict)
def _reset(self, dict_, key):
"""Remove the given attribute and any
callables associated with it."""
old = dict_.pop(key, None)
if old is not None and self.manager[key].impl.collection:
self.manager[key].impl._invalidate_collection(old)
self.expired_attributes.discard(key)
if self.callables:
self.callables.pop(key, None)
def _copy_callables(self, from_):
if "callables" in from_.__dict__:
self.callables = dict(from_.callables)
@classmethod
def _instance_level_callable_processor(cls, manager, fn, key):
impl = manager[key].impl
if impl.collection:
def _set_callable(state, dict_, row):
if "callables" not in state.__dict__:
state.callables = {}
old = dict_.pop(key, None)
if old is not None:
impl._invalidate_collection(old)
state.callables[key] = fn
else:
def _set_callable(state, dict_, row):
if "callables" not in state.__dict__:
state.callables = {}
state.callables[key] = fn
return _set_callable
def _expire(self, dict_, modified_set):
self.expired = True
if self.modified:
modified_set.discard(self)
self.committed_state.clear()
self.modified = False
self._strong_obj = None
if "_pending_mutations" in self.__dict__:
del self.__dict__["_pending_mutations"]
if "parents" in self.__dict__:
del self.__dict__["parents"]
self.expired_attributes.update(
[
impl.key
for impl in self.manager._scalar_loader_impls
if impl.expire_missing or impl.key in dict_
]
)
if self.callables:
for k in self.expired_attributes.intersection(self.callables):
del self.callables[k]
for k in self.manager._collection_impl_keys.intersection(dict_):
collection = dict_.pop(k)
collection._sa_adapter.invalidated = True
if self._last_known_values:
self._last_known_values.update(
(k, dict_[k]) for k in self._last_known_values if k in dict_
)
for key in self.manager._all_key_set.intersection(dict_):
del dict_[key]
self.manager.dispatch.expire(self, None)
def _expire_attributes(self, dict_, attribute_names, no_loader=False):
pending = self.__dict__.get("_pending_mutations", None)
callables = self.callables
for key in attribute_names:
impl = self.manager[key].impl
if impl.accepts_scalar_loader:
if no_loader and (impl.callable_ or key in callables):
continue
self.expired_attributes.add(key)
if callables and key in callables:
del callables[key]
old = dict_.pop(key, NO_VALUE)
if impl.collection and old is not NO_VALUE:
impl._invalidate_collection(old)
if (
self._last_known_values
and key in self._last_known_values
and old is not NO_VALUE
):
self._last_known_values[key] = old
self.committed_state.pop(key, None)
if pending:
pending.pop(key, None)
self.manager.dispatch.expire(self, attribute_names)
def _load_expired(self, state, passive):
"""__call__ allows the InstanceState to act as a deferred
callable for loading expired attributes, which is also
serializable (picklable).
"""
if not passive & SQL_OK:
return PASSIVE_NO_RESULT
toload = self.expired_attributes.intersection(self.unmodified)
self.manager.deferred_scalar_loader(self, toload)
# if the loader failed, or this
# instance state didn't have an identity,
# the attributes still might be in the callables
# dict. ensure they are removed.
self.expired_attributes.clear()
return ATTR_WAS_SET
@property
def unmodified(self):
"""Return the set of keys which have no uncommitted changes"""
return set(self.manager).difference(self.committed_state)
def unmodified_intersection(self, keys):
"""Return self.unmodified.intersection(keys)."""
return (
set(keys)
.intersection(self.manager)
.difference(self.committed_state)
)
@property
def unloaded(self):
"""Return the set of keys which do not have a loaded value.
This includes expired attributes and any other attribute that
was never populated or modified.
"""
return (
set(self.manager)
.difference(self.committed_state)
.difference(self.dict)
)
@property
def unloaded_expirable(self):
"""Return the set of keys which do not have a loaded value.
This includes expired attributes and any other attribute that
was never populated or modified.
"""
return self.unloaded.intersection(
attr
for attr in self.manager
if self.manager[attr].impl.expire_missing
)
@property
def _unloaded_non_object(self):
return self.unloaded.intersection(
attr
for attr in self.manager
if self.manager[attr].impl.accepts_scalar_loader
)
def _instance_dict(self):
return None
def _modified_event(
self, dict_, attr, previous, collection=False, is_userland=False
):
if attr:
if not attr.send_modified_events:
return
if is_userland and attr.key not in dict_:
raise sa_exc.InvalidRequestError(
"Can't flag attribute '%s' modified; it's not present in "
"the object state" % attr.key
)
if attr.key not in self.committed_state or is_userland:
if collection:
if previous is NEVER_SET:
if attr.key in dict_:
previous = dict_[attr.key]
if previous not in (None, NO_VALUE, NEVER_SET):
previous = attr.copy(previous)
self.committed_state[attr.key] = previous
if attr.key in self._last_known_values:
self._last_known_values[attr.key] = NO_VALUE
# assert self._strong_obj is None or self.modified
if (self.session_id and self._strong_obj is None) or not self.modified:
self.modified = True
instance_dict = self._instance_dict()
if instance_dict:
instance_dict._modified.add(self)
# only create _strong_obj link if attached
# to a session
inst = self.obj()
if self.session_id:
self._strong_obj = inst
if inst is None and attr:
raise orm_exc.ObjectDereferencedError(
"Can't emit change event for attribute '%s' - "
"parent object of type %s has been garbage "
"collected."
% (self.manager[attr.key], base.state_class_str(self))
)
def _commit(self, dict_, keys):
"""Commit attributes.
This is used by a partial-attribute load operation to mark committed
those attributes which were refreshed from the database.
Attributes marked as "expired" can potentially remain "expired" after
this step if a value was not populated in state.dict.
"""
for key in keys:
self.committed_state.pop(key, None)
self.expired = False
self.expired_attributes.difference_update(
set(keys).intersection(dict_)
)
# the per-keys commit removes object-level callables,
# while that of commit_all does not. it's not clear
# if this behavior has a clear rationale, however tests do
# ensure this is what it does.
if self.callables:
for key in (
set(self.callables).intersection(keys).intersection(dict_)
):
del self.callables[key]
def _commit_all(self, dict_, instance_dict=None):
"""commit all attributes unconditionally.
This is used after a flush() or a full load/refresh
to remove all pending state from the instance.
- all attributes are marked as "committed"
- the "strong dirty reference" is removed
- the "modified" flag is set to False
- any "expired" markers for scalar attributes loaded are removed.
- lazy load callables for objects / collections *stay*
Attributes marked as "expired" can potentially remain
"expired" after this step if a value was not populated in state.dict.
"""
self._commit_all_states([(self, dict_)], instance_dict)
@classmethod
def _commit_all_states(self, iter_, instance_dict=None):
"""Mass / highly inlined version of commit_all()."""
for state, dict_ in iter_:
state_dict = state.__dict__
state.committed_state.clear()
if "_pending_mutations" in state_dict:
del state_dict["_pending_mutations"]
state.expired_attributes.difference_update(dict_)
if instance_dict and state.modified:
instance_dict._modified.discard(state)
state.modified = state.expired = False
state._strong_obj = None
class AttributeState(object):
"""Provide an inspection interface corresponding
to a particular attribute on a particular mapped object.
The :class:`.AttributeState` object is accessed
via the :attr:`.InstanceState.attrs` collection
of a particular :class:`.InstanceState`::
from sqlalchemy import inspect
insp = inspect(some_mapped_object)
attr_state = insp.attrs.some_attribute
"""
def __init__(self, state, key):
self.state = state
self.key = key
@property
def loaded_value(self):
"""The current value of this attribute as loaded from the database.
If the value has not been loaded, or is otherwise not present
in the object's dictionary, returns NO_VALUE.
"""
return self.state.dict.get(self.key, NO_VALUE)
@property
def value(self):
"""Return the value of this attribute.
This operation is equivalent to accessing the object's
attribute directly or via ``getattr()``, and will fire
off any pending loader callables if needed.
"""
return self.state.manager[self.key].__get__(
self.state.obj(), self.state.class_
)
@property
def history(self):
"""Return the current **pre-flush** change history for
this attribute, via the :class:`.History` interface.
This method will **not** emit loader callables if the value of the
attribute is unloaded.
.. note::
The attribute history system tracks changes on a **per flush
basis**. Each time the :class:`.Session` is flushed, the history
of each attribute is reset to empty. The :class:`.Session` by
default autoflushes each time a :class:`_query.Query` is invoked.
For
options on how to control this, see :ref:`session_flushing`.
.. seealso::
:meth:`.AttributeState.load_history` - retrieve history
using loader callables if the value is not locally present.
:func:`.attributes.get_history` - underlying function
"""
return self.state.get_history(self.key, PASSIVE_NO_INITIALIZE)
def load_history(self):
"""Return the current **pre-flush** change history for
this attribute, via the :class:`.History` interface.
This method **will** emit loader callables if the value of the
attribute is unloaded.
.. note::
The attribute history system tracks changes on a **per flush
basis**. Each time the :class:`.Session` is flushed, the history
of each attribute is reset to empty. The :class:`.Session` by
default autoflushes each time a :class:`_query.Query` is invoked.
For
options on how to control this, see :ref:`session_flushing`.
.. seealso::
:attr:`.AttributeState.history`
:func:`.attributes.get_history` - underlying function
.. versionadded:: 0.9.0
"""
return self.state.get_history(self.key, PASSIVE_OFF ^ INIT_OK)
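# Editor's note: an illustrative aside, not part of the original source.
# The History value returned by the two accessors above is a three-part
# structure of (added, unchanged, deleted) collections; for example,
# changing a loaded attribute from "x" to "y" before a flush yields roughly
# History(added=['y'], unchanged=(), deleted=['x']).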
class PendingCollection(object):
"""A writable placeholder for an unloaded collection.
Stores items appended to and removed from a collection that has not yet
been loaded. When the collection is loaded, the changes stored in
PendingCollection are applied to it to produce the final result.
"""
def __init__(self):
self.deleted_items = util.IdentitySet()
self.added_items = util.OrderedIdentitySet()
def append(self, value):
if value in self.deleted_items:
self.deleted_items.remove(value)
else:
self.added_items.add(value)
def remove(self, value):
if value in self.added_items:
self.added_items.remove(value)
else:
self.deleted_items.add(value)
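# Editor's note: the sketch below is an illustrative addition, not part of
# the original SQLAlchemy module.  It exercises the public inspection API
# described in the docstrings above, using an invented mapped class and an
# in-memory SQLite database.
def _example_instance_state_inspection():
    from sqlalchemy import Column, Integer, String, create_engine, inspect
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import Session

    Base = declarative_base()

    class User(Base):
        __tablename__ = "user"
        id = Column(Integer, primary_key=True)
        name = Column(String)

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    session = Session(engine)

    user = User(name="spongebob")
    insp = inspect(user)      # the InstanceState for this object
    assert insp.transient     # no identity key, not attached to a Session

    session.add(user)
    assert insp.pending       # attached, but not yet flushed

    session.commit()
    assert insp.persistent    # has an identity key and an owning Session
    assert insp.identity == (user.id,)

    # AttributeState: per-attribute value and pre-flush change history
    user.name = "squidward"
    assert insp.attrs.name.history.has_changes()
    return insp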
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/engine/interfaces.py
|
# engine/interfaces.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Define core interfaces used by the engine system."""
from .. import util
from ..sql.compiler import Compiled # noqa
from ..sql.compiler import TypeCompiler # noqa
class Dialect(object):
"""Define the behavior of a specific database and DB-API combination.
Any aspect of metadata definition, SQL query generation,
execution, result-set handling, or anything else which varies
between databases is defined under the general category of the
Dialect. The Dialect acts as a factory for other
database-specific object implementations including
ExecutionContext, Compiled, DefaultGenerator, and TypeEngine.
.. note:: Third party dialects should not subclass :class:`.Dialect`
directly. Instead, subclass :class:`.default.DefaultDialect` or
descendant class.
All dialects include the following attributes. There are many other
attributes that may be supported as well:
``name``
identifying name for the dialect from a DBAPI-neutral point of view
(i.e. 'sqlite')
``driver``
identifying name for the dialect's DBAPI
``positional``
True if the paramstyle for this Dialect is positional.
``paramstyle``
the paramstyle to be used (some DB-APIs support multiple
paramstyles).
``encoding``
type of encoding to use for unicode, usually defaults to
'utf-8'.
``statement_compiler``
a :class:`.Compiled` class used to compile SQL statements
``ddl_compiler``
a :class:`.Compiled` class used to compile DDL statements
``server_version_info``
a tuple containing a version number for the DB backend in use.
This value is only available for supporting dialects, and is
typically populated during the initial connection to the database.
``default_schema_name``
the name of the default schema. This value is only available for
supporting dialects, and is typically populated during the
initial connection to the database.
``execution_ctx_cls``
a :class:`.ExecutionContext` class used to handle statement execution
``execute_sequence_format``
either the 'tuple' or 'list' type, depending on what cursor.execute()
accepts for the second argument (they vary).
``preparer``
a :class:`~sqlalchemy.sql.compiler.IdentifierPreparer` class used to
quote identifiers.
``supports_alter``
``True`` if the database supports ``ALTER TABLE`` - used only for
generating foreign key constraints in certain circumstances
``max_identifier_length``
The maximum length of identifier names.
``supports_sane_rowcount``
Indicate whether the dialect properly implements rowcount for
``UPDATE`` and ``DELETE`` statements.
``supports_sane_multi_rowcount``
Indicate whether the dialect properly implements rowcount for
``UPDATE`` and ``DELETE`` statements when executed via
executemany.
``preexecute_autoincrement_sequences``
True if 'implicit' primary key functions must be executed separately
in order to get their value. This is currently oriented towards
PostgreSQL.
``implicit_returning``
use RETURNING or equivalent during INSERT execution in order to load
newly generated primary keys and other column defaults in one execution,
which are then available via inserted_primary_key.
If an insert statement has returning() specified explicitly,
the "implicit" functionality is not used and inserted_primary_key
will not be available.
``colspecs``
A dictionary of TypeEngine classes from sqlalchemy.types mapped
to subclasses that are specific to the dialect class. This
dictionary is class-level only and is not accessed from the
dialect instance itself.
``supports_default_values``
Indicates if the construct ``INSERT INTO tablename DEFAULT
VALUES`` is supported
``supports_sequences``
Indicates if the dialect supports CREATE SEQUENCE or similar.
``sequences_optional``
If True, indicates if the "optional" flag on the Sequence() construct
should signal to not generate a CREATE SEQUENCE. Applies only to
dialects that support sequences. Currently used only to allow PostgreSQL
SERIAL to be used on a column that specifies Sequence() for usage on
other backends.
``supports_native_enum``
Indicates if the dialect supports a native ENUM construct.
This will prevent types.Enum from generating a CHECK
constraint when that type is used.
``supports_native_boolean``
Indicates if the dialect supports a native boolean construct.
This will prevent types.Boolean from generating a CHECK
constraint when that type is used.
``dbapi_exception_translation_map``
A dictionary of names that will contain as values the names of
pep-249 exceptions ("IntegrityError", "OperationalError", etc)
keyed to alternate class names, to support the case where a
DBAPI has exception classes that aren't named as they are
referred to (e.g. IntegrityError = MyException). In the vast
majority of cases this dictionary is empty.
.. versionadded:: 1.0.5
"""
_has_events = False
def create_connect_args(self, url):
"""Build DB-API compatible connection arguments.
Given a :class:`.URL` object, returns a tuple
consisting of a ``(*args, **kwargs)`` suitable to send directly
to the dbapi's connect function. The arguments are sent to the
:meth:`.Dialect.connect` method which then runs the DBAPI-level
``connect()`` function.
The method typically makes use of the
:meth:`.URL.translate_connect_args`
method in order to generate a dictionary of options.
The default implementation is::
def create_connect_args(self, url):
opts = url.translate_connect_args()
opts.update(url.query)
return [[], opts]
:param url: a :class:`.URL` object
:return: a tuple of ``(*args, **kwargs)`` which will be passed to the
:meth:`.Dialect.connect` method.
.. seealso::
:meth:`.URL.translate_connect_args`
"""
raise NotImplementedError()
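# Editor's note: an illustrative aside, not part of the original source.
# For a hypothetical URL such as "postgresql://scott:tiger@localhost/test",
# a dialect's create_connect_args() might return something along the lines
# of ([], {"host": "localhost", "database": "test", "user": "scott",
# "password": "tiger"}), with the exact keyword names depending on the DBAPI.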
@classmethod
def type_descriptor(cls, typeobj):
"""Transform a generic type to a dialect-specific type.
Dialect classes will usually use the
:func:`_types.adapt_type` function in the types module to
accomplish this.
The returned result is cached *per dialect class* so can
contain no dialect-instance state.
"""
raise NotImplementedError()
def initialize(self, connection):
"""Called during strategized creation of the dialect with a
connection.
Allows dialects to configure options based on server version info or
other properties.
The connection passed here is a SQLAlchemy Connection object,
with full capabilities.
The initialize() method of the base dialect should be called via
super().
"""
pass
def reflecttable(
self, connection, table, include_columns, exclude_columns, resolve_fks
):
"""Load table description from the database.
Given a :class:`_engine.Connection` and a
:class:`~sqlalchemy.schema.Table` object, reflect its columns and
properties from the database.
The implementation of this method is provided by
:meth:`.DefaultDialect.reflecttable`, which makes use of
:class:`_reflection.Inspector` to retrieve column information.
Dialects should **not** seek to implement this method, and should
instead implement individual schema inspection operations such as
:meth:`.Dialect.get_columns`, :meth:`.Dialect.get_pk_constraint`,
etc.
"""
raise NotImplementedError()
def get_columns(self, connection, table_name, schema=None, **kw):
"""Return information about columns in `table_name`.
Given a :class:`_engine.Connection`, a string
`table_name`, and an optional string `schema`, return column
information as a list of dictionaries with these keys:
name
the column's name
type
[sqlalchemy.types#TypeEngine]
nullable
boolean
default
the column's default value
autoincrement
boolean
sequence
a dictionary of the form
{'name' : str, 'start' :int, 'increment': int, 'minvalue': int,
'maxvalue': int, 'nominvalue': bool, 'nomaxvalue': bool,
'cycle': bool, 'cache': int, 'order': bool}
Additional column attributes may be present.
"""
raise NotImplementedError()
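# Editor's note: an illustrative aside, not part of the original source.
# Using the keys documented above, a returned entry might look like
# {"name": "id", "type": INTEGER(), "nullable": False, "default": None,
#  "autoincrement": True}; the column shown is hypothetical.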
@util.deprecated(
"0.8",
"The :meth:`.Dialect.get_primary_keys` method is deprecated and "
"will be removed in a future release. Please refer to the "
":meth:`.Dialect.get_pk_constraint` method. ",
)
def get_primary_keys(self, connection, table_name, schema=None, **kw):
"""Return information about primary keys in `table_name`.
"""
raise NotImplementedError()
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
"""Return information about the primary key constraint on
`table_name`.
Given a :class:`_engine.Connection`, a string
`table_name`, and an optional string `schema`, return primary
key information as a dictionary with these keys:
constrained_columns
a list of column names that make up the primary key
name
optional name of the primary key constraint.
"""
raise NotImplementedError()
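# Editor's note: an illustrative aside, not part of the original source.
# A hypothetical return value using the keys documented above:
# {"constrained_columns": ["id"], "name": "pk_my_table"}.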
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
"""Return information about foreign_keys in `table_name`.
Given a :class:`_engine.Connection`, a string
`table_name`, and an optional string `schema`, return foreign
key information as a list of dicts with these keys:
name
the constraint's name
constrained_columns
a list of column names that make up the foreign key
referred_schema
the name of the referred schema
referred_table
the name of the referred table
referred_columns
a list of column names in the referred table that correspond to
constrained_columns
"""
raise NotImplementedError()
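# Editor's note: an illustrative aside, not part of the original source.
# A hypothetical entry using the keys documented above:
# {"name": "fk_address_user_id", "constrained_columns": ["user_id"],
#  "referred_schema": None, "referred_table": "user",
#  "referred_columns": ["id"]}.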
def get_table_names(self, connection, schema=None, **kw):
"""Return a list of table names for `schema`."""
raise NotImplementedError()
def get_temp_table_names(self, connection, schema=None, **kw):
"""Return a list of temporary table names on the given connection,
if supported by the underlying backend.
"""
raise NotImplementedError()
def get_view_names(self, connection, schema=None, **kw):
"""Return a list of all view names available in the database.
schema:
Optional, retrieve names from a non-default schema.
"""
raise NotImplementedError()
def get_temp_view_names(self, connection, schema=None, **kw):
"""Return a list of temporary view names on the given connection,
if supported by the underlying backend.
"""
raise NotImplementedError()
def get_view_definition(self, connection, view_name, schema=None, **kw):
"""Return view definition.
Given a :class:`_engine.Connection`, a string
`view_name`, and an optional string `schema`, return the view
definition.
"""
raise NotImplementedError()
def get_indexes(self, connection, table_name, schema=None, **kw):
"""Return information about indexes in `table_name`.
Given a :class:`_engine.Connection`, a string
`table_name` and an optional string `schema`, return index
information as a list of dictionaries with these keys:
name
the index's name
column_names
list of column names in order
unique
boolean
"""
raise NotImplementedError()
def get_unique_constraints(
self, connection, table_name, schema=None, **kw
):
r"""Return information about unique constraints in `table_name`.
Given a string `table_name` and an optional string `schema`, return
unique constraint information as a list of dicts with these keys:
name
the unique constraint's name
column_names
list of column names in order
\**kw
other options passed to the dialect's get_unique_constraints()
method.
.. versionadded:: 0.9.0
"""
raise NotImplementedError()
def get_check_constraints(self, connection, table_name, schema=None, **kw):
r"""Return information about check constraints in `table_name`.
Given a string `table_name` and an optional string `schema`, return
check constraint information as a list of dicts with these keys:
name
the check constraint's name
sqltext
the check constraint's SQL expression
\**kw
other options passed to the dialect's get_check_constraints()
method.
.. versionadded:: 1.1.0
"""
raise NotImplementedError()
def get_table_comment(self, connection, table_name, schema=None, **kw):
r"""Return the "comment" for the table identified by `table_name`.
Given a string `table_name` and an optional string `schema`, return
table comment information as a dictionary with this key:
text
text of the comment
Raises ``NotImplementedError`` for dialects that don't support
comments.
.. versionadded:: 1.2
"""
raise NotImplementedError()
def normalize_name(self, name):
"""convert the given name to lowercase if it is detected as
case insensitive.
this method is only used if the dialect defines
requires_name_normalize=True.
"""
raise NotImplementedError()
def denormalize_name(self, name):
"""convert the given name to a case insensitive identifier
for the backend if it is an all-lowercase name.
this method is only used if the dialect defines
requires_name_normalize=True.
"""
raise NotImplementedError()
def has_table(self, connection, table_name, schema=None):
"""Check the existence of a particular table in the database.
Given a :class:`_engine.Connection` object and a string
`table_name`, return True if the given table (possibly within
the specified `schema`) exists in the database, False
otherwise.
"""
raise NotImplementedError()
def has_sequence(self, connection, sequence_name, schema=None):
"""Check the existence of a particular sequence in the database.
Given a :class:`_engine.Connection` object and a string
`sequence_name`, return True if the given sequence exists in
the database, False otherwise.
"""
raise NotImplementedError()
def _get_server_version_info(self, connection):
"""Retrieve the server version info from the given connection.
This is used by the default implementation to populate the
"server_version_info" attribute and is called exactly
once upon first connect.
"""
raise NotImplementedError()
def _get_default_schema_name(self, connection):
"""Return the string name of the currently selected schema from
the given connection.
This is used by the default implementation to populate the
"default_schema_name" attribute and is called exactly
once upon first connect.
"""
raise NotImplementedError()
def do_begin(self, dbapi_connection):
"""Provide an implementation of ``connection.begin()``, given a
DB-API connection.
The DBAPI has no dedicated "begin" method and it is expected
that transactions are implicit. This hook is provided for those
DBAPIs that might need additional help in this area.
Note that :meth:`.Dialect.do_begin` is not called unless a
:class:`.Transaction` object is in use. The
:meth:`.Dialect.do_autocommit`
hook is provided for DBAPIs that need some extra commands emitted
after a commit in order to enter the next transaction, when the
SQLAlchemy :class:`_engine.Connection`
is used in its default "autocommit"
mode.
:param dbapi_connection: a DBAPI connection, typically
proxied within a :class:`.ConnectionFairy`.
"""
raise NotImplementedError()
def do_rollback(self, dbapi_connection):
"""Provide an implementation of ``connection.rollback()``, given
a DB-API connection.
:param dbapi_connection: a DBAPI connection, typically
proxied within a :class:`.ConnectionFairy`.
"""
raise NotImplementedError()
def do_commit(self, dbapi_connection):
"""Provide an implementation of ``connection.commit()``, given a
DB-API connection.
:param dbapi_connection: a DBAPI connection, typically
proxied within a :class:`.ConnectionFairy`.
"""
raise NotImplementedError()
def do_close(self, dbapi_connection):
"""Provide an implementation of ``connection.close()``, given a DBAPI
connection.
This hook is called by the :class:`_pool.Pool`
when a connection has been
detached from the pool, or is being returned beyond the normal
capacity of the pool.
"""
raise NotImplementedError()
def create_xid(self):
"""Create a two-phase transaction ID.
This id will be passed to do_begin_twophase(),
do_rollback_twophase(), do_commit_twophase(). Its format is
unspecified.
"""
raise NotImplementedError()
def do_savepoint(self, connection, name):
"""Create a savepoint with the given name.
:param connection: a :class:`_engine.Connection`.
:param name: savepoint name.
"""
raise NotImplementedError()
def do_rollback_to_savepoint(self, connection, name):
"""Rollback a connection to the named savepoint.
:param connection: a :class:`_engine.Connection`.
:param name: savepoint name.
"""
raise NotImplementedError()
def do_release_savepoint(self, connection, name):
"""Release the named savepoint on a connection.
:param connection: a :class:`_engine.Connection`.
:param name: savepoint name.
"""
raise NotImplementedError()
def do_begin_twophase(self, connection, xid):
"""Begin a two phase transaction on the given connection.
:param connection: a :class:`_engine.Connection`.
:param xid: xid
"""
raise NotImplementedError()
def do_prepare_twophase(self, connection, xid):
"""Prepare a two phase transaction on the given connection.
:param connection: a :class:`_engine.Connection`.
:param xid: xid
"""
raise NotImplementedError()
def do_rollback_twophase(
self, connection, xid, is_prepared=True, recover=False
):
"""Rollback a two phase transaction on the given connection.
:param connection: a :class:`_engine.Connection`.
:param xid: xid
:param is_prepared: whether or not
:meth:`.TwoPhaseTransaction.prepare` was called.
:param recover: if the recover flag was passed.
"""
raise NotImplementedError()
def do_commit_twophase(
self, connection, xid, is_prepared=True, recover=False
):
"""Commit a two phase transaction on the given connection.
:param connection: a :class:`_engine.Connection`.
:param xid: xid
:param is_prepared: whether or not
:meth:`.TwoPhaseTransaction.prepare` was called.
:param recover: if the recover flag was passed.
"""
raise NotImplementedError()
def do_recover_twophase(self, connection):
"""Recover list of uncommitted prepared two phase transaction
identifiers on the given connection.
:param connection: a :class:`_engine.Connection`.
"""
raise NotImplementedError()
def do_executemany(self, cursor, statement, parameters, context=None):
"""Provide an implementation of ``cursor.executemany(statement,
parameters)``."""
raise NotImplementedError()
def do_execute(self, cursor, statement, parameters, context=None):
"""Provide an implementation of ``cursor.execute(statement,
parameters)``."""
raise NotImplementedError()
def do_execute_no_params(
self, cursor, statement, parameters, context=None
):
"""Provide an implementation of ``cursor.execute(statement)``.
The parameter collection should not be sent.
"""
raise NotImplementedError()
def is_disconnect(self, e, connection, cursor):
"""Return True if the given DB-API error indicates an invalid
connection"""
raise NotImplementedError()
def connect(self, *cargs, **cparams):
r"""Establish a connection using this dialect's DBAPI.
The default implementation of this method is::
def connect(self, *cargs, **cparams):
return self.dbapi.connect(*cargs, **cparams)
The ``*cargs, **cparams`` parameters are generated directly
from this dialect's :meth:`.Dialect.create_connect_args` method.
This method may be used for dialects that need to perform programmatic
per-connection steps when a new connection is procured from the
DBAPI.
:param \*cargs: positional parameters returned from the
:meth:`.Dialect.create_connect_args` method
:param \*\*cparams: keyword parameters returned from the
:meth:`.Dialect.create_connect_args` method.
:return: a DBAPI connection, typically from the :pep:`249` module
level ``.connect()`` function.
.. seealso::
:meth:`.Dialect.create_connect_args`
:meth:`.Dialect.on_connect`
"""
def on_connect(self):
"""return a callable which sets up a newly created DBAPI connection.
The callable should accept a single argument "conn" which is the
DBAPI connection itself. The inner callable has no
return value.
E.g.::
class MyDialect(default.DefaultDialect):
# ...
def on_connect(self):
def do_on_connect(connection):
connection.execute("SET SPECIAL FLAGS etc")
return do_on_connect
This is used to set dialect-wide per-connection options such as
isolation modes, Unicode modes, etc.
The "do_on_connect" callable is invoked by using the
:meth:`_events.PoolEvents.first_connect` and
:meth:`_events.PoolEvents.connect` event
hooks, then unwrapping the DBAPI connection and passing it into the
callable. The reason it is invoked for both events is so that any
dialect-level initialization that occurs upon first connection, which
also makes use of the :meth:`_events.PoolEvents.first_connect` method,
will
proceed after this hook has been called. This currently means the
hook is in fact called twice for the very first connection that a
dialect creates, and once per connection afterwards.
If None is returned, no event listener is generated.
:return: a callable that accepts a single DBAPI connection as an
argument, or None.
.. seealso::
:meth:`.Dialect.connect` - allows the DBAPI ``connect()`` sequence
itself to be controlled.
"""
return None
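# Sketch of a dialect overriding ``connect()`` to perform per-connection
# setup at connect time; the dialect class and the PRAGMA shown are purely
# illustrative assumptions:
#
#     class MyDialect(default.DefaultDialect):
#         def connect(self, *cargs, **cparams):
#             conn = self.dbapi.connect(*cargs, **cparams)
#             cursor = conn.cursor()
#             cursor.execute("PRAGMA foreign_keys=ON")
#             cursor.close()
#             return conn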
def reset_isolation_level(self, dbapi_conn):
"""Given a DBAPI connection, revert its isolation to the default.
Note that this is a dialect-level method which is used as part
of the implementation of the :class:`_engine.Connection` and
:class:`_engine.Engine`
isolation level facilities; these APIs should be preferred for
most typical use cases.
.. seealso::
:meth:`_engine.Connection.get_isolation_level`
- view current level
:attr:`_engine.Connection.default_isolation_level`
- view default level
:paramref:`.Connection.execution_options.isolation_level` -
set per :class:`_engine.Connection` isolation level
:paramref:`_sa.create_engine.isolation_level` -
set per :class:`_engine.Engine` isolation level
"""
raise NotImplementedError()
def set_isolation_level(self, dbapi_conn, level):
"""Given a DBAPI connection, set its isolation level.
Note that this is a dialect-level method which is used as part
of the implementation of the :class:`_engine.Connection` and
:class:`_engine.Engine`
isolation level facilities; these APIs should be preferred for
most typical use cases.
.. seealso::
:meth:`_engine.Connection.get_isolation_level`
- view current level
:attr:`_engine.Connection.default_isolation_level`
- view default level
:paramref:`.Connection.execution_options.isolation_level` -
set per :class:`_engine.Connection` isolation level
:paramref:`_sa.create_engine.isolation_level` -
set per :class:`_engine.Engine` isolation level
"""
raise NotImplementedError()
def get_isolation_level(self, dbapi_conn):
"""Given a DBAPI connection, return its isolation level.
When working with a :class:`_engine.Connection` object,
the corresponding
DBAPI connection may be procured using the
:attr:`_engine.Connection.connection` accessor.
Note that this is a dialect-level method which is used as part
of the implementation of the :class:`_engine.Connection` and
:class:`_engine.Engine` isolation level facilities;
these APIs should be preferred for most typical use cases.
.. seealso::
:meth:`_engine.Connection.get_isolation_level`
- view current level
:attr:`_engine.Connection.default_isolation_level`
- view default level
:paramref:`.Connection.execution_options.isolation_level` -
set per :class:`_engine.Connection` isolation level
:paramref:`_sa.create_engine.isolation_level` -
set per :class:`_engine.Engine` isolation level
"""
raise NotImplementedError()
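# These three hooks back the public isolation-level APIs; application code
# normally goes through the Connection rather than calling the dialect
# directly, e.g. this rough sketch (statement name is a placeholder):
#
#     with engine.connect() as conn:
#         conn = conn.execution_options(isolation_level="SERIALIZABLE")
#         conn.execute(some_statement)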
@classmethod
def get_dialect_cls(cls, url):
"""Given a URL, return the :class:`.Dialect` that will be used.
This is a hook that allows an external plugin to provide functionality
around an existing dialect, by allowing the plugin to be loaded
from the url based on an entrypoint, and then the plugin returns
the actual dialect to be used.
By default this just returns the cls.
.. versionadded:: 1.0.3
"""
return cls
@classmethod
def load_provisioning(cls):
"""set up the provision.py module for this dialect.
For dialects that include a provision.py module that sets up
provisioning followers, this method should initiate that process.
A typical implementation would be::
@classmethod
def load_provisioning(cls):
__import__("mydialect.provision")
The default method assumes a module named ``provision.py`` inside
the owning package of the current dialect, based on the ``__module__``
attribute::
@classmethod
def load_provisioning(cls):
package = ".".join(cls.__module__.split(".")[0:-1])
try:
__import__(package + ".provision")
except ImportError:
pass
.. versionadded:: 1.3.14
"""
@classmethod
def engine_created(cls, engine):
"""A convenience hook called before returning the final
:class:`_engine.Engine`.
If the dialect returned a different class from the
:meth:`.get_dialect_cls`
method, then the hook is called on both classes, first on
the dialect class returned by the :meth:`.get_dialect_cls` method and
then on the class on which the method was called.
The hook should be used by dialects and/or wrappers to apply special
events to the engine or its components. In particular, it allows
a dialect-wrapping class to apply dialect-level events.
.. versionadded:: 1.0.3
"""
pass
class CreateEnginePlugin(object):
"""A set of hooks intended to augment the construction of an
:class:`_engine.Engine` object based on entrypoint names in a URL.
The purpose of :class:`.CreateEnginePlugin` is to allow third-party
systems to apply engine, pool and dialect level event listeners without
the need for the target application to be modified; instead, the plugin
names can be added to the database URL. Target applications for
:class:`.CreateEnginePlugin` include:
* connection and SQL performance tools, e.g. which use events to track
number of checkouts and/or time spent with statements
* connectivity plugins such as proxies
Plugins are registered using entry points in a similar way as that
of dialects::
entry_points={
'sqlalchemy.plugins': [
'myplugin = myapp.plugins:MyPlugin'
]
}
A plugin that uses the above names would be invoked from a database
URL as in::
from sqlalchemy import create_engine
engine = create_engine(
"mysql+pymysql://scott:tiger@localhost/test?plugin=myplugin")
Alternatively, the :paramref:`.create_engine.plugins` argument may be
passed as a list to :func:`_sa.create_engine`::
engine = create_engine(
"mysql+pymysql://scott:tiger@localhost/test",
plugins=["myplugin"])
.. versionadded:: 1.2.3 plugin names can also be specified
to :func:`_sa.create_engine` as a list
The ``plugin`` argument supports multiple instances, so that a URL
may specify multiple plugins; they are loaded in the order stated
in the URL::
engine = create_engine(
"mysql+pymysql://scott:tiger@localhost/"
"test?plugin=plugin_one&plugin=plugin_twp&plugin=plugin_three")
A plugin can receive additional arguments from the URL string as
well as from the keyword arguments passed to :func:`_sa.create_engine`.
The :class:`.URL` object and the keyword dictionary are passed to the
constructor so that these arguments can be extracted from the url's
:attr:`.URL.query` collection as well as from the dictionary::
class MyPlugin(CreateEnginePlugin):
def __init__(self, url, kwargs):
self.my_argument_one = url.query.pop('my_argument_one')
self.my_argument_two = url.query.pop('my_argument_two')
self.my_argument_three = kwargs.pop('my_argument_three', None)
Arguments like those illustrated above would be consumed from the
following::
from sqlalchemy import create_engine
engine = create_engine(
"mysql+pymysql://scott:tiger@localhost/"
"test?plugin=myplugin&my_argument_one=foo&my_argument_two=bar",
my_argument_three='bat')
The URL and dictionary are used for subsequent setup of the engine
as they are, so the plugin can modify their arguments in-place.
Arguments that are only understood by the plugin should be popped
or otherwise removed so that they aren't interpreted as erroneous
arguments afterwards.
When the engine creation process completes and produces the
:class:`_engine.Engine` object, it is again passed to the plugin via the
:meth:`.CreateEnginePlugin.engine_created` hook. In this hook, additional
changes can be made to the engine, most typically involving setup of
events (e.g. those defined in :ref:`core_event_toplevel`).
.. versionadded:: 1.1
"""
def __init__(self, url, kwargs):
"""Construct a new :class:`.CreateEnginePlugin`.
The plugin object is instantiated individually for each call
to :func:`_sa.create_engine`. A single :class:`_engine.Engine` will be
passed to the :meth:`.CreateEnginePlugin.engine_created` method
corresponding to this URL.
:param url: the :class:`.URL` object. The plugin should inspect
what it needs here as well as remove its custom arguments from the
:attr:`.URL.query` collection. The URL can be modified in-place
in any other way as well.
:param kwargs: The keyword arguments passed to :func:`_sa.create_engine`.
The plugin can read and modify this dictionary in-place, to affect
the ultimate arguments used to create the engine. It should
remove its custom arguments from the dictionary as well.
"""
self.url = url
def handle_dialect_kwargs(self, dialect_cls, dialect_args):
"""parse and modify dialect kwargs"""
def handle_pool_kwargs(self, pool_cls, pool_args):
"""parse and modify pool kwargs"""
def engine_created(self, engine):
"""Receive the :class:`_engine.Engine`
object when it is fully constructed.
The plugin may make additional changes to the engine, such as
registering engine or connection pool events.
"""
class ExecutionContext(object):
"""A messenger object for a Dialect that corresponds to a single
execution.
ExecutionContext should have these data members:
connection
Connection object which can be freely used by default value
generators to execute SQL. This Connection should reference the
same underlying connection/transactional resources of
root_connection.
root_connection
Connection object which is the source of this ExecutionContext. This
Connection may have close_with_result=True set, in which case it can
only be used once.
dialect
dialect which created this ExecutionContext.
cursor
DB-API cursor procured from the connection,
compiled
if passed to constructor, sqlalchemy.engine.base.Compiled object
being executed,
statement
string version of the statement to be executed. Is either
passed to the constructor, or must be created from the
sql.Compiled object by the time pre_exec() has completed.
parameters
bind parameters passed to the execute() method. For compiled
statements, this is a dictionary or list of dictionaries. For
textual statements, it should be in a format suitable for the
dialect's paramstyle (i.e. dict or list of dicts for non
positional, list or list of lists/tuples for positional).
isinsert
True if the statement is an INSERT.
isupdate
True if the statement is an UPDATE.
should_autocommit
True if the statement is a "committable" statement.
prefetch_cols
a list of Column objects for which a client-side default
was fired off. Applies to inserts and updates.
postfetch_cols
a list of Column objects for which a server-side default or
inline SQL expression value was fired off. Applies to inserts
and updates.
"""
exception = None
"""A DBAPI-level exception that was caught when this ExecutionContext
attempted to execute a statement.
This attribute is meaningful only within the
:meth:`_events.ConnectionEvents.dbapi_error` event.
.. versionadded:: 0.9.7
.. seealso::
:attr:`.ExecutionContext.is_disconnect`
:meth:`_events.ConnectionEvents.dbapi_error`
"""
is_disconnect = None
"""Boolean flag set to True or False when a DBAPI-level exception
is caught when this ExecutionContext attempted to execute a statement.
This attribute is meaningful only within the
:meth:`_events.ConnectionEvents.dbapi_error` event.
.. versionadded:: 0.9.7
.. seealso::
:attr:`.ExecutionContext.exception`
:meth:`_events.ConnectionEvents.dbapi_error`
"""
def create_cursor(self):
"""Return a new cursor generated from this ExecutionContext's
connection.
Some dialects may wish to change the behavior of
connection.cursor(), such as postgresql which may return a PG
"server side" cursor.
"""
raise NotImplementedError()
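# Sketch of a dialect-specific ExecutionContext returning a named ("server
# side") cursor; the attribute names and the execution option checked below
# are assumptions for illustration only:
#
#     class MyExecutionContext(default.DefaultExecutionContext):
#         def create_cursor(self):
#             if self.execution_options.get("stream_results", False):
#                 return self._dbapi_connection.cursor("my_named_cursor")
#             return self._dbapi_connection.cursor()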
def pre_exec(self):
"""Called before an execution of a compiled statement.
If a compiled statement was passed to this ExecutionContext,
the `statement` and `parameters` datamembers must be
initialized after this statement is complete.
"""
raise NotImplementedError()
def post_exec(self):
"""Called after the execution of a compiled statement.
If a compiled statement was passed to this ExecutionContext,
the `last_insert_ids`, `last_inserted_params`, etc.
datamembers should be available after this method completes.
"""
raise NotImplementedError()
def result(self):
"""Return a result object corresponding to this ExecutionContext.
Returns a ResultProxy.
"""
raise NotImplementedError()
def handle_dbapi_exception(self, e):
"""Receive a DBAPI exception which occurred upon execute, result
fetch, etc."""
raise NotImplementedError()
def should_autocommit_text(self, statement):
"""Parse the given textual statement and return True if it refers to
a "committable" statement"""
raise NotImplementedError()
def lastrow_has_defaults(self):
"""Return True if the last INSERT or UPDATE row contained
inlined or database-side defaults.
"""
raise NotImplementedError()
def get_rowcount(self):
"""Return the DBAPI ``cursor.rowcount`` value, or in some
cases an interpreted value.
See :attr:`_engine.ResultProxy.rowcount` for details on this.
"""
raise NotImplementedError()
class Connectable(object):
"""Interface for an object which supports execution of SQL constructs.
The two implementations of :class:`.Connectable` are
:class:`_engine.Connection` and :class:`_engine.Engine`.
Connectable must also implement the 'dialect' member which references a
:class:`.Dialect` instance.
"""
def connect(self, **kwargs):
"""Return a :class:`_engine.Connection` object.
Depending on context, this may be ``self`` if this object
is already an instance of :class:`_engine.Connection`, or a newly
procured :class:`_engine.Connection` if this object is an instance
of :class:`_engine.Engine`.
"""
engine = None
"""The :class:`_engine.Engine` instance referred to by this
:class:`.Connectable`.
May be ``self`` if this is already an :class:`_engine.Engine`.
"""
@util.deprecated(
"1.3",
"The :meth:`_engine.Engine.contextual_connect` and "
":meth:`_engine.Connection.contextual_connect` methods are "
"deprecated. This "
"method is an artifact of the threadlocal engine strategy which is "
"also to be deprecated. For explicit connections from an "
":class:`_engine.Engine`, use the :meth:`_engine.Engine.connect` "
"method.",
)
def contextual_connect(self, *arg, **kw):
"""Return a :class:`_engine.Connection`
object which may be part of an ongoing
context.
Depending on context, this may be ``self`` if this object
is already an instance of :class:`_engine.Connection`, or a newly
procured :class:`_engine.Connection` if this object is an instance
of :class:`_engine.Engine`.
"""
return self._contextual_connect(*arg, **kw)
def _contextual_connect(self):
raise NotImplementedError()
@util.deprecated(
"0.7",
"The :meth:`.Connectable.create` method is deprecated and will be "
"removed in a future release. Please use the ``.create()`` method "
"on specific schema objects to emit DDL sequences, including "
":meth:`_schema.Table.create`, :meth:`.Index.create`, and "
":meth:`_schema.MetaData.create_all`.",
)
def create(self, entity, **kwargs):
"""Emit CREATE statements for the given schema entity.
"""
raise NotImplementedError()
@util.deprecated(
"0.7",
"The :meth:`.Connectable.drop` method is deprecated and will be "
"removed in a future release. Please use the ``.drop()`` method "
"on specific schema objects to emit DDL sequences, including "
":meth:`_schema.Table.drop`, :meth:`.Index.drop`, and "
":meth:`_schema.MetaData.drop_all`.",
)
def drop(self, entity, **kwargs):
"""Emit DROP statements for the given schema entity.
"""
raise NotImplementedError()
def execute(self, object_, *multiparams, **params):
"""Executes the given construct and returns a """
""":class:`_engine.ResultProxy`."""
raise NotImplementedError()
def scalar(self, object_, *multiparams, **params):
"""Executes and returns the first column of the first row.
The underlying cursor is closed after execution.
"""
raise NotImplementedError()
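# Both Engine and Connection satisfy this interface, so the following sketch
# works against either object (``connectable`` is a placeholder name):
#
#     from sqlalchemy import text
#
#     result = connectable.execute(text("SELECT 1"))
#     value = connectable.scalar(text("SELECT 1"))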
def _run_visitor(self, visitorcallable, element, **kwargs):
raise NotImplementedError()
def _execute_clauseelement(self, elem, multiparams=None, params=None):
raise NotImplementedError()
class ExceptionContext(object):
"""Encapsulate information about an error condition in progress.
This object exists solely to be passed to the
:meth:`_events.ConnectionEvents.handle_error` event,
supporting an interface that
can be extended without backwards-incompatibility.
.. versionadded:: 0.9.7
"""
connection = None
"""The :class:`_engine.Connection` in use during the exception.
This member is present, except in the case of a failure when
first connecting.
.. seealso::
:attr:`.ExceptionContext.engine`
"""
engine = None
"""The :class:`_engine.Engine` in use during the exception.
This member should always be present, even in the case of a failure
when first connecting.
.. versionadded:: 1.0.0
"""
cursor = None
"""The DBAPI cursor object.
May be None.
"""
statement = None
"""String SQL statement that was emitted directly to the DBAPI.
May be None.
"""
parameters = None
"""Parameter collection that was emitted directly to the DBAPI.
May be None.
"""
original_exception = None
"""The exception object which was caught.
This member is always present.
"""
sqlalchemy_exception = None
"""The :class:`sqlalchemy.exc.StatementError` which wraps the original,
and will be raised if exception handling is not circumvented by the event.
May be None, as not all exception types are wrapped by SQLAlchemy.
For DBAPI-level exceptions that subclass the dbapi's Error class, this
field will always be present.
"""
chained_exception = None
"""The exception that was returned by the previous handler in the
exception chain, if any.
If present, this exception will be the one ultimately raised by
SQLAlchemy unless a subsequent handler replaces it.
May be None.
"""
execution_context = None
"""The :class:`.ExecutionContext` corresponding to the execution
operation in progress.
This is present for statement execution operations, but not for
operations such as transaction begin/end. It also is not present when
the exception was raised before the :class:`.ExecutionContext`
could be constructed.
Note that the :attr:`.ExceptionContext.statement` and
:attr:`.ExceptionContext.parameters` members may represent a
different value than that of the :class:`.ExecutionContext`,
potentially in the case where a
:meth:`_events.ConnectionEvents.before_cursor_execute` event or similar
modified the statement/parameters to be sent.
May be None.
"""
is_disconnect = None
"""Represent whether the exception as occurred represents a "disconnect"
condition.
This flag will always be True or False within the scope of the
:meth:`_events.ConnectionEvents.handle_error` handler.
SQLAlchemy will defer to this flag in order to determine whether or not
the connection should be invalidated subsequently. That is, by
assigning to this flag, a "disconnect" event which then results in
a connection and pool invalidation can be invoked or prevented by
changing this flag.
"""
invalidate_pool_on_disconnect = True
"""Represent whether all connections in the pool should be invalidated
when a "disconnect" condition is in effect.
Setting this flag to False within the scope of the
:meth:`_events.ConnectionEvents.handle_error`
event will have the effect such
that the full collection of connections in the pool will not be
invalidated during a disconnect; only the current connection that is the
subject of the error will actually be invalidated.
The purpose of this flag is for custom disconnect-handling schemes where
the invalidation of other connections in the pool is to be performed
based on other conditions, or even on a per-connection basis.
.. versionadded:: 1.0.3
"""
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/engine/threadlocal.py
|
# engine/threadlocal.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides a thread-local transactional wrapper around the root Engine class.
The ``threadlocal`` module is invoked when using the
``strategy="threadlocal"`` flag with :func:`~sqlalchemy.engine.create_engine`.
This module is semi-private and is invoked automatically when the threadlocal
engine strategy is used.
"""
import weakref
from . import base
from .. import util
class TLConnection(base.Connection):
def __init__(self, *arg, **kw):
super(TLConnection, self).__init__(*arg, **kw)
self.__opencount = 0
def _increment_connect(self):
self.__opencount += 1
return self
def close(self):
if self.__opencount == 1:
base.Connection.close(self)
self.__opencount -= 1
def _force_close(self):
self.__opencount = 0
base.Connection.close(self)
class TLEngine(base.Engine):
"""An Engine that includes support for thread-local managed
transactions.
"""
_tl_connection_cls = TLConnection
@util.deprecated(
"1.3",
"The 'threadlocal' engine strategy is deprecated, and will be "
"removed in a future release. The strategy is no longer relevant "
"to modern usage patterns (including that of the ORM "
":class:`.Session` object) which make use of a "
":class:`_engine.Connection` "
"object in order to invoke statements.",
)
def __init__(self, *args, **kwargs):
super(TLEngine, self).__init__(*args, **kwargs)
self._connections = util.threading.local()
def contextual_connect(self, **kw):
return self._contextual_connect(**kw)
def _contextual_connect(self, **kw):
if not hasattr(self._connections, "conn"):
connection = None
else:
connection = self._connections.conn()
if connection is None or connection.closed:
# guards against pool-level reapers, if desired.
# or not connection.connection.is_valid:
connection = self._tl_connection_cls(
self,
self._wrap_pool_connect(self.pool.connect, connection),
**kw
)
self._connections.conn = weakref.ref(connection)
return connection._increment_connect()
def begin_twophase(self, xid=None):
if not hasattr(self._connections, "trans"):
self._connections.trans = []
self._connections.trans.append(
self._contextual_connect().begin_twophase(xid=xid)
)
return self
def begin_nested(self):
if not hasattr(self._connections, "trans"):
self._connections.trans = []
self._connections.trans.append(
self._contextual_connect().begin_nested()
)
return self
def begin(self):
if not hasattr(self._connections, "trans"):
self._connections.trans = []
self._connections.trans.append(self._contextual_connect().begin())
return self
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
if type_ is None:
self.commit()
else:
self.rollback()
def prepare(self):
if (
not hasattr(self._connections, "trans")
or not self._connections.trans
):
return
self._connections.trans[-1].prepare()
def commit(self):
if (
not hasattr(self._connections, "trans")
or not self._connections.trans
):
return
trans = self._connections.trans.pop(-1)
trans.commit()
def rollback(self):
if (
not hasattr(self._connections, "trans")
or not self._connections.trans
):
return
trans = self._connections.trans.pop(-1)
trans.rollback()
def dispose(self):
self._connections = util.threading.local()
super(TLEngine, self).dispose()
@property
def closed(self):
return (
not hasattr(self._connections, "conn")
or self._connections.conn() is None
or self._connections.conn().closed
)
def close(self):
if not self.closed:
self._contextual_connect().close()
connection = self._connections.conn()
connection._force_close()
del self._connections.conn
self._connections.trans = []
def __repr__(self):
return "TLEngine(%r)" % self.url
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/engine/strategies.py
|
# engine/strategies.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Strategies for creating new instances of Engine types.
These are semi-private implementation classes which provide the
underlying behavior for the "strategy" keyword argument available on
:func:`~sqlalchemy.engine.create_engine`. Current available options are
``plain``, ``threadlocal``, and ``mock``.
New strategies can be added via new ``EngineStrategy`` classes.
"""
from operator import attrgetter
from . import base
from . import threadlocal
from . import url
from .. import event
from .. import pool as poollib
from .. import util
from ..sql import schema
strategies = {}
class EngineStrategy(object):
"""An adaptor that processes input arguments and produces an Engine.
Provides a ``create`` method that receives input arguments and
produces an instance of base.Engine or a subclass.
"""
def __init__(self):
strategies[self.name] = self
def create(self, *args, **kwargs):
"""Given arguments, returns a new Engine instance."""
raise NotImplementedError()
class DefaultEngineStrategy(EngineStrategy):
"""Base class for built-in strategies."""
def create(self, name_or_url, **kwargs):
# create url.URL object
u = url.make_url(name_or_url)
plugins = u._instantiate_plugins(kwargs)
u.query.pop("plugin", None)
kwargs.pop("plugins", None)
entrypoint = u._get_entrypoint()
dialect_cls = entrypoint.get_dialect_cls(u)
if kwargs.pop("_coerce_config", False):
def pop_kwarg(key, default=None):
value = kwargs.pop(key, default)
if key in dialect_cls.engine_config_types:
value = dialect_cls.engine_config_types[key](value)
return value
else:
pop_kwarg = kwargs.pop
dialect_args = {}
# consume dialect arguments from kwargs
for k in util.get_cls_kwargs(dialect_cls):
if k in kwargs:
dialect_args[k] = pop_kwarg(k)
dbapi = kwargs.pop("module", None)
if dbapi is None:
dbapi_args = {}
for k in util.get_func_kwargs(dialect_cls.dbapi):
if k in kwargs:
dbapi_args[k] = pop_kwarg(k)
dbapi = dialect_cls.dbapi(**dbapi_args)
dialect_args["dbapi"] = dbapi
for plugin in plugins:
plugin.handle_dialect_kwargs(dialect_cls, dialect_args)
# create dialect
dialect = dialect_cls(**dialect_args)
# assemble connection arguments
(cargs, cparams) = dialect.create_connect_args(u)
cparams.update(pop_kwarg("connect_args", {}))
cargs = list(cargs) # allow mutability
# look for existing pool or create
pool = pop_kwarg("pool", None)
if pool is None:
def connect(connection_record=None):
if dialect._has_events:
for fn in dialect.dispatch.do_connect:
connection = fn(
dialect, connection_record, cargs, cparams
)
if connection is not None:
return connection
return dialect.connect(*cargs, **cparams)
creator = pop_kwarg("creator", connect)
poolclass = pop_kwarg("poolclass", None)
if poolclass is None:
poolclass = dialect_cls.get_pool_class(u)
pool_args = {"dialect": dialect}
# consume pool arguments from kwargs, translating a few of
# the arguments
translate = {
"logging_name": "pool_logging_name",
"echo": "echo_pool",
"timeout": "pool_timeout",
"recycle": "pool_recycle",
"events": "pool_events",
"use_threadlocal": "pool_threadlocal",
"reset_on_return": "pool_reset_on_return",
"pre_ping": "pool_pre_ping",
"use_lifo": "pool_use_lifo",
}
for k in util.get_cls_kwargs(poolclass):
tk = translate.get(k, k)
if tk in kwargs:
pool_args[k] = pop_kwarg(tk)
for plugin in plugins:
plugin.handle_pool_kwargs(poolclass, pool_args)
pool = poolclass(creator, **pool_args)
else:
if isinstance(pool, poollib.dbapi_proxy._DBProxy):
pool = pool.get_pool(*cargs, **cparams)
else:
pool = pool
pool._dialect = dialect
# create engine.
engineclass = self.engine_cls
engine_args = {}
for k in util.get_cls_kwargs(engineclass):
if k in kwargs:
engine_args[k] = pop_kwarg(k)
_initialize = kwargs.pop("_initialize", True)
# all kwargs should be consumed
if kwargs:
raise TypeError(
"Invalid argument(s) %s sent to create_engine(), "
"using configuration %s/%s/%s. Please check that the "
"keyword arguments are appropriate for this combination "
"of components."
% (
",".join("'%s'" % k for k in kwargs),
dialect.__class__.__name__,
pool.__class__.__name__,
engineclass.__name__,
)
)
engine = engineclass(pool, dialect, u, **engine_args)
if _initialize:
do_on_connect = dialect.on_connect()
if do_on_connect:
def on_connect(dbapi_connection, connection_record):
conn = getattr(
dbapi_connection, "_sqla_unwrap", dbapi_connection
)
if conn is None:
return
do_on_connect(conn)
event.listen(pool, "first_connect", on_connect)
event.listen(pool, "connect", on_connect)
def first_connect(dbapi_connection, connection_record):
c = base.Connection(
engine, connection=dbapi_connection, _has_events=False
)
c._execution_options = util.immutabledict()
dialect.initialize(c)
dialect.do_rollback(c.connection)
event.listen(
pool,
"first_connect",
first_connect,
_once_unless_exception=True,
)
dialect_cls.engine_created(engine)
if entrypoint is not dialect_cls:
entrypoint.engine_created(engine)
for plugin in plugins:
plugin.engine_created(engine)
return engine
class PlainEngineStrategy(DefaultEngineStrategy):
"""Strategy for configuring a regular Engine."""
name = "plain"
engine_cls = base.Engine
PlainEngineStrategy()
class ThreadLocalEngineStrategy(DefaultEngineStrategy):
"""Strategy for configuring an Engine with threadlocal behavior."""
name = "threadlocal"
engine_cls = threadlocal.TLEngine
ThreadLocalEngineStrategy()
class MockEngineStrategy(EngineStrategy):
"""Strategy for configuring an Engine-like object with mocked execution.
Produces a single mock Connectable object which dispatches
statement execution to a passed-in function.
"""
name = "mock"
def create(self, name_or_url, executor, **kwargs):
# create url.URL object
u = url.make_url(name_or_url)
dialect_cls = u.get_dialect()
dialect_args = {}
# consume dialect arguments from kwargs
for k in util.get_cls_kwargs(dialect_cls):
if k in kwargs:
dialect_args[k] = kwargs.pop(k)
# create dialect
dialect = dialect_cls(**dialect_args)
return MockEngineStrategy.MockConnection(dialect, executor)
class MockConnection(base.Connectable):
def __init__(self, dialect, execute):
self._dialect = dialect
self.execute = execute
engine = property(lambda s: s)
dialect = property(attrgetter("_dialect"))
name = property(lambda s: s._dialect.name)
schema_for_object = schema._schema_getter(None)
def contextual_connect(self, **kwargs):
return self
def connect(self, **kwargs):
return self
def execution_options(self, **kw):
return self
def compiler(self, statement, parameters, **kwargs):
return self._dialect.compiler(
statement, parameters, engine=self, **kwargs
)
def create(self, entity, **kwargs):
kwargs["checkfirst"] = False
from sqlalchemy.engine import ddl
ddl.SchemaGenerator(self.dialect, self, **kwargs).traverse_single(
entity
)
def drop(self, entity, **kwargs):
kwargs["checkfirst"] = False
from sqlalchemy.engine import ddl
ddl.SchemaDropper(self.dialect, self, **kwargs).traverse_single(
entity
)
def _run_visitor(
self, visitorcallable, element, connection=None, **kwargs
):
kwargs["checkfirst"] = False
visitorcallable(self.dialect, self, **kwargs).traverse_single(
element
)
def execute(self, object_, *multiparams, **params):
raise NotImplementedError()
MockEngineStrategy()
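# Usage sketch for the "mock" strategy, which dispatches execution to a
# callable and is commonly used to dump DDL as strings rather than execute it:
#
#     from sqlalchemy import create_engine, MetaData
#
#     metadata = MetaData()
#     # ... Table definitions on ``metadata`` ...
#
#     def dump(sql, *multiparams, **params):
#         print(sql.compile(dialect=engine.dialect))
#
#     engine = create_engine("postgresql://", strategy="mock", executor=dump)
#     metadata.create_all(engine, checkfirst=False)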
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/engine/util.py
|
# engine/util.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .. import util
def connection_memoize(key):
"""Decorator, memoize a function in a connection.info stash.
Only applicable to functions which take no arguments other than a
connection. The memo will be stored in ``connection.info[key]``.
"""
@util.decorator
def decorated(fn, self, connection):
connection = connection.connect()
try:
return connection.info[key]
except KeyError:
connection.info[key] = val = fn(self, connection)
return val
return decorated
def py_fallback():
def _distill_params(multiparams, params): # noqa
r"""Given arguments from the calling form \*multiparams, \**params,
return a list of bind parameter structures, usually a list of
dictionaries.
In the case of 'raw' execution which accepts positional parameters,
it may be a list of tuples or lists.
"""
if not multiparams:
if params:
return [params]
else:
return []
elif len(multiparams) == 1:
zero = multiparams[0]
if isinstance(zero, (list, tuple)):
if (
not zero
or hasattr(zero[0], "__iter__")
and not hasattr(zero[0], "strip")
):
# execute(stmt, [{}, {}, {}, ...])
# execute(stmt, [(), (), (), ...])
return zero
else:
# execute(stmt, ("value", "value"))
return [zero]
elif hasattr(zero, "keys"):
# execute(stmt, {"key":"value"})
return [zero]
else:
# execute(stmt, "value")
return [[zero]]
else:
if hasattr(multiparams[0], "__iter__") and not hasattr(
multiparams[0], "strip"
):
return multiparams
else:
return [multiparams]
return locals()
try:
from sqlalchemy.cutils import _distill_params # noqa
except ImportError:
globals().update(py_fallback())
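# Behavior sketch for ``_distill_params``: the ``*multiparams`` / ``**params``
# calling convention collapses into a list of bind parameter structures, e.g.:
#
#     _distill_params(([{"x": 1}, {"x": 2}],), {})  # -> [{"x": 1}, {"x": 2}]
#     _distill_params(({"x": 1},), {})              # -> [{"x": 1}]
#     _distill_params((), {"x": 1})                 # -> [{"x": 1}]
#     _distill_params((), {})                       # -> []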
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/engine/reflection.py
|
# engine/reflection.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides an abstraction for obtaining database schema information.
Usage Notes:
Here are some general conventions when accessing the low level inspector
methods such as get_table_names, get_columns, etc.
1. Inspector methods return lists of dicts in most cases for the following
reasons:
* They're both standard types that can be serialized.
* Using a dict instead of a tuple allows easy expansion of attributes.
* Using a list for the outer structure maintains order and is easy to work
with (e.g. list comprehension [d['name'] for d in cols]).
2. Records that contain a name, such as the column name in a column record,
use the key 'name'. So for most return values, each record will have a
'name' attribute.
"""
from .base import Connectable
from .. import exc
from .. import inspection
from .. import sql
from .. import util
from ..sql import operators
from ..sql import schema as sa_schema
from ..sql.type_api import TypeEngine
from ..util import deprecated
from ..util import topological
@util.decorator
def cache(fn, self, con, *args, **kw):
info_cache = kw.get("info_cache", None)
if info_cache is None:
return fn(self, con, *args, **kw)
key = (
fn.__name__,
tuple(a for a in args if isinstance(a, util.string_types)),
tuple((k, v) for k, v in kw.items() if k != "info_cache"),
)
ret = info_cache.get(key)
if ret is None:
ret = fn(self, con, *args, **kw)
info_cache[key] = ret
return ret
class Inspector(object):
"""Performs database schema inspection.
The Inspector acts as a proxy to the reflection methods of the
:class:`~sqlalchemy.engine.interfaces.Dialect`, providing a
consistent interface as well as caching support for previously
fetched metadata.
A :class:`_reflection.Inspector` object is usually created via the
:func:`_sa.inspect` function::
from sqlalchemy import inspect, create_engine
engine = create_engine('...')
insp = inspect(engine)
The inspection method above is equivalent to using the
:meth:`_reflection.Inspector.from_engine` method, i.e.::
engine = create_engine('...')
insp = Inspector.from_engine(engine)
Where above, the :class:`~sqlalchemy.engine.interfaces.Dialect` may opt
to return an :class:`_reflection.Inspector`
subclass that provides additional
methods specific to the dialect's target database.
"""
def __init__(self, bind):
"""Initialize a new :class:`_reflection.Inspector`.
:param bind: a :class:`~sqlalchemy.engine.Connectable`,
which is typically an instance of
:class:`~sqlalchemy.engine.Engine` or
:class:`~sqlalchemy.engine.Connection`.
For a dialect-specific instance of :class:`_reflection.Inspector`, see
:meth:`_reflection.Inspector.from_engine`
"""
# this might not be a connection, it could be an engine.
self.bind = bind
# set the engine
if hasattr(bind, "engine"):
self.engine = bind.engine
else:
self.engine = bind
if self.engine is bind:
# if engine, ensure initialized
bind.connect().close()
self.dialect = self.engine.dialect
self.info_cache = {}
@classmethod
def from_engine(cls, bind):
"""Construct a new dialect-specific Inspector object from the given
engine or connection.
:param bind: a :class:`~sqlalchemy.engine.Connectable`,
which is typically an instance of
:class:`~sqlalchemy.engine.Engine` or
:class:`~sqlalchemy.engine.Connection`.
This method differs from a direct constructor call of
:class:`_reflection.Inspector` in that the
:class:`~sqlalchemy.engine.interfaces.Dialect` is given a chance to
provide a dialect-specific :class:`_reflection.Inspector` instance,
which may
provide additional methods.
See the example at :class:`_reflection.Inspector`.
"""
if hasattr(bind.dialect, "inspector"):
return bind.dialect.inspector(bind)
return Inspector(bind)
@inspection._inspects(Connectable)
def _insp(bind):
return Inspector.from_engine(bind)
@property
def default_schema_name(self):
"""Return the default schema name presented by the dialect
for the current engine's database user.
E.g. this is typically ``public`` for PostgreSQL and ``dbo``
for SQL Server.
"""
return self.dialect.default_schema_name
def get_schema_names(self):
"""Return all schema names.
"""
if hasattr(self.dialect, "get_schema_names"):
return self.dialect.get_schema_names(
self.bind, info_cache=self.info_cache
)
return []
@util.deprecated_params(
order_by=(
"1.0",
"The :paramref:`get_table_names.order_by` parameter is deprecated "
"and will be removed in a future release. Please refer to "
":meth:`_reflection.Inspector.get_sorted_table_and_fkc_names` "
"for a "
"more comprehensive solution to resolving foreign key cycles "
"between tables.",
)
)
def get_table_names(self, schema=None, order_by=None):
"""Return all table names in referred to within a particular schema.
The names are expected to be real tables only, not views.
Views are instead returned using the
:meth:`_reflection.Inspector.get_view_names`
method.
:param schema: Schema name. If ``schema`` is left at ``None``, the
database's default schema is
used, else the named schema is searched. If the database does not
support named schemas, behavior is undefined if ``schema`` is not
passed as ``None``. For special quoting, use :class:`.quoted_name`.
:param order_by: Optional, may be the string "foreign_key" to sort
the result on foreign key dependencies. Does not automatically
resolve cycles, and will raise :class:`.CircularDependencyError`
if cycles exist.
.. seealso::
:meth:`_reflection.Inspector.get_sorted_table_and_fkc_names`
:attr:`_schema.MetaData.sorted_tables`
"""
if hasattr(self.dialect, "get_table_names"):
tnames = self.dialect.get_table_names(
self.bind, schema, info_cache=self.info_cache
)
else:
tnames = self.engine.table_names(schema)
if order_by == "foreign_key":
tuples = []
for tname in tnames:
for fkey in self.get_foreign_keys(tname, schema):
if tname != fkey["referred_table"]:
tuples.append((fkey["referred_table"], tname))
tnames = list(topological.sort(tuples, tnames))
return tnames
def get_sorted_table_and_fkc_names(self, schema=None):
"""Return dependency-sorted table and foreign key constraint names in
referred to within a particular schema.
This will yield 2-tuples of
``(tablename, [(tname, fkname), (tname, fkname), ...])``
consisting of table names in CREATE order grouped with the foreign key
constraint names that are not detected as belonging to a cycle.
The final element
will be ``(None, [(tname, fkname), (tname, fkname), ..])``
which will consist of remaining
foreign key constraint names that would require a separate CREATE
step after-the-fact, based on dependencies between tables.
.. versionadded:: 1.0.0
.. seealso::
:meth:`_reflection.Inspector.get_table_names`
:func:`.sort_tables_and_constraints` - similar method which works
with an already-given :class:`_schema.MetaData`.
"""
if hasattr(self.dialect, "get_table_names"):
tnames = self.dialect.get_table_names(
self.bind, schema, info_cache=self.info_cache
)
else:
tnames = self.engine.table_names(schema)
tuples = set()
remaining_fkcs = set()
fknames_for_table = {}
for tname in tnames:
fkeys = self.get_foreign_keys(tname, schema)
fknames_for_table[tname] = set([fk["name"] for fk in fkeys])
for fkey in fkeys:
if tname != fkey["referred_table"]:
tuples.add((fkey["referred_table"], tname))
try:
candidate_sort = list(topological.sort(tuples, tnames))
except exc.CircularDependencyError as err:
for edge in err.edges:
tuples.remove(edge)
remaining_fkcs.update(
(edge[1], fkc) for fkc in fknames_for_table[edge[1]]
)
candidate_sort = list(topological.sort(tuples, tnames))
return [
(tname, fknames_for_table[tname].difference(remaining_fkcs))
for tname in candidate_sort
] + [(None, list(remaining_fkcs))]
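# Consuming the result (sketch): tables arrive in CREATE order, and the final
# ``(None, [...])`` entry lists constraints that must be added afterwards:
#
#     for tname, fkcs in insp.get_sorted_table_and_fkc_names():
#         if tname is not None:
#             print("create table", tname, "with constraints", fkcs)
#         else:
#             print("add remaining constraints afterwards:", fkcs)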
def get_temp_table_names(self):
"""return a list of temporary table names for the current bind.
This method is unsupported by most dialects; currently
only SQLite implements it.
.. versionadded:: 1.0.0
"""
return self.dialect.get_temp_table_names(
self.bind, info_cache=self.info_cache
)
def get_temp_view_names(self):
"""return a list of temporary view names for the current bind.
This method is unsupported by most dialects; currently
only SQLite implements it.
.. versionadded:: 1.0.0
"""
return self.dialect.get_temp_view_names(
self.bind, info_cache=self.info_cache
)
def get_table_options(self, table_name, schema=None, **kw):
"""Return a dictionary of options specified when the table of the
given name was created.
This currently includes some options that apply to MySQL tables.
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
if hasattr(self.dialect, "get_table_options"):
return self.dialect.get_table_options(
self.bind, table_name, schema, info_cache=self.info_cache, **kw
)
return {}
def get_view_names(self, schema=None):
"""Return all view names in `schema`.
:param schema: Optional, retrieve names from a non-default schema.
For special quoting, use :class:`.quoted_name`.
"""
return self.dialect.get_view_names(
self.bind, schema, info_cache=self.info_cache
)
def get_view_definition(self, view_name, schema=None):
"""Return definition for `view_name`.
:param schema: Optional, retrieve names from a non-default schema.
For special quoting, use :class:`.quoted_name`.
"""
return self.dialect.get_view_definition(
self.bind, view_name, schema, info_cache=self.info_cache
)
def get_columns(self, table_name, schema=None, **kw):
"""Return information about columns in `table_name`.
Given a string `table_name` and an optional string `schema`, return
column information as a list of dicts with these keys:
* ``name`` - the column's name
* ``type`` - the type of this column; an instance of
:class:`~sqlalchemy.types.TypeEngine`
* ``nullable`` - boolean flag if the column is NULL or NOT NULL
* ``default`` - the column's server default value - this is returned
as a string SQL expression.
* ``autoincrement`` - indicates that the column is auto incremented -
this is returned as a boolean or 'auto'
* ``comment`` - (optional) the comment on the column. Only some
dialects return this key
* ``computed`` - (optional) when present it indicates that this column
is computed by the database. Only some dialects return this key.
Returned as a dict with the keys:
* ``sqltext`` - the expression used to generate this column returned
as a string SQL expression
* ``persisted`` - (optional) boolean that indicates if the column is
stored in the table
.. versionadded:: 1.3.16 - added support for computed reflection.
* ``dialect_options`` - (optional) a dict with dialect specific options
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
:return: list of dictionaries, each representing the definition of
a database column.
"""
col_defs = self.dialect.get_columns(
self.bind, table_name, schema, info_cache=self.info_cache, **kw
)
for col_def in col_defs:
# make this easy and only return instances for coltype
coltype = col_def["type"]
if not isinstance(coltype, TypeEngine):
col_def["type"] = coltype()
return col_defs
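# Typical inspection flow combining the methods above (sketch):
#
#     from sqlalchemy import create_engine, inspect
#
#     engine = create_engine("sqlite://")
#     insp = inspect(engine)
#     for table_name in insp.get_table_names():
#         for col in insp.get_columns(table_name):
#             print(table_name, col["name"], col["type"], col["nullable"])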
@deprecated(
"0.7",
"The :meth:`_reflection.Inspector.get_primary_keys` "
"method is deprecated and "
"will be removed in a future release. Please refer to the "
":meth:`_reflection.Inspector.get_pk_constraint` method.",
)
def get_primary_keys(self, table_name, schema=None, **kw):
"""Return information about primary keys in `table_name`.
Given a string `table_name`, and an optional string `schema`, return
primary key information as a list of column names.
"""
return self.dialect.get_pk_constraint(
self.bind, table_name, schema, info_cache=self.info_cache, **kw
)["constrained_columns"]
def get_pk_constraint(self, table_name, schema=None, **kw):
"""Return information about primary key constraint on `table_name`.
Given a string `table_name`, and an optional string `schema`, return
primary key information as a dictionary with these keys:
constrained_columns
a list of column names that make up the primary key
name
optional name of the primary key constraint.
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
return self.dialect.get_pk_constraint(
self.bind, table_name, schema, info_cache=self.info_cache, **kw
)
def get_foreign_keys(self, table_name, schema=None, **kw):
"""Return information about foreign_keys in `table_name`.
Given a string `table_name`, and an optional string `schema`, return
foreign key information as a list of dicts with these keys:
constrained_columns
a list of column names that make up the foreign key
referred_schema
the name of the referred schema
referred_table
the name of the referred table
referred_columns
a list of column names in the referred table that correspond to
constrained_columns
name
optional name of the foreign key constraint.
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
return self.dialect.get_foreign_keys(
self.bind, table_name, schema, info_cache=self.info_cache, **kw
)
def get_indexes(self, table_name, schema=None, **kw):
"""Return information about indexes in `table_name`.
Given a string `table_name` and an optional string `schema`, return
index information as a list of dicts with these keys:
name
the index's name
column_names
list of column names in order
unique
boolean
column_sorting
optional dict mapping column names to tuple of sort keywords,
which may include ``asc``, ``desc``, ``nullsfirst``, ``nullslast``.
.. versionadded:: 1.3.5
dialect_options
dict of dialect-specific index options. May not be present
for all dialects.
.. versionadded:: 1.0.0
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
return self.dialect.get_indexes(
self.bind, table_name, schema, info_cache=self.info_cache, **kw
)
def get_unique_constraints(self, table_name, schema=None, **kw):
"""Return information about unique constraints in `table_name`.
Given a string `table_name` and an optional string `schema`, return
unique constraint information as a list of dicts with these keys:
name
the unique constraint's name
column_names
list of column names in order
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
return self.dialect.get_unique_constraints(
self.bind, table_name, schema, info_cache=self.info_cache, **kw
)
def get_table_comment(self, table_name, schema=None, **kw):
"""Return information about the table comment for ``table_name``.
Given a string ``table_name`` and an optional string ``schema``,
return table comment information as a dictionary with these keys:
text
text of the comment.
Raises ``NotImplementedError`` for a dialect that does not support
comments.
.. versionadded:: 1.2
"""
return self.dialect.get_table_comment(
self.bind, table_name, schema, info_cache=self.info_cache, **kw
)
def get_check_constraints(self, table_name, schema=None, **kw):
"""Return information about check constraints in `table_name`.
Given a string `table_name` and an optional string `schema`, return
check constraint information as a list of dicts with these keys:
name
the check constraint's name
sqltext
the check constraint's SQL expression
dialect_options
may or may not be present; a dictionary with additional
dialect-specific options for this CHECK constraint
.. versionadded:: 1.3.8
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
.. versionadded:: 1.1.0
"""
return self.dialect.get_check_constraints(
self.bind, table_name, schema, info_cache=self.info_cache, **kw
)
def reflecttable(
self,
table,
include_columns,
exclude_columns=(),
resolve_fks=True,
_extend_on=None,
):
"""Given a Table object, load its internal constructs based on
introspection.
This is the underlying method used by most dialects to produce
table reflection. Direct usage is like::
from sqlalchemy import create_engine, MetaData, Table
from sqlalchemy.engine.reflection import Inspector
engine = create_engine('...')
meta = MetaData()
user_table = Table('user', meta)
insp = Inspector.from_engine(engine)
insp.reflecttable(user_table, None)
:param table: a :class:`~sqlalchemy.schema.Table` instance.
:param include_columns: a list of string column names to include
in the reflection process. If ``None``, all columns are reflected.
"""
if _extend_on is not None:
if table in _extend_on:
return
else:
_extend_on.add(table)
dialect = self.bind.dialect
schema = self.bind.schema_for_object(table)
table_name = table.name
# get table-level arguments that are specifically
# intended for reflection, e.g. oracle_resolve_synonyms.
# these are unconditionally passed to related Table
# objects
reflection_options = dict(
(k, table.dialect_kwargs.get(k))
for k in dialect.reflection_options
if k in table.dialect_kwargs
)
# reflect table options, like mysql_engine
tbl_opts = self.get_table_options(
table_name, schema, **table.dialect_kwargs
)
if tbl_opts:
# add additional kwargs to the Table if the dialect
# returned them
table._validate_dialect_kwargs(tbl_opts)
if util.py2k:
if isinstance(schema, str):
schema = schema.decode(dialect.encoding)
if isinstance(table_name, str):
table_name = table_name.decode(dialect.encoding)
found_table = False
cols_by_orig_name = {}
for col_d in self.get_columns(
table_name, schema, **table.dialect_kwargs
):
found_table = True
self._reflect_column(
table,
col_d,
include_columns,
exclude_columns,
cols_by_orig_name,
)
if not found_table:
raise exc.NoSuchTableError(table.name)
self._reflect_pk(
table_name, schema, table, cols_by_orig_name, exclude_columns
)
self._reflect_fk(
table_name,
schema,
table,
cols_by_orig_name,
exclude_columns,
resolve_fks,
_extend_on,
reflection_options,
)
self._reflect_indexes(
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
)
self._reflect_unique_constraints(
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
)
self._reflect_check_constraints(
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
)
self._reflect_table_comment(
table_name, schema, table, reflection_options
)
def _reflect_column(
self, table, col_d, include_columns, exclude_columns, cols_by_orig_name
):
orig_name = col_d["name"]
table.dispatch.column_reflect(self, table, col_d)
# fetch name again as column_reflect is allowed to
# change it
name = col_d["name"]
if (include_columns and name not in include_columns) or (
exclude_columns and name in exclude_columns
):
return
coltype = col_d["type"]
col_kw = dict(
(k, col_d[k])
for k in [
"nullable",
"autoincrement",
"quote",
"info",
"key",
"comment",
]
if k in col_d
)
if "dialect_options" in col_d:
col_kw.update(col_d["dialect_options"])
colargs = []
if col_d.get("default") is not None:
default = col_d["default"]
if isinstance(default, sql.elements.TextClause):
default = sa_schema.DefaultClause(default, _reflected=True)
elif not isinstance(default, sa_schema.FetchedValue):
default = sa_schema.DefaultClause(
sql.text(col_d["default"]), _reflected=True
)
colargs.append(default)
if "computed" in col_d:
computed = sa_schema.Computed(**col_d["computed"])
colargs.append(computed)
if "sequence" in col_d:
self._reflect_col_sequence(col_d, colargs)
cols_by_orig_name[orig_name] = col = sa_schema.Column(
name, coltype, *colargs, **col_kw
)
if col.key in table.primary_key:
col.primary_key = True
table.append_column(col)
def _reflect_col_sequence(self, col_d, colargs):
if "sequence" in col_d:
# TODO: mssql and sybase are using this.
seq = col_d["sequence"]
sequence = sa_schema.Sequence(seq["name"], 1, 1)
if "start" in seq:
sequence.start = seq["start"]
if "increment" in seq:
sequence.increment = seq["increment"]
colargs.append(sequence)
def _reflect_pk(
self, table_name, schema, table, cols_by_orig_name, exclude_columns
):
pk_cons = self.get_pk_constraint(
table_name, schema, **table.dialect_kwargs
)
if pk_cons:
pk_cols = [
cols_by_orig_name[pk]
for pk in pk_cons["constrained_columns"]
if pk in cols_by_orig_name and pk not in exclude_columns
]
# update pk constraint name
table.primary_key.name = pk_cons.get("name")
# tell the PKConstraint to re-initialize
# its column collection
table.primary_key._reload(pk_cols)
def _reflect_fk(
self,
table_name,
schema,
table,
cols_by_orig_name,
exclude_columns,
resolve_fks,
_extend_on,
reflection_options,
):
fkeys = self.get_foreign_keys(
table_name, schema, **table.dialect_kwargs
)
for fkey_d in fkeys:
conname = fkey_d["name"]
# look for columns by orig name in cols_by_orig_name,
# but support columns that are in-Python only as fallback
constrained_columns = [
cols_by_orig_name[c].key if c in cols_by_orig_name else c
for c in fkey_d["constrained_columns"]
]
if exclude_columns and set(constrained_columns).intersection(
exclude_columns
):
continue
referred_schema = fkey_d["referred_schema"]
referred_table = fkey_d["referred_table"]
referred_columns = fkey_d["referred_columns"]
refspec = []
if referred_schema is not None:
if resolve_fks:
sa_schema.Table(
referred_table,
table.metadata,
autoload=True,
schema=referred_schema,
autoload_with=self.bind,
_extend_on=_extend_on,
**reflection_options
)
for column in referred_columns:
refspec.append(
".".join([referred_schema, referred_table, column])
)
else:
if resolve_fks:
sa_schema.Table(
referred_table,
table.metadata,
autoload=True,
autoload_with=self.bind,
schema=sa_schema.BLANK_SCHEMA,
_extend_on=_extend_on,
**reflection_options
)
for column in referred_columns:
refspec.append(".".join([referred_table, column]))
if "options" in fkey_d:
options = fkey_d["options"]
else:
options = {}
table.append_constraint(
sa_schema.ForeignKeyConstraint(
constrained_columns,
refspec,
conname,
link_to_name=True,
**options
)
)
_index_sort_exprs = [
("asc", operators.asc_op),
("desc", operators.desc_op),
("nullsfirst", operators.nullsfirst_op),
("nullslast", operators.nullslast_op),
]
def _reflect_indexes(
self,
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
):
# Indexes
indexes = self.get_indexes(table_name, schema)
for index_d in indexes:
name = index_d["name"]
columns = index_d["column_names"]
column_sorting = index_d.get("column_sorting", {})
unique = index_d["unique"]
flavor = index_d.get("type", "index")
dialect_options = index_d.get("dialect_options", {})
duplicates = index_d.get("duplicates_constraint")
if include_columns and not set(columns).issubset(include_columns):
util.warn(
"Omitting %s key for (%s), key covers omitted columns."
% (flavor, ", ".join(columns))
)
continue
if duplicates:
continue
# look for columns by orig name in cols_by_orig_name,
# but support columns that are in-Python only as fallback
idx_cols = []
for c in columns:
try:
idx_col = (
cols_by_orig_name[c]
if c in cols_by_orig_name
else table.c[c]
)
except KeyError:
util.warn(
"%s key '%s' was not located in "
"columns for table '%s'" % (flavor, c, table_name)
)
continue
c_sorting = column_sorting.get(c, ())
for k, op in self._index_sort_exprs:
if k in c_sorting:
idx_col = op(idx_col)
idx_cols.append(idx_col)
sa_schema.Index(
name,
*idx_cols,
_table=table,
**dict(list(dialect_options.items()) + [("unique", unique)])
)
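# [editor's note] Illustrative sketch, not part of the SQLAlchemy source:
# when a table is reflected with a restricted column set, e.g.
#
#     Table("users", metadata, autoload=True, autoload_with=engine,
#           include_columns=["id"])
#
# an index covering (id, name) is skipped by the check above and a warning
# such as "Omitting index key for (id, name), key covers omitted columns."
# is emitted instead of creating a partial Index object.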
def _reflect_unique_constraints(
self,
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
):
# Unique Constraints
try:
constraints = self.get_unique_constraints(table_name, schema)
except NotImplementedError:
# optional dialect feature
return
for const_d in constraints:
conname = const_d["name"]
columns = const_d["column_names"]
duplicates = const_d.get("duplicates_index")
if include_columns and not set(columns).issubset(include_columns):
util.warn(
"Omitting unique constraint key for (%s), "
"key covers omitted columns." % ", ".join(columns)
)
continue
if duplicates:
continue
# look for columns by orig name in cols_by_orig_name,
# but support columns that are in-Python only as fallback
constrained_cols = []
for c in columns:
try:
constrained_col = (
cols_by_orig_name[c]
if c in cols_by_orig_name
else table.c[c]
)
except KeyError:
util.warn(
"unique constraint key '%s' was not located in "
"columns for table '%s'" % (c, table_name)
)
else:
constrained_cols.append(constrained_col)
table.append_constraint(
sa_schema.UniqueConstraint(*constrained_cols, name=conname)
)
def _reflect_check_constraints(
self,
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
):
try:
constraints = self.get_check_constraints(table_name, schema)
except NotImplementedError:
# optional dialect feature
return
for const_d in constraints:
table.append_constraint(sa_schema.CheckConstraint(**const_d))
def _reflect_table_comment(
self, table_name, schema, table, reflection_options
):
try:
comment_dict = self.get_table_comment(table_name, schema)
except NotImplementedError:
return
else:
table.comment = comment_dict.get("text", None)
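# [editor's note] The helper below is an illustrative sketch added by the
# editor and is not part of SQLAlchemy.  It assumes an in-memory SQLite
# database and shows the two usual entry points into the reflection code
# above: Table(..., autoload=True) and the Inspector itself.
def _example_reflect_table():
    from sqlalchemy import MetaData, Table, create_engine, inspect
    engine = create_engine("sqlite:///:memory:")
    engine.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)")
    metadata = MetaData()
    # Table reflection ultimately calls Inspector.reflecttable(), which in
    # turn drives the _reflect_column/_reflect_pk/_reflect_fk steps above.
    users = Table("users", metadata, autoload=True, autoload_with=engine)
    # The Inspector can also be queried directly for raw dictionaries.
    insp = inspect(engine)
    return [c.name for c in users.columns], insp.get_columns("users")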
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/engine/__init__.py
|
# engine/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQL connections, SQL execution and high-level DB-API interface.
The engine package defines the basic components used to interface
DB-API modules with higher-level statement construction,
connection-management, execution and result contexts. The primary
"entry point" class into this package is the Engine and its public
constructor ``create_engine()``.
This package includes:
base.py
Defines interface classes and some implementation classes which
comprise the basic components used to interface between a DB-API,
constructed and plain-text statements, connections, transactions,
and results.
default.py
Contains default implementations of some of the components defined
in base.py. All current database dialects use the classes in
default.py as base classes for their own database-specific
implementations.
strategies.py
The mechanics of constructing ``Engine`` objects are represented
here. Defines the ``EngineStrategy`` class which represents how
to go from arguments specified to the ``create_engine()``
function, to a fully constructed ``Engine``, including
initialization of connection pooling, dialects, and specific
subclasses of ``Engine``.
threadlocal.py
The ``TLEngine`` class is defined here, which is a subclass of
the generic ``Engine`` and tracks ``Connection`` and
``Transaction`` objects against the identity of the current
thread. This allows certain programming patterns based around
the concept of a "thread-local connection" to be possible.
The ``TLEngine`` is created by using the "threadlocal" engine
strategy in conjunction with the ``create_engine()`` function.
url.py
Defines the ``URL`` class which represents the individual
components of a string URL passed to ``create_engine()``. Also
defines a basic module-loading strategy for the dialect specifier
within a URL.
"""
from . import strategies
from . import util # noqa
from .base import Connection # noqa
from .base import Engine # noqa
from .base import NestedTransaction # noqa
from .base import RootTransaction # noqa
from .base import Transaction # noqa
from .base import TwoPhaseTransaction # noqa
from .interfaces import Compiled # noqa
from .interfaces import Connectable # noqa
from .interfaces import CreateEnginePlugin # noqa
from .interfaces import Dialect # noqa
from .interfaces import ExceptionContext # noqa
from .interfaces import ExecutionContext # noqa
from .interfaces import TypeCompiler # noqa
from .result import BaseRowProxy # noqa
from .result import BufferedColumnResultProxy # noqa
from .result import BufferedColumnRow # noqa
from .result import BufferedRowResultProxy # noqa
from .result import FullyBufferedResultProxy # noqa
from .result import ResultProxy # noqa
from .result import RowProxy # noqa
from .util import connection_memoize # noqa
from ..sql import ddl # noqa
# backwards compat
default_strategy = "plain"
def create_engine(*args, **kwargs):
"""Create a new :class:`_engine.Engine` instance.
The standard calling form is to send the URL as the
first positional argument, usually a string
that indicates database dialect and connection arguments::
engine = create_engine("postgresql://scott:tiger@localhost/test")
Additional keyword arguments may then follow it which
establish various options on the resulting :class:`_engine.Engine`
and its underlying :class:`.Dialect` and :class:`_pool.Pool`
constructs::
engine = create_engine("mysql://scott:tiger@hostname/dbname",
encoding='latin1', echo=True)
The string form of the URL is
``dialect[+driver]://user:password@host/dbname[?key=value..]``, where
``dialect`` is a database name such as ``mysql``, ``oracle``,
``postgresql``, etc., and ``driver`` the name of a DBAPI, such as
``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively,
the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`.
``**kwargs`` takes a wide variety of options which are routed
towards their appropriate components. Arguments may be specific to
the :class:`_engine.Engine`, the underlying :class:`.Dialect`,
as well as the
:class:`_pool.Pool`. Specific dialects also accept keyword arguments that
are unique to that dialect. Here, we describe the parameters
that are common to most :func:`_sa.create_engine()` usage.
Once established, the newly resulting :class:`_engine.Engine` will
request a connection from the underlying :class:`_pool.Pool` once
:meth:`_engine.Engine.connect` is called, or a method which depends on it
such as :meth:`_engine.Engine.execute` is invoked. The
:class:`_pool.Pool` in turn
will establish the first actual DBAPI connection when this request
is received. The :func:`_sa.create_engine` call itself does **not**
establish any actual DBAPI connections directly.
.. seealso::
:doc:`/core/engines`
:doc:`/dialects/index`
:ref:`connections_toplevel`
:param case_sensitive=True: if False, result column names
will match in a case-insensitive fashion, that is,
``row['SomeColumn']``.
:param connect_args: a dictionary of options which will be
passed directly to the DBAPI's ``connect()`` method as
additional keyword arguments. See the example
at :ref:`custom_dbapi_args`.
:param convert_unicode=False: if set to True, causes
all :class:`.String` datatypes to act as though the
:paramref:`.String.convert_unicode` flag has been set to ``True``,
regardless of a setting of ``False`` on an individual :class:`.String`
type. This has the effect of causing all :class:`.String` -based
columns to accommodate Python Unicode objects directly as though the
datatype were the :class:`.Unicode` type.
.. deprecated:: 1.3
The :paramref:`_sa.create_engine.convert_unicode` parameter
is deprecated and will be removed in a future release.
All modern DBAPIs now support Python Unicode directly and this
parameter is unnecessary.
:param creator: a callable which returns a DBAPI connection.
This creation function will be passed to the underlying
connection pool and will be used to create all new database
connections. Usage of this function causes connection
parameters specified in the URL argument to be bypassed.
This hook is not as flexible as the newer
:class:`_events.DialectEvents.do_connect` hook which allows complete
control over how a connection is made to the database, given the full
set of URL arguments and state beforehand.
.. seealso::
:class:`_events.DialectEvents.do_connect` - event hook that allows
full control over DBAPI connection mechanics.
:ref:`custom_dbapi_args`
:param echo=False: if True, the Engine will log all statements
as well as a ``repr()`` of their parameter lists to the default log
handler, which defaults to ``sys.stdout`` for output. If set to the
string ``"debug"``, result rows will be printed to the standard output
as well. The ``echo`` attribute of ``Engine`` can be modified at any
time to turn logging on and off; direct control of logging is also
available using the standard Python ``logging`` module.
.. seealso::
:ref:`dbengine_logging` - further detail on how to configure
logging.
:param echo_pool=False: if True, the connection pool will log
informational output such as when connections are invalidated
as well as when connections are recycled to the default log handler,
which defaults to ``sys.stdout`` for output. If set to the string
``"debug"``, the logging will include pool checkouts and checkins.
Direct control of logging is also available using the standard Python
``logging`` module.
.. seealso::
:ref:`dbengine_logging` - further detail on how to configure
logging.
:param empty_in_strategy: The SQL compilation strategy to use when
rendering an IN or NOT IN expression for :meth:`.ColumnOperators.in_`
where the right-hand side
is an empty set. This is a string value that may be one of
``static``, ``dynamic``, or ``dynamic_warn``. The ``static``
strategy is the default, and an IN comparison to an empty set
will generate a simple false expression "1 != 1". The ``dynamic``
strategy behaves like that of SQLAlchemy 1.1 and earlier, emitting
a false expression of the form "expr != expr", which has the effect
of evaluating to NULL in the case of a null expression.
``dynamic_warn`` is the same as ``dynamic``, however it also emits a
warning when an empty set is encountered; this is because the "dynamic"
comparison typically performs poorly on most databases.
.. versionadded:: 1.2 Added the ``empty_in_strategy`` setting and
additionally defaulted the behavior for empty-set IN comparisons
to a static boolean expression.
:param encoding: Defaults to ``utf-8``. This is the string
encoding used by SQLAlchemy for string encode/decode
operations which occur within SQLAlchemy, **outside of
the DBAPIs own encoding facilities.**
.. note:: The ``encoding`` parameter deals only with in-Python
encoding issues that were prevalent with many DBAPIs under Python
2. Under Python 3 it is mostly unused. For DBAPIs that require
client encoding configurations, such as those of MySQL and Oracle,
please consult specific :ref:`dialect documentation
<dialect_toplevel>` for details.
All modern DBAPIs that work in Python 3 necessarily feature direct
support for Python unicode strings. Under Python 2, this was not
always the case. For those scenarios where the DBAPI is detected as
not supporting a Python ``unicode`` object under Python 2, this
encoding is used to determine the source/destination encoding. It is
**not used** for those cases where the DBAPI handles unicode directly.
To properly configure a system to accommodate Python ``unicode``
objects, the DBAPI should be configured to handle unicode to the
greatest degree as is appropriate - see the notes on unicode pertaining
to the specific target database in use at :ref:`dialect_toplevel`.
Areas where string encoding may need to be accommodated
outside of the DBAPI, nearly always under **Python 2 only**,
include zero or more of:
* the values passed to bound parameters, corresponding to
the :class:`.Unicode` type or the :class:`.String` type
when ``convert_unicode`` is ``True``;
* the values returned in result set columns corresponding
to the :class:`.Unicode` type or the :class:`.String`
type when ``convert_unicode`` is ``True``;
* the string SQL statement passed to the DBAPI's
``cursor.execute()`` method;
* the string names of the keys in the bound parameter
dictionary passed to the DBAPI's ``cursor.execute()``
as well as ``cursor.setinputsizes()`` methods;
* the string column names retrieved from the DBAPI's
``cursor.description`` attribute.
When using Python 3, the DBAPI is required to support all of the above
values as Python ``unicode`` objects, which in Python 3 are just known
as ``str``. In Python 2, the DBAPI does not specify unicode behavior
at all, so SQLAlchemy must make decisions for each of the above values
on a per-DBAPI basis - implementations are completely inconsistent in
their behavior.
:param execution_options: Dictionary execution options which will
be applied to all connections. See
:meth:`~sqlalchemy.engine.Connection.execution_options`
:param hide_parameters: Boolean, when set to True, SQL statement parameters
will not be displayed in INFO logging nor will they be formatted into
the string representation of :class:`.StatementError` objects.
.. versionadded:: 1.3.8
:param implicit_returning=True: When ``True``, a RETURNING-
compatible construct, if available, will be used to
fetch newly generated primary key values when a single row
INSERT statement is emitted with no existing returning()
clause. This applies to those backends which support RETURNING
or a compatible construct, including PostgreSQL, Firebird, Oracle,
Microsoft SQL Server. Set this to ``False`` to disable
the automatic usage of RETURNING.
:param isolation_level: this string parameter is interpreted by various
dialects in order to affect the transaction isolation level of the
database connection. The parameter essentially accepts some subset of
these string arguments: ``"SERIALIZABLE"``, ``"REPEATABLE_READ"``,
``"READ_COMMITTED"``, ``"READ_UNCOMMITTED"`` and ``"AUTOCOMMIT"``.
Behavior here varies per backend, and
individual dialects should be consulted directly.
Note that the isolation level can also be set on a
per-:class:`_engine.Connection` basis as well, using the
:paramref:`.Connection.execution_options.isolation_level`
feature.
.. seealso::
:attr:`_engine.Connection.default_isolation_level`
- view default level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`_engine.Connection` isolation level
:ref:`SQLite Transaction Isolation <sqlite_isolation_level>`
:ref:`PostgreSQL Transaction Isolation <postgresql_isolation_level>`
:ref:`MySQL Transaction Isolation <mysql_isolation_level>`
:ref:`session_transaction_isolation` - for the ORM
:param json_deserializer: for dialects that support the
:class:`_types.JSON`
datatype, this is a Python callable that will convert a JSON string
to a Python object. By default, the Python ``json.loads`` function is
used.
.. versionchanged:: 1.3.7 The SQLite dialect renamed this from
``_json_deserializer``.
:param json_serializer: for dialects that support the :class:`_types.JSON`
datatype, this is a Python callable that will render a given object
as JSON. By default, the Python ``json.dumps`` function is used.
.. versionchanged:: 1.3.7 The SQLite dialect renamed this from
``_json_serializer``.
:param label_length=None: optional integer value which limits
the size of dynamically generated column labels to that many
characters. If less than 6, labels are generated as
"_(counter)". If ``None``, the value of
``dialect.max_identifier_length``, which may be affected via the
:paramref:`_sa.create_engine.max_identifier_length` parameter,
is used instead. The value of
:paramref:`_sa.create_engine.label_length`
may not be larger than that of
:paramref:`_sa.create_engine.max_identifier_length`.
.. seealso::
:paramref:`_sa.create_engine.max_identifier_length`
:param listeners: A list of one or more
:class:`~sqlalchemy.interfaces.PoolListener` objects which will
receive connection pool events.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.engine" logger. Defaults to a hexstring of the
object's id.
:param max_identifier_length: integer; override the max_identifier_length
determined by the dialect. If ``None`` or zero, it has no effect. This
is the database's configured maximum number of characters that may be
used in a SQL identifier such as a table name, column name, or label
name. All dialects determine this value automatically, however in the
case of a new database version for which this value has changed but
SQLAlchemy's dialect has not been adjusted, the value may be passed
here.
.. versionadded:: 1.3.9
.. seealso::
:paramref:`_sa.create_engine.label_length`
:param max_overflow=10: the number of connections to allow in
connection pool "overflow", that is connections that can be
opened above and beyond the pool_size setting, which defaults
to five. This is only used with :class:`~sqlalchemy.pool.QueuePool`.
:param module=None: reference to a Python module object (the module
itself, not its string name). Specifies an alternate DBAPI module to
be used by the engine's dialect. Each sub-dialect references a
specific DBAPI which will be imported before first connect. This
parameter causes the import to be bypassed, and the given module to
be used instead. Can be used for testing of DBAPIs as well as to
inject "mock" DBAPI implementations into the :class:`_engine.Engine`.
:param paramstyle=None: The `paramstyle <http://legacy.python.org/dev/peps/pep-0249/#paramstyle>`_
to use when rendering bound parameters. This style defaults to the
one recommended by the DBAPI itself, which is retrieved from the
``.paramstyle`` attribute of the DBAPI. However, most DBAPIs accept
more than one paramstyle, and in particular it may be desirable
to change a "named" paramstyle into a "positional" one, or vice versa.
When this attribute is passed, it should be one of the values
``"qmark"``, ``"numeric"``, ``"named"``, ``"format"`` or
``"pyformat"``, and should correspond to a parameter style known
to be supported by the DBAPI in use.
:param pool=None: an already-constructed instance of
:class:`~sqlalchemy.pool.Pool`, such as a
:class:`~sqlalchemy.pool.QueuePool` instance. If non-None, this
pool will be used directly as the underlying connection pool
for the engine, bypassing whatever connection parameters are
present in the URL argument. For information on constructing
connection pools manually, see :ref:`pooling_toplevel`.
:param poolclass=None: a :class:`~sqlalchemy.pool.Pool`
subclass, which will be used to create a connection pool
instance using the connection parameters given in the URL. Note
this differs from ``pool`` in that you don't actually
instantiate the pool in this case, you just indicate what type
of pool to be used.
:param pool_logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param pool_pre_ping: boolean, if True will enable the connection pool
"pre-ping" feature that tests connections for liveness upon
each checkout.
.. versionadded:: 1.2
.. seealso::
:ref:`pool_disconnects_pessimistic`
:param pool_size=5: the number of connections to keep open
inside the connection pool. This used with
:class:`~sqlalchemy.pool.QueuePool` as
well as :class:`~sqlalchemy.pool.SingletonThreadPool`. With
:class:`~sqlalchemy.pool.QueuePool`, a ``pool_size`` setting
of 0 indicates no limit; to disable pooling, set ``poolclass`` to
:class:`~sqlalchemy.pool.NullPool` instead.
:param pool_recycle=-1: this setting causes the pool to recycle
connections after the given number of seconds has passed. It
defaults to -1, or no timeout. For example, setting to 3600
means connections will be recycled after one hour. Note that
MySQL in particular will disconnect automatically if no
activity is detected on a connection for eight hours (although
this is configurable with the MySQLDB connection itself and the
server configuration as well).
.. seealso::
:ref:`pool_setting_recycle`
:param pool_reset_on_return='rollback': set the
:paramref:`_pool.Pool.reset_on_return` parameter of the underlying
:class:`_pool.Pool` object, which can be set to the values
``"rollback"``, ``"commit"``, or ``None``.
.. seealso::
:paramref:`_pool.Pool.reset_on_return`
:param pool_timeout=30: number of seconds to wait before giving
up on getting a connection from the pool. This is only used
with :class:`~sqlalchemy.pool.QueuePool`.
:param pool_use_lifo=False: use LIFO (last-in-first-out) when retrieving
connections from :class:`.QueuePool` instead of FIFO
(first-in-first-out). Using LIFO, a server-side timeout scheme can
reduce the number of connections used during non-peak periods of
use. When planning for server-side timeouts, ensure that a recycle or
pre-ping strategy is in use to gracefully handle stale connections.
.. versionadded:: 1.3
.. seealso::
:ref:`pool_use_lifo`
:ref:`pool_disconnects`
:param plugins: string list of plugin names to load. See
:class:`.CreateEnginePlugin` for background.
.. versionadded:: 1.2.3
:param strategy='plain': selects alternate engine implementations.
Currently available are:
* the ``threadlocal`` strategy, which is described in
:ref:`threadlocal_strategy`;
* the ``mock`` strategy, which dispatches all statement
execution to a function passed as the argument ``executor``.
See `example in the FAQ
<http://docs.sqlalchemy.org/en/latest/faq/metadata_schema.html#how-can-i-get-the-create-table-drop-table-output-as-a-string>`_.
:param executor=None: a function taking arguments
``(sql, *multiparams, **params)``, to which the ``mock`` strategy will
dispatch all statement execution. Used only by ``strategy='mock'``.
""" # noqa
strategy = kwargs.pop("strategy", default_strategy)
strategy = strategies.strategies[strategy]
return strategy.create(*args, **kwargs)
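# [editor's note] Illustrative sketch added by the editor, not part of
# SQLAlchemy.  It exercises a handful of the keyword arguments documented
# above; the SQLite URL is only an assumption made so the example is runnable.
def _example_create_engine():
    engine = create_engine(
        "sqlite:///:memory:",
        echo=True,           # log statements and their parameters
        pool_pre_ping=True,  # test connections for liveness on checkout
        pool_recycle=3600,   # recycle pooled connections after one hour
    )
    with engine.connect() as conn:
        return conn.execute("SELECT 1").scalar()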
def engine_from_config(configuration, prefix="sqlalchemy.", **kwargs):
"""Create a new Engine instance using a configuration dictionary.
The dictionary is typically produced from a config file.
The keys of interest to ``engine_from_config()`` should be prefixed, e.g.
``sqlalchemy.url``, ``sqlalchemy.echo``, etc. The 'prefix' argument
indicates the prefix to be searched for. Each matching key (after the
prefix is stripped) is treated as though it were the corresponding keyword
argument to a :func:`_sa.create_engine` call.
The only required key is (assuming the default prefix) ``sqlalchemy.url``,
which provides the :ref:`database URL <database_urls>`.
A select set of keyword arguments will be "coerced" to their
expected type based on string values. The set of arguments
is extensible per-dialect using the ``engine_config_types`` accessor.
:param configuration: A dictionary (typically produced from a config file,
but this is not a requirement). Items whose keys start with the value
of 'prefix' will have that prefix stripped, and will then be passed to
:ref:`create_engine`.
:param prefix: Prefix to match and then strip from keys
in 'configuration'.
:param kwargs: Each keyword argument to ``engine_from_config()`` itself
overrides the corresponding item taken from the 'configuration'
dictionary. Keyword arguments should *not* be prefixed.
"""
options = dict(
(key[len(prefix) :], configuration[key])
for key in configuration
if key.startswith(prefix)
)
options["_coerce_config"] = True
options.update(kwargs)
url = options.pop("url")
return create_engine(url, **options)
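# [editor's note] Illustrative sketch added by the editor, not part of
# SQLAlchemy: keys as they might appear in an .ini-style configuration,
# using the default "sqlalchemy." prefix.  String values such as "true"
# and "3600" are coerced to their expected types before create_engine()
# is called.
def _example_engine_from_config():
    configuration = {
        "sqlalchemy.url": "sqlite:///:memory:",
        "sqlalchemy.echo": "true",
        "sqlalchemy.pool_recycle": "3600",
    }
    return engine_from_config(configuration, prefix="sqlalchemy.")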
__all__ = ("create_engine", "engine_from_config")
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/engine/url.py
|
# engine/url.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides the :class:`~sqlalchemy.engine.url.URL` class which encapsulates
information about a database connection specification.
The URL object is created automatically when
:func:`~sqlalchemy.engine.create_engine` is called with a string
argument; alternatively, the URL is a public-facing construct which can
be used directly and is also accepted directly by ``create_engine()``.
"""
import re
from .interfaces import Dialect
from .. import exc
from .. import util
from ..dialects import plugins
from ..dialects import registry
class URL(object):
"""
Represent the components of a URL used to connect to a database.
This object is suitable to be passed directly to a
:func:`~sqlalchemy.create_engine` call. The fields of the URL are parsed
from a string by the :func:`.make_url` function. The string
format of the URL is an RFC-1738-style string.
All initialization parameters are available as public attributes.
:param drivername: the name of the database backend.
This name will correspond to a module in sqlalchemy/databases
or a third party plug-in.
:param username: The user name.
:param password: database password.
:param host: The name of the host.
:param port: The port number.
:param database: The database name.
:param query: A dictionary of options to be passed to the
dialect and/or the DBAPI upon connect.
"""
def __init__(
self,
drivername,
username=None,
password=None,
host=None,
port=None,
database=None,
query=None,
):
self.drivername = drivername
self.username = username
self.password_original = password
self.host = host
if port is not None:
self.port = int(port)
else:
self.port = None
self.database = database
self.query = query or {}
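# [editor's note] Illustrative sketch, not part of the SQLAlchemy source:
# a URL may be constructed programmatically and passed to create_engine()
# in place of a string, e.g.
#
#     url = URL("postgresql+psycopg2", username="scott", password="tiger",
#               host="localhost", port=5432, database="test")
#     str(url)   # 'postgresql+psycopg2://scott:tiger@localhost:5432/test'
#     repr(url)  # the same string with the password masked as '***'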
def __to_string__(self, hide_password=True):
s = self.drivername + "://"
if self.username is not None:
s += _rfc_1738_quote(self.username)
if self.password is not None:
s += ":" + (
"***" if hide_password else _rfc_1738_quote(self.password)
)
s += "@"
if self.host is not None:
if ":" in self.host:
s += "[%s]" % self.host
else:
s += self.host
if self.port is not None:
s += ":" + str(self.port)
if self.database is not None:
s += "/" + self.database
if self.query:
keys = list(self.query)
keys.sort()
s += "?" + "&".join(
"%s=%s" % (util.quote_plus(k), util.quote_plus(element))
for k in keys
for element in util.to_list(self.query[k])
)
return s
def __str__(self):
return self.__to_string__(hide_password=False)
def __repr__(self):
return self.__to_string__()
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return (
isinstance(other, URL)
and self.drivername == other.drivername
and self.username == other.username
and self.password == other.password
and self.host == other.host
and self.database == other.database
and self.query == other.query
and self.port == other.port
)
def __ne__(self, other):
return not self == other
@property
def password(self):
if self.password_original is None:
return None
else:
return util.text_type(self.password_original)
@password.setter
def password(self, password):
self.password_original = password
def get_backend_name(self):
if "+" not in self.drivername:
return self.drivername
else:
return self.drivername.split("+")[0]
def get_driver_name(self):
if "+" not in self.drivername:
return self.get_dialect().driver
else:
return self.drivername.split("+")[1]
def _instantiate_plugins(self, kwargs):
plugin_names = util.to_list(self.query.get("plugin", ()))
plugin_names += kwargs.get("plugins", [])
return [
plugins.load(plugin_name)(self, kwargs)
for plugin_name in plugin_names
]
def _get_entrypoint(self):
"""Return the "entry point" dialect class.
This is normally the dialect itself except in the case when the
returned class implements the get_dialect_cls() method.
"""
if "+" not in self.drivername:
name = self.drivername
else:
name = self.drivername.replace("+", ".")
cls = registry.load(name)
# check for legacy dialects that
# would return a module with 'dialect' as the
# actual class
if (
hasattr(cls, "dialect")
and isinstance(cls.dialect, type)
and issubclass(cls.dialect, Dialect)
):
return cls.dialect
else:
return cls
def get_dialect(self):
"""Return the SQLAlchemy database dialect class corresponding
to this URL's driver name.
"""
entrypoint = self._get_entrypoint()
dialect_cls = entrypoint.get_dialect_cls(self)
return dialect_cls
def translate_connect_args(self, names=[], **kw):
r"""Translate url attributes into a dictionary of connection arguments.
Returns attributes of this url (`host`, `database`, `username`,
`password`, `port`) as a plain dictionary. The attribute names are
used as the keys by default. Unset or false attributes are omitted
from the final dictionary.
:param \**kw: Optional, alternate key names for url attributes.
:param names: Deprecated. Same purpose as the keyword-based alternate
names, but correlates the name to the original positionally.
"""
translated = {}
attribute_names = ["host", "database", "username", "password", "port"]
for sname in attribute_names:
if names:
name = names.pop(0)
elif sname in kw:
name = kw[sname]
else:
name = sname
if name is not None and getattr(self, sname, False):
translated[name] = getattr(self, sname)
return translated
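# [editor's note] Illustrative sketch added by the editor, not part of
# SQLAlchemy: translate_connect_args() is typically used by dialects to turn
# a URL into DBAPI connect() keywords, optionally renaming attributes.
def _example_translate_connect_args():
    url = make_url("postgresql://scott:tiger@localhost:5432/test")
    # rename 'username' -> 'user' and 'database' -> 'dbname', as e.g.
    # psycopg2's connect() expects
    return url.translate_connect_args(username="user", database="dbname")
    # -> {'host': 'localhost', 'dbname': 'test', 'user': 'scott',
    #     'password': 'tiger', 'port': 5432}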
def make_url(name_or_url):
"""Given a string or unicode instance, produce a new URL instance.
The given string is parsed according to the RFC 1738 spec. If an
existing URL object is passed, just returns the object.
"""
if isinstance(name_or_url, util.string_types):
return _parse_rfc1738_args(name_or_url)
else:
return name_or_url
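# [editor's note] Illustrative sketch added by the editor, not part of
# SQLAlchemy: make_url() splits an RFC 1738 style string into components.
def _example_make_url():
    url = make_url(
        "mysql+pymysql://scott:tiger@db.example.com:3306/app?charset=utf8mb4"
    )
    # url.drivername -> 'mysql+pymysql'    url.host  -> 'db.example.com'
    # url.database   -> 'app'              url.query -> {'charset': 'utf8mb4'}
    return url.get_backend_name(), url.get_driver_name()  # ('mysql', 'pymysql')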
def _parse_rfc1738_args(name):
pattern = re.compile(
r"""
(?P<name>[\w\+]+)://
(?:
(?P<username>[^:/]*)
(?::(?P<password>.*))?
@)?
(?:
(?:
\[(?P<ipv6host>[^/]+)\] |
(?P<ipv4host>[^/:]+)
)?
(?::(?P<port>[^/]*))?
)?
(?:/(?P<database>.*))?
""",
re.X,
)
m = pattern.match(name)
if m is not None:
components = m.groupdict()
if components["database"] is not None:
tokens = components["database"].split("?", 2)
components["database"] = tokens[0]
if len(tokens) > 1:
query = {}
for key, value in util.parse_qsl(tokens[1]):
if util.py2k:
key = key.encode("ascii")
if key in query:
query[key] = util.to_list(query[key])
query[key].append(value)
else:
query[key] = value
else:
query = None
else:
query = None
components["query"] = query
if components["username"] is not None:
components["username"] = _rfc_1738_unquote(components["username"])
if components["password"] is not None:
components["password"] = _rfc_1738_unquote(components["password"])
ipv4host = components.pop("ipv4host")
ipv6host = components.pop("ipv6host")
components["host"] = ipv4host or ipv6host
name = components.pop("name")
return URL(name, **components)
else:
raise exc.ArgumentError(
"Could not parse rfc1738 URL from string '%s'" % name
)
def _rfc_1738_quote(text):
return re.sub(r"[:@/]", lambda m: "%%%X" % ord(m.group(0)), text)
def _rfc_1738_unquote(text):
return util.unquote(text)
def _parse_keyvalue_args(name):
m = re.match(r"(\w+)://(.*)", name)
if m is not None:
(name, args) = m.group(1, 2)
opts = dict(util.parse_qsl(args))
return URL(name, *opts)
else:
return None
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/engine/result.py
|
# engine/result.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Define result set constructs including :class:`_engine.ResultProxy`
and :class:`.RowProxy`."""
import collections
import operator
from .. import exc
from .. import util
from ..sql import expression
from ..sql import sqltypes
from ..sql import util as sql_util
# This reconstructor is necessary so that pickles with the C extension or
# without use the same Binary format.
try:
# We need a different reconstructor on the C extension so that we can
# add extra checks that fields have correctly been initialized by
# __setstate__.
from sqlalchemy.cresultproxy import safe_rowproxy_reconstructor
# The extra function embedding is needed so that the
# reconstructor function has the same signature whether or not
# the extension is present.
def rowproxy_reconstructor(cls, state):
return safe_rowproxy_reconstructor(cls, state)
except ImportError:
def rowproxy_reconstructor(cls, state):
obj = cls.__new__(cls)
obj.__setstate__(state)
return obj
try:
from sqlalchemy.cresultproxy import BaseRowProxy
_baserowproxy_usecext = True
except ImportError:
_baserowproxy_usecext = False
class BaseRowProxy(object):
__slots__ = ("_parent", "_row", "_processors", "_keymap")
def __init__(self, parent, row, processors, keymap):
"""RowProxy objects are constructed by ResultProxy objects."""
self._parent = parent
self._row = row
self._processors = processors
self._keymap = keymap
def __reduce__(self):
return (
rowproxy_reconstructor,
(self.__class__, self.__getstate__()),
)
def values(self):
"""Return the values represented by this RowProxy as a list."""
return list(self)
def __iter__(self):
for processor, value in zip(self._processors, self._row):
if processor is None:
yield value
else:
yield processor(value)
def __len__(self):
return len(self._row)
def __getitem__(self, key):
try:
processor, obj, index = self._keymap[key]
except KeyError as err:
processor, obj, index = self._parent._key_fallback(key, err)
except TypeError:
if isinstance(key, slice):
l = []
for processor, value in zip(
self._processors[key], self._row[key]
):
if processor is None:
l.append(value)
else:
l.append(processor(value))
return tuple(l)
else:
raise
if index is None:
raise exc.InvalidRequestError(
"Ambiguous column name '%s' in "
"result set column descriptions" % obj
)
if processor is not None:
return processor(self._row[index])
else:
return self._row[index]
def __getattr__(self, name):
try:
return self[name]
except KeyError as e:
util.raise_(AttributeError(e.args[0]), replace_context=e)
class RowProxy(BaseRowProxy):
"""Represent a single result row.
The :class:`.RowProxy` object is retrieved from a database result, from the
:class:`_engine.ResultProxy` object using methods like
:meth:`_engine.ResultProxy.fetchall`.
The :class:`.RowProxy` object seeks to act mostly like a Python named
tuple, but also provides some Python dictionary behaviors at the same time.
.. seealso::
:ref:`coretutorial_selecting` - includes examples of selecting
rows from SELECT statements.
"""
__slots__ = ()
def __contains__(self, key):
return self._parent._has_key(key)
def __getstate__(self):
return {"_parent": self._parent, "_row": tuple(self)}
def __setstate__(self, state):
self._parent = parent = state["_parent"]
self._row = state["_row"]
self._processors = parent._processors
self._keymap = parent._keymap
__hash__ = None
def _op(self, other, op):
return (
op(tuple(self), tuple(other))
if isinstance(other, RowProxy)
else op(tuple(self), other)
)
def __lt__(self, other):
return self._op(other, operator.lt)
def __le__(self, other):
return self._op(other, operator.le)
def __ge__(self, other):
return self._op(other, operator.ge)
def __gt__(self, other):
return self._op(other, operator.gt)
def __eq__(self, other):
return self._op(other, operator.eq)
def __ne__(self, other):
return self._op(other, operator.ne)
def __repr__(self):
return repr(sql_util._repr_row(self))
def has_key(self, key):
"""Return True if this :class:`.RowProxy` contains the given key.
Through the SQLAlchemy 1.x series, the ``__contains__()`` method
of :class:`.RowProxy` also links to :meth:`.RowProxy.has_key`, in that
an expression such as ::
"some_col" in row
Will return True if the row contains a column named ``"some_col"``,
in the way that a Python mapping works.
However, it is planned that the 2.0 series of SQLAlchemy will reverse
this behavior so that ``__contains__()`` will refer to a value being
present in the row, in the way that a Python tuple works.
"""
return self._parent._has_key(key)
def items(self):
"""Return a list of tuples, each tuple containing a key/value pair.
This method is analogous to the Python dictionary ``.items()`` method,
except that it returns a list, not an iterator.
"""
return [(key, self[key]) for key in self.keys()]
def keys(self):
"""Return the list of keys as strings represented by this
:class:`.RowProxy`.
This method is analogous to the Python dictionary ``.keys()`` method,
except that it returns a list, not an iterator.
"""
return self._parent.keys
def iterkeys(self):
"""Return a an iterator against the :meth:`.RowProxy.keys` method.
This method is analogous to the Python-2-only dictionary
``.iterkeys()`` method.
"""
return iter(self._parent.keys)
def itervalues(self):
"""Return a an iterator against the :meth:`.RowProxy.values` method.
This method is analogous to the Python-2-only dictionary
``.itervalues()`` method.
"""
return iter(self)
def values(self):
"""Return the values represented by this :class:`.RowProxy` as a list.
This method is analogous to the Python dictionary ``.values()`` method,
except that it returns a list, not an iterator.
"""
return super(RowProxy, self).values()
try:
# Register RowProxy with Sequence,
# so sequence protocol is implemented
util.collections_abc.Sequence.register(RowProxy)
except ImportError:
pass
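# [editor's note] The helper below is an illustrative sketch added by the
# editor and is not part of SQLAlchemy.  It assumes an in-memory SQLite
# database and shows that a RowProxy supports tuple-style, string-key,
# attribute and Column-object access at the same time.
def _example_rowproxy_access():
    from sqlalchemy import (
        Column, Integer, MetaData, String, Table, create_engine, select,
    )
    engine = create_engine("sqlite:///:memory:")
    metadata = MetaData()
    users = Table(
        "users",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("name", String),
    )
    metadata.create_all(engine)
    engine.execute(users.insert(), {"name": "spongebob"})
    row = engine.execute(select([users])).fetchone()
    # positional, string-key, attribute and Column-object access all work
    return row[0], row["name"], row.name, row[users.c.name]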
class ResultMetaData(object):
"""Handle cursor.description, applying additional info from an execution
context."""
__slots__ = (
"_keymap",
"case_sensitive",
"matched_on_name",
"_processors",
"keys",
"_orig_processors",
)
def __init__(self, parent, cursor_description):
context = parent.context
dialect = context.dialect
self.case_sensitive = dialect.case_sensitive
self.matched_on_name = False
self._orig_processors = None
if context.result_column_struct:
(
result_columns,
cols_are_ordered,
textual_ordered,
) = context.result_column_struct
num_ctx_cols = len(result_columns)
else:
result_columns = (
cols_are_ordered
) = num_ctx_cols = textual_ordered = False
# merge cursor.description with the column info
# present in the compiled structure, if any
raw = self._merge_cursor_description(
context,
cursor_description,
result_columns,
num_ctx_cols,
cols_are_ordered,
textual_ordered,
)
self._keymap = {}
if not _baserowproxy_usecext:
# keymap indexes by integer index: this is only used
# in the pure Python BaseRowProxy.__getitem__
# implementation to avoid an expensive
# isinstance(key, util.int_types) in the most common
# case path
len_raw = len(raw)
self._keymap.update(
[(elem[0], (elem[3], elem[4], elem[0])) for elem in raw]
+ [
(elem[0] - len_raw, (elem[3], elem[4], elem[0]))
for elem in raw
]
)
# processors in key order for certain per-row
# views like __iter__ and slices
self._processors = [elem[3] for elem in raw]
# keymap by primary string...
by_key = dict([(elem[2], (elem[3], elem[4], elem[0])) for elem in raw])
# for compiled SQL constructs, copy additional lookup keys into
# the key lookup map, such as Column objects, labels,
# column keys and other names
if num_ctx_cols:
# if by-primary-string dictionary smaller (or bigger?!) than
# number of columns, assume we have dupes, rewrite
# dupe records with "None" for index which results in
# ambiguous column exception when accessed.
if len(by_key) != num_ctx_cols:
seen = set()
for rec in raw:
key = rec[1]
if key in seen:
# this is an "ambiguous" element, replacing
# the full record in the map
key = key.lower() if not self.case_sensitive else key
by_key[key] = (None, key, None)
seen.add(key)
# copy secondary elements from compiled columns
# into self._keymap, write in the potentially "ambiguous"
# element
self._keymap.update(
[
(obj_elem, by_key[elem[2]])
for elem in raw
if elem[4]
for obj_elem in elem[4]
]
)
# if we did a pure positional match, then reset the
# original "expression element" back to the "unambiguous"
# entry. This is a new behavior in 1.1 which impacts
# TextAsFrom but also straight compiled SQL constructs.
if not self.matched_on_name:
self._keymap.update(
[
(elem[4][0], (elem[3], elem[4], elem[0]))
for elem in raw
if elem[4]
]
)
else:
# no dupes - copy secondary elements from compiled
# columns into self._keymap
self._keymap.update(
[
(obj_elem, (elem[3], elem[4], elem[0]))
for elem in raw
if elem[4]
for obj_elem in elem[4]
]
)
# update keymap with primary string names taking
# precedence
self._keymap.update(by_key)
# update keymap with "translated" names (sqlite-only thing)
if not num_ctx_cols and context._translate_colname:
self._keymap.update(
[(elem[5], self._keymap[elem[2]]) for elem in raw if elem[5]]
)
def _merge_cursor_description(
self,
context,
cursor_description,
result_columns,
num_ctx_cols,
cols_are_ordered,
textual_ordered,
):
"""Merge a cursor.description with compiled result column information.
There are at least four separate strategies used here, selected
depending on the type of SQL construct used to start with.
The most common case is that of the compiled SQL expression construct,
which generated the column names present in the raw SQL string and
which has the identical number of columns as were reported by
cursor.description. In this case, we assume a 1-1 positional mapping
between the entries in cursor.description and the compiled object.
This is also the most performant case as we disregard extracting /
decoding the column names present in cursor.description since we
already have the desired name we generated in the compiled SQL
construct.
The next common case is that of the completely raw string SQL,
such as passed to connection.execute(). In this case we have no
compiled construct to work with, so we extract and decode the
names from cursor.description and index those as the primary
result row target keys.
The remaining fairly common case is that of the textual SQL
that includes at least partial column information; this is when
we use a :class:`.TextAsFrom` construct. This construct may have
unordered or ordered column information. In the ordered case, we
merge the cursor.description and the compiled construct's information
positionally, and warn if there are additional description names
present, however we still decode the names in cursor.description
as we don't have a guarantee that the names in the columns match
on these. In the unordered case, we match names in cursor.description
to that of the compiled construct based on name matching.
In both of these cases, the cursor.description names and the column
expression objects and names are indexed as result row target keys.
The final case is much less common, where we have a compiled
non-textual SQL expression construct, but the number of columns
in cursor.description doesn't match what's in the compiled
construct. We make the guess here that there might be textual
column expressions in the compiled construct that themselves include
a comma in them causing them to split. We do the same name-matching
as with textual non-ordered columns.
The name-matched system of merging is the same as that used by
SQLAlchemy for all cases up through the 0.9 series. Positional
matching for compiled SQL expressions was introduced in 1.0 as a
major performance feature, and positional matching for textual
:class:`.TextAsFrom` objects in 1.1. As name matching is no longer
a common case, it was acceptable to factor it into smaller generator-
oriented methods that are easier to understand, but incur slightly
more performance overhead.
"""
case_sensitive = context.dialect.case_sensitive
if (
num_ctx_cols
and cols_are_ordered
and not textual_ordered
and num_ctx_cols == len(cursor_description)
):
self.keys = [elem[0] for elem in result_columns]
# pure positional 1-1 case; doesn't need to read
# the names from cursor.description
return [
(
idx,
key,
name.lower() if not case_sensitive else name,
context.get_result_processor(
type_, key, cursor_description[idx][1]
),
obj,
None,
)
for idx, (key, name, obj, type_) in enumerate(result_columns)
]
else:
# name-based or text-positional cases, where we need
# to read cursor.description names
if textual_ordered:
# textual positional case
raw_iterator = self._merge_textual_cols_by_position(
context, cursor_description, result_columns
)
elif num_ctx_cols:
# compiled SQL with a mismatch of description cols
# vs. compiled cols, or textual w/ unordered columns
raw_iterator = self._merge_cols_by_name(
context, cursor_description, result_columns
)
else:
# no compiled SQL, just a raw string
raw_iterator = self._merge_cols_by_none(
context, cursor_description
)
return [
(
idx,
colname,
colname,
context.get_result_processor(
mapped_type, colname, coltype
),
obj,
untranslated,
)
for (
idx,
colname,
mapped_type,
coltype,
obj,
untranslated,
) in raw_iterator
]
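# [editor's note] Illustrative sketch, not part of the SQLAlchemy source:
# for a statement like select([users.c.id, users.c.name]) against SQLite,
# cursor.description is roughly
#     [('id', None, None, None, None, None, None),
#      ('name', None, None, None, None, None, None)]
# while the compiled result_columns carry (key, name, objects, type) tuples,
# e.g. roughly ('id', 'id', (Column('id', ...), 'id'), Integer()).  The pure
# positional branch above pairs these 1-1 without re-reading the names.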
def _colnames_from_description(self, context, cursor_description):
"""Extract column names and data types from a cursor.description.
Applies unicode decoding, column translation, "normalization",
and case sensitivity rules to the names based on the dialect.
"""
dialect = context.dialect
case_sensitive = dialect.case_sensitive
translate_colname = context._translate_colname
description_decoder = (
dialect._description_decoder
if dialect.description_encoding
else None
)
normalize_name = (
dialect.normalize_name if dialect.requires_name_normalize else None
)
untranslated = None
self.keys = []
for idx, rec in enumerate(cursor_description):
colname = rec[0]
coltype = rec[1]
if description_decoder:
colname = description_decoder(colname)
if translate_colname:
colname, untranslated = translate_colname(colname)
if normalize_name:
colname = normalize_name(colname)
self.keys.append(colname)
if not case_sensitive:
colname = colname.lower()
yield idx, colname, untranslated, coltype
def _merge_textual_cols_by_position(
self, context, cursor_description, result_columns
):
num_ctx_cols = len(result_columns) if result_columns else None
if num_ctx_cols > len(cursor_description):
util.warn(
"Number of columns in textual SQL (%d) is "
"smaller than number of columns requested (%d)"
% (num_ctx_cols, len(cursor_description))
)
seen = set()
for (
idx,
colname,
untranslated,
coltype,
) in self._colnames_from_description(context, cursor_description):
if idx < num_ctx_cols:
ctx_rec = result_columns[idx]
obj = ctx_rec[2]
mapped_type = ctx_rec[3]
if obj[0] in seen:
raise exc.InvalidRequestError(
"Duplicate column expression requested "
"in textual SQL: %r" % obj[0]
)
seen.add(obj[0])
else:
mapped_type = sqltypes.NULLTYPE
obj = None
yield idx, colname, mapped_type, coltype, obj, untranslated
def _merge_cols_by_name(self, context, cursor_description, result_columns):
dialect = context.dialect
case_sensitive = dialect.case_sensitive
result_map = self._create_result_map(result_columns, case_sensitive)
self.matched_on_name = True
for (
idx,
colname,
untranslated,
coltype,
) in self._colnames_from_description(context, cursor_description):
try:
ctx_rec = result_map[colname]
except KeyError:
mapped_type = sqltypes.NULLTYPE
obj = None
else:
obj = ctx_rec[1]
mapped_type = ctx_rec[2]
yield idx, colname, mapped_type, coltype, obj, untranslated
def _merge_cols_by_none(self, context, cursor_description):
for (
idx,
colname,
untranslated,
coltype,
) in self._colnames_from_description(context, cursor_description):
yield idx, colname, sqltypes.NULLTYPE, coltype, None, untranslated
@classmethod
def _create_result_map(cls, result_columns, case_sensitive=True):
d = {}
for elem in result_columns:
key, rec = elem[0], elem[1:]
if not case_sensitive:
key = key.lower()
if key in d:
# conflicting keyname, just double up the list
# of objects. this will cause an "ambiguous name"
# error if an attempt is made by the result set to
# access.
e_name, e_obj, e_type = d[key]
d[key] = e_name, e_obj + rec[1], e_type
else:
d[key] = rec
return d
def _key_fallback(self, key, err, raiseerr=True):
map_ = self._keymap
result = None
if isinstance(key, util.string_types):
result = map_.get(key if self.case_sensitive else key.lower())
# fallback for targeting a ColumnElement to a textual expression
# this is a rare use case which only occurs when matching text()
# or column('name') constructs to ColumnElements, or after a
# pickle/unpickle roundtrip
elif isinstance(key, expression.ColumnElement):
if (
key._label
and (key._label if self.case_sensitive else key._label.lower())
in map_
):
result = map_[
key._label if self.case_sensitive else key._label.lower()
]
elif (
hasattr(key, "name")
and (key.name if self.case_sensitive else key.name.lower())
in map_
):
# match is only on name.
result = map_[
key.name if self.case_sensitive else key.name.lower()
]
# search extra hard to make sure this
# isn't a column/label name overlap.
# this check isn't currently available if the row
# was unpickled.
if result is not None and result[1] is not None:
for obj in result[1]:
if key._compare_name_for_result(obj):
break
else:
result = None
if result is None:
if raiseerr:
util.raise_(
exc.NoSuchColumnError(
"Could not locate column in row for column '%s'"
% expression._string_or_unprintable(key)
),
replace_context=err,
)
else:
return None
else:
map_[key] = result
return result
def _has_key(self, key):
if key in self._keymap:
return True
else:
return self._key_fallback(key, None, False) is not None
def _getter(self, key, raiseerr=True):
if key in self._keymap:
processor, obj, index = self._keymap[key]
else:
ret = self._key_fallback(key, None, raiseerr)
if ret is None:
return None
processor, obj, index = ret
if index is None:
util.raise_(
exc.InvalidRequestError(
"Ambiguous column name '%s' in "
"result set column descriptions" % obj
),
from_=None,
)
return operator.itemgetter(index)
def __getstate__(self):
return {
"_pickled_keymap": dict(
(key, index)
for key, (processor, obj, index) in self._keymap.items()
if isinstance(key, util.string_types + util.int_types)
),
"keys": self.keys,
"case_sensitive": self.case_sensitive,
"matched_on_name": self.matched_on_name,
}
def __setstate__(self, state):
# the row has been processed at pickling time so we don't need any
# processor anymore
self._processors = [None for _ in range(len(state["keys"]))]
self._keymap = keymap = {}
for key, index in state["_pickled_keymap"].items():
# not preserving "obj" here, unfortunately our
# proxy comparison fails with the unpickle
keymap[key] = (None, None, index)
self.keys = state["keys"]
self.case_sensitive = state["case_sensitive"]
self.matched_on_name = state["matched_on_name"]
class ResultProxy(object):
"""A facade around a DBAPI cursor object.
Returns database rows via the :class:`.RowProxy` class, which provides
additional API features and behaviors on top of the raw data returned
by the DBAPI.
.. seealso::
:ref:`coretutorial_selecting` - introductory material for accessing
:class:`_engine.ResultProxy` and :class:`.RowProxy` objects.
"""
_process_row = RowProxy
out_parameters = None
_autoclose_connection = False
_metadata = None
_soft_closed = False
closed = False
def __init__(self, context):
self.context = context
self.dialect = context.dialect
self.cursor = self._saved_cursor = context.cursor
self.connection = context.root_connection
self._echo = (
self.connection._echo and context.engine._should_log_debug()
)
self._init_metadata()
def _getter(self, key, raiseerr=True):
try:
getter = self._metadata._getter
except AttributeError as err:
return self._non_result(None, err)
else:
return getter(key, raiseerr)
def _has_key(self, key):
try:
has_key = self._metadata._has_key
except AttributeError as err:
return self._non_result(None, err)
else:
return has_key(key)
def _init_metadata(self):
cursor_description = self._cursor_description()
if cursor_description is not None:
if (
self.context.compiled
and "compiled_cache" in self.context.execution_options
):
if self.context.compiled._cached_metadata:
self._metadata = self.context.compiled._cached_metadata
else:
self._metadata = (
self.context.compiled._cached_metadata
) = ResultMetaData(self, cursor_description)
else:
self._metadata = ResultMetaData(self, cursor_description)
if self._echo:
self.context.engine.logger.debug(
"Col %r", tuple(x[0] for x in cursor_description)
)
def keys(self):
"""Return the list of string keys that would represented by each
:class:`.RowProxy`."""
if self._metadata:
return self._metadata.keys
else:
return []
@util.memoized_property
def rowcount(self):
"""Return the 'rowcount' for this result.
The 'rowcount' reports the number of rows *matched*
by the WHERE criterion of an UPDATE or DELETE statement.
.. note::
Notes regarding :attr:`_engine.ResultProxy.rowcount`:
* This attribute returns the number of rows *matched*,
which is not necessarily the same as the number of rows
that were actually *modified* - an UPDATE statement, for example,
may have no net change on a given row if the SET values
given are the same as those present in the row already.
Such a row would be matched but not modified.
On backends that feature both styles, such as MySQL,
rowcount is configured by default to return the match
count in all cases.
* :attr:`_engine.ResultProxy.rowcount`
is *only* useful in conjunction
with an UPDATE or DELETE statement. Contrary to what the Python
DBAPI says, it does *not* return the
number of rows available from the results of a SELECT statement
as DBAPIs cannot support this functionality when rows are
unbuffered.
* :attr:`_engine.ResultProxy.rowcount`
may not be fully implemented by
all dialects. In particular, most DBAPIs do not support an
aggregate rowcount result from an executemany call.
The :meth:`_engine.ResultProxy.supports_sane_rowcount` and
:meth:`_engine.ResultProxy.supports_sane_multi_rowcount` methods
will report from the dialect if each usage is known to be
supported.
* Statements that use RETURNING may not return a correct
rowcount.
"""
try:
return self.context.rowcount
except BaseException as e:
self.connection._handle_dbapi_exception(
e, None, None, self.cursor, self.context
)
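# [editor's note] Illustrative sketch, not part of the SQLAlchemy source:
# rowcount reports rows *matched* by an UPDATE or DELETE, e.g.
#
#     result = conn.execute(
#         users.update().where(users.c.name == "spongebob").values(name="bob")
#     )
#     result.rowcount   # number of rows matched by the WHERE clause
#
# It is not meaningful for SELECT statements on most DBAPIs.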
@property
def lastrowid(self):
"""return the 'lastrowid' accessor on the DBAPI cursor.
This is a DBAPI specific method and is only functional
for those backends which support it, for statements
where it is appropriate. Its behavior is not
consistent across backends.
Usage of this method is normally unnecessary when
using insert() expression constructs; the
:attr:`~ResultProxy.inserted_primary_key` attribute provides a
tuple of primary key values for a newly inserted row,
regardless of database backend.
"""
try:
return self._saved_cursor.lastrowid
except BaseException as e:
self.connection._handle_dbapi_exception(
e, None, None, self._saved_cursor, self.context
)
@property
def returns_rows(self):
"""True if this :class:`_engine.ResultProxy` returns rows.
I.e. if it is legal to call the methods
:meth:`_engine.ResultProxy.fetchone`,
:meth:`_engine.ResultProxy.fetchmany`, and
:meth:`_engine.ResultProxy.fetchall`.
"""
return self._metadata is not None
@property
def is_insert(self):
"""True if this :class:`_engine.ResultProxy` is the result
of executing an expression language compiled
:func:`_expression.insert` construct.
When True, this implies that the
:attr:`inserted_primary_key` attribute is accessible,
assuming the statement did not include
a user defined "returning" construct.
"""
return self.context.isinsert
def _cursor_description(self):
"""May be overridden by subclasses."""
return self._saved_cursor.description
def _soft_close(self):
"""Soft close this :class:`_engine.ResultProxy`.
This releases all DBAPI cursor resources, but leaves the
ResultProxy "open" from a semantic perspective, meaning the
fetchXXX() methods will continue to return empty results.
This method is called automatically when:
* all result rows are exhausted using the fetchXXX() methods.
* cursor.description is None.
This method is **not public**, but is documented in order to clarify
the "autoclose" process used.
.. versionadded:: 1.0.0
.. seealso::
:meth:`_engine.ResultProxy.close`
"""
if self._soft_closed:
return
self._soft_closed = True
cursor = self.cursor
self.connection._safe_close_cursor(cursor)
if self._autoclose_connection:
self.connection.close()
self.cursor = None
def close(self):
"""Close this ResultProxy.
This closes out the underlying DBAPI cursor corresponding
to the statement execution, if one is still present. Note that the
DBAPI cursor is automatically released when the
:class:`_engine.ResultProxy`
exhausts all available rows. :meth:`_engine.ResultProxy.close`
is generally
an optional method except in the case when discarding a
:class:`_engine.ResultProxy`
that still has additional rows pending for fetch.
In the case of a result that is the product of
:ref:`connectionless execution <dbengine_implicit>`,
the underlying :class:`_engine.Connection` object is also closed,
which
:term:`releases` DBAPI connection resources.
After this method is called, it is no longer valid to call upon
the fetch methods, which will raise a :class:`.ResourceClosedError`
on subsequent use.
.. versionchanged:: 1.0.0 - the :meth:`_engine.ResultProxy.close`
method
has been separated out from the process that releases the underlying
DBAPI cursor resource. The "auto close" feature of the
:class:`_engine.Connection` now performs a so-called "soft close",
which
releases the underlying DBAPI cursor, but allows the
:class:`_engine.ResultProxy`
to still behave as an open-but-exhausted
result set; the actual :meth:`_engine.ResultProxy.close`
method is never
called. It is still safe to discard a
:class:`_engine.ResultProxy`
that has been fully exhausted without calling this method.
.. seealso::
:ref:`connections_toplevel`
"""
if not self.closed:
self._soft_close()
self.closed = True
def __iter__(self):
"""Implement iteration protocol."""
while True:
row = self.fetchone()
if row is None:
return
else:
yield row
def __next__(self):
"""Implement the Python next() protocol.
This method, mirrored as both ``.next()`` and ``.__next__()``, is part
of Python's API for producing iterator-like behavior.
.. versionadded:: 1.2
"""
row = self.fetchone()
if row is None:
raise StopIteration()
else:
return row
next = __next__
@util.memoized_property
def inserted_primary_key(self):
"""Return the primary key for the row just inserted.
The return value is a list of scalar values
corresponding to the list of primary key columns
in the target table.
This only applies to single row :func:`_expression.insert`
constructs which did not explicitly specify
:meth:`_expression.Insert.returning`.
Note that primary key columns which specify a
server_default clause,
or otherwise do not qualify as "autoincrement"
columns (see the notes at :class:`_schema.Column`), and were
generated using the database-side default, will
appear in this list as ``None`` unless the backend
supports "returning" and the insert statement executed
with the "implicit returning" enabled.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() construct.
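A brief illustrative sketch (not from the original documentation; the
``users`` table is hypothetical)::

    result = connection.execute(users.insert().values(name="some name"))
    new_pk = result.inserted_primary_key   # e.g. [1]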
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isinsert:
raise exc.InvalidRequestError(
"Statement is not an insert() " "expression construct."
)
elif self.context._is_explicit_returning:
raise exc.InvalidRequestError(
"Can't call inserted_primary_key "
"when returning() "
"is used."
)
return self.context.inserted_primary_key
def last_updated_params(self):
"""Return the collection of updated parameters from this
execution.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an update() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isupdate:
raise exc.InvalidRequestError(
"Statement is not an update() " "expression construct."
)
elif self.context.executemany:
return self.context.compiled_parameters
else:
return self.context.compiled_parameters[0]
def last_inserted_params(self):
"""Return the collection of inserted parameters from this
execution.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isinsert:
raise exc.InvalidRequestError(
"Statement is not an insert() " "expression construct."
)
elif self.context.executemany:
return self.context.compiled_parameters
else:
return self.context.compiled_parameters[0]
@property
def returned_defaults(self):
"""Return the values of default columns that were fetched using
the :meth:`.ValuesBase.return_defaults` feature.
The value is an instance of :class:`.RowProxy`, or ``None``
if :meth:`.ValuesBase.return_defaults` was not used or if the
backend does not support RETURNING.
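A brief illustrative sketch (not from the original documentation; the
``items`` table and its server-generated ``created_at`` column are
hypothetical)::

    result = connection.execute(
        items.insert().values(name="widget").return_defaults()
    )
    if result.returned_defaults is not None:
        created_at = result.returned_defaults["created_at"]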
.. versionadded:: 0.9.0
.. seealso::
:meth:`.ValuesBase.return_defaults`
"""
return self.context.returned_defaults
def lastrow_has_defaults(self):
"""Return ``lastrow_has_defaults()`` from the underlying
:class:`.ExecutionContext`.
See :class:`.ExecutionContext` for details.
"""
return self.context.lastrow_has_defaults()
def postfetch_cols(self):
"""Return ``postfetch_cols()`` from the underlying
:class:`.ExecutionContext`.
See :class:`.ExecutionContext` for details.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() or update() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isinsert and not self.context.isupdate:
raise exc.InvalidRequestError(
"Statement is not an insert() or update() "
"expression construct."
)
return self.context.postfetch_cols
def prefetch_cols(self):
"""Return ``prefetch_cols()`` from the underlying
:class:`.ExecutionContext`.
See :class:`.ExecutionContext` for details.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() or update() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isinsert and not self.context.isupdate:
raise exc.InvalidRequestError(
"Statement is not an insert() or update() "
"expression construct."
)
return self.context.prefetch_cols
def supports_sane_rowcount(self):
"""Return ``supports_sane_rowcount`` from the dialect.
See :attr:`_engine.ResultProxy.rowcount` for background.
"""
return self.dialect.supports_sane_rowcount
def supports_sane_multi_rowcount(self):
"""Return ``supports_sane_multi_rowcount`` from the dialect.
See :attr:`_engine.ResultProxy.rowcount` for background.
"""
return self.dialect.supports_sane_multi_rowcount
def _fetchone_impl(self):
try:
return self.cursor.fetchone()
except AttributeError as err:
return self._non_result(None, err)
def _fetchmany_impl(self, size=None):
try:
if size is None:
return self.cursor.fetchmany()
else:
return self.cursor.fetchmany(size)
except AttributeError as err:
return self._non_result([], err)
def _fetchall_impl(self):
try:
return self.cursor.fetchall()
except AttributeError as err:
return self._non_result([], err)
def _non_result(self, default, err=None):
if self._metadata is None:
util.raise_(
exc.ResourceClosedError(
"This result object does not return rows. "
"It has been closed automatically."
),
replace_context=err,
)
elif self.closed:
util.raise_(
exc.ResourceClosedError("This result object is closed."),
replace_context=err,
)
else:
return default
def process_rows(self, rows):
process_row = self._process_row
metadata = self._metadata
keymap = metadata._keymap
processors = metadata._processors
if self._echo:
log = self.context.engine.logger.debug
l = []
for row in rows:
log("Row %r", sql_util._repr_row(row))
l.append(process_row(metadata, row, processors, keymap))
return l
else:
return [
process_row(metadata, row, processors, keymap) for row in rows
]
def fetchall(self):
"""Fetch all rows, just like DB-API ``cursor.fetchall()``.
After all rows have been exhausted, the underlying DBAPI
cursor resource is released, and the object may be safely
discarded.
Subsequent calls to :meth:`_engine.ResultProxy.fetchall` will return
an empty list. After the :meth:`_engine.ResultProxy.close` method is
called, the method will raise :class:`.ResourceClosedError`.
:return: a list of :class:`.RowProxy` objects
"""
try:
l = self.process_rows(self._fetchall_impl())
self._soft_close()
return l
except BaseException as e:
self.connection._handle_dbapi_exception(
e, None, None, self.cursor, self.context
)
def fetchmany(self, size=None):
"""Fetch many rows, just like DB-API
``cursor.fetchmany(size=cursor.arraysize)``.
After all rows have been exhausted, the underlying DBAPI
cursor resource is released, and the object may be safely
discarded.
Calls to :meth:`_engine.ResultProxy.fetchmany`
after all rows have been
exhausted will return
an empty list. After the :meth:`_engine.ResultProxy.close` method is
called, the method will raise :class:`.ResourceClosedError`.
:return: a list of :class:`.RowProxy` objects
"""
try:
l = self.process_rows(self._fetchmany_impl(size))
if len(l) == 0:
self._soft_close()
return l
except BaseException as e:
self.connection._handle_dbapi_exception(
e, None, None, self.cursor, self.context
)
def fetchone(self):
"""Fetch one row, just like DB-API ``cursor.fetchone()``.
After all rows have been exhausted, the underlying DBAPI
cursor resource is released, and the object may be safely
discarded.
Calls to :meth:`_engine.ResultProxy.fetchone` after all rows have
been exhausted will return ``None``.
After the :meth:`_engine.ResultProxy.close` method is
called, the method will raise :class:`.ResourceClosedError`.
:return: a :class:`.RowProxy` object, or None if no rows remain
"""
try:
row = self._fetchone_impl()
if row is not None:
return self.process_rows([row])[0]
else:
self._soft_close()
return None
except BaseException as e:
self.connection._handle_dbapi_exception(
e, None, None, self.cursor, self.context
)
def first(self):
"""Fetch the first row and then close the result set unconditionally.
After calling this method, the object is fully closed,
e.g. the :meth:`_engine.ResultProxy.close`
method will have been called.
:return: a :class:`.RowProxy` object, or None if no rows remain
"""
if self._metadata is None:
return self._non_result(None)
try:
row = self._fetchone_impl()
except BaseException as e:
self.connection._handle_dbapi_exception(
e, None, None, self.cursor, self.context
)
try:
if row is not None:
return self.process_rows([row])[0]
else:
return None
finally:
self.close()
def scalar(self):
"""Fetch the first column of the first row, and close the result set.
After calling this method, the object is fully closed,
e.g. the :meth:`_engine.ResultProxy.close`
method will have been called.
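A brief illustrative sketch (not from the original documentation; the
``users`` table is hypothetical)::

    user_count = connection.execute(
        "SELECT COUNT(*) FROM users"
    ).scalar()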
:return: a Python scalar value, or None if no rows remain
"""
row = self.first()
if row is not None:
return row[0]
else:
return None
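# Illustrative usage sketch, not part of the library itself: how the fetch
# methods and iteration protocol documented above are typically consumed.
# The "users" table and its columns are hypothetical.
def _example_iterate_result(connection):
    result = connection.execute("SELECT id, name FROM users")
    for row in result:  # __iter__() calls fetchone() until it returns None
        print(row["id"], row["name"])
    # after exhaustion the DBAPI cursor has been soft-closed; further
    # fetches return empty results rather than raising
    return result.fetchall()  # []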
class BufferedRowResultProxy(ResultProxy):
"""A ResultProxy with row buffering behavior.
``ResultProxy`` that buffers the contents of a selection of rows
before ``fetchone()`` is called. This is to allow the results of
``cursor.description`` to be available immediately, when
interfacing with a DB-API that requires rows to be consumed before
this information is available (currently psycopg2, when used with
server-side cursors).
The pre-fetching behavior fetches only one row initially, and then
grows its buffer size according to a fixed growth chart with each
successive need for additional rows, up to a maximum of 1000 rows.
The maximum buffer size may also be capped using the ``max_row_buffer``
execution option::
with psycopg2_engine.connect() as conn:
result = conn.execution_options(
stream_results=True, max_row_buffer=50
).execute("select * from table")
.. versionadded:: 1.0.6 Added the ``max_row_buffer`` option.
.. seealso::
:ref:`psycopg2_execution_options`
"""
def _init_metadata(self):
self._max_row_buffer = self.context.execution_options.get(
"max_row_buffer", None
)
self.__buffer_rows()
super(BufferedRowResultProxy, self)._init_metadata()
# this is a "growth chart" for the buffering of rows.
# each successive __buffer_rows call will use the next
# value in the list for the buffer size until the max
# is reached
size_growth = {
1: 5,
5: 10,
10: 20,
20: 50,
50: 100,
100: 250,
250: 500,
500: 1000,
}
def __buffer_rows(self):
if self.cursor is None:
return
size = getattr(self, "_bufsize", 1)
self.__rowbuffer = collections.deque(self.cursor.fetchmany(size))
self._bufsize = self.size_growth.get(size, size)
if self._max_row_buffer is not None:
self._bufsize = min(self._max_row_buffer, self._bufsize)
def _soft_close(self, **kw):
self.__rowbuffer.clear()
super(BufferedRowResultProxy, self)._soft_close(**kw)
def _fetchone_impl(self):
if self.cursor is None:
return self._non_result(None)
if not self.__rowbuffer:
self.__buffer_rows()
if not self.__rowbuffer:
return None
return self.__rowbuffer.popleft()
def _fetchmany_impl(self, size=None):
if size is None:
return self._fetchall_impl()
result = []
for x in range(0, size):
row = self._fetchone_impl()
if row is None:
break
result.append(row)
return result
def _fetchall_impl(self):
if self.cursor is None:
return self._non_result([])
self.__rowbuffer.extend(self.cursor.fetchall())
ret = self.__rowbuffer
self.__rowbuffer = collections.deque()
return ret
class FullyBufferedResultProxy(ResultProxy):
"""A result proxy that buffers rows fully upon creation.
Used for operations where a result is to be delivered
after the database conversation cannot be continued,
such as MSSQL INSERT...OUTPUT after an autocommit.
"""
def _init_metadata(self):
super(FullyBufferedResultProxy, self)._init_metadata()
self.__rowbuffer = self._buffer_rows()
def _buffer_rows(self):
return collections.deque(self.cursor.fetchall())
def _soft_close(self, **kw):
self.__rowbuffer.clear()
super(FullyBufferedResultProxy, self)._soft_close(**kw)
def _fetchone_impl(self):
if self.__rowbuffer:
return self.__rowbuffer.popleft()
else:
return self._non_result(None)
def _fetchmany_impl(self, size=None):
if size is None:
return self._fetchall_impl()
result = []
for x in range(0, size):
row = self._fetchone_impl()
if row is None:
break
result.append(row)
return result
def _fetchall_impl(self):
if not self.cursor:
return self._non_result([])
ret = self.__rowbuffer
self.__rowbuffer = collections.deque()
return ret
class BufferedColumnRow(RowProxy):
def __init__(self, parent, row, processors, keymap):
# preprocess row
row = list(row)
# this is a tad faster than using enumerate
index = 0
for processor in parent._orig_processors:
if processor is not None:
row[index] = processor(row[index])
index += 1
row = tuple(row)
super(BufferedColumnRow, self).__init__(
parent, row, processors, keymap
)
class BufferedColumnResultProxy(ResultProxy):
"""A ResultProxy with column buffering behavior.
``ResultProxy`` that loads all columns into memory each time
fetchone() is called. If fetchmany() or fetchall() are called,
the full grid of results is fetched. This is to operate with
databases where result rows contain "live" results that fall out
of scope unless explicitly fetched.
.. versionchanged:: 1.2 This :class:`_engine.ResultProxy` is not used by
any SQLAlchemy-included dialects.
"""
_process_row = BufferedColumnRow
def _init_metadata(self):
super(BufferedColumnResultProxy, self)._init_metadata()
metadata = self._metadata
# don't double-replace the processors, in the case
# of a cached ResultMetaData
if metadata._orig_processors is None:
# orig_processors will be used to preprocess each row when
# they are constructed.
metadata._orig_processors = metadata._processors
# replace the all type processors by None processors.
metadata._processors = [None for _ in range(len(metadata.keys))]
keymap = {}
for k, (func, obj, index) in metadata._keymap.items():
keymap[k] = (None, obj, index)
metadata._keymap = keymap
def fetchall(self):
# can't call cursor.fetchall(), since rows must be
# fully processed before requesting more from the DBAPI.
l = []
while True:
row = self.fetchone()
if row is None:
break
l.append(row)
return l
def fetchmany(self, size=None):
# can't call cursor.fetchmany(), since rows must be
# fully processed before requesting more from the DBAPI.
if size is None:
return self.fetchall()
l = []
for i in range(size):
row = self.fetchone()
if row is None:
break
l.append(row)
return l
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/engine/default.py
# engine/default.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Default implementations of per-dialect sqlalchemy.engine classes.
These are semi-private implementation classes which are only of importance
to database dialect authors; dialects will usually use the classes here
as the base class for their own corresponding classes.
"""
import codecs
import random
import re
import weakref
from . import interfaces
from . import reflection
from . import result
from .. import event
from .. import exc
from .. import pool
from .. import processors
from .. import types as sqltypes
from .. import util
from ..sql import compiler
from ..sql import expression
from ..sql import schema
from ..sql.elements import quoted_name
AUTOCOMMIT_REGEXP = re.compile(
r"\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER)", re.I | re.UNICODE
)
# When we're handed literal SQL, ensure it's a SELECT query
SERVER_SIDE_CURSOR_RE = re.compile(r"\s*SELECT", re.I | re.UNICODE)
class DefaultDialect(interfaces.Dialect):
"""Default implementation of Dialect"""
statement_compiler = compiler.SQLCompiler
ddl_compiler = compiler.DDLCompiler
type_compiler = compiler.GenericTypeCompiler
preparer = compiler.IdentifierPreparer
supports_alter = True
supports_comments = False
inline_comments = False
# the first value we'd get for an autoincrement
# column.
default_sequence_base = 1
# most DBAPIs happy with this for execute().
# not cx_oracle.
execute_sequence_format = tuple
supports_views = True
supports_sequences = False
sequences_optional = False
preexecute_autoincrement_sequences = False
postfetch_lastrowid = True
implicit_returning = False
supports_right_nested_joins = True
cte_follows_insert = False
supports_native_enum = False
supports_native_boolean = False
non_native_boolean_check_constraint = True
supports_simple_order_by_label = True
tuple_in_values = False
engine_config_types = util.immutabledict(
[
("convert_unicode", util.bool_or_str("force")),
("pool_timeout", util.asint),
("echo", util.bool_or_str("debug")),
("echo_pool", util.bool_or_str("debug")),
("pool_recycle", util.asint),
("pool_size", util.asint),
("max_overflow", util.asint),
("pool_threadlocal", util.asbool),
]
)
# if the NUMERIC type
# returns decimal.Decimal.
# *not* the FLOAT type however.
supports_native_decimal = False
if util.py3k:
supports_unicode_statements = True
supports_unicode_binds = True
returns_unicode_strings = True
description_encoding = None
else:
supports_unicode_statements = False
supports_unicode_binds = False
returns_unicode_strings = False
description_encoding = "use_encoding"
name = "default"
# length at which to truncate
# any identifier.
max_identifier_length = 9999
_user_defined_max_identifier_length = None
# length at which to truncate
# the name of an index.
# Usually None to indicate
# 'use max_identifier_length'.
# thanks to MySQL, sigh
max_index_name_length = None
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
colspecs = {}
default_paramstyle = "named"
supports_default_values = False
supports_empty_insert = True
supports_multivalues_insert = False
supports_is_distinct_from = True
supports_server_side_cursors = False
# extra record-level locking features (#4860)
supports_for_update_of = False
server_version_info = None
construct_arguments = None
"""Optional set of argument specifiers for various SQLAlchemy
constructs, typically schema items.
To implement, establish as a series of tuples, as in::
construct_arguments = [
(schema.Index, {
"using": False,
"where": None,
"ops": None
})
]
If the above construct is established on the PostgreSQL dialect,
the :class:`.Index` construct will now accept the keyword arguments
``postgresql_using``, ``postgresql_where``, and ``postgresql_ops``.
Any other argument specified to the constructor of :class:`.Index`
which is prefixed with ``postgresql_`` will raise :class:`.ArgumentError`.
A dialect which does not include a ``construct_arguments`` member will
not participate in the argument validation system. For such a dialect,
any argument name is accepted by all participating constructs, within
the namespace of arguments prefixed with that dialect name. The rationale
here is so that third-party dialects that haven't yet implemented this
feature continue to function in the old way.
.. versionadded:: 0.9.2
.. seealso::
:class:`.DialectKWArgs` - implementing base class which consumes
:attr:`.DefaultDialect.construct_arguments`
"""
# indicates symbol names are
# UPPERCASEd if they are case insensitive
# within the database.
# if this is True, the methods normalize_name()
# and denormalize_name() must be provided.
requires_name_normalize = False
reflection_options = ()
dbapi_exception_translation_map = util.immutabledict()
"""mapping used in the extremely unusual case that a DBAPI's
published exceptions don't actually have the __name__ that they
are linked towards.
.. versionadded:: 1.0.5
"""
@util.deprecated_params(
convert_unicode=(
"1.3",
"The :paramref:`_sa.create_engine.convert_unicode` parameter "
"and corresponding dialect-level parameters are deprecated, "
"and will be removed in a future release. Modern DBAPIs support "
"Python Unicode natively and this parameter is unnecessary.",
)
)
def __init__(
self,
convert_unicode=False,
encoding="utf-8",
paramstyle=None,
dbapi=None,
implicit_returning=None,
supports_right_nested_joins=None,
case_sensitive=True,
supports_native_boolean=None,
empty_in_strategy="static",
max_identifier_length=None,
label_length=None,
**kwargs
):
if not getattr(self, "ported_sqla_06", True):
util.warn(
"The %s dialect is not yet ported to the 0.6 format"
% self.name
)
self.convert_unicode = convert_unicode
self.encoding = encoding
self.positional = False
self._ischema = None
self.dbapi = dbapi
if paramstyle is not None:
self.paramstyle = paramstyle
elif self.dbapi is not None:
self.paramstyle = self.dbapi.paramstyle
else:
self.paramstyle = self.default_paramstyle
if implicit_returning is not None:
self.implicit_returning = implicit_returning
self.positional = self.paramstyle in ("qmark", "format", "numeric")
self.identifier_preparer = self.preparer(self)
self.type_compiler = self.type_compiler(self)
if supports_right_nested_joins is not None:
self.supports_right_nested_joins = supports_right_nested_joins
if supports_native_boolean is not None:
self.supports_native_boolean = supports_native_boolean
self.case_sensitive = case_sensitive
self.empty_in_strategy = empty_in_strategy
if empty_in_strategy == "static":
self._use_static_in = True
elif empty_in_strategy in ("dynamic", "dynamic_warn"):
self._use_static_in = False
self._warn_on_empty_in = empty_in_strategy == "dynamic_warn"
else:
raise exc.ArgumentError(
"empty_in_strategy may be 'static', "
"'dynamic', or 'dynamic_warn'"
)
self._user_defined_max_identifier_length = max_identifier_length
if self._user_defined_max_identifier_length:
self.max_identifier_length = (
self._user_defined_max_identifier_length
)
self.label_length = label_length
if self.description_encoding == "use_encoding":
self._description_decoder = (
processors.to_unicode_processor_factory
)(encoding)
elif self.description_encoding is not None:
self._description_decoder = (
processors.to_unicode_processor_factory
)(self.description_encoding)
self._encoder = codecs.getencoder(self.encoding)
self._decoder = processors.to_unicode_processor_factory(self.encoding)
@util.memoized_property
def _type_memos(self):
return weakref.WeakKeyDictionary()
@property
def dialect_description(self):
return self.name + "+" + self.driver
@property
def supports_sane_rowcount_returning(self):
"""True if this dialect supports sane rowcount even if RETURNING is
in use.
For dialects that don't support RETURNING, this is synonymous
with supports_sane_rowcount.
"""
return self.supports_sane_rowcount
@classmethod
def get_pool_class(cls, url):
return getattr(cls, "poolclass", pool.QueuePool)
@classmethod
def load_provisioning(cls):
package = ".".join(cls.__module__.split(".")[0:-1])
try:
__import__(package + ".provision")
except ImportError:
pass
def initialize(self, connection):
try:
self.server_version_info = self._get_server_version_info(
connection
)
except NotImplementedError:
self.server_version_info = None
try:
self.default_schema_name = self._get_default_schema_name(
connection
)
except NotImplementedError:
self.default_schema_name = None
try:
self.default_isolation_level = self.get_isolation_level(
connection.connection
)
except NotImplementedError:
self.default_isolation_level = None
self.returns_unicode_strings = self._check_unicode_returns(connection)
if (
self.description_encoding is not None
and self._check_unicode_description(connection)
):
self._description_decoder = self.description_encoding = None
if not self._user_defined_max_identifier_length:
max_ident_length = self._check_max_identifier_length(connection)
if max_ident_length:
self.max_identifier_length = max_ident_length
if (
self.label_length
and self.label_length > self.max_identifier_length
):
raise exc.ArgumentError(
"Label length of %d is greater than this dialect's"
" maximum identifier length of %d"
% (self.label_length, self.max_identifier_length)
)
def on_connect(self):
# inherits the docstring from interfaces.Dialect.on_connect
return None
def _check_max_identifier_length(self, connection):
"""Perform a connection / server version specific check to determine
the max_identifier_length.
If the dialect's class-level ``max_identifier_length`` should be used,
this method may return None.
.. versionadded:: 1.3.9
"""
return None
def _check_unicode_returns(self, connection, additional_tests=None):
if util.py2k and not self.supports_unicode_statements:
cast_to = util.binary_type
else:
cast_to = util.text_type
if self.positional:
parameters = self.execute_sequence_format()
else:
parameters = {}
def check_unicode(test):
statement = cast_to(
expression.select([test]).compile(dialect=self)
)
try:
cursor = connection.connection.cursor()
connection._cursor_execute(cursor, statement, parameters)
row = cursor.fetchone()
cursor.close()
except exc.DBAPIError as de:
# note that _cursor_execute() will have closed the cursor
# if an exception is thrown.
util.warn(
"Exception attempting to "
"detect unicode returns: %r" % de
)
return False
else:
return isinstance(row[0], util.text_type)
tests = [
# detect plain VARCHAR
expression.cast(
expression.literal_column("'test plain returns'"),
sqltypes.VARCHAR(60),
),
# detect if there's an NVARCHAR type with different behavior
# available
expression.cast(
expression.literal_column("'test unicode returns'"),
sqltypes.Unicode(60),
),
]
if additional_tests:
tests += additional_tests
results = {check_unicode(test) for test in tests}
if results.issuperset([True, False]):
return "conditional"
else:
return results == {True}
def _check_unicode_description(self, connection):
# all DBAPIs on Py2K return cursor.description as encoded,
# until pypy2.1beta2 with sqlite, so let's just check it -
# it's likely others will start doing this too in Py2k.
if util.py2k and not self.supports_unicode_statements:
cast_to = util.binary_type
else:
cast_to = util.text_type
cursor = connection.connection.cursor()
try:
cursor.execute(
cast_to(
expression.select(
[expression.literal_column("'x'").label("some_label")]
).compile(dialect=self)
)
)
return isinstance(cursor.description[0][0], util.text_type)
finally:
cursor.close()
def type_descriptor(self, typeobj):
"""Provide a database-specific :class:`.TypeEngine` object, given
the generic object which comes from the types module.
This method looks for a dictionary called
``colspecs`` as a class or instance-level variable,
and passes on to :func:`_types.adapt_type`.
"""
return sqltypes.adapt_type(typeobj, self.colspecs)
def reflecttable(
self,
connection,
table,
include_columns,
exclude_columns,
resolve_fks,
**opts
):
insp = reflection.Inspector.from_engine(connection)
return insp.reflecttable(
table, include_columns, exclude_columns, resolve_fks, **opts
)
def get_pk_constraint(self, conn, table_name, schema=None, **kw):
"""Compatibility method, adapts the result of get_primary_keys()
for those dialects which don't implement get_pk_constraint().
"""
return {
"constrained_columns": self.get_primary_keys(
conn, table_name, schema=schema, **kw
)
}
def validate_identifier(self, ident):
if len(ident) > self.max_identifier_length:
raise exc.IdentifierError(
"Identifier '%s' exceeds maximum length of %d characters"
% (ident, self.max_identifier_length)
)
def connect(self, *cargs, **cparams):
# inherits the docstring from interfaces.Dialect.connect
return self.dbapi.connect(*cargs, **cparams)
def create_connect_args(self, url):
# inherits the docstring from interfaces.Dialect.create_connect_args
opts = url.translate_connect_args()
opts.update(url.query)
return [[], opts]
def set_engine_execution_options(self, engine, opts):
if "isolation_level" in opts:
isolation_level = opts["isolation_level"]
@event.listens_for(engine, "engine_connect")
def set_isolation(connection, branch):
if not branch:
self._set_connection_isolation(connection, isolation_level)
if "schema_translate_map" in opts:
getter = schema._schema_getter(opts["schema_translate_map"])
engine.schema_for_object = getter
@event.listens_for(engine, "engine_connect")
def set_schema_translate_map(connection, branch):
connection.schema_for_object = getter
def set_connection_execution_options(self, connection, opts):
if "isolation_level" in opts:
self._set_connection_isolation(connection, opts["isolation_level"])
if "schema_translate_map" in opts:
getter = schema._schema_getter(opts["schema_translate_map"])
connection.schema_for_object = getter
def _set_connection_isolation(self, connection, level):
if connection.in_transaction():
util.warn(
"Connection is already established with a Transaction; "
"setting isolation_level may implicitly rollback or commit "
"the existing transaction, or have no effect until "
"next transaction"
)
self.set_isolation_level(connection.connection, level)
connection.connection._connection_record.finalize_callback.append(
self.reset_isolation_level
)
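# Illustrative sketch, not part of the library: the execution-option
# handling above is what services user-level calls such as the following
# (availability of a given isolation level depends on the dialect in use):
#
#     conn = engine.connect().execution_options(
#         isolation_level="SERIALIZABLE"
#     )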
def do_begin(self, dbapi_connection):
pass
def do_rollback(self, dbapi_connection):
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
dbapi_connection.commit()
def do_close(self, dbapi_connection):
dbapi_connection.close()
@util.memoized_property
def _dialect_specific_select_one(self):
return str(expression.select([1]).compile(dialect=self))
def do_ping(self, dbapi_connection):
cursor = None
try:
cursor = dbapi_connection.cursor()
try:
cursor.execute(self._dialect_specific_select_one)
finally:
cursor.close()
except self.dbapi.Error as err:
if self.is_disconnect(err, dbapi_connection, cursor):
return False
else:
raise
else:
return True
def create_xid(self):
"""Create a random two-phase transaction ID.
This id will be passed to do_begin_twophase(), do_rollback_twophase(),
do_commit_twophase(). Its format is unspecified.
"""
return "_sa_%032x" % random.randint(0, 2 ** 128)
def do_savepoint(self, connection, name):
connection.execute(expression.SavepointClause(name))
def do_rollback_to_savepoint(self, connection, name):
connection.execute(expression.RollbackToSavepointClause(name))
def do_release_savepoint(self, connection, name):
connection.execute(expression.ReleaseSavepointClause(name))
def do_executemany(self, cursor, statement, parameters, context=None):
cursor.executemany(statement, parameters)
def do_execute(self, cursor, statement, parameters, context=None):
cursor.execute(statement, parameters)
def do_execute_no_params(self, cursor, statement, context=None):
cursor.execute(statement)
def is_disconnect(self, e, connection, cursor):
return False
def reset_isolation_level(self, dbapi_conn):
# default_isolation_level is read from the first connection
# after the initial set of 'isolation_level', if any, so is
# the configured default of this dialect.
self.set_isolation_level(dbapi_conn, self.default_isolation_level)
def normalize_name(self, name):
if name is None:
return None
if util.py2k:
if isinstance(name, str):
name = name.decode(self.encoding)
name_lower = name.lower()
name_upper = name.upper()
if name_upper == name_lower:
# name has no upper/lower conversion, e.g. non-european characters.
# return unchanged
return name
elif name_upper == name and not (
self.identifier_preparer._requires_quotes
)(name_lower):
# name is all uppercase and doesn't require quoting; normalize
# to all lower case
return name_lower
elif name_lower == name:
# name is all lower case, which if denormalized means we need to
# force quoting on it
return quoted_name(name, quote=True)
else:
# name is mixed case, which means it will be quoted in SQL when
# used later; no normalization is needed
return name
def denormalize_name(self, name):
if name is None:
return None
name_lower = name.lower()
name_upper = name.upper()
if name_upper == name_lower:
# name has no upper/lower conversion, e.g. non-european characters.
# return unchanged
return name
elif name_lower == name and not (
self.identifier_preparer._requires_quotes
)(name_lower):
name = name_upper
if util.py2k:
if not self.supports_unicode_binds:
name = name.encode(self.encoding)
else:
name = unicode(name) # noqa
return name
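# Illustrative sketch, not part of the library: how a dialect for a
# case-insensitive backend (one that sets requires_name_normalize=True,
# e.g. Oracle's) round-trips identifier names through the two hooks above.
# The dialect instance and names are hypothetical.
def _example_name_round_trip(dialect):
    # an all-uppercase, unquoted name coming back from the database is
    # presented to the user in lower case ...
    normalized = dialect.normalize_name("EMPLOYEES")  # "employees"
    # ... and converted back to upper case when emitted in SQL/DDL
    return dialect.denormalize_name(normalized)  # "EMPLOYEES"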
class _RendersLiteral(object):
def literal_processor(self, dialect):
def process(value):
return "'%s'" % value
return process
class _StrDateTime(_RendersLiteral, sqltypes.DateTime):
pass
class _StrDate(_RendersLiteral, sqltypes.Date):
pass
class _StrTime(_RendersLiteral, sqltypes.Time):
pass
class StrCompileDialect(DefaultDialect):
statement_compiler = compiler.StrSQLCompiler
ddl_compiler = compiler.DDLCompiler
type_compiler = compiler.StrSQLTypeCompiler
preparer = compiler.IdentifierPreparer
supports_sequences = True
sequences_optional = True
preexecute_autoincrement_sequences = False
implicit_returning = False
supports_native_boolean = True
supports_simple_order_by_label = True
colspecs = {
sqltypes.DateTime: _StrDateTime,
sqltypes.Date: _StrDate,
sqltypes.Time: _StrTime,
}
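# Illustrative sketch, not part of the library: StrCompileDialect provides
# string-only compilation, which is what a Core construct falls back to when
# stringified without a bound engine; the _Str* types above let date/time
# literals render in that mode.  Exact output formatting may vary.
def _example_str_compile():
    from sqlalchemy import column, select, table

    stmt = select([column("x")]).select_from(table("t"))
    return str(stmt)  # roughly "SELECT x FROM t"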
class DefaultExecutionContext(interfaces.ExecutionContext):
isinsert = False
isupdate = False
isdelete = False
is_crud = False
is_text = False
isddl = False
executemany = False
compiled = None
statement = None
result_column_struct = None
returned_defaults = None
_is_implicit_returning = False
_is_explicit_returning = False
# a hook for SQLite's translation of
# result column names
_translate_colname = None
_expanded_parameters = util.immutabledict()
@classmethod
def _init_ddl(cls, dialect, connection, dbapi_connection, compiled_ddl):
"""Initialize execution context for a DDLElement construct."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.compiled = compiled = compiled_ddl
self.isddl = True
self.execution_options = compiled.execution_options
if connection._execution_options:
self.execution_options = dict(self.execution_options)
self.execution_options.update(connection._execution_options)
if not dialect.supports_unicode_statements:
self.unicode_statement = util.text_type(compiled)
self.statement = dialect._encoder(self.unicode_statement)[0]
else:
self.statement = self.unicode_statement = util.text_type(compiled)
self.cursor = self.create_cursor()
self.compiled_parameters = []
if dialect.positional:
self.parameters = [dialect.execute_sequence_format()]
else:
self.parameters = [{}]
return self
@classmethod
def _init_compiled(
cls, dialect, connection, dbapi_connection, compiled, parameters
):
"""Initialize execution context for a Compiled construct."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.compiled = compiled
# this should be caught in the engine before
# we get here
assert compiled.can_execute
self.execution_options = compiled.execution_options.union(
connection._execution_options
)
self.result_column_struct = (
compiled._result_columns,
compiled._ordered_columns,
compiled._textual_ordered_columns,
)
self.unicode_statement = util.text_type(compiled)
if not dialect.supports_unicode_statements:
self.statement = self.unicode_statement.encode(
self.dialect.encoding
)
else:
self.statement = self.unicode_statement
self.isinsert = compiled.isinsert
self.isupdate = compiled.isupdate
self.isdelete = compiled.isdelete
self.is_text = compiled.isplaintext
if not parameters:
self.compiled_parameters = [compiled.construct_params()]
else:
self.compiled_parameters = [
compiled.construct_params(m, _group_number=grp)
for grp, m in enumerate(parameters)
]
self.executemany = len(parameters) > 1
self.cursor = self.create_cursor()
if self.isinsert or self.isupdate or self.isdelete:
self.is_crud = True
self._is_explicit_returning = bool(compiled.statement._returning)
self._is_implicit_returning = bool(
compiled.returning and not compiled.statement._returning
)
if self.compiled.insert_prefetch or self.compiled.update_prefetch:
if self.executemany:
self._process_executemany_defaults()
else:
self._process_executesingle_defaults()
processors = compiled._bind_processors
if compiled.contains_expanding_parameters:
# copy processors for this case as they will be mutated
processors = dict(processors)
positiontup = self._expand_in_parameters(compiled, processors)
elif compiled.positional:
positiontup = self.compiled.positiontup
# Convert the dictionary of bind parameter values
# into a dict or list to be sent to the DBAPI's
# execute() or executemany() method.
parameters = []
if compiled.positional:
for compiled_params in self.compiled_parameters:
param = []
for key in positiontup:
if key in processors:
param.append(processors[key](compiled_params[key]))
else:
param.append(compiled_params[key])
parameters.append(dialect.execute_sequence_format(param))
else:
encode = not dialect.supports_unicode_statements
for compiled_params in self.compiled_parameters:
if encode:
param = dict(
(
dialect._encoder(key)[0],
processors[key](compiled_params[key])
if key in processors
else compiled_params[key],
)
for key in compiled_params
)
else:
param = dict(
(
key,
processors[key](compiled_params[key])
if key in processors
else compiled_params[key],
)
for key in compiled_params
)
parameters.append(param)
self.parameters = dialect.execute_sequence_format(parameters)
return self
def _expand_in_parameters(self, compiled, processors):
"""handle special 'expanding' parameters, IN tuples that are rendered
on a per-parameter basis for an otherwise fixed SQL statement string.
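Roughly (an illustrative sketch, not from the original docstring): a
statement compiled with a placeholder such as
``... WHERE x IN ([EXPANDING_x])`` and a bound value of ``[1, 2, 3]`` is
rewritten here to render three individual parameters ``x_1``, ``x_2``
and ``x_3`` (e.g. ``... WHERE x IN (?, ?, ?)`` under the "qmark"
paramstyle).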
"""
if self.executemany:
raise exc.InvalidRequestError(
"'expanding' parameters can't be used with " "executemany()"
)
if self.compiled.positional and self.compiled._numeric_binds:
# I'm not familiar with any DBAPI that uses 'numeric'
raise NotImplementedError(
"'expanding' bind parameters not supported with "
"'numeric' paramstyle at this time."
)
self._expanded_parameters = {}
compiled_params = self.compiled_parameters[0]
if compiled.positional:
positiontup = []
else:
positiontup = None
replacement_expressions = {}
to_update_sets = {}
for name in (
self.compiled.positiontup
if compiled.positional
else self.compiled.binds
):
parameter = self.compiled.binds[name]
if parameter.expanding:
if name in replacement_expressions:
to_update = to_update_sets[name]
else:
# we are removing the parameter from compiled_params
# because it is a list value, which is not expected by
# TypeEngine objects that would otherwise be asked to
# process it. the single name is being replaced with
# individual numbered parameters for each value in the
# param.
values = compiled_params.pop(name)
if not values:
to_update = to_update_sets[name] = []
replacement_expressions[
name
] = self.compiled.visit_empty_set_expr(
parameter._expanding_in_types
if parameter._expanding_in_types
else [parameter.type]
)
elif isinstance(values[0], (tuple, list)):
to_update = to_update_sets[name] = [
("%s_%s_%s" % (name, i, j), value)
for i, tuple_element in enumerate(values, 1)
for j, value in enumerate(tuple_element, 1)
]
replacement_expressions[name] = (
"VALUES " if self.dialect.tuple_in_values else ""
) + ", ".join(
"(%s)"
% ", ".join(
self.compiled.bindtemplate
% {
"name": to_update[
i * len(tuple_element) + j
][0]
}
for j, value in enumerate(tuple_element)
)
for i, tuple_element in enumerate(values)
)
else:
to_update = to_update_sets[name] = [
("%s_%s" % (name, i), value)
for i, value in enumerate(values, 1)
]
replacement_expressions[name] = ", ".join(
self.compiled.bindtemplate % {"name": key}
for key, value in to_update
)
compiled_params.update(to_update)
processors.update(
(key, processors[name])
for key, value in to_update
if name in processors
)
if compiled.positional:
positiontup.extend(name for name, value in to_update)
self._expanded_parameters[name] = [
expand_key for expand_key, value in to_update
]
elif compiled.positional:
positiontup.append(name)
def process_expanding(m):
return replacement_expressions[m.group(1)]
self.statement = re.sub(
r"\[EXPANDING_(\S+)\]", process_expanding, self.statement
)
return positiontup
@classmethod
def _init_statement(
cls, dialect, connection, dbapi_connection, statement, parameters
):
"""Initialize execution context for a string SQL statement."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.is_text = True
# plain text statement
self.execution_options = connection._execution_options
if not parameters:
if self.dialect.positional:
self.parameters = [dialect.execute_sequence_format()]
else:
self.parameters = [{}]
elif isinstance(parameters[0], dialect.execute_sequence_format):
self.parameters = parameters
elif isinstance(parameters[0], dict):
if dialect.supports_unicode_statements:
self.parameters = parameters
else:
self.parameters = [
{dialect._encoder(k)[0]: d[k] for k in d}
for d in parameters
] or [{}]
else:
self.parameters = [
dialect.execute_sequence_format(p) for p in parameters
]
self.executemany = len(parameters) > 1
if not dialect.supports_unicode_statements and isinstance(
statement, util.text_type
):
self.unicode_statement = statement
self.statement = dialect._encoder(statement)[0]
else:
self.statement = self.unicode_statement = statement
self.cursor = self.create_cursor()
return self
@classmethod
def _init_default(cls, dialect, connection, dbapi_connection):
"""Initialize execution context for a ColumnDefault construct."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.execution_options = connection._execution_options
self.cursor = self.create_cursor()
return self
@util.memoized_property
def engine(self):
return self.root_connection.engine
@util.memoized_property
def postfetch_cols(self):
return self.compiled.postfetch
@util.memoized_property
def prefetch_cols(self):
if self.isinsert:
return self.compiled.insert_prefetch
elif self.isupdate:
return self.compiled.update_prefetch
else:
return ()
@util.memoized_property
def returning_cols(self):
return self.compiled.returning
@util.memoized_property
def no_parameters(self):
return self.execution_options.get("no_parameters", False)
@util.memoized_property
def should_autocommit(self):
autocommit = self.execution_options.get(
"autocommit",
not self.compiled
and self.statement
and expression.PARSE_AUTOCOMMIT
or False,
)
if autocommit is expression.PARSE_AUTOCOMMIT:
return self.should_autocommit_text(self.unicode_statement)
else:
return autocommit
def _execute_scalar(self, stmt, type_):
"""Execute a string statement on the current cursor, returning a
scalar result.
Used to fire off sequences, default phrases, and "select lastrowid"
types of statements individually or in the context of a parent INSERT
or UPDATE statement.
"""
conn = self.root_connection
if (
isinstance(stmt, util.text_type)
and not self.dialect.supports_unicode_statements
):
stmt = self.dialect._encoder(stmt)[0]
if self.dialect.positional:
default_params = self.dialect.execute_sequence_format()
else:
default_params = {}
conn._cursor_execute(self.cursor, stmt, default_params, context=self)
r = self.cursor.fetchone()[0]
if type_ is not None:
# apply type post processors to the result
proc = type_._cached_result_processor(
self.dialect, self.cursor.description[0][1]
)
if proc:
return proc(r)
return r
@property
def connection(self):
return self.root_connection._branch()
def should_autocommit_text(self, statement):
return AUTOCOMMIT_REGEXP.match(statement)
def _use_server_side_cursor(self):
if not self.dialect.supports_server_side_cursors:
return False
if self.dialect.server_side_cursors:
use_server_side = self.execution_options.get(
"stream_results", True
) and (
(
self.compiled
and isinstance(
self.compiled.statement, expression.Selectable
)
or (
(
not self.compiled
or isinstance(
self.compiled.statement, expression.TextClause
)
)
and self.statement
and SERVER_SIDE_CURSOR_RE.match(self.statement)
)
)
)
else:
use_server_side = self.execution_options.get(
"stream_results", False
)
return use_server_side
def create_cursor(self):
if self._use_server_side_cursor():
self._is_server_side = True
return self.create_server_side_cursor()
else:
self._is_server_side = False
return self._dbapi_connection.cursor()
def create_server_side_cursor(self):
raise NotImplementedError()
def pre_exec(self):
pass
def post_exec(self):
pass
def get_result_processor(self, type_, colname, coltype):
"""Return a 'result processor' for a given type as present in
cursor.description.
This has a default implementation that dialects can override
for context-sensitive result type handling.
"""
return type_._cached_result_processor(self.dialect, coltype)
def get_lastrowid(self):
"""return self.cursor.lastrowid, or equivalent, after an INSERT.
This may involve calling special cursor functions,
issuing a new SELECT on the cursor (or a new one),
or returning a stored value that was
calculated within post_exec().
This function will only be called for dialects
which support "implicit" primary key generation,
keep preexecute_autoincrement_sequences set to False,
and when no explicit id value was bound to the
statement.
The function is called once, directly after
post_exec() and before the transaction is committed
or ResultProxy is generated. If the post_exec()
method assigns a value to `self._lastrowid`, the
value is used in place of calling get_lastrowid().
Note that this method is *not* equivalent to the
``lastrowid`` method on ``ResultProxy``, which is a
direct proxy to the DBAPI ``lastrowid`` accessor
in all cases.
"""
return self.cursor.lastrowid
def handle_dbapi_exception(self, e):
pass
def get_result_proxy(self):
if self._is_server_side:
return result.BufferedRowResultProxy(self)
else:
return result.ResultProxy(self)
@property
def rowcount(self):
return self.cursor.rowcount
def supports_sane_rowcount(self):
return self.dialect.supports_sane_rowcount
def supports_sane_multi_rowcount(self):
return self.dialect.supports_sane_multi_rowcount
def _setup_crud_result_proxy(self):
if self.isinsert and not self.executemany:
if (
not self._is_implicit_returning
and not self.compiled.inline
and self.dialect.postfetch_lastrowid
):
self._setup_ins_pk_from_lastrowid()
elif not self._is_implicit_returning:
self._setup_ins_pk_from_empty()
result = self.get_result_proxy()
if self.isinsert:
if self._is_implicit_returning:
row = result.fetchone()
self.returned_defaults = row
self._setup_ins_pk_from_implicit_returning(row)
result._soft_close()
result._metadata = None
elif not self._is_explicit_returning:
result._soft_close()
result._metadata = None
elif self.isupdate and self._is_implicit_returning:
row = result.fetchone()
self.returned_defaults = row
result._soft_close()
result._metadata = None
elif result._metadata is None:
# no results, get rowcount
# (which requires open cursor on some drivers
# such as kinterbasdb, mxodbc)
result.rowcount
result._soft_close()
return result
def _setup_ins_pk_from_lastrowid(self):
key_getter = self.compiled._key_getters_for_crud_column[2]
table = self.compiled.statement.table
compiled_params = self.compiled_parameters[0]
lastrowid = self.get_lastrowid()
if lastrowid is not None:
autoinc_col = table._autoincrement_column
if autoinc_col is not None:
# apply type post processors to the lastrowid
proc = autoinc_col.type._cached_result_processor(
self.dialect, None
)
if proc is not None:
lastrowid = proc(lastrowid)
self.inserted_primary_key = [
lastrowid
if c is autoinc_col
else compiled_params.get(key_getter(c), None)
for c in table.primary_key
]
else:
# don't have a usable lastrowid, so
# do the same as _setup_ins_pk_from_empty
self.inserted_primary_key = [
compiled_params.get(key_getter(c), None)
for c in table.primary_key
]
def _setup_ins_pk_from_empty(self):
key_getter = self.compiled._key_getters_for_crud_column[2]
table = self.compiled.statement.table
compiled_params = self.compiled_parameters[0]
self.inserted_primary_key = [
compiled_params.get(key_getter(c), None) for c in table.primary_key
]
def _setup_ins_pk_from_implicit_returning(self, row):
if row is None:
self.inserted_primary_key = None
return
key_getter = self.compiled._key_getters_for_crud_column[2]
table = self.compiled.statement.table
compiled_params = self.compiled_parameters[0]
self.inserted_primary_key = [
row[col] if value is None else value
for col, value in [
(col, compiled_params.get(key_getter(col), None))
for col in table.primary_key
]
]
def lastrow_has_defaults(self):
return (self.isinsert or self.isupdate) and bool(
self.compiled.postfetch
)
def set_input_sizes(
self, translate=None, include_types=None, exclude_types=None
):
"""Given a cursor and ClauseParameters, call the appropriate
style of ``setinputsizes()`` on the cursor, using DB-API types
from the bind parameter's ``TypeEngine`` objects.
This method is only called by those dialects which require it,
currently cx_oracle.
"""
if not hasattr(self.compiled, "bind_names"):
return
inputsizes = {}
for bindparam in self.compiled.bind_names:
dialect_impl = bindparam.type._unwrapped_dialect_impl(self.dialect)
dialect_impl_cls = type(dialect_impl)
dbtype = dialect_impl.get_dbapi_type(self.dialect.dbapi)
if (
dbtype is not None
and (
not exclude_types
or dbtype not in exclude_types
and dialect_impl_cls not in exclude_types
)
and (
not include_types
or dbtype in include_types
or dialect_impl_cls in include_types
)
):
inputsizes[bindparam] = dbtype
else:
inputsizes[bindparam] = None
if self.dialect._has_events:
self.dialect.dispatch.do_setinputsizes(
inputsizes, self.cursor, self.statement, self.parameters, self
)
if self.dialect.positional:
positional_inputsizes = []
for key in self.compiled.positiontup:
bindparam = self.compiled.binds[key]
dbtype = inputsizes.get(bindparam, None)
if dbtype is not None:
if key in self._expanded_parameters:
positional_inputsizes.extend(
[dbtype] * len(self._expanded_parameters[key])
)
else:
positional_inputsizes.append(dbtype)
try:
self.cursor.setinputsizes(*positional_inputsizes)
except BaseException as e:
self.root_connection._handle_dbapi_exception(
e, None, None, None, self
)
else:
keyword_inputsizes = {}
for bindparam, key in self.compiled.bind_names.items():
dbtype = inputsizes.get(bindparam, None)
if dbtype is not None:
if translate:
# TODO: this part won't work w/ the
# expanded_parameters feature, e.g. for cx_oracle
# quoted bound names
key = translate.get(key, key)
if not self.dialect.supports_unicode_binds:
key = self.dialect._encoder(key)[0]
if key in self._expanded_parameters:
keyword_inputsizes.update(
(expand_key, dbtype)
for expand_key in self._expanded_parameters[key]
)
else:
keyword_inputsizes[key] = dbtype
try:
self.cursor.setinputsizes(**keyword_inputsizes)
except BaseException as e:
self.root_connection._handle_dbapi_exception(
e, None, None, None, self
)
def _exec_default(self, column, default, type_):
if default.is_sequence:
return self.fire_sequence(default, type_)
elif default.is_callable:
self.current_column = column
return default.arg(self)
elif default.is_clause_element:
# TODO: expensive branching here should be
# pulled into _execute_scalar()
conn = self.connection
if not default._arg_is_typed:
default_arg = expression.type_coerce(default.arg, type_)
else:
default_arg = default.arg
c = expression.select([default_arg]).compile(bind=conn)
return conn._execute_compiled(c, (), {}).scalar()
else:
return default.arg
current_parameters = None
"""A dictionary of parameters applied to the current row.
This attribute is only available in the context of a user-defined default
generation function, e.g. as described at :ref:`context_default_functions`.
It consists of a dictionary which includes entries for each column/value
pair that is to be part of the INSERT or UPDATE statement. The keys of the
dictionary will be the key value of each :class:`_schema.Column`,
which is usually
synonymous with the name.
Note that the :attr:`.DefaultExecutionContext.current_parameters` attribute
does not accommodate for the "multi-values" feature of the
:meth:`_expression.Insert.values` method. The
:meth:`.DefaultExecutionContext.get_current_parameters` method should be
preferred.
.. seealso::
:meth:`.DefaultExecutionContext.get_current_parameters`
:ref:`context_default_functions`
"""
def get_current_parameters(self, isolate_multiinsert_groups=True):
"""Return a dictionary of parameters applied to the current row.
This method can only be used in the context of a user-defined default
generation function, e.g. as described at
:ref:`context_default_functions`. When invoked, a dictionary is
returned which includes entries for each column/value pair that is part
of the INSERT or UPDATE statement. The keys of the dictionary will be
the key value of each :class:`_schema.Column`,
which is usually synonymous
with the name.
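A brief illustrative sketch (not from the original documentation; the
column names and the ``compute_slug`` function are hypothetical)::

    def compute_slug(context):
        params = context.get_current_parameters()
        return params["title"].lower().replace(" ", "-")

    slug = Column(String(50), default=compute_slug)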
:param isolate_multiinsert_groups=True: indicates that multi-valued
INSERT constructs created using :meth:`_expression.Insert.values`
should be
handled by returning only the subset of parameters that are local
to the current column default invocation. When ``False``, the
raw parameters of the statement are returned including the
naming convention used in the case of multi-valued INSERT.
.. versionadded:: 1.2 added
:meth:`.DefaultExecutionContext.get_current_parameters`
which provides more functionality over the existing
:attr:`.DefaultExecutionContext.current_parameters`
attribute.
.. seealso::
:attr:`.DefaultExecutionContext.current_parameters`
:ref:`context_default_functions`
"""
try:
parameters = self.current_parameters
column = self.current_column
except AttributeError:
raise exc.InvalidRequestError(
"get_current_parameters() can only be invoked in the "
"context of a Python side column default function"
)
if (
isolate_multiinsert_groups
and self.isinsert
and self.compiled.statement._has_multi_parameters
):
if column._is_multiparam_column:
index = column.index + 1
d = {column.original.key: parameters[column.key]}
else:
d = {column.key: parameters[column.key]}
index = 0
keys = self.compiled.statement.parameters[0].keys()
d.update(
(key, parameters["%s_m%d" % (key, index)]) for key in keys
)
return d
else:
return parameters
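    # Illustrative sketch of a context-sensitive default making use of
    # get_current_parameters(); the table and column names below are
    # placeholders:
    #
    #     def calculate_special_number(context):
    #         return context.get_current_parameters()["counter"] + 12
    #
    #     t = Table(
    #         "my_table", metadata,
    #         Column("counter", Integer),
    #         Column("special_number", Integer,
    #                default=calculate_special_number),
    #     )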
def get_insert_default(self, column):
if column.default is None:
return None
else:
return self._exec_default(column, column.default, column.type)
def get_update_default(self, column):
if column.onupdate is None:
return None
else:
return self._exec_default(column, column.onupdate, column.type)
def _process_executemany_defaults(self):
key_getter = self.compiled._key_getters_for_crud_column[2]
scalar_defaults = {}
insert_prefetch = self.compiled.insert_prefetch
update_prefetch = self.compiled.update_prefetch
# pre-determine scalar Python-side defaults
# to avoid many calls of get_insert_default()/
# get_update_default()
for c in insert_prefetch:
if c.default and c.default.is_scalar:
scalar_defaults[c] = c.default.arg
for c in update_prefetch:
if c.onupdate and c.onupdate.is_scalar:
scalar_defaults[c] = c.onupdate.arg
for param in self.compiled_parameters:
self.current_parameters = param
for c in insert_prefetch:
if c in scalar_defaults:
val = scalar_defaults[c]
else:
val = self.get_insert_default(c)
if val is not None:
param[key_getter(c)] = val
for c in update_prefetch:
if c in scalar_defaults:
val = scalar_defaults[c]
else:
val = self.get_update_default(c)
if val is not None:
param[key_getter(c)] = val
del self.current_parameters
def _process_executesingle_defaults(self):
key_getter = self.compiled._key_getters_for_crud_column[2]
self.current_parameters = (
compiled_parameters
) = self.compiled_parameters[0]
for c in self.compiled.insert_prefetch:
if c.default and not c.default.is_sequence and c.default.is_scalar:
val = c.default.arg
else:
val = self.get_insert_default(c)
if val is not None:
compiled_parameters[key_getter(c)] = val
for c in self.compiled.update_prefetch:
val = self.get_update_default(c)
if val is not None:
compiled_parameters[key_getter(c)] = val
del self.current_parameters
DefaultDialect.execution_ctx_cls = DefaultExecutionContext
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/engine/base.py
|
# engine/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import with_statement
import contextlib
import sys
from .interfaces import Connectable
from .interfaces import ExceptionContext
from .util import _distill_params
from .. import exc
from .. import interfaces
from .. import log
from .. import util
from ..sql import schema
from ..sql import util as sql_util
"""Defines :class:`_engine.Connection` and :class:`_engine.Engine`.
"""
class Connection(Connectable):
"""Provides high-level functionality for a wrapped DB-API connection.
Provides execution support for string-based SQL statements as well as
:class:`_expression.ClauseElement`, :class:`.Compiled` and
:class:`.DefaultGenerator`
objects. Provides a :meth:`begin` method to return :class:`.Transaction`
objects.
The Connection object is **not** thread-safe. While a Connection can be
shared among threads using properly synchronized access, it is still
possible that the underlying DBAPI connection may not support shared
access between threads. Check the DBAPI documentation for details.
The Connection object represents a single dbapi connection checked out
    from the connection pool. In this state, the connection pool has no effect
upon the connection, including its expiration or timeout state. For the
connection pool to properly manage connections, connections should be
returned to the connection pool (i.e. ``connection.close()``) whenever the
connection is not in use.
.. index::
single: thread safety; Connection
"""
schema_for_object = schema._schema_getter(None)
"""Return the ".schema" attribute for an object.
Used for :class:`_schema.Table`, :class:`.Sequence` and similar objects,
and takes into account
the :paramref:`.Connection.execution_options.schema_translate_map`
parameter.
.. versionadded:: 1.1
.. seealso::
:ref:`schema_translating`
"""
def __init__(
self,
engine,
connection=None,
close_with_result=False,
_branch_from=None,
_execution_options=None,
_dispatch=None,
_has_events=None,
):
"""Construct a new Connection.
        The constructor here is not public and is only called by an
:class:`_engine.Engine`. See :meth:`_engine.Engine.connect` and
:meth:`_engine.Engine.contextual_connect` methods.
"""
self.engine = engine
self.dialect = engine.dialect
self.__branch_from = _branch_from
self.__branch = _branch_from is not None
if _branch_from:
self.__connection = connection
self._execution_options = _execution_options
self._echo = _branch_from._echo
self.should_close_with_result = False
self.dispatch = _dispatch
self._has_events = _branch_from._has_events
self.schema_for_object = _branch_from.schema_for_object
else:
self.__connection = (
connection
if connection is not None
else engine.raw_connection()
)
self.__transaction = None
self.__savepoint_seq = 0
self.should_close_with_result = close_with_result
self.__invalid = False
self.__can_reconnect = True
self._echo = self.engine._should_log_info()
if _has_events is None:
# if _has_events is sent explicitly as False,
# then don't join the dispatch of the engine; we don't
# want to handle any of the engine's events in that case.
self.dispatch = self.dispatch._join(engine.dispatch)
self._has_events = _has_events or (
_has_events is None and engine._has_events
)
assert not _execution_options
self._execution_options = engine._execution_options
if self._has_events or self.engine._has_events:
self.dispatch.engine_connect(self, self.__branch)
def _branch(self):
"""Return a new Connection which references this Connection's
engine and connection; but does not have close_with_result enabled,
and also whose close() method does nothing.
The Core uses this very sparingly, only in the case of
custom SQL default functions that are to be INSERTed as the
primary key of a row where we need to get the value back, so we have
to invoke it distinctly - this is a very uncommon case.
Userland code accesses _branch() when the connect() or
contextual_connect() methods are called. The branched connection
acts as much as possible like the parent, except that it stays
connected when a close() event occurs.
"""
if self.__branch_from:
return self.__branch_from._branch()
else:
return self.engine._connection_cls(
self.engine,
self.__connection,
_branch_from=self,
_execution_options=self._execution_options,
_has_events=self._has_events,
_dispatch=self.dispatch,
)
@property
def _root(self):
"""return the 'root' connection.
Returns 'self' if this connection is not a branch, else
returns the root connection from which we ultimately branched.
"""
if self.__branch_from:
return self.__branch_from
else:
return self
def _clone(self):
"""Create a shallow copy of this Connection.
"""
c = self.__class__.__new__(self.__class__)
c.__dict__ = self.__dict__.copy()
return c
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.close()
def execution_options(self, **opt):
r""" Set non-SQL options for the connection which take effect
during execution.
The method returns a copy of this :class:`_engine.Connection`
which references
the same underlying DBAPI connection, but also defines the given
execution options which will take effect for a call to
:meth:`execute`. As the new :class:`_engine.Connection`
references the same
underlying resource, it's usually a good idea to ensure that the copies
will be discarded immediately, which is implicit if used as in::
result = connection.execution_options(stream_results=True).\
execute(stmt)
Note that any key/value can be passed to
:meth:`_engine.Connection.execution_options`,
and it will be stored in the
``_execution_options`` dictionary of the :class:`_engine.Connection`.
It
is suitable for usage by end-user schemes to communicate with
event listeners, for example.
The keywords that are currently recognized by SQLAlchemy itself
include all those listed under :meth:`.Executable.execution_options`,
as well as others that are specific to :class:`_engine.Connection`.
:param autocommit: Available on: Connection, statement.
When True, a COMMIT will be invoked after execution
when executed in 'autocommit' mode, i.e. when an explicit
transaction is not begun on the connection. Note that DBAPI
connections by default are always in a transaction - SQLAlchemy uses
rules applied to different kinds of statements to determine if
COMMIT will be invoked in order to provide its "autocommit" feature.
Typically, all INSERT/UPDATE/DELETE statements as well as
CREATE/DROP statements have autocommit behavior enabled; SELECT
constructs do not. Use this option when invoking a SELECT or other
specific SQL construct where COMMIT is desired (typically when
calling stored procedures and such), and an explicit
transaction is not in progress.
:param compiled_cache: Available on: Connection.
A dictionary where :class:`.Compiled` objects
will be cached when the :class:`_engine.Connection`
compiles a clause
expression into a :class:`.Compiled` object.
It is the user's responsibility to
manage the size of this dictionary, which will have keys
corresponding to the dialect, clause element, the column
names within the VALUES or SET clause of an INSERT or UPDATE,
as well as the "batch" mode for an INSERT or UPDATE statement.
The format of this dictionary is not guaranteed to stay the
same in future releases.
Note that the ORM makes use of its own "compiled" caches for
some operations, including flush operations. The caching
used by the ORM internally supersedes a cache dictionary
specified here.
:param isolation_level: Available on: :class:`_engine.Connection`.
Set the transaction isolation level for the lifespan of this
:class:`_engine.Connection` object.
Valid values include those string
values accepted by the :paramref:`_sa.create_engine.isolation_level`
parameter passed to :func:`_sa.create_engine`. These levels are
semi-database specific; see individual dialect documentation for
valid levels.
The isolation level option applies the isolation level by emitting
statements on the DBAPI connection, and **necessarily affects the
original Connection object overall**, not just the copy that is
returned by the call to :meth:`_engine.Connection.execution_options`
method. The isolation level will remain at the given setting until
the DBAPI connection itself is returned to the connection pool, i.e.
the :meth:`_engine.Connection.close` method on the original
:class:`_engine.Connection` is called,
where an event handler will emit
additional statements on the DBAPI connection in order to revert the
isolation level change.
.. warning:: The ``isolation_level`` execution option should
**not** be used when a transaction is already established, that
is, the :meth:`_engine.Connection.begin`
method or similar has been
called. A database cannot change the isolation level on a
transaction in progress, and different DBAPIs and/or
SQLAlchemy dialects may implicitly roll back or commit
the transaction, or not affect the connection at all.
.. note:: The ``isolation_level`` execution option is implicitly
reset if the :class:`_engine.Connection` is invalidated, e.g. via
the :meth:`_engine.Connection.invalidate` method, or if a
disconnection error occurs. The new connection produced after
the invalidation will not have the isolation level re-applied
to it automatically.
.. seealso::
:paramref:`_sa.create_engine.isolation_level`
- set per :class:`_engine.Engine` isolation level
:meth:`_engine.Connection.get_isolation_level`
- view current level
:ref:`SQLite Transaction Isolation <sqlite_isolation_level>`
:ref:`PostgreSQL Transaction Isolation <postgresql_isolation_level>`
:ref:`MySQL Transaction Isolation <mysql_isolation_level>`
:ref:`SQL Server Transaction Isolation <mssql_isolation_level>`
:ref:`session_transaction_isolation` - for the ORM
:param no_parameters: When ``True``, if the final parameter
list or dictionary is totally empty, will invoke the
statement on the cursor as ``cursor.execute(statement)``,
not passing the parameter collection at all.
Some DBAPIs such as psycopg2 and mysql-python consider
percent signs as significant only when parameters are
present; this option allows code to generate SQL
containing percent signs (and possibly other characters)
that is neutral regarding whether it's executed by the DBAPI
or piped into a script that's later invoked by
command line tools.
:param stream_results: Available on: Connection, statement.
Indicate to the dialect that results should be
"streamed" and not pre-buffered, if possible. This is a limitation
of many DBAPIs. The flag is currently understood only by the
psycopg2, mysqldb and pymysql dialects.
:param schema_translate_map: Available on: Connection, Engine.
A dictionary mapping schema names to schema names, that will be
applied to the :paramref:`_schema.Table.schema` element of each
:class:`_schema.Table`
encountered when SQL or DDL expression elements
are compiled into strings; the resulting schema name will be
converted based on presence in the map of the original name.
.. versionadded:: 1.1
.. seealso::
:ref:`schema_translating`
.. seealso::
:meth:`_engine.Engine.execution_options`
:meth:`.Executable.execution_options`
:meth:`_engine.Connection.get_execution_options`
""" # noqa
c = self._clone()
c._execution_options = c._execution_options.union(opt)
if self._has_events or self.engine._has_events:
self.dispatch.set_connection_execution_options(c, opt)
self.dialect.set_connection_execution_options(c, opt)
return c
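    # Illustrative sketch: applying an isolation level for the span of a
    # checked-out connection; the level name is dialect-specific and assumed
    # to be supported by the backend in use:
    #
    #     with engine.connect() as conn:
    #         conn = conn.execution_options(isolation_level="SERIALIZABLE")
    #         conn.execute(some_statement)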
def get_execution_options(self):
""" Get the non-SQL options which will take effect during execution.
.. versionadded:: 1.3
.. seealso::
:meth:`_engine.Connection.execution_options`
"""
return self._execution_options
@property
def closed(self):
"""Return True if this connection is closed."""
return (
"_Connection__connection" not in self.__dict__
and not self.__can_reconnect
)
@property
def invalidated(self):
"""Return True if this connection was invalidated."""
return self._root.__invalid
@property
def connection(self):
"""The underlying DB-API connection managed by this Connection.
.. seealso::
:ref:`dbapi_connections`
"""
try:
return self.__connection
except AttributeError:
# escape "except AttributeError" before revalidating
# to prevent misleading stacktraces in Py3K
pass
try:
return self._revalidate_connection()
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
def get_isolation_level(self):
"""Return the current isolation level assigned to this
:class:`_engine.Connection`.
This will typically be the default isolation level as determined
        by the dialect, unless the
:paramref:`.Connection.execution_options.isolation_level`
feature has been used to alter the isolation level on a
per-:class:`_engine.Connection` basis.
        This method will typically perform a live SQL operation in order
to procure the current isolation level, so the value returned is the
actual level on the underlying DBAPI connection regardless of how
this state was set. Compare to the
:attr:`_engine.Connection.default_isolation_level` accessor
which returns the dialect-level setting without performing a SQL
query.
.. versionadded:: 0.9.9
.. seealso::
:attr:`_engine.Connection.default_isolation_level`
- view default level
:paramref:`_sa.create_engine.isolation_level`
- set per :class:`_engine.Engine` isolation level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`_engine.Connection` isolation level
"""
try:
return self.dialect.get_isolation_level(self.connection)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
@property
def default_isolation_level(self):
"""The default isolation level assigned to this
:class:`_engine.Connection`.
This is the isolation level setting that the
:class:`_engine.Connection`
has when first procured via the :meth:`_engine.Engine.connect` method.
This level stays in place until the
:paramref:`.Connection.execution_options.isolation_level` is used
to change the setting on a per-:class:`_engine.Connection` basis.
Unlike :meth:`_engine.Connection.get_isolation_level`,
this attribute is set
ahead of time from the first connection procured by the dialect,
        so no SQL query is invoked when this accessor is called.
.. versionadded:: 0.9.9
.. seealso::
:meth:`_engine.Connection.get_isolation_level`
- view current level
:paramref:`_sa.create_engine.isolation_level`
- set per :class:`_engine.Engine` isolation level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`_engine.Connection` isolation level
"""
return self.dialect.default_isolation_level
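    # Illustrative sketch contrasting the two accessors; no SQL is emitted
    # for default_isolation_level, while get_isolation_level() queries the
    # DBAPI connection:
    #
    #     with engine.connect() as conn:
    #         dialect_level = conn.default_isolation_level  # cached value
    #         live_level = conn.get_isolation_level()       # live query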
def _revalidate_connection(self):
if self.__branch_from:
return self.__branch_from._revalidate_connection()
if self.__can_reconnect and self.__invalid:
if self.__transaction is not None:
raise exc.InvalidRequestError(
"Can't reconnect until invalid "
"transaction is rolled back"
)
self.__connection = self.engine.raw_connection(_connection=self)
self.__invalid = False
return self.__connection
raise exc.ResourceClosedError("This Connection is closed")
@property
def _connection_is_valid(self):
# use getattr() for is_valid to support exceptions raised in
# dialect initializer, where the connection is not wrapped in
# _ConnectionFairy
return getattr(self.__connection, "is_valid", False)
@property
def _still_open_and_connection_is_valid(self):
return (
not self.closed
and not self.invalidated
and getattr(self.__connection, "is_valid", False)
)
@property
def info(self):
"""Info dictionary associated with the underlying DBAPI connection
referred to by this :class:`_engine.Connection`, allowing user-defined
data to be associated with the connection.
        The data here will follow along with the DBAPI connection, including
after it is returned to the connection pool and used again
in subsequent instances of :class:`_engine.Connection`.
"""
return self.connection.info
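    # Illustrative sketch: user-defined state stored in .info travels with
    # the pooled DBAPI connection; the key name below is arbitrary:
    #
    #     with engine.connect() as conn:
    #         conn.info["request_id"] = "abc-123"
    #         # a later Connection wrapping the same pooled DBAPI connection
    #         # will see the same entry via conn.info.get("request_id")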
def connect(self):
"""Returns a branched version of this :class:`_engine.Connection`.
The :meth:`_engine.Connection.close` method on the returned
:class:`_engine.Connection` can be called and this
:class:`_engine.Connection` will remain open.
This method provides usage symmetry with
:meth:`_engine.Engine.connect`, including for usage
with context managers.
"""
return self._branch()
def _contextual_connect(self, **kwargs):
return self._branch()
def invalidate(self, exception=None):
"""Invalidate the underlying DBAPI connection associated with
this :class:`_engine.Connection`.
The underlying DBAPI connection is literally closed (if
possible), and is discarded. Its source connection pool will
typically lazily create a new connection to replace it.
Upon the next use (where "use" typically means using the
:meth:`_engine.Connection.execute` method or similar),
this :class:`_engine.Connection` will attempt to
procure a new DBAPI connection using the services of the
:class:`_pool.Pool` as a source of connectivity (e.g.
a "reconnection").
If a transaction was in progress (e.g. the
:meth:`_engine.Connection.begin` method has been called) when
:meth:`_engine.Connection.invalidate` method is called, at the DBAPI
level all state associated with this transaction is lost, as
the DBAPI connection is closed. The :class:`_engine.Connection`
will not allow a reconnection to proceed until the
:class:`.Transaction` object is ended, by calling the
:meth:`.Transaction.rollback` method; until that point, any attempt at
continuing to use the :class:`_engine.Connection` will raise an
:class:`~sqlalchemy.exc.InvalidRequestError`.
This is to prevent applications from accidentally
        continuing ongoing transactional operations despite the
fact that the transaction has been lost due to an
invalidation.
The :meth:`_engine.Connection.invalidate` method,
just like auto-invalidation,
will at the connection pool level invoke the
:meth:`_events.PoolEvents.invalidate` event.
.. seealso::
:ref:`pool_connection_invalidation`
"""
if self.invalidated:
return
if self.closed:
raise exc.ResourceClosedError("This Connection is closed")
if self._root._connection_is_valid:
self._root.__connection.invalidate(exception)
del self._root.__connection
self._root.__invalid = True
def detach(self):
"""Detach the underlying DB-API connection from its connection pool.
E.g.::
with engine.connect() as conn:
conn.detach()
conn.execute("SET search_path TO schema1, schema2")
# work with connection
# connection is fully closed (since we used "with:", can
# also call .close())
This :class:`_engine.Connection` instance will remain usable.
When closed
(or exited from a context manager context as above),
the DB-API connection will be literally closed and not
returned to its originating pool.
This method can be used to insulate the rest of an application
from a modified state on a connection (such as a transaction
isolation level or similar).
"""
self.__connection.detach()
def begin(self):
"""Begin a transaction and return a transaction handle.
The returned object is an instance of :class:`.Transaction`.
This object represents the "scope" of the transaction,
which completes when either the :meth:`.Transaction.rollback`
or :meth:`.Transaction.commit` method is called.
Nested calls to :meth:`.begin` on the same :class:`_engine.Connection`
will return new :class:`.Transaction` objects that represent
an emulated transaction within the scope of the enclosing
transaction, that is::
trans = conn.begin() # outermost transaction
trans2 = conn.begin() # "nested"
trans2.commit() # does nothing
trans.commit() # actually commits
Calls to :meth:`.Transaction.commit` only have an effect
when invoked via the outermost :class:`.Transaction` object, though the
:meth:`.Transaction.rollback` method of any of the
:class:`.Transaction` objects will roll back the
transaction.
.. seealso::
:meth:`_engine.Connection.begin_nested` - use a SAVEPOINT
:meth:`_engine.Connection.begin_twophase` -
use a two phase /XID transaction
:meth:`_engine.Engine.begin` - context manager available from
:class:`_engine.Engine`
"""
if self.__branch_from:
return self.__branch_from.begin()
if self.__transaction is None:
self.__transaction = RootTransaction(self)
return self.__transaction
else:
return Transaction(self, self.__transaction)
def begin_nested(self):
"""Begin a nested transaction and return a transaction handle.
The returned object is an instance of :class:`.NestedTransaction`.
Nested transactions require SAVEPOINT support in the
underlying database. Any transaction in the hierarchy may
        ``commit`` and ``rollback``; however, the outermost transaction
        still controls the overall ``commit`` or ``rollback`` of the
        transaction as a whole.
.. seealso::
:meth:`_engine.Connection.begin`
:meth:`_engine.Connection.begin_twophase`
"""
if self.__branch_from:
return self.__branch_from.begin_nested()
if self.__transaction is None:
self.__transaction = RootTransaction(self)
else:
self.__transaction = NestedTransaction(self, self.__transaction)
return self.__transaction
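    # Illustrative sketch (requires SAVEPOINT support in the backend); the
    # nested transaction rolls back independently of the enclosing one:
    #
    #     with engine.connect() as conn:
    #         outer = conn.begin()
    #         conn.execute(table.insert(), {"x": 1})
    #         nested = conn.begin_nested()
    #         conn.execute(table.insert(), {"x": 2})
    #         nested.rollback()   # discards only the second INSERT
    #         outer.commit()      # the first INSERT is committed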
def begin_twophase(self, xid=None):
"""Begin a two-phase or XA transaction and return a transaction
handle.
The returned object is an instance of :class:`.TwoPhaseTransaction`,
which in addition to the methods provided by
:class:`.Transaction`, also provides a
:meth:`~.TwoPhaseTransaction.prepare` method.
:param xid: the two phase transaction id. If not supplied, a
random id will be generated.
.. seealso::
:meth:`_engine.Connection.begin`
:meth:`_engine.Connection.begin_twophase`
"""
if self.__branch_from:
return self.__branch_from.begin_twophase(xid=xid)
if self.__transaction is not None:
raise exc.InvalidRequestError(
"Cannot start a two phase transaction when a transaction "
"is already in progress."
)
if xid is None:
xid = self.engine.dialect.create_xid()
self.__transaction = TwoPhaseTransaction(self, xid)
return self.__transaction
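    # Illustrative sketch (requires a backend / DBAPI with two-phase commit
    # support, e.g. XA transactions):
    #
    #     with engine.connect() as conn:
    #         trans = conn.begin_twophase()
    #         conn.execute(table.insert(), {"x": 1})
    #         trans.prepare()   # phase one
    #         trans.commit()    # phase two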
def recover_twophase(self):
return self.engine.dialect.do_recover_twophase(self)
def rollback_prepared(self, xid, recover=False):
self.engine.dialect.do_rollback_twophase(self, xid, recover=recover)
def commit_prepared(self, xid, recover=False):
self.engine.dialect.do_commit_twophase(self, xid, recover=recover)
def in_transaction(self):
"""Return True if a transaction is in progress."""
return self._root.__transaction is not None
def _begin_impl(self, transaction):
assert not self.__branch_from
if self._echo:
self.engine.logger.info("BEGIN (implicit)")
if self._has_events or self.engine._has_events:
self.dispatch.begin(self)
try:
self.engine.dialect.do_begin(self.connection)
if self.connection._reset_agent is None:
self.connection._reset_agent = transaction
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
def _rollback_impl(self):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.rollback(self)
if self._still_open_and_connection_is_valid:
if self._echo:
self.engine.logger.info("ROLLBACK")
try:
self.engine.dialect.do_rollback(self.connection)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
finally:
if (
not self.__invalid
and self.connection._reset_agent is self.__transaction
):
self.connection._reset_agent = None
self.__transaction = None
else:
self.__transaction = None
def _commit_impl(self, autocommit=False):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.commit(self)
if self._echo:
self.engine.logger.info("COMMIT")
try:
self.engine.dialect.do_commit(self.connection)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
finally:
if (
not self.__invalid
and self.connection._reset_agent is self.__transaction
):
self.connection._reset_agent = None
self.__transaction = None
def _savepoint_impl(self, name=None):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.savepoint(self, name)
if name is None:
self.__savepoint_seq += 1
name = "sa_savepoint_%s" % self.__savepoint_seq
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_savepoint(self, name)
return name
def _discard_transaction(self, trans):
if trans is self.__transaction:
if trans._parent is trans:
self.__transaction = None
else:
self.__transaction = trans._parent
if self._still_open_and_connection_is_valid:
if self.__connection._reset_agent is trans:
self.__connection._reset_agent = None
def _rollback_to_savepoint_impl(self, name, context):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.rollback_savepoint(self, name, context)
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_rollback_to_savepoint(self, name)
self.__transaction = context
def _release_savepoint_impl(self, name, context):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.release_savepoint(self, name, context)
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_release_savepoint(self, name)
self.__transaction = context
def _begin_twophase_impl(self, transaction):
assert not self.__branch_from
if self._echo:
self.engine.logger.info("BEGIN TWOPHASE (implicit)")
if self._has_events or self.engine._has_events:
self.dispatch.begin_twophase(self, transaction.xid)
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_begin_twophase(self, transaction.xid)
if self.connection._reset_agent is None:
self.connection._reset_agent = transaction
def _prepare_twophase_impl(self, xid):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.prepare_twophase(self, xid)
if self._still_open_and_connection_is_valid:
assert isinstance(self.__transaction, TwoPhaseTransaction)
self.engine.dialect.do_prepare_twophase(self, xid)
def _rollback_twophase_impl(self, xid, is_prepared):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.rollback_twophase(self, xid, is_prepared)
if self._still_open_and_connection_is_valid:
assert isinstance(self.__transaction, TwoPhaseTransaction)
try:
self.engine.dialect.do_rollback_twophase(
self, xid, is_prepared
)
finally:
if self.connection._reset_agent is self.__transaction:
self.connection._reset_agent = None
self.__transaction = None
else:
self.__transaction = None
def _commit_twophase_impl(self, xid, is_prepared):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.commit_twophase(self, xid, is_prepared)
if self._still_open_and_connection_is_valid:
assert isinstance(self.__transaction, TwoPhaseTransaction)
try:
self.engine.dialect.do_commit_twophase(self, xid, is_prepared)
finally:
if self.connection._reset_agent is self.__transaction:
self.connection._reset_agent = None
self.__transaction = None
else:
self.__transaction = None
def _autorollback(self):
if not self._root.in_transaction():
self._root._rollback_impl()
def close(self):
"""Close this :class:`_engine.Connection`.
This results in a release of the underlying database
resources, that is, the DBAPI connection referenced
internally. The DBAPI connection is typically restored
back to the connection-holding :class:`_pool.Pool` referenced
by the :class:`_engine.Engine` that produced this
:class:`_engine.Connection`. Any transactional state present on
the DBAPI connection is also unconditionally released via
the DBAPI connection's ``rollback()`` method, regardless
of any :class:`.Transaction` object that may be
outstanding with regards to this :class:`_engine.Connection`.
After :meth:`_engine.Connection.close` is called, the
:class:`_engine.Connection` is permanently in a closed state,
and will allow no further operations.
"""
if self.__branch_from:
try:
del self.__connection
except AttributeError:
pass
finally:
self.__can_reconnect = False
return
try:
conn = self.__connection
except AttributeError:
pass
else:
conn.close()
if conn._reset_agent is self.__transaction:
conn._reset_agent = None
# the close() process can end up invalidating us,
# as the pool will call our transaction as the "reset_agent"
# for rollback(), which can then cause an invalidation
if not self.__invalid:
del self.__connection
self.__can_reconnect = False
self.__transaction = None
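    # Illustrative sketch: close() is invoked implicitly when the Connection
    # is used as a context manager:
    #
    #     with engine.connect() as conn:
    #         conn.execute(some_statement)
    #     # conn.closed is now True; further use raises ResourceClosedError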
def scalar(self, object_, *multiparams, **params):
"""Executes and returns the first column of the first row.
The underlying result/cursor is closed after execution.
"""
return self.execute(object_, *multiparams, **params).scalar()
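    # Illustrative sketch: fetching a single value, here the row count of a
    # hypothetical "users" table:
    #
    #     from sqlalchemy import func, select
    #
    #     count = conn.scalar(select([func.count()]).select_from(users))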
def execute(self, object_, *multiparams, **params):
r"""Executes a SQL statement construct and returns a
:class:`_engine.ResultProxy`.
:param object: The statement to be executed. May be
one of:
* a plain string
* any :class:`_expression.ClauseElement` construct that is also
a subclass of :class:`.Executable`, such as a
:func:`_expression.select` construct
* a :class:`.FunctionElement`, such as that generated
by :data:`.func`, will be automatically wrapped in
a SELECT statement, which is then executed.
* a :class:`.DDLElement` object
* a :class:`.DefaultGenerator` object
* a :class:`.Compiled` object
:param \*multiparams/\**params: represent bound parameter
values to be used in the execution. Typically,
the format is either a collection of one or more
dictionaries passed to \*multiparams::
conn.execute(
table.insert(),
{"id":1, "value":"v1"},
{"id":2, "value":"v2"}
)
...or individual key/values interpreted by \**params::
conn.execute(
table.insert(), id=1, value="v1"
)
In the case that a plain SQL string is passed, and the underlying
DBAPI accepts positional bind parameters, a collection of tuples
or individual values in \*multiparams may be passed::
conn.execute(
"INSERT INTO table (id, value) VALUES (?, ?)",
(1, "v1"), (2, "v2")
)
conn.execute(
"INSERT INTO table (id, value) VALUES (?, ?)",
1, "v1"
)
Note above, the usage of a question mark "?" or other
symbol is contingent upon the "paramstyle" accepted by the DBAPI
in use, which may be any of "qmark", "named", "pyformat", "format",
"numeric". See `pep-249 <http://www.python.org/dev/peps/pep-0249/>`_
for details on paramstyle.
To execute a textual SQL statement which uses bound parameters in a
DBAPI-agnostic way, use the :func:`_expression.text` construct.
"""
if isinstance(object_, util.string_types[0]):
return self._execute_text(object_, multiparams, params)
try:
meth = object_._execute_on_connection
except AttributeError as err:
util.raise_(
exc.ObjectNotExecutableError(object_), replace_context=err
)
else:
return meth(self, multiparams, params)
def _execute_function(self, func, multiparams, params):
"""Execute a sql.FunctionElement object."""
return self._execute_clauseelement(func.select(), multiparams, params)
def _execute_default(self, default, multiparams, params):
"""Execute a schema.ColumnDefault object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
default, multiparams, params = fn(
self, default, multiparams, params
)
try:
try:
conn = self.__connection
except AttributeError:
# escape "except AttributeError" before revalidating
# to prevent misleading stacktraces in Py3K
conn = None
if conn is None:
conn = self._revalidate_connection()
dialect = self.dialect
ctx = dialect.execution_ctx_cls._init_default(dialect, self, conn)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
ret = ctx._exec_default(None, default, None)
if self.should_close_with_result:
self.close()
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(
self, default, multiparams, params, ret
)
return ret
def _execute_ddl(self, ddl, multiparams, params):
"""Execute a schema.DDL object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
ddl, multiparams, params = fn(self, ddl, multiparams, params)
dialect = self.dialect
compiled = ddl.compile(
dialect=dialect,
schema_translate_map=self.schema_for_object
if not self.schema_for_object.is_default
else None,
)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_ddl,
compiled,
None,
compiled,
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(self, ddl, multiparams, params, ret)
return ret
def _execute_clauseelement(self, elem, multiparams, params):
"""Execute a sql.ClauseElement object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
elem, multiparams, params = fn(self, elem, multiparams, params)
distilled_params = _distill_params(multiparams, params)
if distilled_params:
# ensure we don't retain a link to the view object for keys()
# which links to the values, which we don't want to cache
keys = list(distilled_params[0].keys())
else:
keys = []
dialect = self.dialect
if "compiled_cache" in self._execution_options:
key = (
dialect,
elem,
tuple(sorted(keys)),
self.schema_for_object.hash_key,
len(distilled_params) > 1,
)
compiled_sql = self._execution_options["compiled_cache"].get(key)
if compiled_sql is None:
compiled_sql = elem.compile(
dialect=dialect,
column_keys=keys,
inline=len(distilled_params) > 1,
schema_translate_map=self.schema_for_object
if not self.schema_for_object.is_default
else None,
)
self._execution_options["compiled_cache"][key] = compiled_sql
else:
compiled_sql = elem.compile(
dialect=dialect,
column_keys=keys,
inline=len(distilled_params) > 1,
schema_translate_map=self.schema_for_object
if not self.schema_for_object.is_default
else None,
)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_compiled,
compiled_sql,
distilled_params,
compiled_sql,
distilled_params,
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(self, elem, multiparams, params, ret)
return ret
def _execute_compiled(self, compiled, multiparams, params):
"""Execute a sql.Compiled object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
compiled, multiparams, params = fn(
self, compiled, multiparams, params
)
dialect = self.dialect
parameters = _distill_params(multiparams, params)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_compiled,
compiled,
parameters,
compiled,
parameters,
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(
self, compiled, multiparams, params, ret
)
return ret
def _execute_text(self, statement, multiparams, params):
"""Execute a string SQL statement."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
statement, multiparams, params = fn(
self, statement, multiparams, params
)
dialect = self.dialect
parameters = _distill_params(multiparams, params)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_statement,
statement,
parameters,
statement,
parameters,
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(
self, statement, multiparams, params, ret
)
return ret
def _execute_context(
self, dialect, constructor, statement, parameters, *args
):
"""Create an :class:`.ExecutionContext` and execute, returning
a :class:`_engine.ResultProxy`."""
try:
try:
conn = self.__connection
except AttributeError:
# escape "except AttributeError" before revalidating
# to prevent misleading stacktraces in Py3K
conn = None
if conn is None:
conn = self._revalidate_connection()
context = constructor(dialect, self, conn, *args)
except BaseException as e:
self._handle_dbapi_exception(
e, util.text_type(statement), parameters, None, None
)
if context.compiled:
context.pre_exec()
cursor, statement, parameters = (
context.cursor,
context.statement,
context.parameters,
)
if not context.executemany:
parameters = parameters[0]
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_cursor_execute:
statement, parameters = fn(
self,
cursor,
statement,
parameters,
context,
context.executemany,
)
if self._echo:
self.engine.logger.info(statement)
if not self.engine.hide_parameters:
self.engine.logger.info(
"%r",
sql_util._repr_params(
parameters, batches=10, ismulti=context.executemany
),
)
else:
self.engine.logger.info(
"[SQL parameters hidden due to hide_parameters=True]"
)
evt_handled = False
try:
if context.executemany:
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_executemany:
if fn(cursor, statement, parameters, context):
evt_handled = True
break
if not evt_handled:
self.dialect.do_executemany(
cursor, statement, parameters, context
)
elif not parameters and context.no_parameters:
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_execute_no_params:
if fn(cursor, statement, context):
evt_handled = True
break
if not evt_handled:
self.dialect.do_execute_no_params(
cursor, statement, context
)
else:
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_execute:
if fn(cursor, statement, parameters, context):
evt_handled = True
break
if not evt_handled:
self.dialect.do_execute(
cursor, statement, parameters, context
)
if self._has_events or self.engine._has_events:
self.dispatch.after_cursor_execute(
self,
cursor,
statement,
parameters,
context,
context.executemany,
)
if context.compiled:
context.post_exec()
if context.is_crud or context.is_text:
result = context._setup_crud_result_proxy()
else:
result = context.get_result_proxy()
if result._metadata is None:
result._soft_close()
if context.should_autocommit and self._root.__transaction is None:
self._root._commit_impl(autocommit=True)
# for "connectionless" execution, we have to close this
# Connection after the statement is complete.
if self.should_close_with_result:
# ResultProxy already exhausted rows / has no rows.
# close us now
if result._soft_closed:
self.close()
else:
# ResultProxy will close this Connection when no more
# rows to fetch.
result._autoclose_connection = True
except BaseException as e:
self._handle_dbapi_exception(
e, statement, parameters, cursor, context
)
return result
def _cursor_execute(self, cursor, statement, parameters, context=None):
"""Execute a statement + params on the given cursor.
Adds appropriate logging and exception handling.
This method is used by DefaultDialect for special-case
executions, such as for sequences and column defaults.
The path of statement execution in the majority of cases
terminates at _execute_context().
"""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_cursor_execute:
statement, parameters = fn(
self, cursor, statement, parameters, context, False
)
if self._echo:
self.engine.logger.info(statement)
self.engine.logger.info("%r", parameters)
try:
for fn in (
()
if not self.dialect._has_events
else self.dialect.dispatch.do_execute
):
if fn(cursor, statement, parameters, context):
break
else:
self.dialect.do_execute(cursor, statement, parameters, context)
except BaseException as e:
self._handle_dbapi_exception(
e, statement, parameters, cursor, context
)
if self._has_events or self.engine._has_events:
self.dispatch.after_cursor_execute(
self, cursor, statement, parameters, context, False
)
def _safe_close_cursor(self, cursor):
"""Close the given cursor, catching exceptions
and turning into log warnings.
"""
try:
cursor.close()
except Exception:
# log the error through the connection pool's logger.
self.engine.pool.logger.error(
"Error closing cursor", exc_info=True
)
_reentrant_error = False
_is_disconnect = False
def _handle_dbapi_exception(
self, e, statement, parameters, cursor, context
):
exc_info = sys.exc_info()
if context and context.exception is None:
context.exception = e
is_exit_exception = not isinstance(e, Exception)
if not self._is_disconnect:
self._is_disconnect = (
isinstance(e, self.dialect.dbapi.Error)
and not self.closed
and self.dialect.is_disconnect(
e,
self.__connection if not self.invalidated else None,
cursor,
)
) or (is_exit_exception and not self.closed)
if context:
context.is_disconnect = self._is_disconnect
invalidate_pool_on_disconnect = not is_exit_exception
if self._reentrant_error:
util.raise_(
exc.DBAPIError.instance(
statement,
parameters,
e,
self.dialect.dbapi.Error,
hide_parameters=self.engine.hide_parameters,
dialect=self.dialect,
ismulti=context.executemany
if context is not None
else None,
),
with_traceback=exc_info[2],
from_=e,
)
self._reentrant_error = True
try:
# non-DBAPI error - if we already got a context,
# or there's no string statement, don't wrap it
should_wrap = isinstance(e, self.dialect.dbapi.Error) or (
statement is not None
and context is None
and not is_exit_exception
)
if should_wrap:
sqlalchemy_exception = exc.DBAPIError.instance(
statement,
parameters,
e,
self.dialect.dbapi.Error,
hide_parameters=self.engine.hide_parameters,
connection_invalidated=self._is_disconnect,
dialect=self.dialect,
ismulti=context.executemany
if context is not None
else None,
)
else:
sqlalchemy_exception = None
newraise = None
if (
self._has_events or self.engine._has_events
) and not self._execution_options.get(
"skip_user_error_events", False
):
# legacy dbapi_error event
if should_wrap and context:
self.dispatch.dbapi_error(
self, cursor, statement, parameters, context, e
)
# new handle_error event
ctx = ExceptionContextImpl(
e,
sqlalchemy_exception,
self.engine,
self,
cursor,
statement,
parameters,
context,
self._is_disconnect,
invalidate_pool_on_disconnect,
)
for fn in self.dispatch.handle_error:
try:
# handler returns an exception;
# call next handler in a chain
per_fn = fn(ctx)
if per_fn is not None:
ctx.chained_exception = newraise = per_fn
except Exception as _raised:
# handler raises an exception - stop processing
newraise = _raised
break
if self._is_disconnect != ctx.is_disconnect:
self._is_disconnect = ctx.is_disconnect
if sqlalchemy_exception:
sqlalchemy_exception.connection_invalidated = (
ctx.is_disconnect
)
# set up potentially user-defined value for
# invalidate pool.
invalidate_pool_on_disconnect = (
ctx.invalidate_pool_on_disconnect
)
if should_wrap and context:
context.handle_dbapi_exception(e)
if not self._is_disconnect:
if cursor:
self._safe_close_cursor(cursor)
with util.safe_reraise(warn_only=True):
self._autorollback()
if newraise:
util.raise_(newraise, with_traceback=exc_info[2], from_=e)
elif should_wrap:
util.raise_(
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
)
else:
util.raise_(exc_info[1], with_traceback=exc_info[2])
finally:
del self._reentrant_error
if self._is_disconnect:
del self._is_disconnect
if not self.invalidated:
dbapi_conn_wrapper = self.__connection
if invalidate_pool_on_disconnect:
self.engine.pool._invalidate(dbapi_conn_wrapper, e)
self.invalidate(e)
if self.should_close_with_result:
self.close()
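    # Illustrative sketch of a handle_error listener as consulted by the
    # loop above; returning an exception replaces the one being raised
    # (MyCustomError is a placeholder):
    #
    #     from sqlalchemy import event
    #
    #     @event.listens_for(Engine, "handle_error")
    #     def translate_exception(context):
    #         if "deadlock" in str(context.original_exception):
    #             return MyCustomError(
    #                 "retryable error", context.original_exception)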
@classmethod
def _handle_dbapi_exception_noconnection(cls, e, dialect, engine):
exc_info = sys.exc_info()
is_disconnect = dialect.is_disconnect(e, None, None)
should_wrap = isinstance(e, dialect.dbapi.Error)
if should_wrap:
sqlalchemy_exception = exc.DBAPIError.instance(
None,
None,
e,
dialect.dbapi.Error,
hide_parameters=engine.hide_parameters,
connection_invalidated=is_disconnect,
)
else:
sqlalchemy_exception = None
newraise = None
if engine._has_events:
ctx = ExceptionContextImpl(
e,
sqlalchemy_exception,
engine,
None,
None,
None,
None,
None,
is_disconnect,
True,
)
for fn in engine.dispatch.handle_error:
try:
# handler returns an exception;
# call next handler in a chain
per_fn = fn(ctx)
if per_fn is not None:
ctx.chained_exception = newraise = per_fn
except Exception as _raised:
# handler raises an exception - stop processing
newraise = _raised
break
if sqlalchemy_exception and is_disconnect != ctx.is_disconnect:
sqlalchemy_exception.connection_invalidated = (
is_disconnect
) = ctx.is_disconnect
if newraise:
util.raise_(newraise, with_traceback=exc_info[2], from_=e)
elif should_wrap:
util.raise_(
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
)
else:
util.raise_(exc_info[1], with_traceback=exc_info[2])
def transaction(self, callable_, *args, **kwargs):
r"""Execute the given function within a transaction boundary.
The function is passed this :class:`_engine.Connection`
as the first argument, followed by the given \*args and \**kwargs,
e.g.::
def do_something(conn, x, y):
conn.execute("some statement", {'x':x, 'y':y})
conn.transaction(do_something, 5, 10)
The operations inside the function are all invoked within the
context of a single :class:`.Transaction`.
Upon success, the transaction is committed. If an
exception is raised, the transaction is rolled back
before propagating the exception.
.. note::
The :meth:`.transaction` method is superseded by
the usage of the Python ``with:`` statement, which can
be used with :meth:`_engine.Connection.begin`::
with conn.begin():
conn.execute("some statement", {'x':5, 'y':10})
As well as with :meth:`_engine.Engine.begin`::
with engine.begin() as conn:
conn.execute("some statement", {'x':5, 'y':10})
.. seealso::
:meth:`_engine.Engine.begin` - engine-level transactional
context
:meth:`_engine.Engine.transaction` - engine-level version of
:meth:`_engine.Connection.transaction`
"""
trans = self.begin()
try:
ret = self.run_callable(callable_, *args, **kwargs)
trans.commit()
return ret
except:
with util.safe_reraise():
trans.rollback()
def run_callable(self, callable_, *args, **kwargs):
r"""Given a callable object or function, execute it, passing
a :class:`_engine.Connection` as the first argument.
The given \*args and \**kwargs are passed subsequent
to the :class:`_engine.Connection` argument.
This function, along with :meth:`_engine.Engine.run_callable`,
allows a function to be run with a :class:`_engine.Connection`
or :class:`_engine.Engine` object without the need to know
which one is being dealt with.
"""
return callable_(self, *args, **kwargs)
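    # Illustrative sketch: the same callable can be handed to either a
    # Connection or an Engine; "do_select" and "some_table" are placeholders:
    #
    #     def do_select(conn, table):
    #         return conn.execute(table.select()).fetchall()
    #
    #     rows = connection.run_callable(do_select, some_table)
    #     rows = engine.run_callable(do_select, some_table)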
def _run_visitor(self, visitorcallable, element, **kwargs):
visitorcallable(self.dialect, self, **kwargs).traverse_single(element)
class ExceptionContextImpl(ExceptionContext):
"""Implement the :class:`.ExceptionContext` interface."""
def __init__(
self,
exception,
sqlalchemy_exception,
engine,
connection,
cursor,
statement,
parameters,
context,
is_disconnect,
invalidate_pool_on_disconnect,
):
self.engine = engine
self.connection = connection
self.sqlalchemy_exception = sqlalchemy_exception
self.original_exception = exception
self.execution_context = context
self.statement = statement
self.parameters = parameters
self.is_disconnect = is_disconnect
self.invalidate_pool_on_disconnect = invalidate_pool_on_disconnect
class Transaction(object):
"""Represent a database transaction in progress.
The :class:`.Transaction` object is procured by
calling the :meth:`_engine.Connection.begin` method of
:class:`_engine.Connection`::
from sqlalchemy import create_engine
engine = create_engine("postgresql://scott:tiger@localhost/test")
connection = engine.connect()
trans = connection.begin()
connection.execute("insert into x (a, b) values (1, 2)")
trans.commit()
The object provides :meth:`.rollback` and :meth:`.commit`
methods in order to control transaction boundaries. It
also implements a context manager interface so that
the Python ``with`` statement can be used with the
:meth:`_engine.Connection.begin` method::
with connection.begin():
connection.execute("insert into x (a, b) values (1, 2)")
The Transaction object is **not** threadsafe.
.. seealso::
:meth:`_engine.Connection.begin`
:meth:`_engine.Connection.begin_twophase`
:meth:`_engine.Connection.begin_nested`
.. index::
single: thread safety; Transaction
"""
def __init__(self, connection, parent):
self.connection = connection
self._actual_parent = parent
self.is_active = True
@property
def _parent(self):
return self._actual_parent or self
def close(self):
"""Close this :class:`.Transaction`.
If this transaction is the base transaction in a begin/commit
        nesting, the transaction will roll back. Otherwise, the
method returns.
This is used to cancel a Transaction without affecting the scope of
an enclosing transaction.
"""
if self._parent.is_active and self._parent is self:
self.rollback()
self.connection._discard_transaction(self)
def rollback(self):
"""Roll back this :class:`.Transaction`.
"""
if self._parent.is_active:
self._do_rollback()
self.is_active = False
def _do_rollback(self):
self._parent.rollback()
def commit(self):
"""Commit this :class:`.Transaction`."""
if not self._parent.is_active:
raise exc.InvalidRequestError("This transaction is inactive")
self._do_commit()
self.is_active = False
def _do_commit(self):
pass
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
if type_ is None and self.is_active:
try:
self.commit()
except:
with util.safe_reraise():
self.rollback()
else:
self.rollback()
class RootTransaction(Transaction):
def __init__(self, connection):
super(RootTransaction, self).__init__(connection, None)
self.connection._begin_impl(self)
def _do_rollback(self):
if self.is_active:
self.connection._rollback_impl()
def _do_commit(self):
if self.is_active:
self.connection._commit_impl()
class NestedTransaction(Transaction):
"""Represent a 'nested', or SAVEPOINT transaction.
A new :class:`.NestedTransaction` object may be procured
using the :meth:`_engine.Connection.begin_nested` method.
The interface is the same as that of :class:`.Transaction`.
"""
def __init__(self, connection, parent):
super(NestedTransaction, self).__init__(connection, parent)
self._savepoint = self.connection._savepoint_impl()
def _do_rollback(self):
if self.is_active:
self.connection._rollback_to_savepoint_impl(
self._savepoint, self._parent
)
def _do_commit(self):
if self.is_active:
self.connection._release_savepoint_impl(
self._savepoint, self._parent
)
class TwoPhaseTransaction(Transaction):
"""Represent a two-phase transaction.
A new :class:`.TwoPhaseTransaction` object may be procured
using the :meth:`_engine.Connection.begin_twophase` method.
The interface is the same as that of :class:`.Transaction`
with the addition of the :meth:`prepare` method.
"""
def __init__(self, connection, xid):
super(TwoPhaseTransaction, self).__init__(connection, None)
self._is_prepared = False
self.xid = xid
self.connection._begin_twophase_impl(self)
def prepare(self):
"""Prepare this :class:`.TwoPhaseTransaction`.
After a PREPARE, the transaction can be committed.
"""
if not self._parent.is_active:
raise exc.InvalidRequestError("This transaction is inactive")
self.connection._prepare_twophase_impl(self.xid)
self._is_prepared = True
def _do_rollback(self):
self.connection._rollback_twophase_impl(self.xid, self._is_prepared)
def _do_commit(self):
self.connection._commit_twophase_impl(self.xid, self._is_prepared)
class Engine(Connectable, log.Identified):
"""
Connects a :class:`~sqlalchemy.pool.Pool` and
:class:`~sqlalchemy.engine.interfaces.Dialect` together to provide a
source of database connectivity and behavior.
An :class:`_engine.Engine` object is instantiated publicly using the
:func:`~sqlalchemy.create_engine` function.
.. seealso::
:doc:`/core/engines`
:ref:`connections_toplevel`
"""
_execution_options = util.immutabledict()
_has_events = False
_connection_cls = Connection
schema_for_object = schema._schema_getter(None)
"""Return the ".schema" attribute for an object.
Used for :class:`_schema.Table`, :class:`.Sequence` and similar objects,
and takes into account
the :paramref:`.Connection.execution_options.schema_translate_map`
parameter.
.. versionadded:: 1.1
.. seealso::
:ref:`schema_translating`
"""
def __init__(
self,
pool,
dialect,
url,
logging_name=None,
echo=None,
proxy=None,
execution_options=None,
hide_parameters=False,
):
self.pool = pool
self.url = url
self.dialect = dialect
if logging_name:
self.logging_name = logging_name
self.echo = echo
self.hide_parameters = hide_parameters
log.instance_logger(self, echoflag=echo)
if proxy:
interfaces.ConnectionProxy._adapt_listener(self, proxy)
if execution_options:
self.update_execution_options(**execution_options)
@property
def engine(self):
return self
def update_execution_options(self, **opt):
r"""Update the default execution_options dictionary
of this :class:`_engine.Engine`.
The given keys/values in \**opt are added to the
default execution options that will be used for
all connections. The initial contents of this dictionary
can be sent via the ``execution_options`` parameter
to :func:`_sa.create_engine`.
.. seealso::
:meth:`_engine.Connection.execution_options`
:meth:`_engine.Engine.execution_options`
"""
self._execution_options = self._execution_options.union(opt)
self.dispatch.set_engine_execution_options(self, opt)
self.dialect.set_engine_execution_options(self, opt)
def execution_options(self, **opt):
"""Return a new :class:`_engine.Engine` that will provide
:class:`_engine.Connection` objects with the given execution options.
The returned :class:`_engine.Engine` remains related to the original
:class:`_engine.Engine` in that it shares the same connection pool and
other state:
* The :class:`_pool.Pool` used by the new :class:`_engine.Engine`
is the
same instance. The :meth:`_engine.Engine.dispose`
method will replace
the connection pool instance for the parent engine as well
as this one.
* Event listeners are "cascaded" - meaning, the new
:class:`_engine.Engine`
inherits the events of the parent, and new events can be associated
with the new :class:`_engine.Engine` individually.
* The logging configuration and logging_name is copied from the parent
:class:`_engine.Engine`.
The intent of the :meth:`_engine.Engine.execution_options` method is
to implement "sharding" schemes where multiple :class:`_engine.Engine`
objects refer to the same connection pool, but are differentiated
by options that would be consumed by a custom event::
primary_engine = create_engine("mysql://")
shard1 = primary_engine.execution_options(shard_id="shard1")
shard2 = primary_engine.execution_options(shard_id="shard2")
Above, the ``shard1`` engine serves as a factory for
:class:`_engine.Connection`
objects that will contain the execution option
``shard_id=shard1``, and ``shard2`` will produce
:class:`_engine.Connection`
objects that contain the execution option ``shard_id=shard2``.
An event handler can consume the above execution option to perform
a schema switch or other operation, given a connection. Below
we emit a MySQL ``use`` statement to switch databases, at the same
time keeping track of which database we've established using the
:attr:`_engine.Connection.info` dictionary,
which gives us a persistent
storage space that follows the DBAPI connection::
from sqlalchemy import event
from sqlalchemy.engine import Engine
shards = {"default": "base", shard_1: "db1", "shard_2": "db2"}
@event.listens_for(Engine, "before_cursor_execute")
def _switch_shard(conn, cursor, stmt,
params, context, executemany):
shard_id = conn._execution_options.get('shard_id', "default")
current_shard = conn.info.get("current_shard", None)
if current_shard != shard_id:
cursor.execute("use %s" % shards[shard_id])
conn.info["current_shard"] = shard_id
.. seealso::
:meth:`_engine.Connection.execution_options`
- update execution options
on a :class:`_engine.Connection` object.
:meth:`_engine.Engine.update_execution_options`
- update the execution
options for a given :class:`_engine.Engine` in place.
:meth:`_engine.Engine.get_execution_options`
"""
return OptionEngine(self, opt)
def get_execution_options(self):
""" Get the non-SQL options which will take effect during execution.
.. versionadded: 1.3
.. seealso::
:meth:`_engine.Engine.execution_options`
"""
return self._execution_options
@property
def name(self):
"""String name of the :class:`~sqlalchemy.engine.interfaces.Dialect`
in use by this :class:`Engine`."""
return self.dialect.name
@property
def driver(self):
"""Driver name of the :class:`~sqlalchemy.engine.interfaces.Dialect`
in use by this :class:`Engine`."""
return self.dialect.driver
echo = log.echo_property()
def __repr__(self):
return "Engine(%r)" % self.url
def dispose(self):
"""Dispose of the connection pool used by this :class:`_engine.Engine`
.
This has the effect of fully closing all **currently checked in**
database connections. Connections that are still checked out
will **not** be closed, however they will no longer be associated
with this :class:`_engine.Engine`,
so when they are closed individually,
eventually the :class:`_pool.Pool` which they are associated with will
be garbage collected and they will be closed out fully, if
not already closed on checkin.
A new connection pool is created immediately after the old one has
been disposed. This new pool, like all SQLAlchemy connection pools,
does not make any actual connections to the database until one is
first requested, so as long as the :class:`_engine.Engine`
isn't used again,
no new connections will be made.
.. seealso::
:ref:`engine_disposal`
"""
self.pool.dispose()
self.pool = self.pool.recreate()
self.dispatch.engine_disposed(self)
def _execute_default(self, default):
with self._contextual_connect() as conn:
return conn._execute_default(default, (), {})
@contextlib.contextmanager
def _optional_conn_ctx_manager(self, connection=None):
if connection is None:
with self._contextual_connect() as conn:
yield conn
else:
yield connection
def _run_visitor(
self, visitorcallable, element, connection=None, **kwargs
):
with self._optional_conn_ctx_manager(connection) as conn:
conn._run_visitor(visitorcallable, element, **kwargs)
class _trans_ctx(object):
def __init__(self, conn, transaction, close_with_result):
self.conn = conn
self.transaction = transaction
self.close_with_result = close_with_result
def __enter__(self):
return self.conn
def __exit__(self, type_, value, traceback):
if type_ is not None:
self.transaction.rollback()
else:
self.transaction.commit()
if not self.close_with_result:
self.conn.close()
def begin(self, close_with_result=False):
"""Return a context manager delivering a :class:`_engine.Connection`
with a :class:`.Transaction` established.
E.g.::
with engine.begin() as conn:
conn.execute("insert into table (x, y, z) values (1, 2, 3)")
conn.execute("my_special_procedure(5)")
Upon successful operation, the :class:`.Transaction`
is committed. If an error is raised, the :class:`.Transaction`
is rolled back.
The ``close_with_result`` flag is normally ``False``, and indicates
that the :class:`_engine.Connection` will be closed when the operation
is complete. When set to ``True``, it indicates the
:class:`_engine.Connection` is in "single use" mode, where the
:class:`_engine.ResultProxy` returned by the first call to
:meth:`_engine.Connection.execute` will close the
:class:`_engine.Connection` when
that :class:`_engine.ResultProxy` has exhausted all result rows.
.. seealso::
:meth:`_engine.Engine.connect` - procure a
:class:`_engine.Connection` from
an :class:`_engine.Engine`.
:meth:`_engine.Connection.begin` - start a :class:`.Transaction`
for a particular :class:`_engine.Connection`.
"""
conn = self._contextual_connect(close_with_result=close_with_result)
try:
trans = conn.begin()
except:
with util.safe_reraise():
conn.close()
return Engine._trans_ctx(conn, trans, close_with_result)
def transaction(self, callable_, *args, **kwargs):
r"""Execute the given function within a transaction boundary.
The function is passed a :class:`_engine.Connection` newly procured
from :meth:`_engine.Engine.contextual_connect` as the first argument,
followed by the given \*args and \**kwargs.
e.g.::
def do_something(conn, x, y):
conn.execute("some statement", {'x':x, 'y':y})
engine.transaction(do_something, 5, 10)
The operations inside the function are all invoked within the
context of a single :class:`.Transaction`.
Upon success, the transaction is committed. If an
exception is raised, the transaction is rolled back
before propagating the exception.
.. note::
The :meth:`.transaction` method is superseded by
the usage of the Python ``with:`` statement, which can
be used with :meth:`_engine.Engine.begin`::
with engine.begin() as conn:
conn.execute("some statement", {'x':5, 'y':10})
.. seealso::
:meth:`_engine.Engine.begin` - engine-level transactional
context
:meth:`_engine.Connection.transaction`
- connection-level version of
:meth:`_engine.Engine.transaction`
"""
with self._contextual_connect() as conn:
return conn.transaction(callable_, *args, **kwargs)
def run_callable(self, callable_, *args, **kwargs):
r"""Given a callable object or function, execute it, passing
a :class:`_engine.Connection` as the first argument.
The given \*args and \**kwargs are passed subsequent
to the :class:`_engine.Connection` argument.
This function, along with :meth:`_engine.Connection.run_callable`,
allows a function to be run with a :class:`_engine.Connection`
or :class:`_engine.Engine` object without the need to know
which one is being dealt with.
"""
with self._contextual_connect() as conn:
return conn.run_callable(callable_, *args, **kwargs)
def execute(self, statement, *multiparams, **params):
"""Executes the given construct and returns a
:class:`_engine.ResultProxy`.
The arguments are the same as those used by
:meth:`_engine.Connection.execute`.
Here, a :class:`_engine.Connection` is acquired using the
:meth:`_engine.Engine.contextual_connect` method,
and the statement executed
with that connection. The returned :class:`_engine.ResultProxy`
is flagged
such that when the :class:`_engine.ResultProxy` is exhausted and its
underlying cursor is closed, the :class:`_engine.Connection`
created here
will also be closed, which allows its associated DBAPI connection
resource to be returned to the connection pool.
"""
connection = self._contextual_connect(close_with_result=True)
return connection.execute(statement, *multiparams, **params)
def scalar(self, statement, *multiparams, **params):
return self.execute(statement, *multiparams, **params).scalar()
def _execute_clauseelement(self, elem, multiparams=None, params=None):
connection = self._contextual_connect(close_with_result=True)
return connection._execute_clauseelement(elem, multiparams, params)
def _execute_compiled(self, compiled, multiparams, params):
connection = self._contextual_connect(close_with_result=True)
return connection._execute_compiled(compiled, multiparams, params)
def connect(self, **kwargs):
"""Return a new :class:`_engine.Connection` object.
The :class:`_engine.Connection` object is a facade that uses a DBAPI
connection internally in order to communicate with the database. This
connection is procured from the connection-holding :class:`_pool.Pool`
referenced by this :class:`_engine.Engine`. When the
:meth:`_engine.Connection.close` method of the
:class:`_engine.Connection` object
is called, the underlying DBAPI connection is then returned to the
connection pool, where it may be used again in a subsequent call to
:meth:`_engine.Engine.connect`.
"""
return self._connection_cls(self, **kwargs)
@util.deprecated(
"1.3",
"The :meth:`_engine.Engine.contextual_connect` method is deprecated. "
"This "
"method is an artifact of the threadlocal engine strategy which is "
"also to be deprecated. For explicit connections from an "
":class:`_engine.Engine`, use the :meth:`_engine.Engine.connect` "
"method.",
)
def contextual_connect(self, close_with_result=False, **kwargs):
"""Return a :class:`_engine.Connection`
object which may be part of some
ongoing context.
By default, this method does the same thing as
:meth:`_engine.Engine.connect`.
Subclasses of :class:`_engine.Engine` may override this method
to provide contextual behavior.
:param close_with_result: When True, the first
:class:`_engine.ResultProxy`
created by the :class:`_engine.Connection` will call the
:meth:`_engine.Connection.close`
method of that connection as soon as any
pending result rows are exhausted. This is used to supply the
"connectionless execution" behavior provided by the
:meth:`_engine.Engine.execute` method.
"""
return self._contextual_connect(
close_with_result=close_with_result, **kwargs
)
def _contextual_connect(self, close_with_result=False, **kwargs):
return self._connection_cls(
self,
self._wrap_pool_connect(self.pool.connect, None),
close_with_result=close_with_result,
**kwargs
)
def table_names(self, schema=None, connection=None):
"""Return a list of all table names available in the database.
:param schema: Optional, retrieve names from a non-default schema.
:param connection: Optional, use a specified connection. Default is
the ``contextual_connect`` for this ``Engine``.
"""
with self._optional_conn_ctx_manager(connection) as conn:
return self.dialect.get_table_names(conn, schema)
def has_table(self, table_name, schema=None):
"""Return True if the given backend has a table of the given name.
.. seealso::
:ref:`metadata_reflection_inspector` - detailed schema inspection
using the :class:`_reflection.Inspector` interface.
:class:`.quoted_name` - used to pass quoting information along
with a schema identifier.
"""
return self.run_callable(self.dialect.has_table, table_name, schema)
def _wrap_pool_connect(self, fn, connection):
dialect = self.dialect
try:
return fn()
except dialect.dbapi.Error as e:
if connection is None:
Connection._handle_dbapi_exception_noconnection(
e, dialect, self
)
else:
util.raise_(
sys.exc_info()[1], with_traceback=sys.exc_info()[2]
)
def raw_connection(self, _connection=None):
"""Return a "raw" DBAPI connection from the connection pool.
The returned object is a proxied version of the DBAPI
connection object used by the underlying driver in use.
The object will have all the same behavior as the real DBAPI
connection, except that its ``close()`` method will result in the
connection being returned to the pool, rather than being closed
for real.
This method provides direct DBAPI connection access for
special situations when the API provided by
:class:`_engine.Connection`
is not needed. When a :class:`_engine.Connection` object is already
present, the DBAPI connection is available using
the :attr:`_engine.Connection.connection` accessor.
.. seealso::
:ref:`dbapi_connections`
"""
return self._wrap_pool_connect(
self.pool.unique_connection, _connection
)
class OptionEngine(Engine):
_sa_propagate_class_events = False
def __init__(self, proxied, execution_options):
self._proxied = proxied
self.url = proxied.url
self.dialect = proxied.dialect
self.logging_name = proxied.logging_name
self.echo = proxied.echo
self.hide_parameters = proxied.hide_parameters
log.instance_logger(self, echoflag=self.echo)
# note: this will propagate events that are assigned to the parent
# engine after this OptionEngine is created. Since we share
# the events of the parent we also disallow class-level events
# to apply to the OptionEngine class directly.
#
# the other way this can work would be to transfer existing
# events only, using:
# self.dispatch._update(proxied.dispatch)
#
# that might be more appropriate however it would be a behavioral
# change for logic that assigns events to the parent engine and
# would like it to take effect for the already-created sub-engine.
self.dispatch = self.dispatch._join(proxied.dispatch)
self._execution_options = proxied._execution_options
self.update_execution_options(**execution_options)
def _get_pool(self):
return self._proxied.pool
def _set_pool(self, pool):
self._proxied.pool = pool
pool = property(_get_pool, _set_pool)
def _get_has_events(self):
return self._proxied._has_events or self.__dict__.get(
"_has_events", False
)
def _set_has_events(self, value):
self.__dict__["_has_events"] = value
_has_events = property(_get_has_events, _set_has_events)
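# A minimal, illustrative sketch of the Engine API documented above; the
# in-memory SQLite URL is an assumption chosen purely for demonstration and
# is not part of this module.
if __name__ == "__main__":
    from sqlalchemy import create_engine

    engine = create_engine("sqlite://")
    # engine.begin() commits on success and rolls back on error.
    with engine.begin() as conn:
        conn.execute("create table t (x integer)")
        conn.execute("insert into t (x) values (1)")
    # execution_options() returns a sibling Engine that shares the same pool.
    shard1 = engine.execution_options(shard_id="shard1")
    assert shard1.pool is engine.pool
    # dispose() closes checked-in connections and recreates the pool.
    engine.dispose()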
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/pool/dbapi_proxy.py
|
# sqlalchemy/pool/dbapi_proxy.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""DBAPI proxy utility.
Provides transparent connection pooling on top of a Python DBAPI.
This is legacy SQLAlchemy functionality that is not typically used
today.
"""
from .impl import QueuePool
from .. import util
from ..util import threading
proxies = {}
@util.deprecated(
"1.3",
"The :func:`.pool.manage` function is deprecated, and will be "
"removed in a future release.",
)
def manage(module, **params):
r"""Return a proxy for a DB-API module that automatically
pools connections.
Given a DB-API 2.0 module and pool management parameters, returns
a proxy for the module that will automatically pool connections,
creating new connection pools for each distinct set of connection
arguments sent to the decorated module's connect() function.
:param module: a DB-API 2.0 database module
:param poolclass: the class used by the pool module to provide
pooling. Defaults to :class:`.QueuePool`.
:param \**params: will be passed through to *poolclass*
"""
try:
return proxies[module]
except KeyError:
return proxies.setdefault(module, _DBProxy(module, **params))
def clear_managers():
"""Remove all current DB-API 2.0 managers.
All pools and connections are disposed.
"""
for manager in proxies.values():
manager.close()
proxies.clear()
class _DBProxy(object):
"""Layers connection pooling behavior on top of a standard DB-API module.
Proxies a DB-API 2.0 connect() call to a connection pool keyed to the
specific connect parameters. Other functions and attributes are delegated
to the underlying DB-API module.
"""
def __init__(self, module, poolclass=QueuePool, **kw):
"""Initializes a new proxy.
module
a DB-API 2.0 module
poolclass
a Pool class, defaulting to QueuePool
Other parameters are sent to the Pool object's constructor.
"""
self.module = module
self.kw = kw
self.poolclass = poolclass
self.pools = {}
self._create_pool_mutex = threading.Lock()
def close(self):
for key in list(self.pools):
del self.pools[key]
def __del__(self):
self.close()
def __getattr__(self, key):
return getattr(self.module, key)
def get_pool(self, *args, **kw):
key = self._serialize(*args, **kw)
try:
return self.pools[key]
except KeyError:
self._create_pool_mutex.acquire()
try:
if key not in self.pools:
kw.pop("sa_pool_key", None)
pool = self.poolclass(
lambda: self.module.connect(*args, **kw), **self.kw
)
self.pools[key] = pool
return pool
else:
return self.pools[key]
finally:
self._create_pool_mutex.release()
def connect(self, *args, **kw):
"""Activate a connection to the database.
Connect to the database using this DBProxy's module and the given
connect arguments. If the arguments match an existing pool, the
connection will be returned from the pool's current thread-local
connection instance, or if there is no thread-local connection
instance it will be checked out from the set of pooled connections.
If the pool has no available connections and allows new connections
to be created, a new database connection will be made.
"""
return self.get_pool(*args, **kw).connect()
def dispose(self, *args, **kw):
"""Dispose the pool referenced by the given connect arguments."""
key = self._serialize(*args, **kw)
try:
del self.pools[key]
except KeyError:
pass
def _serialize(self, *args, **kw):
if "sa_pool_key" in kw:
return kw["sa_pool_key"]
return tuple(list(args) + [(k, kw[k]) for k in sorted(kw)])
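# A minimal, illustrative sketch of the legacy manage() proxy documented
# above; the stdlib sqlite3 module and its ":memory:" database are
# assumptions chosen purely for demonstration.
if __name__ == "__main__":
    import sqlite3

    proxied = manage(sqlite3, pool_size=2)   # emits a deprecation warning
    conn = proxied.connect(":memory:")       # pooled DBAPI connection
    cur = conn.cursor()
    cur.execute("select 1")
    print(cur.fetchone())
    conn.close()                             # returned to the pool, not closed
    clear_managers()                         # drop all managed pools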
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/pool/__init__.py
|
# sqlalchemy/pool/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Connection pooling for DB-API connections.
Provides a number of connection pool implementations for a variety of
usage scenarios and thread behavior requirements imposed by the
application, DB-API or database itself.
Also provides a DB-API 2.0 connection proxying mechanism allowing
regular DB-API connect() methods to be transparently managed by a
SQLAlchemy connection pool.
"""
from .base import _ConnectionFairy # noqa
from .base import _ConnectionRecord # noqa
from .base import _finalize_fairy # noqa
from .base import _refs # noqa
from .base import Pool
from .base import reset_commit
from .base import reset_none
from .base import reset_rollback
from .dbapi_proxy import clear_managers
from .dbapi_proxy import manage
from .impl import AssertionPool
from .impl import NullPool
from .impl import QueuePool
from .impl import SingletonThreadPool
from .impl import StaticPool
__all__ = [
"Pool",
"reset_commit",
"reset_none",
"reset_rollback",
"clear_managers",
"manage",
"AssertionPool",
"NullPool",
"QueuePool",
"SingletonThreadPool",
"StaticPool",
]
# as these are likely to be used in various test suites, debugging
# setups, keep them in the sqlalchemy.pool namespace
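# An illustrative sketch only: selecting one of the pool implementations
# exported above when creating an Engine. The in-memory SQLite URL is an
# assumption for demonstration purposes.
if __name__ == "__main__":
    from sqlalchemy import create_engine

    # NullPool opens and closes a real DBAPI connection per checkout.
    engine = create_engine("sqlite://", poolclass=NullPool)
    with engine.connect() as conn:
        print(conn.execute("select 1").scalar())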
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/pool/impl.py
|
# sqlalchemy/pool.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Pool implementation classes.
"""
import traceback
import weakref
from .base import _ConnectionFairy
from .base import _ConnectionRecord
from .base import Pool
from .. import exc
from .. import util
from ..util import chop_traceback
from ..util import queue as sqla_queue
from ..util import threading
class QueuePool(Pool):
"""A :class:`_pool.Pool`
that imposes a limit on the number of open connections.
:class:`.QueuePool` is the default pooling implementation used for
all :class:`_engine.Engine` objects, unless the SQLite dialect is in use.
"""
def __init__(
self,
creator,
pool_size=5,
max_overflow=10,
timeout=30,
use_lifo=False,
**kw
):
r"""
Construct a QueuePool.
:param creator: a callable function that returns a DB-API
connection object, same as that of :paramref:`_pool.Pool.creator`.
:param pool_size: The size of the pool to be maintained,
defaults to 5. This is the largest number of connections that
will be kept persistently in the pool. Note that the pool
begins with no connections; once this number of connections
is requested, that number of connections will remain.
``pool_size`` can be set to 0 to indicate no size limit; to
disable pooling, use a :class:`~sqlalchemy.pool.NullPool`
instead.
:param max_overflow: The maximum overflow size of the
pool. When the number of checked-out connections reaches the
size set in pool_size, additional connections will be
returned up to this limit. When those additional connections
are returned to the pool, they are disconnected and
discarded. It follows then that the total number of
simultaneous connections the pool will allow is pool_size +
``max_overflow``, and the total number of "sleeping"
connections the pool will allow is pool_size. ``max_overflow``
can be set to -1 to indicate no overflow limit; no limit
will be placed on the total number of concurrent
connections. Defaults to 10.
:param timeout: The number of seconds to wait before giving up
on returning a connection. Defaults to 30.
:param use_lifo: use LIFO (last-in-first-out) when retrieving
connections instead of FIFO (first-in-first-out). Using LIFO, a
server-side timeout scheme can reduce the number of connections used
during non-peak periods of use. When planning for server-side
timeouts, ensure that a recycle or pre-ping strategy is in use to
gracefully handle stale connections.
.. versionadded:: 1.3
.. seealso::
:ref:`pool_use_lifo`
:ref:`pool_disconnects`
:param \**kw: Other keyword arguments including
:paramref:`_pool.Pool.recycle`, :paramref:`_pool.Pool.echo`,
:paramref:`_pool.Pool.reset_on_return` and others are passed to the
:class:`_pool.Pool` constructor.
"""
Pool.__init__(self, creator, **kw)
self._pool = sqla_queue.Queue(pool_size, use_lifo=use_lifo)
self._overflow = 0 - pool_size
self._max_overflow = max_overflow
self._timeout = timeout
self._overflow_lock = threading.Lock()
def _do_return_conn(self, conn):
try:
self._pool.put(conn, False)
except sqla_queue.Full:
try:
conn.close()
finally:
self._dec_overflow()
def _do_get(self):
use_overflow = self._max_overflow > -1
try:
wait = use_overflow and self._overflow >= self._max_overflow
return self._pool.get(wait, self._timeout)
except sqla_queue.Empty:
# don't do things inside of "except Empty", because when we say
# we timed out or can't connect and raise, Python 3 tells
# people the real error is queue.Empty which it isn't.
pass
if use_overflow and self._overflow >= self._max_overflow:
if not wait:
return self._do_get()
else:
raise exc.TimeoutError(
"QueuePool limit of size %d overflow %d reached, "
"connection timed out, timeout %d"
% (self.size(), self.overflow(), self._timeout),
code="3o7r",
)
if self._inc_overflow():
try:
return self._create_connection()
except:
with util.safe_reraise():
self._dec_overflow()
else:
return self._do_get()
def _inc_overflow(self):
if self._max_overflow == -1:
self._overflow += 1
return True
with self._overflow_lock:
if self._overflow < self._max_overflow:
self._overflow += 1
return True
else:
return False
def _dec_overflow(self):
if self._max_overflow == -1:
self._overflow -= 1
return True
with self._overflow_lock:
self._overflow -= 1
return True
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(
self._creator,
pool_size=self._pool.maxsize,
max_overflow=self._max_overflow,
timeout=self._timeout,
recycle=self._recycle,
echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
_dispatch=self.dispatch,
dialect=self._dialect,
)
def dispose(self):
while True:
try:
conn = self._pool.get(False)
conn.close()
except sqla_queue.Empty:
break
self._overflow = 0 - self.size()
self.logger.info("Pool disposed. %s", self.status())
def status(self):
return (
"Pool size: %d Connections in pool: %d "
"Current Overflow: %d Current Checked out "
"connections: %d"
% (
self.size(),
self.checkedin(),
self.overflow(),
self.checkedout(),
)
)
def size(self):
return self._pool.maxsize
def timeout(self):
return self._timeout
def checkedin(self):
return self._pool.qsize()
def overflow(self):
return self._overflow
def checkedout(self):
return self._pool.maxsize - self._pool.qsize() + self._overflow
class NullPool(Pool):
"""A Pool which does not pool connections.
Instead it literally opens and closes the underlying DB-API connection
per each connection open/close.
Reconnect-related functions such as ``recycle`` and connection
invalidation are not supported by this Pool implementation, since
no connections are held persistently.
"""
def status(self):
return "NullPool"
def _do_return_conn(self, conn):
conn.close()
def _do_get(self):
return self._create_connection()
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(
self._creator,
recycle=self._recycle,
echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
_dispatch=self.dispatch,
dialect=self._dialect,
)
def dispose(self):
pass
class SingletonThreadPool(Pool):
"""A Pool that maintains one connection per thread.
Maintains one connection per each thread, never moving a connection to a
thread other than the one which it was created in.
.. warning:: the :class:`.SingletonThreadPool` will call ``.close()``
on arbitrary connections that exist beyond the size setting of
``pool_size``, e.g. if more unique **thread identities**
than ``pool_size`` are used. This cleanup is
non-deterministic and not sensitive to whether or not the connections
linked to those thread identities are currently in use.
:class:`.SingletonThreadPool` may be improved in a future release,
however in its current status it is generally used only for test
scenarios using a SQLite ``:memory:`` database and is not recommended
for production use.
Options are the same as those of :class:`_pool.Pool`, as well as:
:param pool_size: The number of threads in which to maintain connections
at once. Defaults to five.
:class:`.SingletonThreadPool` is used by the SQLite dialect
automatically when a memory-based database is used.
See :ref:`sqlite_toplevel`.
"""
def __init__(self, creator, pool_size=5, **kw):
Pool.__init__(self, creator, **kw)
self._conn = threading.local()
self._fairy = threading.local()
self._all_conns = set()
self.size = pool_size
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(
self._creator,
pool_size=self.size,
recycle=self._recycle,
echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
_dispatch=self.dispatch,
dialect=self._dialect,
)
def dispose(self):
"""Dispose of this pool."""
for conn in self._all_conns:
try:
conn.close()
except Exception:
# pysqlite won't even let you close a conn from a thread
# that didn't create it
pass
self._all_conns.clear()
def _cleanup(self):
while len(self._all_conns) >= self.size:
c = self._all_conns.pop()
c.close()
def status(self):
return "SingletonThreadPool id:%d size: %d" % (
id(self),
len(self._all_conns),
)
def _do_return_conn(self, conn):
pass
def _do_get(self):
try:
c = self._conn.current()
if c:
return c
except AttributeError:
pass
c = self._create_connection()
self._conn.current = weakref.ref(c)
if len(self._all_conns) >= self.size:
self._cleanup()
self._all_conns.add(c)
return c
def connect(self):
# vendored from Pool to include use_threadlocal behavior
try:
rec = self._fairy.current()
except AttributeError:
pass
else:
if rec is not None:
return rec._checkout_existing()
return _ConnectionFairy._checkout(self, self._fairy)
def _return_conn(self, record):
try:
del self._fairy.current
except AttributeError:
pass
self._do_return_conn(record)
class StaticPool(Pool):
"""A Pool of exactly one connection, used for all requests.
Reconnect-related functions such as ``recycle`` and connection
invalidation (which is also used to support auto-reconnect) are not
currently supported by this Pool implementation but may be implemented
in a future release.
"""
@util.memoized_property
def _conn(self):
return self._creator()
@util.memoized_property
def connection(self):
return _ConnectionRecord(self)
def status(self):
return "StaticPool"
def dispose(self):
if "_conn" in self.__dict__:
self._conn.close()
self._conn = None
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(
creator=self._creator,
recycle=self._recycle,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
echo=self.echo,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch,
dialect=self._dialect,
)
def _create_connection(self):
return self._conn
def _do_return_conn(self, conn):
pass
def _do_get(self):
return self.connection
class AssertionPool(Pool):
"""A :class:`_pool.Pool` that allows at most one checked out connection at
any given time.
This will raise an exception if more than one connection is checked out
at a time. Useful for debugging code that is using more connections
than desired.
"""
def __init__(self, *args, **kw):
self._conn = None
self._checked_out = False
self._store_traceback = kw.pop("store_traceback", True)
self._checkout_traceback = None
Pool.__init__(self, *args, **kw)
def status(self):
return "AssertionPool"
def _do_return_conn(self, conn):
if not self._checked_out:
raise AssertionError("connection is not checked out")
self._checked_out = False
assert conn is self._conn
def dispose(self):
self._checked_out = False
if self._conn:
self._conn.close()
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(
self._creator,
echo=self.echo,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch,
dialect=self._dialect,
)
def _do_get(self):
if self._checked_out:
if self._checkout_traceback:
suffix = " at:\n%s" % "".join(
chop_traceback(self._checkout_traceback)
)
else:
suffix = ""
raise AssertionError("connection is already checked out" + suffix)
if not self._conn:
self._conn = self._create_connection()
self._checked_out = True
if self._store_traceback:
self._checkout_traceback = traceback.format_stack()
return self._conn
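# A minimal, illustrative sketch of using QueuePool directly with a DBAPI
# creator; the stdlib sqlite3 module and its ":memory:" database are
# assumptions chosen purely for demonstration.
if __name__ == "__main__":
    import sqlite3

    pool = QueuePool(
        lambda: sqlite3.connect(":memory:"), pool_size=2, max_overflow=0
    )
    conn = pool.connect()                  # a _ConnectionFairy proxy
    print(conn.cursor().execute("select 1").fetchone())
    conn.close()                           # checked back in, not really closed
    print(pool.status())
    pool.dispose()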
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/pool/base.py
|
# sqlalchemy/pool.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base constructs for connection pools.
"""
from collections import deque
import time
import weakref
from .. import event
from .. import exc
from .. import interfaces
from .. import log
from .. import util
from ..util import threading
reset_rollback = util.symbol("reset_rollback")
reset_commit = util.symbol("reset_commit")
reset_none = util.symbol("reset_none")
class _ConnDialect(object):
"""partial implementation of :class:`.Dialect`
which provides DBAPI connection methods.
When a :class:`_pool.Pool` is combined with an :class:`_engine.Engine`,
the :class:`_engine.Engine` replaces this with its own
:class:`.Dialect`.
"""
def do_rollback(self, dbapi_connection):
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
dbapi_connection.commit()
def do_close(self, dbapi_connection):
dbapi_connection.close()
def do_ping(self, dbapi_connection):
raise NotImplementedError(
"The ping feature requires that a dialect is "
"passed to the connection pool."
)
class Pool(log.Identified):
"""Abstract base class for connection pools."""
_dialect = _ConnDialect()
@util.deprecated_params(
use_threadlocal=(
"1.3",
"The :paramref:`_pool.Pool.use_threadlocal` parameter is "
"deprecated and will be removed in a future release.",
),
listeners=(
"0.7",
":class:`.PoolListener` is deprecated in favor of the "
":class:`_events.PoolEvents` listener interface. The "
":paramref:`_pool.Pool.listeners` parameter will be removed in a "
"future release.",
),
)
def __init__(
self,
creator,
recycle=-1,
echo=None,
use_threadlocal=False,
logging_name=None,
reset_on_return=True,
listeners=None,
events=None,
dialect=None,
pre_ping=False,
_dispatch=None,
):
"""
Construct a Pool.
:param creator: a callable function that returns a DB-API
connection object. The function is normally called with no
arguments; a creator that accepts a single positional argument
will instead be passed the :class:`._ConnectionRecord` in use.
:param recycle: If set to a value other than -1, number of
seconds between connection recycling, which means upon
checkout, if this timeout is surpassed the connection will be
closed and replaced with a newly opened connection. Defaults to -1.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param echo: if True, the connection pool will log
informational output such as when connections are invalidated
as well as when connections are recycled to the default log handler,
which defaults to ``sys.stdout`` for output. If set to the string
``"debug"``, the logging will include pool checkouts and checkins.
The :paramref:`_pool.Pool.echo` parameter can also be set from the
:func:`_sa.create_engine` call by using the
:paramref:`_sa.create_engine.echo_pool` parameter.
.. seealso::
:ref:`dbengine_logging` - further detail on how to configure
logging.
:param use_threadlocal: If set to True, repeated calls to
:meth:`connect` within the same application thread will be
guaranteed to return the same connection object that is already
checked out. This is a legacy use case and the flag has no
effect when using the pool with a :class:`_engine.Engine` object.
:param reset_on_return: Determine steps to take on
connections as they are returned to the pool.
reset_on_return can have any of these values:
* ``"rollback"`` - call rollback() on the connection,
to release locks and transaction resources.
This is the default value. The vast majority
of use cases should leave this value set.
* ``True`` - same as 'rollback', this is here for
backwards compatibility.
* ``"commit"`` - call commit() on the connection,
to release locks and transaction resources.
A commit here may be desirable for databases that
cache query plans if a commit is emitted,
such as Microsoft SQL Server. However, this
value is more dangerous than 'rollback' because
any data changes present on the transaction
are committed unconditionally.
* ``None`` - don't do anything on the connection.
This setting should generally only be made on a database
that has no transaction support at all,
namely MySQL MyISAM; when used on this backend, performance
can be improved as the "rollback" call is still expensive on
MySQL. It is **strongly recommended** that this setting not be
used for transaction-supporting databases in conjunction with
a persistent pool such as :class:`.QueuePool`, as it opens
the possibility for connections still in a transaction to be
idle in the pool. The setting may be appropriate in the
case of :class:`.NullPool` or special circumstances where
the connection pool in use is not being used to maintain connection
lifecycle.
* ``False`` - same as None, this is here for
backwards compatibility.
:param events: a list of 2-tuples, each of the form
``(callable, target)`` which will be passed to :func:`.event.listen`
upon construction. Provided here so that event listeners
can be assigned via :func:`_sa.create_engine` before dialect-level
listeners are applied.
:param listeners: A list of :class:`.PoolListener`-like objects or
dictionaries of callables that receive events when DB-API
connections are created, checked out and checked in to the
pool.
:param dialect: a :class:`.Dialect` that will handle the job
of calling rollback(), close(), or commit() on DBAPI connections.
If omitted, a built-in "stub" dialect is used. Applications that
make use of :func:`_sa.create_engine` should not use this parameter
as it is handled by the engine creation strategy.
.. versionadded:: 1.1 - ``dialect`` is now a public parameter
to the :class:`_pool.Pool`.
:param pre_ping: if True, the pool will emit a "ping" (typically
"SELECT 1", but is dialect-specific) on the connection
upon checkout, to test if the connection is alive or not. If not,
the connection is transparently re-connected and upon success, all
other pooled connections established prior to that timestamp are
invalidated. Requires that a dialect is passed as well to
interpret the disconnection error.
.. versionadded:: 1.2
"""
if logging_name:
self.logging_name = self._orig_logging_name = logging_name
else:
self._orig_logging_name = None
log.instance_logger(self, echoflag=echo)
self._threadconns = threading.local()
self._creator = creator
self._recycle = recycle
self._invalidate_time = 0
self._use_threadlocal = use_threadlocal
self._pre_ping = pre_ping
self._reset_on_return = util.symbol.parse_user_argument(
reset_on_return,
{
reset_rollback: ["rollback", True],
reset_none: ["none", None, False],
reset_commit: ["commit"],
},
"reset_on_return",
resolve_symbol_names=False,
)
self.echo = echo
if _dispatch:
self.dispatch._update(_dispatch, only_propagate=False)
if dialect:
self._dialect = dialect
if events:
for fn, target in events:
event.listen(self, target, fn)
if listeners:
for l in listeners:
self.add_listener(l)
@property
def _creator(self):
return self.__dict__["_creator"]
@_creator.setter
def _creator(self, creator):
self.__dict__["_creator"] = creator
self._invoke_creator = self._should_wrap_creator(creator)
def _should_wrap_creator(self, creator):
"""Detect if creator accepts a single argument, or is sent
as a legacy style no-arg function.
"""
try:
argspec = util.get_callable_argspec(self._creator, no_self=True)
except TypeError:
return lambda crec: creator()
defaulted = argspec[3] is not None and len(argspec[3]) or 0
positionals = len(argspec[0]) - defaulted
# look for the exact arg signature that DefaultStrategy
# sends us
if (argspec[0], argspec[3]) == (["connection_record"], (None,)):
return creator
# or just a single positional
elif positionals == 1:
return creator
# all other cases, just wrap and assume legacy "creator" callable
# thing
else:
return lambda crec: creator()
def _close_connection(self, connection):
self.logger.debug("Closing connection %r", connection)
try:
self._dialect.do_close(connection)
except Exception:
self.logger.error(
"Exception closing connection %r", connection, exc_info=True
)
@util.deprecated(
"0.7",
"The :meth:`_pool.Pool.add_listener` method is deprecated and "
"will be removed in a future release. Please use the "
":class:`_events.PoolEvents` listener interface.",
)
def add_listener(self, listener):
"""Add a :class:`.PoolListener`-like object to this pool.
``listener`` may be an object that implements some or all of
PoolListener, or a dictionary of callables containing implementations
of some or all of the named methods in PoolListener.
"""
interfaces.PoolListener._adapt_listener(self, listener)
def unique_connection(self):
"""Produce a DBAPI connection that is not referenced by any
thread-local context.
This method is equivalent to :meth:`_pool.Pool.connect` when the
:paramref:`_pool.Pool.use_threadlocal` flag is not set to True.
When :paramref:`_pool.Pool.use_threadlocal` is True, the
:meth:`_pool.Pool.unique_connection`
method provides a means of bypassing
the threadlocal context.
"""
return _ConnectionFairy._checkout(self)
def _create_connection(self):
"""Called by subclasses to create a new ConnectionRecord."""
return _ConnectionRecord(self)
def _invalidate(self, connection, exception=None, _checkin=True):
"""Mark all connections established within the generation
of the given connection as invalidated.
If this pool's last invalidate time is before when the given
connection was created, update the timestamp to the current time. Otherwise,
no action is performed.
Connections with a start time prior to this pool's invalidation
time will be recycled upon next checkout.
"""
rec = getattr(connection, "_connection_record", None)
if not rec or self._invalidate_time < rec.starttime:
self._invalidate_time = time.time()
if _checkin and getattr(connection, "is_valid", False):
connection.invalidate(exception)
def recreate(self):
"""Return a new :class:`_pool.Pool`, of the same class as this one
and configured with identical creation arguments.
This method is used in conjunction with :meth:`dispose`
to close out an entire :class:`_pool.Pool` and create a new one in
its place.
"""
raise NotImplementedError()
def dispose(self):
"""Dispose of this pool.
This method leaves the possibility of checked-out connections
remaining open, as it only affects connections that are
idle in the pool.
.. seealso::
:meth:`Pool.recreate`
"""
raise NotImplementedError()
def connect(self):
"""Return a DBAPI connection from the pool.
The connection is instrumented such that when its
``close()`` method is called, the connection will be returned to
the pool.
"""
if not self._use_threadlocal:
return _ConnectionFairy._checkout(self)
try:
rec = self._threadconns.current()
except AttributeError:
pass
else:
if rec is not None:
return rec._checkout_existing()
return _ConnectionFairy._checkout(self, self._threadconns)
def _return_conn(self, record):
"""Given a _ConnectionRecord, return it to the :class:`_pool.Pool`.
This method is called when an instrumented DBAPI connection
has its ``close()`` method called.
"""
if self._use_threadlocal:
try:
del self._threadconns.current
except AttributeError:
pass
self._do_return_conn(record)
def _do_get(self):
"""Implementation for :meth:`get`, supplied by subclasses."""
raise NotImplementedError()
def _do_return_conn(self, conn):
"""Implementation for :meth:`return_conn`, supplied by subclasses."""
raise NotImplementedError()
def status(self):
raise NotImplementedError()
class _ConnectionRecord(object):
"""Internal object which maintains an individual DBAPI connection
referenced by a :class:`_pool.Pool`.
The :class:`._ConnectionRecord` object always exists for any particular
DBAPI connection whether or not that DBAPI connection has been
"checked out". This is in contrast to the :class:`._ConnectionFairy`
which is only a public facade to the DBAPI connection while it is checked
out.
A :class:`._ConnectionRecord` may exist for a span longer than that
of a single DBAPI connection. For example, if the
:meth:`._ConnectionRecord.invalidate`
method is called, the DBAPI connection associated with this
:class:`._ConnectionRecord`
will be discarded, but the :class:`._ConnectionRecord` may be used again,
in which case a new DBAPI connection is produced when the
:class:`_pool.Pool`
next uses this record.
The :class:`._ConnectionRecord` is delivered along with connection
pool events, including :meth:`_events.PoolEvents.connect` and
:meth:`_events.PoolEvents.checkout`, however :class:`._ConnectionRecord`
still
remains an internal object whose API and internals may change.
.. seealso::
:class:`._ConnectionFairy`
"""
def __init__(self, pool, connect=True):
self.__pool = pool
if connect:
self.__connect(first_connect_check=True)
self.finalize_callback = deque()
fairy_ref = None
starttime = None
connection = None
"""A reference to the actual DBAPI connection being tracked.
May be ``None`` if this :class:`._ConnectionRecord` has been marked
as invalidated; a new DBAPI connection may replace it if the owning
pool calls upon this :class:`._ConnectionRecord` to reconnect.
"""
_soft_invalidate_time = 0
@util.memoized_property
def info(self):
"""The ``.info`` dictionary associated with the DBAPI connection.
This dictionary is shared among the :attr:`._ConnectionFairy.info`
and :attr:`_engine.Connection.info` accessors.
.. note::
The lifespan of this dictionary is linked to the
DBAPI connection itself, meaning that it is **discarded** each time
the DBAPI connection is closed and/or invalidated. The
:attr:`._ConnectionRecord.record_info` dictionary remains
persistent throughout the lifespan of the
:class:`._ConnectionRecord` container.
"""
return {}
@util.memoized_property
def record_info(self):
"""An "info' dictionary associated with the connection record
itself.
Unlike the :attr:`._ConnectionRecord.info` dictionary, which is linked
to the lifespan of the DBAPI connection, this dictionary is linked
to the lifespan of the :class:`._ConnectionRecord` container itself
and will remain persistent throughout the life of the
:class:`._ConnectionRecord`.
.. versionadded:: 1.1
"""
return {}
@classmethod
def checkout(cls, pool):
rec = pool._do_get()
try:
dbapi_connection = rec.get_connection()
except Exception as err:
with util.safe_reraise():
rec._checkin_failed(err)
echo = pool._should_log_debug()
fairy = _ConnectionFairy(dbapi_connection, rec, echo)
rec.fairy_ref = weakref.ref(
fairy,
lambda ref: _finalize_fairy
and _finalize_fairy(None, rec, pool, ref, echo),
)
_refs.add(rec)
if echo:
pool.logger.debug(
"Connection %r checked out from pool", dbapi_connection
)
return fairy
def _checkin_failed(self, err):
self.invalidate(e=err)
self.checkin(_no_fairy_ref=True)
def checkin(self, _no_fairy_ref=False):
if self.fairy_ref is None and not _no_fairy_ref:
util.warn("Double checkin attempted on %s" % self)
return
self.fairy_ref = None
connection = self.connection
pool = self.__pool
while self.finalize_callback:
finalizer = self.finalize_callback.pop()
finalizer(connection)
if pool.dispatch.checkin:
pool.dispatch.checkin(connection, self)
pool._return_conn(self)
@property
def in_use(self):
return self.fairy_ref is not None
@property
def last_connect_time(self):
return self.starttime
def close(self):
if self.connection is not None:
self.__close()
def invalidate(self, e=None, soft=False):
"""Invalidate the DBAPI connection held by this :class:`._ConnectionRecord`.
This method is called for all connection invalidations, including
when the :meth:`._ConnectionFairy.invalidate` or
:meth:`_engine.Connection.invalidate` methods are called,
as well as when any
so-called "automatic invalidation" condition occurs.
:param e: an exception object indicating a reason for the invalidation.
:param soft: if True, the connection isn't closed; instead, this
connection will be recycled on next checkout.
.. versionadded:: 1.0.3
.. seealso::
:ref:`pool_connection_invalidation`
"""
# already invalidated
if self.connection is None:
return
if soft:
self.__pool.dispatch.soft_invalidate(self.connection, self, e)
else:
self.__pool.dispatch.invalidate(self.connection, self, e)
if e is not None:
self.__pool.logger.info(
"%sInvalidate connection %r (reason: %s:%s)",
"Soft " if soft else "",
self.connection,
e.__class__.__name__,
e,
)
else:
self.__pool.logger.info(
"%sInvalidate connection %r",
"Soft " if soft else "",
self.connection,
)
if soft:
self._soft_invalidate_time = time.time()
else:
self.__close()
self.connection = None
def get_connection(self):
recycle = False
# NOTE: the various comparisons here are assuming that measurable time
# passes between these state changes. however, time.time() is not
# guaranteed to have sub-second precision. comparisons of
# "invalidation time" to "starttime" should perhaps use >= so that the
# state change can take place assuming no measurable time has passed,
# however this does not guarantee correct behavior here as if time
# continues to not pass, it will try to reconnect repeatedly until
# these timestamps diverge, so in that sense using > is safer. Per
# https://stackoverflow.com/a/1938096/34549, Windows time.time() may be
# within 16 milliseconds accuracy, so unit tests for connection
# invalidation need a sleep of at least this long between initial start
# time and invalidation for the logic below to work reliably.
if self.connection is None:
self.info.clear()
self.__connect()
elif (
self.__pool._recycle > -1
and time.time() - self.starttime > self.__pool._recycle
):
self.__pool.logger.info(
"Connection %r exceeded timeout; recycling", self.connection
)
recycle = True
elif self.__pool._invalidate_time > self.starttime:
self.__pool.logger.info(
"Connection %r invalidated due to pool invalidation; "
+ "recycling",
self.connection,
)
recycle = True
elif self._soft_invalidate_time > self.starttime:
self.__pool.logger.info(
"Connection %r invalidated due to local soft invalidation; "
+ "recycling",
self.connection,
)
recycle = True
if recycle:
self.__close()
self.info.clear()
self.__connect()
return self.connection
def __close(self):
self.finalize_callback.clear()
if self.__pool.dispatch.close:
self.__pool.dispatch.close(self.connection, self)
self.__pool._close_connection(self.connection)
self.connection = None
def __connect(self, first_connect_check=False):
pool = self.__pool
# ensure any existing connection is removed, so that if
# creator fails, this attribute stays None
self.connection = None
try:
self.starttime = time.time()
connection = pool._invoke_creator(self)
pool.logger.debug("Created new connection %r", connection)
self.connection = connection
except Exception as e:
with util.safe_reraise():
pool.logger.debug("Error on connect(): %s", e)
else:
if first_connect_check:
pool.dispatch.first_connect.for_modify(
pool.dispatch
).exec_once_unless_exception(self.connection, self)
if pool.dispatch.connect:
pool.dispatch.connect(self.connection, self)
def _finalize_fairy(
connection, connection_record, pool, ref, echo, fairy=None
):
"""Cleanup for a :class:`._ConnectionFairy` whether or not it's already
been garbage collected.
"""
_refs.discard(connection_record)
if ref is not None:
if connection_record.fairy_ref is not ref:
return
assert connection is None
connection = connection_record.connection
if connection is not None:
if connection_record and echo:
pool.logger.debug(
"Connection %r being returned to pool", connection
)
try:
fairy = fairy or _ConnectionFairy(
connection, connection_record, echo
)
assert fairy.connection is connection
fairy._reset(pool)
# Immediately close detached instances
if not connection_record:
if pool.dispatch.close_detached:
pool.dispatch.close_detached(connection)
pool._close_connection(connection)
except BaseException as e:
pool.logger.error(
"Exception during reset or similar", exc_info=True
)
if connection_record:
connection_record.invalidate(e=e)
if not isinstance(e, Exception):
raise
if connection_record and connection_record.fairy_ref is not None:
connection_record.checkin()
_refs = set()
class _ConnectionFairy(object):
"""Proxies a DBAPI connection and provides return-on-dereference
support.
This is an internal object used by the :class:`_pool.Pool` implementation
to provide context management to a DBAPI connection delivered by
that :class:`_pool.Pool`.
The name "fairy" is inspired by the fact that the
:class:`._ConnectionFairy` object's lifespan is transitory, as it lasts
only for the length of a specific DBAPI connection being checked out from
the pool, and additionally that as a transparent proxy, it is mostly
invisible.
.. seealso::
:class:`._ConnectionRecord`
"""
def __init__(self, dbapi_connection, connection_record, echo):
self.connection = dbapi_connection
self._connection_record = connection_record
self._echo = echo
connection = None
"""A reference to the actual DBAPI connection being tracked."""
_connection_record = None
"""A reference to the :class:`._ConnectionRecord` object associated
with the DBAPI connection.
This is currently an internal accessor which is subject to change.
"""
_reset_agent = None
"""Refer to an object with a ``.commit()`` and ``.rollback()`` method;
if non-None, the "reset-on-return" feature will call upon this object
rather than directly against the dialect-level do_rollback() and
do_commit() methods.
In practice, a :class:`_engine.Connection` assigns a :class:`.Transaction`
object
to this variable when one is in scope so that the :class:`.Transaction`
takes the job of committing or rolling back on return if
:meth:`_engine.Connection.close` is called while the :class:`.Transaction`
still exists.
This is essentially an "event handler" of sorts but is simplified as an
instance variable both for performance/simplicity as well as that there
can only be one "reset agent" at a time.
"""
@classmethod
def _checkout(cls, pool, threadconns=None, fairy=None):
if not fairy:
fairy = _ConnectionRecord.checkout(pool)
fairy._pool = pool
fairy._counter = 0
if threadconns is not None:
threadconns.current = weakref.ref(fairy)
if fairy.connection is None:
raise exc.InvalidRequestError("This connection is closed")
fairy._counter += 1
if (
not pool.dispatch.checkout and not pool._pre_ping
) or fairy._counter != 1:
return fairy
# Pool listeners can trigger a reconnection on checkout, as well
# as the pre-pinger.
# there are three attempts made here, but note that if the database
# is not accessible from a connection standpoint, those won't proceed
# here.
attempts = 2
while attempts > 0:
try:
if pool._pre_ping:
if fairy._echo:
pool.logger.debug(
"Pool pre-ping on connection %s", fairy.connection
)
result = pool._dialect.do_ping(fairy.connection)
if not result:
if fairy._echo:
pool.logger.debug(
"Pool pre-ping on connection %s failed, "
"will invalidate pool",
fairy.connection,
)
raise exc.InvalidatePoolError()
pool.dispatch.checkout(
fairy.connection, fairy._connection_record, fairy
)
return fairy
except exc.DisconnectionError as e:
if e.invalidate_pool:
pool.logger.info(
"Disconnection detected on checkout, "
"invalidating all pooled connections prior to "
"current timestamp (reason: %r)",
e,
)
fairy._connection_record.invalidate(e)
pool._invalidate(fairy, e, _checkin=False)
else:
pool.logger.info(
"Disconnection detected on checkout, "
"invalidating individual connection %s (reason: %r)",
fairy.connection,
e,
)
fairy._connection_record.invalidate(e)
try:
fairy.connection = (
fairy._connection_record.get_connection()
)
except Exception as err:
with util.safe_reraise():
fairy._connection_record._checkin_failed(err)
attempts -= 1
pool.logger.info("Reconnection attempts exhausted on checkout")
fairy.invalidate()
raise exc.InvalidRequestError("This connection is closed")
def _checkout_existing(self):
return _ConnectionFairy._checkout(self._pool, fairy=self)
def _checkin(self):
_finalize_fairy(
self.connection,
self._connection_record,
self._pool,
None,
self._echo,
fairy=self,
)
self.connection = None
self._connection_record = None
_close = _checkin
def _reset(self, pool):
if pool.dispatch.reset:
pool.dispatch.reset(self, self._connection_record)
if pool._reset_on_return is reset_rollback:
if self._echo:
pool.logger.debug(
"Connection %s rollback-on-return%s",
self.connection,
", via agent" if self._reset_agent else "",
)
if self._reset_agent:
if not self._reset_agent.is_active:
util.warn(
"Reset agent is not active. "
"This should not occur unless there was already "
"a connectivity error in progress."
)
pool._dialect.do_rollback(self)
else:
self._reset_agent.rollback()
else:
pool._dialect.do_rollback(self)
elif pool._reset_on_return is reset_commit:
if self._echo:
pool.logger.debug(
"Connection %s commit-on-return%s",
self.connection,
", via agent" if self._reset_agent else "",
)
if self._reset_agent:
if not self._reset_agent.is_active:
util.warn(
"Reset agent is not active. "
"This should not occur unless there was already "
"a connectivity error in progress."
)
pool._dialect.do_commit(self)
else:
self._reset_agent.commit()
else:
pool._dialect.do_commit(self)
@property
def _logger(self):
return self._pool.logger
@property
def is_valid(self):
"""Return True if this :class:`._ConnectionFairy` still refers
to an active DBAPI connection."""
return self.connection is not None
@util.memoized_property
def info(self):
"""Info dictionary associated with the underlying DBAPI connection
referred to by this :class:`.ConnectionFairy`, allowing user-defined
data to be associated with the connection.
The data here will follow along with the DBAPI connection including
after it is returned to the connection pool and used again
in subsequent instances of :class:`._ConnectionFairy`. It is shared
with the :attr:`._ConnectionRecord.info` and
:attr:`_engine.Connection.info`
accessors.
The dictionary associated with a particular DBAPI connection is
discarded when the connection itself is discarded.
"""
return self._connection_record.info
@property
def record_info(self):
"""Info dictionary associated with the :class:`._ConnectionRecord
container referred to by this :class:`.ConnectionFairy`.
Unlike the :attr:`._ConnectionFairy.info` dictionary, the lifespan
of this dictionary is persistent across connections that are
disconnected and/or invalidated within the lifespan of a
:class:`._ConnectionRecord`.
.. versionadded:: 1.1
"""
if self._connection_record:
return self._connection_record.record_info
else:
return None
def invalidate(self, e=None, soft=False):
"""Mark this connection as invalidated.
This method can be called directly, and is also called as a result
of the :meth:`_engine.Connection.invalidate` method. When invoked,
the DBAPI connection is immediately closed and discarded from
further use by the pool. The invalidation mechanism proceeds
via the :meth:`._ConnectionRecord.invalidate` internal method.
:param e: an exception object indicating a reason for the invalidation.
:param soft: if True, the connection isn't closed; instead, this
connection will be recycled on next checkout.
.. versionadded:: 1.0.3
.. seealso::
:ref:`pool_connection_invalidation`
"""
if self.connection is None:
util.warn("Can't invalidate an already-closed connection.")
return
if self._connection_record:
self._connection_record.invalidate(e=e, soft=soft)
if not soft:
self.connection = None
self._checkin()
def cursor(self, *args, **kwargs):
"""Return a new DBAPI cursor for the underlying connection.
This method is a proxy for the ``connection.cursor()`` DBAPI
method.
"""
return self.connection.cursor(*args, **kwargs)
def __getattr__(self, key):
return getattr(self.connection, key)
def detach(self):
"""Separate this connection from its Pool.
This means that the connection will no longer be returned to the
pool when closed, and will instead be literally closed. The
containing ConnectionRecord is separated from the DB-API connection,
and will create a new connection when next used.
Note that any overall connection limiting constraints imposed by a
Pool implementation may be violated after a detach, as the detached
connection is removed from the pool's knowledge and control.
"""
if self._connection_record is not None:
rec = self._connection_record
_refs.remove(rec)
rec.fairy_ref = None
rec.connection = None
# TODO: should this be _return_conn?
self._pool._do_return_conn(self._connection_record)
self.info = self.info.copy()
self._connection_record = None
if self._pool.dispatch.detach:
self._pool.dispatch.detach(self.connection, rec)
def close(self):
self._counter -= 1
if self._counter == 0:
self._checkin()
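# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Public path to the detach/checkin behavior implemented above; assumes an
# in-memory SQLite engine purely for demonstration.
def _example_detach_raw_connection():
    from sqlalchemy import create_engine
    engine = create_engine("sqlite://")
    fairy = engine.raw_connection()  # a _ConnectionFairy checked out from the pool
    fairy.detach()                   # record is severed; the pool forgets this connection
    fairy.close()                    # now closes the DBAPI connection outright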
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/event/legacy.py
|
# event/legacy.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to handle adaption of legacy call signatures,
generation of deprecation notes and docstrings.
"""
from .. import util
def _legacy_signature(since, argnames, converter=None):
def leg(fn):
if not hasattr(fn, "_legacy_signatures"):
fn._legacy_signatures = []
fn._legacy_signatures.append((since, argnames, converter))
return fn
return leg
def _wrap_fn_for_legacy(dispatch_collection, fn, argspec):
for since, argnames, conv in dispatch_collection.legacy_signatures:
if argnames[-1] == "**kw":
has_kw = True
argnames = argnames[0:-1]
else:
has_kw = False
if len(argnames) == len(argspec.args) and has_kw is bool(
argspec.varkw
):
if conv:
assert not has_kw
def wrap_leg(*args):
return fn(*conv(*args))
else:
def wrap_leg(*args, **kw):
argdict = dict(zip(dispatch_collection.arg_names, args))
args = [argdict[name] for name in argnames]
if has_kw:
return fn(*args, **kw)
else:
return fn(*args)
return wrap_leg
else:
return fn
def _indent(text, indent):
return "\n".join(indent + line for line in text.split("\n"))
def _standard_listen_example(dispatch_collection, sample_target, fn):
example_kw_arg = _indent(
"\n".join(
"%(arg)s = kw['%(arg)s']" % {"arg": arg}
for arg in dispatch_collection.arg_names[0:2]
),
" ",
)
if dispatch_collection.legacy_signatures:
current_since = max(
since
for since, args, conv in dispatch_collection.legacy_signatures
)
else:
current_since = None
text = (
"from sqlalchemy import event\n\n"
"# standard decorator style%(current_since)s\n"
"@event.listens_for(%(sample_target)s, '%(event_name)s')\n"
"def receive_%(event_name)s("
"%(named_event_arguments)s%(has_kw_arguments)s):\n"
" \"listen for the '%(event_name)s' event\"\n"
"\n # ... (event handling logic) ...\n"
)
if len(dispatch_collection.arg_names) > 3:
text += (
"\n# named argument style (new in 0.9)\n"
"@event.listens_for("
"%(sample_target)s, '%(event_name)s', named=True)\n"
"def receive_%(event_name)s(**kw):\n"
" \"listen for the '%(event_name)s' event\"\n"
"%(example_kw_arg)s\n"
"\n # ... (event handling logic) ...\n"
)
text %= {
"current_since": " (arguments as of %s)" % current_since
if current_since
else "",
"event_name": fn.__name__,
"has_kw_arguments": ", **kw" if dispatch_collection.has_kw else "",
"named_event_arguments": ", ".join(dispatch_collection.arg_names),
"example_kw_arg": example_kw_arg,
"sample_target": sample_target,
}
return text
def _legacy_listen_examples(dispatch_collection, sample_target, fn):
text = ""
for since, args, conv in dispatch_collection.legacy_signatures:
text += (
"\n# DEPRECATED calling style (pre-%(since)s, "
"will be removed in a future release)\n"
"@event.listens_for(%(sample_target)s, '%(event_name)s')\n"
"def receive_%(event_name)s("
"%(named_event_arguments)s%(has_kw_arguments)s):\n"
" \"listen for the '%(event_name)s' event\"\n"
"\n # ... (event handling logic) ...\n"
% {
"since": since,
"event_name": fn.__name__,
"has_kw_arguments": " **kw"
if dispatch_collection.has_kw
else "",
"named_event_arguments": ", ".join(args),
"sample_target": sample_target,
}
)
return text
def _version_signature_changes(parent_dispatch_cls, dispatch_collection):
since, args, conv = dispatch_collection.legacy_signatures[0]
return (
"\n.. deprecated:: %(since)s\n"
" The :class:`.%(clsname)s.%(event_name)s` event now accepts the \n"
" arguments ``%(named_event_arguments)s%(has_kw_arguments)s``.\n"
" Support for listener functions which accept the previous \n"
' argument signature(s) listed above as "deprecated" will be \n'
" removed in a future release."
% {
"since": since,
"clsname": parent_dispatch_cls.__name__,
"event_name": dispatch_collection.name,
"named_event_arguments": ", ".join(dispatch_collection.arg_names),
"has_kw_arguments": ", **kw" if dispatch_collection.has_kw else "",
}
)
def _augment_fn_docs(dispatch_collection, parent_dispatch_cls, fn):
header = (
".. container:: event_signatures\n\n"
" Example argument forms::\n"
"\n"
)
sample_target = getattr(parent_dispatch_cls, "_target_class_doc", "obj")
text = header + _indent(
_standard_listen_example(dispatch_collection, sample_target, fn),
" " * 8,
)
if dispatch_collection.legacy_signatures:
text += _indent(
_legacy_listen_examples(dispatch_collection, sample_target, fn),
" " * 8,
)
text += _version_signature_changes(
parent_dispatch_cls, dispatch_collection
)
return util.inject_docstring_text(fn.__doc__, text, 1)
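# --- Illustrative sketch (editor's addition, not part of the original module) ---
# How an Events subclass might mark an older calling signature as legacy so
# that listeners written against the pre-1.4 form keep working.  "MyEvents"
# and "before_thing" are hypothetical names used only for illustration.
def _example_legacy_signature():
    from sqlalchemy.event.base import Events
    class MyEvents(Events):
        @_legacy_signature("1.4", ["target"])
        def before_thing(self, target, context):
            "Intercept 'thing'; older listeners received only (target,)."
    return MyEvents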
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/event/registry.py
|
# event/registry.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides managed registration services on behalf of :func:`.listen`
arguments.
By "managed registration", we mean that event listening functions and
other objects can be added to various collections in such a way that their
membership in all those collections can be revoked at once, based on
an equivalent :class:`._EventKey`.
"""
from __future__ import absolute_import
import collections
import types
import weakref
from .. import exc
from .. import util
_key_to_collection = collections.defaultdict(dict)
"""
Given an original listen() argument, can locate all
listener collections and the listener fn contained
(target, identifier, fn) -> {
ref(listenercollection) -> ref(listener_fn)
ref(listenercollection) -> ref(listener_fn)
ref(listenercollection) -> ref(listener_fn)
}
"""
_collection_to_key = collections.defaultdict(dict)
"""
Given a _ListenerCollection or _ClsLevelListener, can locate
all the original listen() arguments and the listener fn contained
ref(listenercollection) -> {
ref(listener_fn) -> (target, identifier, fn),
ref(listener_fn) -> (target, identifier, fn),
ref(listener_fn) -> (target, identifier, fn),
}
"""
def _collection_gced(ref):
# defaultdict, so can't get a KeyError
if not _collection_to_key or ref not in _collection_to_key:
return
listener_to_key = _collection_to_key.pop(ref)
for key in listener_to_key.values():
if key in _key_to_collection:
# defaultdict, so can't get a KeyError
dispatch_reg = _key_to_collection[key]
dispatch_reg.pop(ref)
if not dispatch_reg:
_key_to_collection.pop(key)
def _stored_in_collection(event_key, owner):
key = event_key._key
dispatch_reg = _key_to_collection[key]
owner_ref = owner.ref
listen_ref = weakref.ref(event_key._listen_fn)
if owner_ref in dispatch_reg:
return False
dispatch_reg[owner_ref] = listen_ref
listener_to_key = _collection_to_key[owner_ref]
listener_to_key[listen_ref] = key
return True
def _removed_from_collection(event_key, owner):
key = event_key._key
dispatch_reg = _key_to_collection[key]
listen_ref = weakref.ref(event_key._listen_fn)
owner_ref = owner.ref
dispatch_reg.pop(owner_ref, None)
if not dispatch_reg:
del _key_to_collection[key]
if owner_ref in _collection_to_key:
listener_to_key = _collection_to_key[owner_ref]
listener_to_key.pop(listen_ref)
def _stored_in_collection_multi(newowner, oldowner, elements):
if not elements:
return
oldowner = oldowner.ref
newowner = newowner.ref
old_listener_to_key = _collection_to_key[oldowner]
new_listener_to_key = _collection_to_key[newowner]
for listen_fn in elements:
listen_ref = weakref.ref(listen_fn)
key = old_listener_to_key[listen_ref]
dispatch_reg = _key_to_collection[key]
if newowner in dispatch_reg:
assert dispatch_reg[newowner] == listen_ref
else:
dispatch_reg[newowner] = listen_ref
new_listener_to_key[listen_ref] = key
def _clear(owner, elements):
if not elements:
return
owner = owner.ref
listener_to_key = _collection_to_key[owner]
for listen_fn in elements:
listen_ref = weakref.ref(listen_fn)
key = listener_to_key[listen_ref]
dispatch_reg = _key_to_collection[key]
dispatch_reg.pop(owner, None)
if not dispatch_reg:
del _key_to_collection[key]
class _EventKey(object):
"""Represent :func:`.listen` arguments.
"""
__slots__ = (
"target",
"identifier",
"fn",
"fn_key",
"fn_wrap",
"dispatch_target",
)
def __init__(self, target, identifier, fn, dispatch_target, _fn_wrap=None):
self.target = target
self.identifier = identifier
self.fn = fn
if isinstance(fn, types.MethodType):
self.fn_key = id(fn.__func__), id(fn.__self__)
else:
self.fn_key = id(fn)
self.fn_wrap = _fn_wrap
self.dispatch_target = dispatch_target
@property
def _key(self):
return (id(self.target), self.identifier, self.fn_key)
def with_wrapper(self, fn_wrap):
if fn_wrap is self._listen_fn:
return self
else:
return _EventKey(
self.target,
self.identifier,
self.fn,
self.dispatch_target,
_fn_wrap=fn_wrap,
)
def with_dispatch_target(self, dispatch_target):
if dispatch_target is self.dispatch_target:
return self
else:
return _EventKey(
self.target,
self.identifier,
self.fn,
dispatch_target,
_fn_wrap=self.fn_wrap,
)
def listen(self, *args, **kw):
once = kw.pop("once", False)
once_unless_exception = kw.pop("_once_unless_exception", False)
named = kw.pop("named", False)
target, identifier, fn = (
self.dispatch_target,
self.identifier,
self._listen_fn,
)
dispatch_collection = getattr(target.dispatch, identifier)
adjusted_fn = dispatch_collection._adjust_fn_spec(fn, named)
self = self.with_wrapper(adjusted_fn)
stub_function = getattr(
self.dispatch_target.dispatch._events, self.identifier
)
if hasattr(stub_function, "_sa_warn"):
stub_function._sa_warn()
if once or once_unless_exception:
self.with_wrapper(
util.only_once(
self._listen_fn, retry_on_exception=once_unless_exception
)
).listen(*args, **kw)
else:
self.dispatch_target.dispatch._listen(self, *args, **kw)
def remove(self):
key = self._key
if key not in _key_to_collection:
raise exc.InvalidRequestError(
"No listeners found for event %s / %r / %s "
% (self.target, self.identifier, self.fn)
)
dispatch_reg = _key_to_collection.pop(key)
for collection_ref, listener_ref in dispatch_reg.items():
collection = collection_ref()
listener_fn = listener_ref()
if collection is not None and listener_fn is not None:
collection.remove(self.with_wrapper(listener_fn))
def contains(self):
"""Return True if this event key is registered to listen.
"""
return self._key in _key_to_collection
def base_listen(
self, propagate=False, insert=False, named=False, retval=None
):
target, identifier = self.dispatch_target, self.identifier
dispatch_collection = getattr(target.dispatch, identifier)
if insert:
dispatch_collection.for_modify(target.dispatch).insert(
self, propagate
)
else:
dispatch_collection.for_modify(target.dispatch).append(
self, propagate
)
@property
def _listen_fn(self):
return self.fn_wrap or self.fn
def append_to_list(self, owner, list_):
if _stored_in_collection(self, owner):
list_.append(self._listen_fn)
return True
else:
return False
def remove_from_list(self, owner, list_):
_removed_from_collection(self, owner)
list_.remove(self._listen_fn)
def prepend_to_list(self, owner, list_):
if _stored_in_collection(self, owner):
list_.appendleft(self._listen_fn)
return True
else:
return False
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/event/__init__.py
|
# event/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .api import CANCEL # noqa
from .api import contains # noqa
from .api import listen # noqa
from .api import listens_for # noqa
from .api import NO_RETVAL # noqa
from .api import remove # noqa
from .attr import RefCollection # noqa
from .base import dispatcher # noqa
from .base import Events # noqa
from .legacy import _legacy_signature # noqa
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/event/api.py
|
# event/api.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Public API functions for the event system.
"""
from __future__ import absolute_import
from .base import _registrars
from .registry import _EventKey
from .. import exc
from .. import util
CANCEL = util.symbol("CANCEL")
NO_RETVAL = util.symbol("NO_RETVAL")
def _event_key(target, identifier, fn):
for evt_cls in _registrars[identifier]:
tgt = evt_cls._accept_with(target)
if tgt is not None:
return _EventKey(target, identifier, fn, tgt)
else:
raise exc.InvalidRequestError(
"No such event '%s' for target '%s'" % (identifier, target)
)
def listen(target, identifier, fn, *args, **kw):
"""Register a listener function for the given target.
The :func:`.listen` function is part of the primary interface for the
SQLAlchemy event system, documented at :ref:`event_toplevel`.
e.g.::
from sqlalchemy import event
from sqlalchemy.schema import UniqueConstraint
def unique_constraint_name(const, table):
const.name = "uq_%s_%s" % (
table.name,
list(const.columns)[0].name
)
event.listen(
UniqueConstraint,
"after_parent_attach",
unique_constraint_name)
A given function can also be invoked for only the first invocation
of the event using the ``once`` argument::
def on_config():
do_config()
event.listen(Mapper, "before_configure", on_config, once=True)
.. versionadded:: 0.9.4 Added ``once=True`` to :func:`.event.listen`
and :func:`.event.listens_for`.
.. warning:: The ``once`` argument does not imply automatic de-registration
of the listener function after it has been invoked a first time; a
listener entry will remain associated with the target object.
Associating an arbitrarily high number of listeners without explicitly
removing them will cause memory to grow unbounded even if ``once=True``
is specified.
.. note::
The :func:`.listen` function cannot be called at the same time
that the target event is being run. This has implications
for thread safety, and also means an event cannot be added
from inside the listener function for itself. The list of
events to be run is contained within a mutable collection
that can't be changed during iteration.
Event registration and removal is not intended to be a "high
velocity" operation; it is a configurational operation. For
systems that need to quickly associate and deassociate with
events at high scale, use a mutable structure that is handled
from inside of a single listener.
.. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now
used as the container for the list of events, which explicitly
disallows collection mutation while the collection is being
iterated.
.. seealso::
:func:`.listens_for`
:func:`.remove`
"""
_event_key(target, identifier, fn).listen(*args, **kw)
def listens_for(target, identifier, *args, **kw):
"""Decorate a function as a listener for the given target + identifier.
The :func:`.listens_for` decorator is part of the primary interface for the
SQLAlchemy event system, documented at :ref:`event_toplevel`.
e.g.::
from sqlalchemy import event
from sqlalchemy.schema import UniqueConstraint
@event.listens_for(UniqueConstraint, "after_parent_attach")
def unique_constraint_name(const, table):
const.name = "uq_%s_%s" % (
table.name,
list(const.columns)[0].name
)
A given function can also be invoked for only the first invocation
of the event using the ``once`` argument::
@event.listens_for(Mapper, "before_configure", once=True)
def on_config():
do_config()
.. versionadded:: 0.9.4 Added ``once=True`` to :func:`.event.listen`
and :func:`.event.listens_for`.
.. warning:: The ``once`` argument does not imply automatic de-registration
of the listener function after it has been invoked a first time; a
listener entry will remain associated with the target object.
Associating an arbitrarily high number of listeners without explicitly
removing them will cause memory to grow unbounded even if ``once=True``
is specified.
.. seealso::
:func:`.listen` - general description of event listening
"""
def decorate(fn):
listen(target, identifier, fn, *args, **kw)
return fn
return decorate
def remove(target, identifier, fn):
"""Remove an event listener.
The arguments here should match exactly those which were sent to
:func:`.listen`; all the event registration which proceeded as a result
of this call will be reverted by calling :func:`.remove` with the same
arguments.
e.g.::
# if a function was registered like this...
@event.listens_for(SomeMappedClass, "before_insert", propagate=True)
def my_listener_function(*arg):
pass
# ... it's removed like this
event.remove(SomeMappedClass, "before_insert", my_listener_function)
Above, the listener function associated with ``SomeMappedClass`` was also
propagated to subclasses of ``SomeMappedClass``; the :func:`.remove`
function will revert all of these operations.
.. versionadded:: 0.9.0
.. note::
The :func:`.remove` function cannot be called at the same time
that the target event is being run. This has implications
for thread safety, and also means an event cannot be removed
from inside the listener function for itself. The list of
events to be run is contained within a mutable collection
that can't be changed during iteration.
Event registration and removal is not intended to be a "high
velocity" operation; it is a configurational operation. For
systems that need to quickly associate and deassociate with
events at high scale, use a mutable structure that is handled
from inside of a single listener.
.. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now
used as the container for the list of events, which explicitly
disallows collection mutation while the collection is being
iterated.
.. seealso::
:func:`.listen`
"""
_event_key(target, identifier, fn).remove()
def contains(target, identifier, fn):
"""Return True if the given target/ident/fn is set up to listen.
.. versionadded:: 0.9.0
"""
return _event_key(target, identifier, fn).contains()
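# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Round-trips a listener through listen(), contains() and remove() using the
# "after_parent_attach" DDL event shown in the docstrings above.
def _example_listen_roundtrip():
    from sqlalchemy import Column, Integer, MetaData, Table, UniqueConstraint
    def receive_attach(const, table):
        print("attached %s to %s" % (const.name, table.name))
    listen(UniqueConstraint, "after_parent_attach", receive_attach)
    assert contains(UniqueConstraint, "after_parent_attach", receive_attach)
    # attaching the constraint to a Table fires the listener
    Table(
        "t",
        MetaData(),
        Column("id", Integer, primary_key=True),
        UniqueConstraint("id", name="uq_t_id"),
    )
    remove(UniqueConstraint, "after_parent_attach", receive_attach)
    assert not contains(UniqueConstraint, "after_parent_attach", receive_attach)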
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/event/attr.py
|
# event/attr.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Attribute implementation for _Dispatch classes.
The various listener targets for a particular event class are represented
as attributes, which refer to collections of listeners to be fired off.
These collections can exist at the class level as well as at the instance
level. An event is fired off using code like this::
some_object.dispatch.first_connect(arg1, arg2)
Above, ``some_object.dispatch`` would be an instance of ``_Dispatch`` and
``first_connect`` is typically an instance of ``_ListenerCollection``
if event listeners are present, or ``_EmptyListener`` if none are present.
The attribute mechanics here spend effort trying to ensure listener functions
are available with a minimum of function call overhead, that unnecessary
objects aren't created (i.e. many empty per-instance listener collections),
as well as that everything is garbage collectable when owning references are
lost. Other features such as "propagation" of listener functions across
many ``_Dispatch`` instances, "joining" of multiple ``_Dispatch`` instances,
as well as support for subclass propagation (e.g. events assigned to
``Pool`` vs. ``QueuePool``) are all implemented here.
"""
from __future__ import absolute_import
from __future__ import with_statement
import collections
from itertools import chain
import weakref
from . import legacy
from . import registry
from .. import exc
from .. import util
from ..util import threading
class RefCollection(util.MemoizedSlots):
__slots__ = ("ref",)
def _memoized_attr_ref(self):
return weakref.ref(self, registry._collection_gced)
class _empty_collection(object):
def append(self, element):
pass
def extend(self, other):
pass
def remove(self, element):
pass
def __iter__(self):
return iter([])
def clear(self):
pass
class _ClsLevelDispatch(RefCollection):
"""Class-level events on :class:`._Dispatch` classes."""
__slots__ = (
"name",
"arg_names",
"has_kw",
"legacy_signatures",
"_clslevel",
"__weakref__",
)
def __init__(self, parent_dispatch_cls, fn):
self.name = fn.__name__
argspec = util.inspect_getfullargspec(fn)
self.arg_names = argspec.args[1:]
self.has_kw = bool(argspec.varkw)
self.legacy_signatures = list(
reversed(
sorted(
getattr(fn, "_legacy_signatures", []), key=lambda s: s[0]
)
)
)
fn.__doc__ = legacy._augment_fn_docs(self, parent_dispatch_cls, fn)
self._clslevel = weakref.WeakKeyDictionary()
def _adjust_fn_spec(self, fn, named):
if named:
fn = self._wrap_fn_for_kw(fn)
if self.legacy_signatures:
try:
argspec = util.get_callable_argspec(fn, no_self=True)
except TypeError:
pass
else:
fn = legacy._wrap_fn_for_legacy(self, fn, argspec)
return fn
def _wrap_fn_for_kw(self, fn):
def wrap_kw(*args, **kw):
argdict = dict(zip(self.arg_names, args))
argdict.update(kw)
return fn(**argdict)
return wrap_kw
def insert(self, event_key, propagate):
target = event_key.dispatch_target
assert isinstance(
target, type
), "Class-level Event targets must be classes."
if not getattr(target, "_sa_propagate_class_events", True):
raise exc.InvalidRequestError(
"Can't assign an event directly to the %s class" % target
)
stack = [target]
while stack:
cls = stack.pop(0)
stack.extend(cls.__subclasses__())
if cls is not target and cls not in self._clslevel:
self.update_subclass(cls)
else:
if cls not in self._clslevel:
self._assign_cls_collection(cls)
self._clslevel[cls].appendleft(event_key._listen_fn)
registry._stored_in_collection(event_key, self)
def append(self, event_key, propagate):
target = event_key.dispatch_target
assert isinstance(
target, type
), "Class-level Event targets must be classes."
if not getattr(target, "_sa_propagate_class_events", True):
raise exc.InvalidRequestError(
"Can't assign an event directly to the %s class" % target
)
stack = [target]
while stack:
cls = stack.pop(0)
stack.extend(cls.__subclasses__())
if cls is not target and cls not in self._clslevel:
self.update_subclass(cls)
else:
if cls not in self._clslevel:
self._assign_cls_collection(cls)
self._clslevel[cls].append(event_key._listen_fn)
registry._stored_in_collection(event_key, self)
def _assign_cls_collection(self, target):
if getattr(target, "_sa_propagate_class_events", True):
self._clslevel[target] = collections.deque()
else:
self._clslevel[target] = _empty_collection()
def update_subclass(self, target):
if target not in self._clslevel:
self._assign_cls_collection(target)
clslevel = self._clslevel[target]
for cls in target.__mro__[1:]:
if cls in self._clslevel:
clslevel.extend(
[fn for fn in self._clslevel[cls] if fn not in clslevel]
)
def remove(self, event_key):
target = event_key.dispatch_target
stack = [target]
while stack:
cls = stack.pop(0)
stack.extend(cls.__subclasses__())
if cls in self._clslevel:
self._clslevel[cls].remove(event_key._listen_fn)
registry._removed_from_collection(event_key, self)
def clear(self):
"""Clear all class level listeners"""
to_clear = set()
for dispatcher in self._clslevel.values():
to_clear.update(dispatcher)
dispatcher.clear()
registry._clear(self, to_clear)
def for_modify(self, obj):
"""Return an event collection which can be modified.
For _ClsLevelDispatch at the class level of
a dispatcher, this returns self.
"""
return self
class _InstanceLevelDispatch(RefCollection):
__slots__ = ()
def _adjust_fn_spec(self, fn, named):
return self.parent._adjust_fn_spec(fn, named)
class _EmptyListener(_InstanceLevelDispatch):
"""Serves as a proxy interface to the events
served by a _ClsLevelDispatch, when there are no
instance-level events present.
Is replaced by _ListenerCollection when instance-level
events are added.
"""
propagate = frozenset()
listeners = ()
__slots__ = "parent", "parent_listeners", "name"
def __init__(self, parent, target_cls):
if target_cls not in parent._clslevel:
parent.update_subclass(target_cls)
self.parent = parent # _ClsLevelDispatch
self.parent_listeners = parent._clslevel[target_cls]
self.name = parent.name
def for_modify(self, obj):
"""Return an event collection which can be modified.
For _EmptyListener at the instance level of
a dispatcher, this generates a new
_ListenerCollection, applies it to the instance,
and returns it.
"""
result = _ListenerCollection(self.parent, obj._instance_cls)
if getattr(obj, self.name) is self:
setattr(obj, self.name, result)
else:
assert isinstance(getattr(obj, self.name), _JoinedListener)
return result
def _needs_modify(self, *args, **kw):
raise NotImplementedError("need to call for_modify()")
exec_once = (
exec_once_unless_exception
) = insert = append = remove = clear = _needs_modify
def __call__(self, *args, **kw):
"""Execute this event."""
for fn in self.parent_listeners:
fn(*args, **kw)
def __len__(self):
return len(self.parent_listeners)
def __iter__(self):
return iter(self.parent_listeners)
def __bool__(self):
return bool(self.parent_listeners)
__nonzero__ = __bool__
class _CompoundListener(_InstanceLevelDispatch):
__slots__ = "_exec_once_mutex", "_exec_once"
def _memoized_attr__exec_once_mutex(self):
return threading.Lock()
def _exec_once_impl(self, retry_on_exception, *args, **kw):
with self._exec_once_mutex:
if not self._exec_once:
try:
self(*args, **kw)
exception = False
except:
exception = True
raise
finally:
if not exception or not retry_on_exception:
self._exec_once = True
def exec_once(self, *args, **kw):
"""Execute this event, but only if it has not been
executed already for this collection."""
if not self._exec_once:
self._exec_once_impl(False, *args, **kw)
def exec_once_unless_exception(self, *args, **kw):
"""Execute this event, but only if it has not been
executed already for this collection, or was called
by a previous exec_once_unless_exception call and
raised an exception.
If exec_once was already called, then this method will never run
the callable regardless of whether it raised or not.
.. versionadded:: 1.3.8
"""
if not self._exec_once:
self._exec_once_impl(True, *args, **kw)
def __call__(self, *args, **kw):
"""Execute this event."""
for fn in self.parent_listeners:
fn(*args, **kw)
for fn in self.listeners:
fn(*args, **kw)
def __len__(self):
return len(self.parent_listeners) + len(self.listeners)
def __iter__(self):
return chain(self.parent_listeners, self.listeners)
def __bool__(self):
return bool(self.listeners or self.parent_listeners)
__nonzero__ = __bool__
class _ListenerCollection(_CompoundListener):
"""Instance-level attributes on instances of :class:`._Dispatch`.
Represents a collection of listeners.
As of 0.7.9, _ListenerCollection is only first
created via the _EmptyListener.for_modify() method.
"""
__slots__ = (
"parent_listeners",
"parent",
"name",
"listeners",
"propagate",
"__weakref__",
)
def __init__(self, parent, target_cls):
if target_cls not in parent._clslevel:
parent.update_subclass(target_cls)
self._exec_once = False
self.parent_listeners = parent._clslevel[target_cls]
self.parent = parent
self.name = parent.name
self.listeners = collections.deque()
self.propagate = set()
def for_modify(self, obj):
"""Return an event collection which can be modified.
For _ListenerCollection at the instance level of
a dispatcher, this returns self.
"""
return self
def _update(self, other, only_propagate=True):
"""Populate from the listeners in another :class:`_Dispatch`
object."""
existing_listeners = self.listeners
existing_listener_set = set(existing_listeners)
self.propagate.update(other.propagate)
other_listeners = [
l
for l in other.listeners
if l not in existing_listener_set
and not only_propagate
or l in self.propagate
]
existing_listeners.extend(other_listeners)
to_associate = other.propagate.union(other_listeners)
registry._stored_in_collection_multi(self, other, to_associate)
def insert(self, event_key, propagate):
if event_key.prepend_to_list(self, self.listeners):
if propagate:
self.propagate.add(event_key._listen_fn)
def append(self, event_key, propagate):
if event_key.append_to_list(self, self.listeners):
if propagate:
self.propagate.add(event_key._listen_fn)
def remove(self, event_key):
self.listeners.remove(event_key._listen_fn)
self.propagate.discard(event_key._listen_fn)
registry._removed_from_collection(event_key, self)
def clear(self):
registry._clear(self, self.listeners)
self.propagate.clear()
self.listeners.clear()
class _JoinedListener(_CompoundListener):
__slots__ = "parent", "name", "local", "parent_listeners"
def __init__(self, parent, name, local):
self._exec_once = False
self.parent = parent
self.name = name
self.local = local
self.parent_listeners = self.local
@property
def listeners(self):
return getattr(self.parent, self.name)
def _adjust_fn_spec(self, fn, named):
return self.local._adjust_fn_spec(fn, named)
def for_modify(self, obj):
self.local = self.parent_listeners = self.local.for_modify(obj)
return self
def insert(self, event_key, propagate):
self.local.insert(event_key, propagate)
def append(self, event_key, propagate):
self.local.append(event_key, propagate)
def remove(self, event_key):
self.local.remove(event_key)
def clear(self):
raise NotImplementedError()
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/event/base.py
|
# event/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base implementation classes.
The public-facing ``Events`` serves as the base class for an event interface;
its public attributes represent different kinds of events. These attributes
are mirrored onto a ``_Dispatch`` class, which serves as a container for
collections of listener functions. These collections are represented both
at the class level of a particular ``_Dispatch`` class as well as within
instances of ``_Dispatch``.
"""
from __future__ import absolute_import
import weakref
from .attr import _ClsLevelDispatch
from .attr import _EmptyListener
from .attr import _JoinedListener
from .. import util
_registrars = util.defaultdict(list)
def _is_event_name(name):
return not name.startswith("_") and name != "dispatch"
class _UnpickleDispatch(object):
"""Serializable callable that re-generates an instance of
:class:`_Dispatch` given a particular :class:`.Events` subclass.
"""
def __call__(self, _instance_cls):
for cls in _instance_cls.__mro__:
if "dispatch" in cls.__dict__:
return cls.__dict__["dispatch"].dispatch._for_class(
_instance_cls
)
else:
raise AttributeError("No class with a 'dispatch' member present.")
class _Dispatch(object):
"""Mirror the event listening definitions of an Events class with
listener collections.
Classes which define a "dispatch" member will return a
non-instantiated :class:`._Dispatch` subclass when the member
is accessed at the class level. When the "dispatch" member is
accessed at the instance level of its owner, an instance
of the :class:`._Dispatch` class is returned.
A :class:`._Dispatch` class is generated for each :class:`.Events`
class defined, by the :func:`._create_dispatcher_class` function.
The original :class:`.Events` classes remain untouched.
This decouples the construction of :class:`.Events` subclasses from
the implementation used by the event internals, and allows
inspecting tools like Sphinx to work in an unsurprising
way against the public API.
"""
# In one ORM edge case, an attribute is added to _Dispatch,
# so __dict__ is used in just that case and potentially others.
__slots__ = "_parent", "_instance_cls", "__dict__", "_empty_listeners"
_empty_listener_reg = weakref.WeakKeyDictionary()
def __init__(self, parent, instance_cls=None):
self._parent = parent
self._instance_cls = instance_cls
if instance_cls:
try:
self._empty_listeners = self._empty_listener_reg[instance_cls]
except KeyError:
self._empty_listeners = self._empty_listener_reg[
instance_cls
] = {
ls.name: _EmptyListener(ls, instance_cls)
for ls in parent._event_descriptors
}
else:
self._empty_listeners = {}
def __getattr__(self, name):
# Assign EmptyListeners as attributes on demand
# to reduce startup time for new dispatch objects.
try:
ls = self._empty_listeners[name]
except KeyError:
raise AttributeError(name)
else:
setattr(self, ls.name, ls)
return ls
@property
def _event_descriptors(self):
for k in self._event_names:
# Yield _ClsLevelDispatch related
# to relevant event name.
yield getattr(self, k)
@property
def _listen(self):
return self._events._listen
def _for_class(self, instance_cls):
return self.__class__(self, instance_cls)
def _for_instance(self, instance):
instance_cls = instance.__class__
return self._for_class(instance_cls)
def _join(self, other):
"""Create a 'join' of this :class:`._Dispatch` and another.
This new dispatcher will dispatch events to both
:class:`._Dispatch` objects.
"""
if "_joined_dispatch_cls" not in self.__class__.__dict__:
cls = type(
"Joined%s" % self.__class__.__name__,
(_JoinedDispatcher,),
{"__slots__": self._event_names},
)
self.__class__._joined_dispatch_cls = cls
return self._joined_dispatch_cls(self, other)
def __reduce__(self):
return _UnpickleDispatch(), (self._instance_cls,)
def _update(self, other, only_propagate=True):
"""Populate from the listeners in another :class:`_Dispatch`
object."""
for ls in other._event_descriptors:
if isinstance(ls, _EmptyListener):
continue
getattr(self, ls.name).for_modify(self)._update(
ls, only_propagate=only_propagate
)
def _clear(self):
for ls in self._event_descriptors:
ls.for_modify(self).clear()
class _EventMeta(type):
"""Intercept new Event subclasses and create
associated _Dispatch classes."""
def __init__(cls, classname, bases, dict_):
_create_dispatcher_class(cls, classname, bases, dict_)
type.__init__(cls, classname, bases, dict_)
def _create_dispatcher_class(cls, classname, bases, dict_):
"""Create a :class:`._Dispatch` class corresponding to an
:class:`.Events` class."""
# there's all kinds of ways to do this,
# i.e. make a Dispatch class that shares the '_listen' method
# of the Event class, this is the straight monkeypatch.
if hasattr(cls, "dispatch"):
dispatch_base = cls.dispatch.__class__
else:
dispatch_base = _Dispatch
event_names = [k for k in dict_ if _is_event_name(k)]
dispatch_cls = type(
"%sDispatch" % classname, (dispatch_base,), {"__slots__": event_names}
)
dispatch_cls._event_names = event_names
dispatch_inst = cls._set_dispatch(cls, dispatch_cls)
for k in dispatch_cls._event_names:
setattr(dispatch_inst, k, _ClsLevelDispatch(cls, dict_[k]))
_registrars[k].append(cls)
for super_ in dispatch_cls.__bases__:
if issubclass(super_, _Dispatch) and super_ is not _Dispatch:
for ls in super_._events.dispatch._event_descriptors:
setattr(dispatch_inst, ls.name, ls)
dispatch_cls._event_names.append(ls.name)
if getattr(cls, "_dispatch_target", None):
cls._dispatch_target.dispatch = dispatcher(cls)
def _remove_dispatcher(cls):
for k in cls.dispatch._event_names:
_registrars[k].remove(cls)
if not _registrars[k]:
del _registrars[k]
class Events(util.with_metaclass(_EventMeta, object)):
"""Define event listening functions for a particular target type."""
@staticmethod
def _set_dispatch(cls, dispatch_cls):
# This allows an Events subclass to define additional utility
# methods made available to the target via
# "self.dispatch._events.<utilitymethod>"
# @staticmethod to allow easy "super" calls while in a metaclass
# constructor.
cls.dispatch = dispatch_cls(None)
dispatch_cls._events = cls
return cls.dispatch
@classmethod
def _accept_with(cls, target):
def dispatch_is(*types):
return all(isinstance(target.dispatch, t) for t in types)
def dispatch_parent_is(t):
return isinstance(target.dispatch.parent, t)
# Mapper, ClassManager, Session override this to
# also accept classes, scoped_sessions, sessionmakers, etc.
if hasattr(target, "dispatch"):
if (
dispatch_is(cls.dispatch.__class__)
or dispatch_is(type, cls.dispatch.__class__)
or (
dispatch_is(_JoinedDispatcher)
and dispatch_parent_is(cls.dispatch.__class__)
)
):
return target
@classmethod
def _listen(cls, event_key, propagate=False, insert=False, named=False):
event_key.base_listen(propagate=propagate, insert=insert, named=named)
@classmethod
def _remove(cls, event_key):
event_key.remove()
@classmethod
def _clear(cls):
cls.dispatch._clear()
class _JoinedDispatcher(object):
"""Represent a connection between two _Dispatch objects."""
__slots__ = "local", "parent", "_instance_cls"
def __init__(self, local, parent):
self.local = local
self.parent = parent
self._instance_cls = self.local._instance_cls
def __getattr__(self, name):
# Assign _JoinedListeners as attributes on demand
# to reduce startup time for new dispatch objects.
ls = getattr(self.local, name)
jl = _JoinedListener(self.parent, ls.name, ls)
setattr(self, ls.name, jl)
return jl
@property
def _listen(self):
return self.parent._listen
@property
def _events(self):
return self.parent._events
class dispatcher(object):
"""Descriptor used by target classes to
deliver the _Dispatch class at the class level
and produce new _Dispatch instances for target
instances.
"""
def __init__(self, events):
self.dispatch = events.dispatch
self.events = events
def __get__(self, obj, cls):
if obj is None:
return self.dispatch
obj.__dict__["dispatch"] = disp = self.dispatch._for_instance(obj)
return disp
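# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Minimal pattern by which a target class acquires a "dispatch" namespace,
# mirroring how SQLAlchemy's own event classes (e.g. PoolEvents) are wired.
# "Widget" and "before_work" are hypothetical names used only for illustration.
def _example_custom_event_class():
    from sqlalchemy import event
    class Widget(object):
        pass
    class WidgetEvents(Events):
        """Events for Widget objects."""
        _dispatch_target = Widget
        def before_work(self, widget, amount):
            "Called before ``amount`` units of work happen on ``widget``."
    @event.listens_for(Widget, "before_work")
    def receive_before_work(widget, amount):
        print("about to do %d units of work" % amount)
    w = Widget()
    w.dispatch.before_work(w, 5)  # invokes receive_before_work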
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/sql/functions.py
|
# sql/functions.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQL function API, factories, and built-in functions.
"""
from . import annotation
from . import operators
from . import schema
from . import sqltypes
from . import util as sqlutil
from .base import ColumnCollection
from .base import Executable
from .elements import _clone
from .elements import _literal_as_binds
from .elements import _type_from_args
from .elements import BinaryExpression
from .elements import BindParameter
from .elements import Cast
from .elements import ClauseList
from .elements import ColumnElement
from .elements import Extract
from .elements import FunctionFilter
from .elements import Grouping
from .elements import literal_column
from .elements import Over
from .elements import WithinGroup
from .selectable import Alias
from .selectable import FromClause
from .selectable import Select
from .visitors import VisitableType
from .. import util
_registry = util.defaultdict(dict)
_case_sensitive_registry = util.defaultdict(lambda: util.defaultdict(dict))
_CASE_SENSITIVE = util.symbol(
name="case_sensitive_function",
doc="Symbol to mark the functions that are switched into case-sensitive "
"mode.",
)
def register_function(identifier, fn, package="_default"):
"""Associate a callable with a particular func. name.
This is normally called by _GenericMeta, but is also
available by itself so that a non-Function construct
can be associated with the :data:`.func` accessor (i.e.
CAST, EXTRACT).
"""
reg = _registry[package]
case_sensitive_reg = _case_sensitive_registry[package]
raw_identifier = identifier
identifier = util.text_type(identifier).lower()
# Check if a function with the same lowercase identifier is registered.
if identifier in reg and reg[identifier] is not _CASE_SENSITIVE:
if raw_identifier in case_sensitive_reg[identifier]:
util.warn(
"The GenericFunction '{}' is already registered and "
"is going to be overriden.".format(identifier)
)
reg[identifier] = fn
else:
# If a function with the same lowercase identifier is registered,
# then these 2 functions are considered as case-sensitive.
# Note: This case should raise an error in a later release.
util.warn_deprecated(
"GenericFunction '{}' is already registered with "
"different letter case, so the previously registered function "
"'{}' is switched into case-sensitive mode. "
"GenericFunction objects will be fully case-insensitive in a "
"future release.".format(
raw_identifier,
list(case_sensitive_reg[identifier].keys())[0],
)
)
reg[identifier] = _CASE_SENSITIVE
# Check if a function with different letter case identifier is registered.
elif identifier in case_sensitive_reg:
# Note: This case will be removed in a later release.
if raw_identifier not in case_sensitive_reg[identifier]:
util.warn_deprecated(
"GenericFunction(s) '{}' are already registered with "
"different letter cases and might interact with '{}'. "
"GenericFunction objects will be fully case-insensitive in a "
"future release.".format(
sorted(case_sensitive_reg[identifier].keys()),
raw_identifier,
)
)
else:
util.warn(
"The GenericFunction '{}' is already registered and "
"is going to be overriden.".format(raw_identifier)
)
# Register by default
else:
reg[identifier] = fn
# Always register in case-sensitive registry
case_sensitive_reg[identifier][raw_identifier] = fn
class FunctionElement(Executable, ColumnElement, FromClause):
"""Base for SQL function-oriented constructs.
.. seealso::
:ref:`coretutorial_functions` - in the Core tutorial
:class:`.Function` - named SQL function.
:data:`.func` - namespace which produces registered or ad-hoc
:class:`.Function` instances.
:class:`.GenericFunction` - allows creation of registered function
types.
"""
packagenames = ()
_has_args = False
def __init__(self, *clauses, **kwargs):
r"""Construct a :class:`.FunctionElement`.
:param \*clauses: list of column expressions that form the arguments
of the SQL function call.
:param \**kwargs: additional kwargs are typically consumed by
subclasses.
.. seealso::
:data:`.func`
:class:`.Function`
"""
args = [_literal_as_binds(c, self.name) for c in clauses]
self._has_args = self._has_args or bool(args)
self.clause_expr = ClauseList(
operator=operators.comma_op, group_contents=True, *args
).self_group()
def _execute_on_connection(self, connection, multiparams, params):
return connection._execute_function(self, multiparams, params)
@property
def columns(self):
"""The set of columns exported by this :class:`.FunctionElement`.
Function objects currently have no result column names built in;
this method returns a single-element column collection with
an anonymously named column.
An interim approach to providing named columns for a function
as a FROM clause is to build a :func:`_expression.select` with the
desired columns::
from sqlalchemy.sql import column
stmt = select([column('x'), column('y')]).\
select_from(func.myfunction())
"""
return ColumnCollection(self.label(None))
@util.memoized_property
def clauses(self):
"""Return the underlying :class:`.ClauseList` which contains
the arguments for this :class:`.FunctionElement`.
"""
return self.clause_expr.element
def over(self, partition_by=None, order_by=None, rows=None, range_=None):
"""Produce an OVER clause against this function.
Used against aggregate or so-called "window" functions,
for database backends that support window functions.
The expression::
func.row_number().over(order_by='x')
is shorthand for::
from sqlalchemy import over
over(func.row_number(), order_by='x')
See :func:`_expression.over` for a full description.
"""
return Over(
self,
partition_by=partition_by,
order_by=order_by,
rows=rows,
range_=range_,
)
def within_group(self, *order_by):
"""Produce a WITHIN GROUP (ORDER BY expr) clause against this function.
Used against so-called "ordered set aggregate" and "hypothetical
set aggregate" functions, including :class:`.percentile_cont`,
:class:`.rank`, :class:`.dense_rank`, etc.
See :func:`_expression.within_group` for a full description.
.. versionadded:: 1.1
"""
return WithinGroup(self, *order_by)
def filter(self, *criterion):
"""Produce a FILTER clause against this function.
Used against aggregate and window functions,
for database backends that support the "FILTER" clause.
The expression::
func.count(1).filter(True)
is shorthand for::
from sqlalchemy import funcfilter
funcfilter(func.count(1), True)
.. versionadded:: 1.0.0
.. seealso::
:class:`.FunctionFilter`
:func:`.funcfilter`
"""
if not criterion:
return self
return FunctionFilter(self, *criterion)
def as_comparison(self, left_index, right_index):
"""Interpret this expression as a boolean comparison between two values.
A hypothetical SQL function "is_equal()" which compares two values
for equality would be written in the Core expression language as::
expr = func.is_equal("a", "b")
If "is_equal()" above is comparing "a" and "b" for equality, the
:meth:`.FunctionElement.as_comparison` method would be invoked as::
expr = func.is_equal("a", "b").as_comparison(1, 2)
Where above, the integer value "1" refers to the first argument of the
"is_equal()" function and the integer value "2" refers to the second.
This would create a :class:`.BinaryExpression` that is equivalent to::
BinaryExpression("a", "b", operator=op.eq)
However, at the SQL level it would still render as
"is_equal('a', 'b')".
The ORM, when it loads a related object or collection, needs to be able
to manipulate the "left" and "right" sides of the ON clause of a JOIN
expression. The purpose of this method is to provide a SQL function
construct that can also supply this information to the ORM, when used
with the :paramref:`_orm.relationship.primaryjoin` parameter.
The return
value is a containment object called :class:`.FunctionAsBinary`.
An ORM example is as follows::
class Venue(Base):
__tablename__ = 'venue'
id = Column(Integer, primary_key=True)
name = Column(String)
descendants = relationship(
"Venue",
primaryjoin=func.instr(
remote(foreign(name)), name + "/"
).as_comparison(1, 2) == 1,
viewonly=True,
order_by=name
)
Above, the "Venue" class can load descendant "Venue" objects by
determining if the name of the parent Venue is contained within the
start of the hypothetical descendant value's name, e.g. "parent1" would
match up to "parent1/child1", but not to "parent2/child1".
Possible use cases include the "materialized path" example given above,
as well as making use of special SQL functions such as geometric
functions to create join conditions.
:param left_index: the integer 1-based index of the function argument
that serves as the "left" side of the expression.
:param right_index: the integer 1-based index of the function argument
that serves as the "right" side of the expression.
.. versionadded:: 1.3
"""
return FunctionAsBinary(self, left_index, right_index)
@property
def _from_objects(self):
return self.clauses._from_objects
def get_children(self, **kwargs):
return (self.clause_expr,)
def _copy_internals(self, clone=_clone, **kw):
self.clause_expr = clone(self.clause_expr, **kw)
self._reset_exported()
FunctionElement.clauses._reset(self)
def within_group_type(self, within_group):
"""For types that define their return type as based on the criteria
within a WITHIN GROUP (ORDER BY) expression, called by the
:class:`.WithinGroup` construct.
Returns None by default, in which case the function's normal ``.type``
is used.
"""
return None
def alias(self, name=None, flat=False):
r"""Produce a :class:`_expression.Alias` construct against this
:class:`.FunctionElement`.
This construct wraps the function in a named alias which
is suitable for the FROM clause, in the style accepted for example
by PostgreSQL.
e.g.::
from sqlalchemy.sql import column
stmt = select([column('data_view')]).\
select_from(SomeTable).\
select_from(func.unnest(SomeTable.data).alias('data_view')
)
Would produce:
.. sourcecode:: sql
SELECT data_view
FROM sometable, unnest(sometable.data) AS data_view
.. versionadded:: 0.9.8 The :meth:`.FunctionElement.alias` method
is now supported. Previously, this method's behavior was
undefined and did not behave consistently across versions.
"""
return Alias._construct(self, name)
def select(self):
"""Produce a :func:`_expression.select` construct
against this :class:`.FunctionElement`.
This is shorthand for::
s = select([function_element])
"""
s = Select([self])
if self._execution_options:
s = s.execution_options(**self._execution_options)
return s
def scalar(self):
"""Execute this :class:`.FunctionElement` against an embedded
'bind' and return a scalar value.
This first calls :meth:`~.FunctionElement.select` to
produce a SELECT construct.
Note that :class:`.FunctionElement` can be passed to
the :meth:`.Connectable.scalar` method of :class:`_engine.Connection`
or :class:`_engine.Engine`.
"""
return self.select().execute().scalar()
def execute(self):
"""Execute this :class:`.FunctionElement` against an embedded
'bind'.
This first calls :meth:`~.FunctionElement.select` to
produce a SELECT construct.
Note that :class:`.FunctionElement` can be passed to
the :meth:`.Connectable.execute` method of :class:`_engine.Connection`
or :class:`_engine.Engine`.
"""
return self.select().execute()
def _bind_param(self, operator, obj, type_=None):
return BindParameter(
None,
obj,
_compared_to_operator=operator,
_compared_to_type=self.type,
unique=True,
type_=type_,
)
def self_group(self, against=None):
# for the moment, we are parenthesizing all array-returning
# expressions against getitem. This may need to be made
# more portable if in the future we support other DBs
# besides postgresql.
if against is operators.getitem and isinstance(
self.type, sqltypes.ARRAY
):
return Grouping(self)
else:
return super(FunctionElement, self).self_group(against=against)
class FunctionAsBinary(BinaryExpression):
def __init__(self, fn, left_index, right_index):
left = fn.clauses.clauses[left_index - 1]
right = fn.clauses.clauses[right_index - 1]
self.sql_function = fn
self.left_index = left_index
self.right_index = right_index
super(FunctionAsBinary, self).__init__(
left,
right,
operators.function_as_comparison_op,
type_=sqltypes.BOOLEANTYPE,
)
@property
def left(self):
return self.sql_function.clauses.clauses[self.left_index - 1]
@left.setter
def left(self, value):
self.sql_function.clauses.clauses[self.left_index - 1] = value
@property
def right(self):
return self.sql_function.clauses.clauses[self.right_index - 1]
@right.setter
def right(self, value):
self.sql_function.clauses.clauses[self.right_index - 1] = value
def _copy_internals(self, **kw):
clone = kw.pop("clone")
self.sql_function = clone(self.sql_function, **kw)
super(FunctionAsBinary, self)._copy_internals(**kw)
class _FunctionGenerator(object):
"""Generate SQL function expressions.
:data:`.func` is a special object instance which generates SQL
functions based on name-based attributes, e.g.::
>>> print(func.count(1))
count(:param_1)
The returned object is an instance of :class:`.Function`, and is a
column-oriented SQL element like any other, and is used in that way::
>>> print(select([func.count(table.c.id)]))
SELECT count(sometable.id) FROM sometable
Any name can be given to :data:`.func`. If the function name is unknown to
SQLAlchemy, it will be rendered exactly as is. For common SQL functions
which SQLAlchemy is aware of, the name may be interpreted as a *generic
function* which will be compiled appropriately to the target database::
>>> print(func.current_timestamp())
CURRENT_TIMESTAMP
To call functions which are present in dot-separated packages,
specify them in the same manner::
>>> print(func.stats.yield_curve(5, 10))
stats.yield_curve(:yield_curve_1, :yield_curve_2)
SQLAlchemy can be made aware of the return type of functions to enable
type-specific lexical and result-based behavior. For example, to ensure
that a string-based function returns a Unicode value and is similarly
treated as a string in expressions, specify
:class:`~sqlalchemy.types.Unicode` as the type:
>>> print(func.my_string(u'hi', type_=Unicode) + ' ' +
... func.my_string(u'there', type_=Unicode))
my_string(:my_string_1) || :my_string_2 || my_string(:my_string_3)
The object returned by a :data:`.func` call is usually an instance of
:class:`.Function`.
This object meets the "column" interface, including comparison and labeling
functions. The object can also be passed to the :meth:`~.Connectable.execute`
method of a :class:`_engine.Connection` or :class:`_engine.Engine`,
where it will be
wrapped inside of a SELECT statement first::
print(connection.execute(func.current_timestamp()).scalar())
In a few exception cases, the :data:`.func` accessor
will redirect a name to a built-in expression such as :func:`.cast`
or :func:`.extract`, as these names have well-known meaning
but are not exactly the same as "functions" from a SQLAlchemy
perspective.
Functions which are interpreted as "generic" functions know how to
calculate their return type automatically. For a listing of known generic
functions, see :ref:`generic_functions`.
.. note::
The :data:`.func` construct has only limited support for calling
standalone "stored procedures", especially those with special
parameterization concerns.
See the section :ref:`stored_procedures` for details on how to use
the DBAPI-level ``callproc()`` method for fully traditional stored
procedures.
.. seealso::
:ref:`coretutorial_functions` - in the Core Tutorial
:class:`.Function`
"""
def __init__(self, **opts):
self.__names = []
self.opts = opts
def __getattr__(self, name):
# passthru __ attributes; fixes pydoc
if name.startswith("__"):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
elif name.endswith("_"):
name = name[0:-1]
f = _FunctionGenerator(**self.opts)
f.__names = list(self.__names) + [name]
return f
def __call__(self, *c, **kwargs):
o = self.opts.copy()
o.update(kwargs)
tokens = len(self.__names)
if tokens == 2:
package, fname = self.__names
elif tokens == 1:
package, fname = "_default", self.__names[0]
else:
package = None
if package is not None:
func = _registry[package].get(fname.lower())
if func is _CASE_SENSITIVE:
case_sensitive_reg = _case_sensitive_registry[package]
func = case_sensitive_reg.get(fname.lower()).get(fname)
if func is not None:
return func(*c, **o)
return Function(
self.__names[-1], packagenames=self.__names[0:-1], *c, **o
)
func = _FunctionGenerator()
func.__doc__ = _FunctionGenerator.__doc__
modifier = _FunctionGenerator(group=False)
class Function(FunctionElement):
r"""Describe a named SQL function.
The :class:`.Function` object is typically generated from the
:data:`.func` generation object.
:param \*clauses: list of column expressions that form the arguments
of the SQL function call.
:param type\_: optional :class:`.TypeEngine` datatype object that will be
used as the return value of the column expression generated by this
function call.
:param packagenames: a string which indicates package prefix names
to be prepended to the function name when the SQL is generated.
The :data:`.func` generator creates these when it is called using
dotted format, e.g.::
func.mypackage.some_function(col1, col2)
.. seealso::
:ref:`coretutorial_functions`
:data:`.func` - namespace which produces registered or ad-hoc
:class:`.Function` instances.
:class:`.GenericFunction` - allows creation of registered function
types.
"""
__visit_name__ = "function"
def __init__(self, name, *clauses, **kw):
"""Construct a :class:`.Function`.
The :data:`.func` construct is normally used to construct
new :class:`.Function` instances.
"""
self.packagenames = kw.pop("packagenames", None) or []
self.name = name
self._bind = kw.get("bind", None)
self.type = sqltypes.to_instance(kw.get("type_", None))
FunctionElement.__init__(self, *clauses, **kw)
def _bind_param(self, operator, obj, type_=None):
return BindParameter(
self.name,
obj,
_compared_to_operator=operator,
_compared_to_type=self.type,
type_=type_,
unique=True,
)
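# A minimal illustrative sketch (hypothetical names): a Function may also be
# constructed directly, equivalently to using the func generator::
#
#     from sqlalchemy import Integer
#     from sqlalchemy.sql import column
#     from sqlalchemy.sql.functions import Function
#
#     expr = Function("my_func", column("q"), type_=Integer)
#     # str(expr) renders as "my_func(q)", with an Integer return type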
class _GenericMeta(VisitableType):
def __init__(cls, clsname, bases, clsdict):
if annotation.Annotated not in cls.__mro__:
cls.name = name = clsdict.get("name", clsname)
cls.identifier = identifier = clsdict.get("identifier", name)
package = clsdict.pop("package", "_default")
# legacy
if "__return_type__" in clsdict:
cls.type = clsdict["__return_type__"]
# Check _register attribute status
cls._register = getattr(cls, "_register", True)
# Register the function if required
if cls._register:
register_function(identifier, cls, package)
else:
# Set _register to True to register child classes by default
cls._register = True
super(_GenericMeta, cls).__init__(clsname, bases, clsdict)
class GenericFunction(util.with_metaclass(_GenericMeta, Function)):
"""Define a 'generic' function.
A generic function is a pre-established :class:`.Function`
class that is instantiated automatically when called
by name from the :data:`.func` attribute. Note that
calling any name from :data:`.func` has the effect that
a new :class:`.Function` instance is created automatically,
given that name. The primary use case for defining
a :class:`.GenericFunction` class is so that a function
of a particular name may be given a fixed return type.
It can also include custom argument parsing schemes as well
as additional methods.
Subclasses of :class:`.GenericFunction` are automatically
registered under the name of the class. For
example, a user-defined function ``as_utc()`` would
be available immediately::
from sqlalchemy.sql.functions import GenericFunction
from sqlalchemy.types import DateTime
class as_utc(GenericFunction):
type = DateTime
print(select([func.as_utc()]))
User-defined generic functions can be organized into
packages by specifying the "package" attribute when defining
:class:`.GenericFunction`. Third party libraries
containing many functions may want to use this in order
to avoid name conflicts with other systems. For example,
if our ``as_utc()`` function were part of a package
"time"::
class as_utc(GenericFunction):
type = DateTime
package = "time"
The above function would be available from :data:`.func`
using the package name ``time``::
print(select([func.time.as_utc()]))
A final option is to allow the function to be accessed
from one name in :data:`.func` but to render as a different name.
The ``identifier`` attribute will override the name used to
access the function as loaded from :data:`.func`, but will retain
the usage of ``name`` as the rendered name::
class GeoBuffer(GenericFunction):
type = Geometry
package = "geo"
name = "ST_Buffer"
identifier = "buffer"
The above function will render as follows::
>>> print(func.geo.buffer())
ST_Buffer()
The name is rendered as-is by default, without quoting, unless the name
contains special characters that require quoting. To force quoting
on or off for the name, use the :class:`.sqlalchemy.sql.quoted_name`
construct::
from sqlalchemy.sql import quoted_name
class GeoBuffer(GenericFunction):
type = Geometry
package = "geo"
name = quoted_name("ST_Buffer", True)
identifier = "buffer"
The above function will render as::
>>> print(func.geo.buffer())
"ST_Buffer"()
.. versionadded:: 1.3.13 The :class:`.quoted_name` construct is now
recognized for quoting when used with the "name" attribute of the
object, so that quoting can be forced on or off for the function
name.
"""
coerce_arguments = True
_register = False
def __init__(self, *args, **kwargs):
parsed_args = kwargs.pop("_parsed_args", None)
if parsed_args is None:
parsed_args = [_literal_as_binds(c, self.name) for c in args]
self._has_args = self._has_args or bool(parsed_args)
self.packagenames = []
self._bind = kwargs.get("bind", None)
self.clause_expr = ClauseList(
operator=operators.comma_op, group_contents=True, *parsed_args
).self_group()
self.type = sqltypes.to_instance(
kwargs.pop("type_", None) or getattr(self, "type", None)
)
register_function("cast", Cast)
register_function("extract", Extract)
class next_value(GenericFunction):
"""Represent the 'next value', given a :class:`.Sequence`
as its single argument.
Compiles into the appropriate function on each backend,
or will raise NotImplementedError if used on a backend
that does not provide support for sequences.
"""
type = sqltypes.Integer()
name = "next_value"
def __init__(self, seq, **kw):
assert isinstance(
seq, schema.Sequence
), "next_value() accepts a Sequence object as input."
self._bind = kw.get("bind", None)
self.sequence = seq
@property
def _from_objects(self):
return []
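# A minimal usage sketch (hypothetical sequence name)::
#
#     from sqlalchemy import Sequence, select, func
#     seq = Sequence("my_seq")
#     stmt = select([func.next_value(seq)])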
class AnsiFunction(GenericFunction):
def __init__(self, *args, **kwargs):
GenericFunction.__init__(self, *args, **kwargs)
class ReturnTypeFromArgs(GenericFunction):
"""Define a function whose return type is the same as its arguments."""
def __init__(self, *args, **kwargs):
args = [_literal_as_binds(c, self.name) for c in args]
kwargs.setdefault("type_", _type_from_args(args))
kwargs["_parsed_args"] = args
super(ReturnTypeFromArgs, self).__init__(*args, **kwargs)
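# For the subclasses below, the return type follows the argument type; e.g.
# (an illustrative sketch assuming an Integer column) ``func.max(table.c.id)``
# reports ``Integer`` as its type without requiring an explicit ``type_``.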
class coalesce(ReturnTypeFromArgs):
_has_args = True
class max(ReturnTypeFromArgs): # noqa
pass
class min(ReturnTypeFromArgs): # noqa
pass
class sum(ReturnTypeFromArgs): # noqa
pass
class now(GenericFunction): # noqa
type = sqltypes.DateTime
class concat(GenericFunction):
type = sqltypes.String
class char_length(GenericFunction):
type = sqltypes.Integer
def __init__(self, arg, **kwargs):
GenericFunction.__init__(self, arg, **kwargs)
class random(GenericFunction):
_has_args = True
class count(GenericFunction):
r"""The ANSI COUNT aggregate function. With no arguments,
emits COUNT \*.
E.g.::
from sqlalchemy import func
from sqlalchemy import select
from sqlalchemy import table, column
my_table = table('some_table', column('id'))
stmt = select([func.count()]).select_from(my_table)
Executing ``stmt`` would emit::
SELECT count(*) AS count_1
FROM some_table
"""
type = sqltypes.Integer
def __init__(self, expression=None, **kwargs):
if expression is None:
expression = literal_column("*")
super(count, self).__init__(expression, **kwargs)
class current_date(AnsiFunction):
type = sqltypes.Date
class current_time(AnsiFunction):
type = sqltypes.Time
class current_timestamp(AnsiFunction):
type = sqltypes.DateTime
class current_user(AnsiFunction):
type = sqltypes.String
class localtime(AnsiFunction):
type = sqltypes.DateTime
class localtimestamp(AnsiFunction):
type = sqltypes.DateTime
class session_user(AnsiFunction):
type = sqltypes.String
class sysdate(AnsiFunction):
type = sqltypes.DateTime
class user(AnsiFunction):
type = sqltypes.String
class array_agg(GenericFunction):
"""support for the ARRAY_AGG function.
The ``func.array_agg(expr)`` construct returns an expression of
type :class:`_types.ARRAY`.
e.g.::
stmt = select([func.array_agg(table.c.values)[2:5]])
.. versionadded:: 1.1
.. seealso::
:func:`_postgresql.array_agg` - PostgreSQL-specific version that
returns :class:`_postgresql.ARRAY`, which has PG-specific operators
added.
"""
type = sqltypes.ARRAY
def __init__(self, *args, **kwargs):
args = [_literal_as_binds(c) for c in args]
default_array_type = kwargs.pop("_default_array_type", sqltypes.ARRAY)
if "type_" not in kwargs:
type_from_args = _type_from_args(args)
if isinstance(type_from_args, sqltypes.ARRAY):
kwargs["type_"] = type_from_args
else:
kwargs["type_"] = default_array_type(type_from_args)
kwargs["_parsed_args"] = args
super(array_agg, self).__init__(*args, **kwargs)
class OrderedSetAgg(GenericFunction):
"""Define a function where the return type is based on the sort
expression type as defined by the expression passed to the
:meth:`.FunctionElement.within_group` method."""
array_for_multi_clause = False
def within_group_type(self, within_group):
func_clauses = self.clause_expr.element
order_by = sqlutil.unwrap_order_by(within_group.order_by)
if self.array_for_multi_clause and len(func_clauses.clauses) > 1:
return sqltypes.ARRAY(order_by[0].type)
else:
return order_by[0].type
class mode(OrderedSetAgg):
"""implement the ``mode`` ordered-set aggregate function.
This function must be used with the :meth:`.FunctionElement.within_group`
modifier to supply a sort expression to operate upon.
The return type of this function is the same as the sort expression.
.. versionadded:: 1.1
"""
class percentile_cont(OrderedSetAgg):
"""implement the ``percentile_cont`` ordered-set aggregate function.
This function must be used with the :meth:`.FunctionElement.within_group`
modifier to supply a sort expression to operate upon.
The return type of this function is the same as the sort expression,
or if the arguments are an array, an :class:`_types.ARRAY` of the sort
expression's type.
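E.g., an illustrative sketch assuming a table with a ``value`` column::
    stmt = select([
        func.percentile_cont(0.5).within_group(my_table.c.value)
    ])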
.. versionadded:: 1.1
"""
array_for_multi_clause = True
class percentile_disc(OrderedSetAgg):
"""implement the ``percentile_disc`` ordered-set aggregate function.
This function must be used with the :meth:`.FunctionElement.within_group`
modifier to supply a sort expression to operate upon.
The return type of this function is the same as the sort expression,
or if the arguments are an array, an :class:`_types.ARRAY` of the sort
expression's type.
.. versionadded:: 1.1
"""
array_for_multi_clause = True
class rank(GenericFunction):
"""Implement the ``rank`` hypothetical-set aggregate function.
This function must be used with the :meth:`.FunctionElement.within_group`
modifier to supply a sort expression to operate upon.
The return type of this function is :class:`.Integer`.
.. versionadded:: 1.1
"""
type = sqltypes.Integer()
class dense_rank(GenericFunction):
"""Implement the ``dense_rank`` hypothetical-set aggregate function.
This function must be used with the :meth:`.FunctionElement.within_group`
modifier to supply a sort expression to operate upon.
The return type of this function is :class:`.Integer`.
.. versionadded:: 1.1
"""
type = sqltypes.Integer()
class percent_rank(GenericFunction):
"""Implement the ``percent_rank`` hypothetical-set aggregate function.
This function must be used with the :meth:`.FunctionElement.within_group`
modifier to supply a sort expression to operate upon.
The return type of this function is :class:`.Numeric`.
.. versionadded:: 1.1
"""
type = sqltypes.Numeric()
class cume_dist(GenericFunction):
"""Implement the ``cume_dist`` hypothetical-set aggregate function.
This function must be used with the :meth:`.FunctionElement.within_group`
modifier to supply a sort expression to operate upon.
The return type of this function is :class:`.Numeric`.
.. versionadded:: 1.1
"""
type = sqltypes.Numeric()
class cube(GenericFunction):
r"""Implement the ``CUBE`` grouping operation.
This function is used as part of the GROUP BY of a statement,
e.g. :meth:`_expression.Select.group_by`::
stmt = select(
[func.sum(table.c.value), table.c.col_1, table.c.col_2]
).group_by(func.cube(table.c.col_1, table.c.col_2))
.. versionadded:: 1.2
"""
_has_args = True
class rollup(GenericFunction):
r"""Implement the ``ROLLUP`` grouping operation.
This function is used as part of the GROUP BY of a statement,
e.g. :meth:`_expression.Select.group_by`::
stmt = select(
[func.sum(table.c.value), table.c.col_1, table.c.col_2]
).group_by(func.rollup(table.c.col_1, table.c.col_2))
.. versionadded:: 1.2
"""
_has_args = True
class grouping_sets(GenericFunction):
r"""Implement the ``GROUPING SETS`` grouping operation.
This function is used as part of the GROUP BY of a statement,
e.g. :meth:`_expression.Select.group_by`::
stmt = select(
[func.sum(table.c.value), table.c.col_1, table.c.col_2]
).group_by(func.grouping_sets(table.c.col_1, table.c.col_2))
In order to group by multiple sets, use the :func:`.tuple_` construct::
from sqlalchemy import tuple_
stmt = select(
[
func.sum(table.c.value),
table.c.col_1, table.c.col_2,
table.c.col_3]
).group_by(
func.grouping_sets(
tuple_(table.c.col_1, table.c.col_2),
tuple_(table.c.value, table.c.col_3),
)
)
.. versionadded:: 1.2
"""
_has_args = True
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/sql/annotation.py
|
# sql/annotation.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The :class:`.Annotated` class and related routines; creates hash-equivalent
copies of SQL constructs which contain context-specific markers and
associations.
"""
from . import operators
from .. import util
class Annotated(object):
"""clones a ClauseElement and applies an 'annotations' dictionary.
Unlike regular clones, this clone also mimics __hash__() and
__cmp__() of the original element so that it takes its place
in hashed collections.
A reference to the original element is maintained, for the important
reason of keeping its hash value current. When GC'ed, the
hash value may be reused, causing conflicts.
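A rough sketch of the contract, using a hypothetical column element
``col`` (illustrative only)::
    annotated = col._annotate({"some_key": "some_value"})
    assert hash(annotated) == hash(col)
    assert annotated._annotations["some_key"] == "some_value"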
.. note:: The rationale for Annotated producing a brand new class,
rather than placing the functionality directly within ClauseElement,
is **performance**. The __hash__() method is absent on plain
ClauseElement which leads to significantly reduced function call
overhead, as the use of sets and dictionaries against ClauseElement
objects is prevalent, but most are not "annotated".
"""
def __new__(cls, *args):
if not args:
# clone constructor
return object.__new__(cls)
else:
element, values = args
# pull appropriate subclass from registry of annotated
# classes
try:
cls = annotated_classes[element.__class__]
except KeyError:
cls = _new_annotation_type(element.__class__, cls)
return object.__new__(cls)
def __init__(self, element, values):
self.__dict__ = element.__dict__.copy()
self.__element = element
self._annotations = values
self._hash = hash(element)
def _annotate(self, values):
_values = self._annotations.copy()
_values.update(values)
return self._with_annotations(_values)
def _with_annotations(self, values):
clone = self.__class__.__new__(self.__class__)
clone.__dict__ = self.__dict__.copy()
clone._annotations = values
return clone
def _deannotate(self, values=None, clone=True):
if values is None:
return self.__element
else:
_values = self._annotations.copy()
for v in values:
_values.pop(v, None)
return self._with_annotations(_values)
def _compiler_dispatch(self, visitor, **kw):
return self.__element.__class__._compiler_dispatch(self, visitor, **kw)
@property
def _constructor(self):
return self.__element._constructor
def _clone(self):
clone = self.__element._clone()
if clone is self.__element:
# detect immutable, don't change anything
return self
else:
# update the clone with any changes that have occurred
# to this object's __dict__.
clone.__dict__.update(self.__dict__)
return self.__class__(clone, self._annotations)
def __reduce__(self):
return self.__class__, (self.__element, self._annotations)
def __hash__(self):
return self._hash
def __eq__(self, other):
if isinstance(self.__element, operators.ColumnOperators):
return self.__element.__class__.__eq__(self, other)
else:
return hash(other) == hash(self)
# hard-generate Annotated subclasses. this technique
# is used instead of on-the-fly types (i.e. type.__new__())
# so that the resulting objects are pickleable.
annotated_classes = {}
def _deep_annotate(element, annotations, exclude=None):
"""Deep copy the given ClauseElement, annotating each element
with the given annotations dictionary.
Elements within the exclude collection will be cloned but not annotated.
"""
def clone(elem):
if (
exclude
and hasattr(elem, "proxy_set")
and elem.proxy_set.intersection(exclude)
):
newelem = elem._clone()
elif annotations != elem._annotations:
newelem = elem._annotate(annotations)
else:
newelem = elem
newelem._copy_internals(clone=clone)
return newelem
if element is not None:
element = clone(element)
clone = None # remove gc cycles
return element
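# A minimal usage sketch (hypothetical annotation key): given an expression
# ``expr``, _deep_annotate(expr, {"some_key": "some_value"}) returns a copy
# in which each cloned sub-element carries the given annotations dictionary,
# while ``expr`` itself is left unmodified.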
def _deep_deannotate(element, values=None):
"""Deep copy the given element, removing annotations."""
cloned = util.column_dict()
def clone(elem):
# if a values dict is given,
# the elem must be cloned each time it appears,
# as there may be different annotations in source
# elements that are remaining. if totally
# removing all annotations, can assume the same
# slate...
if values or elem not in cloned:
newelem = elem._deannotate(values=values, clone=True)
newelem._copy_internals(clone=clone)
if not values:
cloned[elem] = newelem
return newelem
else:
return cloned[elem]
if element is not None:
element = clone(element)
clone = None # remove gc cycles
return element
def _shallow_annotate(element, annotations):
"""Annotate the given ClauseElement and copy its internals so that
internal objects refer to the new annotated object.
Basically used to apply a "dont traverse" annotation to a
selectable, without digging throughout the whole
structure wasting time.
"""
element = element._annotate(annotations)
element._copy_internals()
return element
def _new_annotation_type(cls, base_cls):
if issubclass(cls, Annotated):
return cls
elif cls in annotated_classes:
return annotated_classes[cls]
for super_ in cls.__mro__:
# check if an Annotated subclass more specific than
# the given base_cls is already registered, such
# as AnnotatedColumnElement.
if super_ in annotated_classes:
base_cls = annotated_classes[super_]
break
annotated_classes[cls] = anno_cls = type(
"Annotated%s" % cls.__name__, (base_cls, cls), {}
)
globals()["Annotated%s" % cls.__name__] = anno_cls
return anno_cls
def _prepare_annotations(target_hierarchy, base_cls):
stack = [target_hierarchy]
while stack:
cls = stack.pop()
stack.extend(cls.__subclasses__())
_new_annotation_type(cls, base_cls)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/sql/ddl.py
|
# sql/ddl.py
# Copyright (C) 2009-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Provides the hierarchy of DDL-defining schema items as well as routines
to invoke them for a create/drop call.
"""
from .base import _bind_or_error
from .base import _generative
from .base import Executable
from .base import SchemaVisitor
from .elements import ClauseElement
from .. import event
from .. import exc
from .. import util
from ..util import topological
class _DDLCompiles(ClauseElement):
def _compiler(self, dialect, **kw):
"""Return a compiler appropriate for this ClauseElement, given a
Dialect."""
return dialect.ddl_compiler(dialect, self, **kw)
class DDLElement(Executable, _DDLCompiles):
"""Base class for DDL expression constructs.
This class is the base for the general purpose :class:`.DDL` class,
as well as the various create/drop clause constructs such as
:class:`.CreateTable`, :class:`.DropTable`, :class:`.AddConstraint`,
etc.
:class:`.DDLElement` integrates closely with SQLAlchemy events,
introduced in :ref:`event_toplevel`. An instance of one is
itself an event receiving callable::
event.listen(
users,
'after_create',
AddConstraint(constraint).execute_if(dialect='postgresql')
)
.. seealso::
:class:`.DDL`
:class:`.DDLEvents`
:ref:`event_toplevel`
:ref:`schema_ddl_sequences`
"""
_execution_options = Executable._execution_options.union(
{"autocommit": True}
)
target = None
on = None
dialect = None
callable_ = None
def _execute_on_connection(self, connection, multiparams, params):
return connection._execute_ddl(self, multiparams, params)
def execute(self, bind=None, target=None):
"""Execute this DDL immediately.
Executes the DDL statement in isolation using the supplied
:class:`.Connectable`, or the :class:`.Connectable` assigned to the
``.bind`` property if one is not supplied. If the DDL has conditional
``on`` criteria, it will be invoked with None as the event.
:param bind:
Optional, an ``Engine`` or ``Connection``. If not supplied, a valid
:class:`.Connectable` must be present in the
``.bind`` property.
:param target:
Optional, defaults to None. The target SchemaItem for the
execute call. Will be passed to the ``on`` callable if any,
and may also provide string expansion data for the
statement. See ``execute_at`` for more information.
"""
if bind is None:
bind = _bind_or_error(self)
if self._should_execute(target, bind):
return bind.execute(self.against(target))
else:
bind.engine.logger.info("DDL execution skipped, criteria not met.")
@util.deprecated(
"0.7",
"The :meth:`.DDLElement.execute_at` method is deprecated and will "
"be removed in a future release. Please use the :class:`.DDLEvents` "
"listener interface in conjunction with the "
":meth:`.DDLElement.execute_if` method.",
)
def execute_at(self, event_name, target):
"""Link execution of this DDL to the DDL lifecycle of a SchemaItem.
Links this ``DDLElement`` to a ``Table`` or ``MetaData`` instance,
executing it when that schema item is created or dropped. The DDL
statement will be executed using the same Connection and transactional
context as the Table create/drop itself. The ``.bind`` property of
this statement is ignored.
:param event:
One of the events defined in the schema item's ``.ddl_events``;
e.g. 'before-create', 'after-create', 'before-drop' or 'after-drop'
:param target:
The Table or MetaData instance with which this DDLElement will
be associated.
A DDLElement instance can be linked to any number of schema items.
``execute_at`` builds on the ``append_ddl_listener`` interface of
:class:`_schema.MetaData` and :class:`_schema.Table` objects.
Caveat: Creating or dropping a Table in isolation will also trigger
any DDL registered via ``execute_at`` against that Table's MetaData. This
may change in a future release.
"""
def call_event(target, connection, **kw):
if self._should_execute_deprecated(
event_name, target, connection, **kw
):
return connection.execute(self.against(target))
event.listen(target, "" + event_name.replace("-", "_"), call_event)
@_generative
def against(self, target):
"""Return a copy of this DDL against a specific schema item."""
self.target = target
@_generative
def execute_if(self, dialect=None, callable_=None, state=None):
r"""Return a callable that will execute this
DDLElement conditionally.
Used to provide a wrapper for event listening::
event.listen(
metadata,
'before_create',
DDL("my_ddl").execute_if(dialect='postgresql')
)
:param dialect: May be a string, tuple or a callable
predicate. If a string, it will be compared to the name of the
executing database dialect::
DDL('something').execute_if(dialect='postgresql')
If a tuple, specifies multiple dialect names::
DDL('something').execute_if(dialect=('postgresql', 'mysql'))
:param callable\_: A callable, which will be invoked with
four positional arguments as well as optional keyword
arguments:
:ddl:
This DDL element.
:target:
The :class:`_schema.Table` or :class:`_schema.MetaData`
object which is the
target of this event. May be None if the DDL is executed
explicitly.
:bind:
The :class:`_engine.Connection` being used for DDL execution
:tables:
Optional keyword argument - a list of Table objects which are to
be created/ dropped within a MetaData.create_all() or drop_all()
method call.
:state:
Optional keyword argument - will be the ``state`` argument
passed to this function.
:checkfirst:
Keyword argument, will be True if the 'checkfirst' flag was
set during the call to ``create()``, ``create_all()``,
``drop()``, ``drop_all()``.
If the callable returns a true value, the DDL statement will be
executed.
:param state: any value which will be passed to the callable\_
as the ``state`` keyword argument.
.. seealso::
:class:`.DDLEvents`
:ref:`event_toplevel`
"""
self.dialect = dialect
self.callable_ = callable_
self.state = state
def _should_execute(self, target, bind, **kw):
if self.on is not None and not self._should_execute_deprecated(
None, target, bind, **kw
):
return False
if isinstance(self.dialect, util.string_types):
if self.dialect != bind.engine.name:
return False
elif isinstance(self.dialect, (tuple, list, set)):
if bind.engine.name not in self.dialect:
return False
if self.callable_ is not None and not self.callable_(
self, target, bind, state=self.state, **kw
):
return False
return True
def _should_execute_deprecated(self, event, target, bind, **kw):
if self.on is None:
return True
elif isinstance(self.on, util.string_types):
return self.on == bind.engine.name
elif isinstance(self.on, (tuple, list, set)):
return bind.engine.name in self.on
else:
return self.on(self, event, target, bind, **kw)
def __call__(self, target, bind, **kw):
"""Execute the DDL as a ddl_listener."""
if self._should_execute(target, bind, **kw):
return bind.execute(self.against(target))
def _check_ddl_on(self, on):
if on is not None and (
not isinstance(on, util.string_types + (tuple, list, set))
and not util.callable(on)
):
raise exc.ArgumentError(
"Expected the name of a database dialect, a tuple "
"of names, or a callable for "
"'on' criteria, got type '%s'." % type(on).__name__
)
def bind(self):
if self._bind:
return self._bind
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
def _generate(self):
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
return s
class DDL(DDLElement):
"""A literal DDL statement.
Specifies literal SQL DDL to be executed by the database. DDL objects
function as DDL event listeners, and can be subscribed to those events
listed in :class:`.DDLEvents`, using either :class:`_schema.Table` or
:class:`_schema.MetaData` objects as targets.
Basic templating support allows
a single DDL instance to handle repetitive tasks for multiple tables.
Examples::
from sqlalchemy import event, DDL
tbl = Table('users', metadata, Column('uid', Integer))
event.listen(tbl, 'before_create', DDL('DROP TRIGGER users_trigger'))
spow = DDL('ALTER TABLE %(table)s SET secretpowers TRUE')
event.listen(tbl, 'after_create', spow.execute_if(dialect='somedb'))
drop_spow = DDL('ALTER TABLE users SET secretpowers FALSE')
connection.execute(drop_spow)
When operating on Table events, the following ``statement``
string substitutions are available::
%(table)s - the Table name, with any required quoting applied
%(schema)s - the schema name, with any required quoting applied
%(fullname)s - the Table name including schema, quoted if needed
The DDL's "context", if any, will be combined with the standard
substitutions noted above. Keys present in the context will override
the standard substitutions.
"""
__visit_name__ = "ddl"
@util.deprecated_params(
on=(
"0.7",
"The :paramref:`.DDL.on` parameter is deprecated and will be "
"removed in a future release. Please refer to "
":meth:`.DDLElement.execute_if`.",
)
)
def __init__(self, statement, on=None, context=None, bind=None):
"""Create a DDL statement.
:param statement:
A string or unicode string to be executed. Statements will be
processed with Python's string formatting operator. See the
``context`` argument and the ``execute_at`` method.
A literal '%' in a statement must be escaped as '%%'.
SQL bind parameters are not available in DDL statements.
:param on:
Optional filtering criteria. May be a string, tuple or a callable
predicate. If a string, it will be compared to the name of the
executing database dialect::
DDL('something', on='postgresql')
If a tuple, specifies multiple dialect names::
DDL('something', on=('postgresql', 'mysql'))
If a callable, it will be invoked with four positional arguments
as well as optional keyword arguments:
:ddl:
This DDL element.
:event:
The name of the event that has triggered this DDL, such as
'after-create'. Will be None if the DDL is executed explicitly.
:target:
The ``Table`` or ``MetaData`` object which is the target of
this event. May be None if the DDL is executed explicitly.
:connection:
The ``Connection`` being used for DDL execution
:tables:
Optional keyword argument - a list of Table objects which are to
be created/ dropped within a MetaData.create_all() or drop_all()
method call.
If the callable returns a true value, the DDL statement will be
executed.
:param context:
Optional dictionary, defaults to None. These values will be
available for use in string substitutions on the DDL statement.
:param bind:
Optional. A :class:`.Connectable`, used by
default when ``execute()`` is invoked without a bind argument.
.. seealso::
:class:`.DDLEvents`
:ref:`event_toplevel`
"""
if not isinstance(statement, util.string_types):
raise exc.ArgumentError(
"Expected a string or unicode SQL statement, got '%r'"
% statement
)
self.statement = statement
self.context = context or {}
self._check_ddl_on(on)
self.on = on
self._bind = bind
def __repr__(self):
return "<%s@%s; %s>" % (
type(self).__name__,
id(self),
", ".join(
[repr(self.statement)]
+ [
"%s=%r" % (key, getattr(self, key))
for key in ("on", "context")
if getattr(self, key)
]
),
)
class _CreateDropBase(DDLElement):
"""Base class for DDL constructs that represent CREATE and DROP or
equivalents.
The common theme of _CreateDropBase is a single
``element`` attribute which refers to the element
to be created or dropped.
"""
def __init__(self, element, on=None, bind=None):
self.element = element
self._check_ddl_on(on)
self.on = on
self.bind = bind
def _create_rule_disable(self, compiler):
"""Allow disable of _create_rule using a callable.
Pass to _create_rule using
util.portable_instancemethod(self._create_rule_disable)
to retain serializability.
"""
return False
class CreateSchema(_CreateDropBase):
"""Represent a CREATE SCHEMA statement.
The argument here is the string name of the schema.
"""
__visit_name__ = "create_schema"
def __init__(self, name, quote=None, **kw):
"""Create a new :class:`.CreateSchema` construct."""
self.quote = quote
super(CreateSchema, self).__init__(name, **kw)
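# A minimal usage sketch (hypothetical schema name, assumes an existing
# ``engine``)::
#
#     from sqlalchemy.schema import CreateSchema
#     engine.execute(CreateSchema("analytics"))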
class DropSchema(_CreateDropBase):
"""Represent a DROP SCHEMA statement.
The argument here is the string name of the schema.
"""
__visit_name__ = "drop_schema"
def __init__(self, name, quote=None, cascade=False, **kw):
"""Create a new :class:`.DropSchema` construct."""
self.quote = quote
self.cascade = cascade
super(DropSchema, self).__init__(name, **kw)
class CreateTable(_CreateDropBase):
"""Represent a CREATE TABLE statement."""
__visit_name__ = "create_table"
def __init__(
self, element, on=None, bind=None, include_foreign_key_constraints=None
):
"""Create a :class:`.CreateTable` construct.
:param element: a :class:`_schema.Table` that's the subject
of the CREATE
:param on: See the description for 'on' in :class:`.DDL`.
:param bind: See the description for 'bind' in :class:`.DDL`.
:param include_foreign_key_constraints: optional sequence of
:class:`_schema.ForeignKeyConstraint` objects that will be included
inline within the CREATE construct; if omitted, all foreign key
constraints that do not specify use_alter=True are included.
.. versionadded:: 1.0.0
"""
super(CreateTable, self).__init__(element, on=on, bind=bind)
self.columns = [CreateColumn(column) for column in element.columns]
self.include_foreign_key_constraints = include_foreign_key_constraints
class _DropView(_CreateDropBase):
"""Semi-public 'DROP VIEW' construct.
Used by the test suite for dialect-agnostic drops of views.
This object will eventually be part of a public "view" API.
"""
__visit_name__ = "drop_view"
class CreateColumn(_DDLCompiles):
"""Represent a :class:`_schema.Column`
as rendered in a CREATE TABLE statement,
via the :class:`.CreateTable` construct.
This is provided to support custom column DDL within the generation
of CREATE TABLE statements, by using the
compiler extension documented in :ref:`sqlalchemy.ext.compiler_toplevel`
to extend :class:`.CreateColumn`.
Typical integration is to examine the incoming :class:`_schema.Column`
object, and to redirect compilation if a particular flag or condition
is found::
from sqlalchemy import schema
from sqlalchemy.ext.compiler import compiles
@compiles(schema.CreateColumn)
def compile(element, compiler, **kw):
column = element.element
if "special" not in column.info:
return compiler.visit_create_column(element, **kw)
text = "%s SPECIAL DIRECTIVE %s" % (
column.name,
compiler.type_compiler.process(column.type)
)
default = compiler.get_column_default_string(column)
if default is not None:
text += " DEFAULT " + default
if not column.nullable:
text += " NOT NULL"
if column.constraints:
text += " ".join(
compiler.process(const)
for const in column.constraints)
return text
The above construct can be applied to a :class:`_schema.Table`
as follows::
from sqlalchemy import Table, MetaData, Column, Integer, String
from sqlalchemy import schema
metadata = MetaData()
table = Table('mytable', metadata,
Column('x', Integer, info={"special":True}, primary_key=True),
Column('y', String(50)),
Column('z', String(20), info={"special":True})
)
metadata.create_all(conn)
Above, the directives we've added to the :attr:`_schema.Column.info`
collection
will be detected by our custom compilation scheme::
CREATE TABLE mytable (
x SPECIAL DIRECTIVE INTEGER NOT NULL,
y VARCHAR(50),
z SPECIAL DIRECTIVE VARCHAR(20),
PRIMARY KEY (x)
)
The :class:`.CreateColumn` construct can also be used to skip certain
columns when producing a ``CREATE TABLE``. This is accomplished by
creating a compilation rule that conditionally returns ``None``.
This is essentially how to produce the same effect as using the
``system=True`` argument on :class:`_schema.Column`, which marks a column
as an implicitly-present "system" column.
For example, suppose we wish to produce a :class:`_schema.Table`
which skips
rendering of the PostgreSQL ``xmin`` column against the PostgreSQL
backend, but on other backends does render it, in anticipation of a
triggered rule. A conditional compilation rule could skip this name only
on PostgreSQL::
from sqlalchemy.schema import CreateColumn
@compiles(CreateColumn, "postgresql")
def skip_xmin(element, compiler, **kw):
if element.element.name == 'xmin':
return None
else:
return compiler.visit_create_column(element, **kw)
my_table = Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Column('xmin', Integer)
)
Above, a :class:`.CreateTable` construct will generate a ``CREATE TABLE``
which only includes the ``id`` column in the string; the ``xmin`` column
will be omitted, but only against the PostgreSQL backend.
"""
__visit_name__ = "create_column"
def __init__(self, element):
self.element = element
class DropTable(_CreateDropBase):
"""Represent a DROP TABLE statement."""
__visit_name__ = "drop_table"
class CreateSequence(_CreateDropBase):
"""Represent a CREATE SEQUENCE statement."""
__visit_name__ = "create_sequence"
class DropSequence(_CreateDropBase):
"""Represent a DROP SEQUENCE statement."""
__visit_name__ = "drop_sequence"
class CreateIndex(_CreateDropBase):
"""Represent a CREATE INDEX statement."""
__visit_name__ = "create_index"
class DropIndex(_CreateDropBase):
"""Represent a DROP INDEX statement."""
__visit_name__ = "drop_index"
class AddConstraint(_CreateDropBase):
"""Represent an ALTER TABLE ADD CONSTRAINT statement."""
__visit_name__ = "add_constraint"
def __init__(self, element, *args, **kw):
super(AddConstraint, self).__init__(element, *args, **kw)
element._create_rule = util.portable_instancemethod(
self._create_rule_disable
)
class DropConstraint(_CreateDropBase):
"""Represent an ALTER TABLE DROP CONSTRAINT statement."""
__visit_name__ = "drop_constraint"
def __init__(self, element, cascade=False, **kw):
self.cascade = cascade
super(DropConstraint, self).__init__(element, **kw)
element._create_rule = util.portable_instancemethod(
self._create_rule_disable
)
class SetTableComment(_CreateDropBase):
"""Represent a COMMENT ON TABLE IS statement."""
__visit_name__ = "set_table_comment"
class DropTableComment(_CreateDropBase):
"""Represent a COMMENT ON TABLE '' statement.
Note this varies a lot across database backends.
"""
__visit_name__ = "drop_table_comment"
class SetColumnComment(_CreateDropBase):
"""Represent a COMMENT ON COLUMN IS statement."""
__visit_name__ = "set_column_comment"
class DropColumnComment(_CreateDropBase):
"""Represent a COMMENT ON COLUMN IS NULL statement."""
__visit_name__ = "drop_column_comment"
class DDLBase(SchemaVisitor):
def __init__(self, connection):
self.connection = connection
class SchemaGenerator(DDLBase):
def __init__(
self, dialect, connection, checkfirst=False, tables=None, **kwargs
):
super(SchemaGenerator, self).__init__(connection, **kwargs)
self.checkfirst = checkfirst
self.tables = tables
self.preparer = dialect.identifier_preparer
self.dialect = dialect
self.memo = {}
def _can_create_table(self, table):
self.dialect.validate_identifier(table.name)
effective_schema = self.connection.schema_for_object(table)
if effective_schema:
self.dialect.validate_identifier(effective_schema)
return not self.checkfirst or not self.dialect.has_table(
self.connection, table.name, schema=effective_schema
)
def _can_create_sequence(self, sequence):
effective_schema = self.connection.schema_for_object(sequence)
return self.dialect.supports_sequences and (
(not self.dialect.sequences_optional or not sequence.optional)
and (
not self.checkfirst
or not self.dialect.has_sequence(
self.connection, sequence.name, schema=effective_schema
)
)
)
def visit_metadata(self, metadata):
if self.tables is not None:
tables = self.tables
else:
tables = list(metadata.tables.values())
collection = sort_tables_and_constraints(
[t for t in tables if self._can_create_table(t)]
)
seq_coll = [
s
for s in metadata._sequences.values()
if s.column is None and self._can_create_sequence(s)
]
event_collection = [t for (t, fks) in collection if t is not None]
metadata.dispatch.before_create(
metadata,
self.connection,
tables=event_collection,
checkfirst=self.checkfirst,
_ddl_runner=self,
)
for seq in seq_coll:
self.traverse_single(seq, create_ok=True)
for table, fkcs in collection:
if table is not None:
self.traverse_single(
table,
create_ok=True,
include_foreign_key_constraints=fkcs,
_is_metadata_operation=True,
)
else:
for fkc in fkcs:
self.traverse_single(fkc)
metadata.dispatch.after_create(
metadata,
self.connection,
tables=event_collection,
checkfirst=self.checkfirst,
_ddl_runner=self,
)
def visit_table(
self,
table,
create_ok=False,
include_foreign_key_constraints=None,
_is_metadata_operation=False,
):
if not create_ok and not self._can_create_table(table):
return
table.dispatch.before_create(
table,
self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation,
)
for column in table.columns:
if column.default is not None:
self.traverse_single(column.default)
if not self.dialect.supports_alter:
# e.g., don't omit any foreign key constraints
include_foreign_key_constraints = None
self.connection.execute(
# fmt: off
CreateTable(
table,
include_foreign_key_constraints= # noqa
include_foreign_key_constraints, # noqa
)
# fmt: on
)
if hasattr(table, "indexes"):
for index in table.indexes:
self.traverse_single(index)
if self.dialect.supports_comments and not self.dialect.inline_comments:
if table.comment is not None:
self.connection.execute(SetTableComment(table))
for column in table.columns:
if column.comment is not None:
self.connection.execute(SetColumnComment(column))
table.dispatch.after_create(
table,
self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation,
)
def visit_foreign_key_constraint(self, constraint):
if not self.dialect.supports_alter:
return
self.connection.execute(AddConstraint(constraint))
def visit_sequence(self, sequence, create_ok=False):
if not create_ok and not self._can_create_sequence(sequence):
return
self.connection.execute(CreateSequence(sequence))
def visit_index(self, index):
self.connection.execute(CreateIndex(index))
class SchemaDropper(DDLBase):
def __init__(
self, dialect, connection, checkfirst=False, tables=None, **kwargs
):
super(SchemaDropper, self).__init__(connection, **kwargs)
self.checkfirst = checkfirst
self.tables = tables
self.preparer = dialect.identifier_preparer
self.dialect = dialect
self.memo = {}
def visit_metadata(self, metadata):
if self.tables is not None:
tables = self.tables
else:
tables = list(metadata.tables.values())
try:
unsorted_tables = [t for t in tables if self._can_drop_table(t)]
collection = list(
reversed(
sort_tables_and_constraints(
unsorted_tables,
filter_fn=lambda constraint: False
if not self.dialect.supports_alter
or constraint.name is None
else None,
)
)
)
except exc.CircularDependencyError as err2:
if not self.dialect.supports_alter:
util.warn(
"Can't sort tables for DROP; an "
"unresolvable foreign key "
"dependency exists between tables: %s, and backend does "
"not support ALTER. To restore at least a partial sort, "
"apply use_alter=True to ForeignKey and "
"ForeignKeyConstraint "
"objects involved in the cycle to mark these as known "
"cycles that will be ignored."
% (", ".join(sorted([t.fullname for t in err2.cycles])))
)
collection = [(t, ()) for t in unsorted_tables]
else:
util.raise_(
exc.CircularDependencyError(
err2.args[0],
err2.cycles,
err2.edges,
msg="Can't sort tables for DROP; an "
"unresolvable foreign key "
"dependency exists between tables: %s. Please ensure "
"that the ForeignKey and ForeignKeyConstraint objects "
"involved in the cycle have "
"names so that they can be dropped using "
"DROP CONSTRAINT."
% (
", ".join(
sorted([t.fullname for t in err2.cycles])
)
),
),
from_=err2,
)
seq_coll = [
s
for s in metadata._sequences.values()
if s.column is None and self._can_drop_sequence(s)
]
event_collection = [t for (t, fks) in collection if t is not None]
metadata.dispatch.before_drop(
metadata,
self.connection,
tables=event_collection,
checkfirst=self.checkfirst,
_ddl_runner=self,
)
for table, fkcs in collection:
if table is not None:
self.traverse_single(
table, drop_ok=True, _is_metadata_operation=True
)
else:
for fkc in fkcs:
self.traverse_single(fkc)
for seq in seq_coll:
self.traverse_single(seq, drop_ok=True)
metadata.dispatch.after_drop(
metadata,
self.connection,
tables=event_collection,
checkfirst=self.checkfirst,
_ddl_runner=self,
)
def _can_drop_table(self, table):
self.dialect.validate_identifier(table.name)
effective_schema = self.connection.schema_for_object(table)
if effective_schema:
self.dialect.validate_identifier(effective_schema)
return not self.checkfirst or self.dialect.has_table(
self.connection, table.name, schema=effective_schema
)
def _can_drop_sequence(self, sequence):
effective_schema = self.connection.schema_for_object(sequence)
return self.dialect.supports_sequences and (
(not self.dialect.sequences_optional or not sequence.optional)
and (
not self.checkfirst
or self.dialect.has_sequence(
self.connection, sequence.name, schema=effective_schema
)
)
)
def visit_index(self, index):
self.connection.execute(DropIndex(index))
def visit_table(self, table, drop_ok=False, _is_metadata_operation=False):
if not drop_ok and not self._can_drop_table(table):
return
table.dispatch.before_drop(
table,
self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation,
)
self.connection.execute(DropTable(table))
# traverse client side defaults which may refer to server-side
# sequences. noting that some of these client side defaults may also be
# set up as server side defaults (see http://docs.sqlalchemy.org/en/
# latest/core/defaults.html#associating-a-sequence-as-the-server-side-
# default), so have to be dropped after the table is dropped.
for column in table.columns:
if column.default is not None:
self.traverse_single(column.default)
table.dispatch.after_drop(
table,
self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation,
)
def visit_foreign_key_constraint(self, constraint):
if not self.dialect.supports_alter:
return
self.connection.execute(DropConstraint(constraint))
def visit_sequence(self, sequence, drop_ok=False):
if not drop_ok and not self._can_drop_sequence(sequence):
return
self.connection.execute(DropSequence(sequence))
def sort_tables(
tables, skip_fn=None, extra_dependencies=None,
):
"""sort a collection of :class:`_schema.Table` objects based on dependency
.
This is a dependency-ordered sort which will emit :class:`_schema.Table`
objects such that they will follow their dependent :class:`_schema.Table`
objects.
Tables are dependent on another based on the presence of
:class:`_schema.ForeignKeyConstraint`
objects as well as explicit dependencies
added by :meth:`_schema.Table.add_is_dependent_on`.
.. warning::
The :func:`._schema.sort_tables` function cannot by itself
accommodate automatic resolution of dependency cycles between
tables, which are usually caused by mutually dependent foreign key
constraints. When these cycles are detected, the foreign keys
of these tables are omitted from consideration in the sort.
A warning is emitted when this condition occurs, which will be an
exception raise in a future release. Tables which are not part
of the cycle will still be returned in dependency order.
To resolve these cycles, the
:paramref:`_schema.ForeignKeyConstraint.use_alter` parameter may be
applied to those constraints which create a cycle. Alternatively,
the :func:`_schema.sort_tables_and_constraints` function will
automatically return foreign key constraints in a separate
collection when cycles are detected so that they may be applied
to a schema separately.
.. versionchanged:: 1.3.17 - a warning is emitted when
:func:`_schema.sort_tables` cannot perform a proper sort due to
cyclical dependencies. This will be an exception in a future
release. Additionally, the sort will continue to return
other tables not involved in the cycle in dependency order
which was not the case previously.
:param tables: a sequence of :class:`_schema.Table` objects.
:param skip_fn: optional callable which will be passed a
:class:`_schema.ForeignKey` object; if it returns True, this
constraint will not be considered as a dependency. Note this is
**different** from the same parameter in
:func:`.sort_tables_and_constraints`, which is
instead passed the owning :class:`_schema.ForeignKeyConstraint` object.
:param extra_dependencies: a sequence of 2-tuples of tables which will
also be considered as dependent on each other.
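E.g., an illustrative sketch (table names are hypothetical)::
    from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey
    from sqlalchemy.schema import sort_tables
    metadata = MetaData()
    parent = Table(
        "parent", metadata, Column("id", Integer, primary_key=True)
    )
    child = Table(
        "child", metadata,
        Column("id", Integer, primary_key=True),
        Column("parent_id", Integer, ForeignKey("parent.id")),
    )
    # "parent" is emitted before "child", which depends on it
    tables_in_order = sort_tables([child, parent])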
.. seealso::
:func:`.sort_tables_and_constraints`
:attr:`_schema.MetaData.sorted_tables` - uses this function to sort
"""
if skip_fn is not None:
def _skip_fn(fkc):
for fk in fkc.elements:
if skip_fn(fk):
return True
else:
return None
else:
_skip_fn = None
return [
t
for (t, fkcs) in sort_tables_and_constraints(
tables,
filter_fn=_skip_fn,
extra_dependencies=extra_dependencies,
_warn_for_cycles=True,
)
if t is not None
]
def sort_tables_and_constraints(
tables, filter_fn=None, extra_dependencies=None, _warn_for_cycles=False
):
"""sort a collection of :class:`_schema.Table` /
:class:`_schema.ForeignKeyConstraint`
objects.
This is a dependency-ordered sort which will emit tuples of
``(Table, [ForeignKeyConstraint, ...])`` such that each
:class:`_schema.Table` follows its dependent :class:`_schema.Table`
objects.
Remaining :class:`_schema.ForeignKeyConstraint`
objects that are separate due to
dependency rules not satisfied by the sort are emitted afterwards
as ``(None, [ForeignKeyConstraint ...])``.
Tables are dependent on another based on the presence of
:class:`_schema.ForeignKeyConstraint` objects, explicit dependencies
added by :meth:`_schema.Table.add_is_dependent_on`,
as well as dependencies
stated here using the :paramref:`~.sort_tables_and_constraints.skip_fn`
and/or :paramref:`~.sort_tables_and_constraints.extra_dependencies`
parameters.
:param tables: a sequence of :class:`_schema.Table` objects.
:param filter_fn: optional callable which will be passed a
:class:`_schema.ForeignKeyConstraint` object,
and returns a value based on
whether this constraint should definitely be included or excluded as
an inline constraint, or neither. If it returns False, the constraint
will definitely be included as a dependency that cannot be subject
to ALTER; if True, it will **only** be included as an ALTER result at
the end. Returning None means the constraint is included in the
table-based result unless it is detected as part of a dependency cycle.
:param extra_dependencies: a sequence of 2-tuples of tables which will
also be considered as dependent on each other.
.. versionadded:: 1.0.0
.. seealso::
:func:`.sort_tables`
"""
fixed_dependencies = set()
mutable_dependencies = set()
if extra_dependencies is not None:
fixed_dependencies.update(extra_dependencies)
remaining_fkcs = set()
for table in tables:
for fkc in table.foreign_key_constraints:
if fkc.use_alter is True:
remaining_fkcs.add(fkc)
continue
if filter_fn:
filtered = filter_fn(fkc)
if filtered is True:
remaining_fkcs.add(fkc)
continue
dependent_on = fkc.referred_table
if dependent_on is not table:
mutable_dependencies.add((dependent_on, table))
fixed_dependencies.update(
(parent, table) for parent in table._extra_dependencies
)
try:
candidate_sort = list(
topological.sort(
fixed_dependencies.union(mutable_dependencies),
tables,
deterministic_order=True,
)
)
except exc.CircularDependencyError as err:
if _warn_for_cycles:
util.warn(
"Cannot correctly sort tables; there are unresolvable cycles "
'between tables "%s", which is usually caused by mutually '
"dependent foreign key constraints. Foreign key constraints "
"involving these tables will not be considered; this warning "
"may raise an error in a future release."
% (", ".join(sorted(t.fullname for t in err.cycles)),)
)
for edge in err.edges:
if edge in mutable_dependencies:
table = edge[1]
if table not in err.cycles:
continue
can_remove = [
fkc
for fkc in table.foreign_key_constraints
if filter_fn is None or filter_fn(fkc) is not False
]
remaining_fkcs.update(can_remove)
for fkc in can_remove:
dependent_on = fkc.referred_table
if dependent_on is not table:
mutable_dependencies.discard((dependent_on, table))
candidate_sort = list(
topological.sort(
fixed_dependencies.union(mutable_dependencies),
tables,
deterministic_order=True,
)
)
return [
(table, table.foreign_key_constraints.difference(remaining_fkcs))
for table in candidate_sort
] + [(None, list(remaining_fkcs))]
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/sql/compiler.py
|
# sql/compiler.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base SQL and DDL compiler implementations.
Classes provided include:
:class:`.compiler.SQLCompiler` - renders SQL
strings
:class:`.compiler.DDLCompiler` - renders DDL
(data definition language) strings
:class:`.compiler.GenericTypeCompiler` - renders
type specification strings.
To generate user-defined SQL strings, see
:doc:`/ext/compiler`.
"""
import contextlib
import itertools
import re
from . import crud
from . import elements
from . import functions
from . import operators
from . import schema
from . import selectable
from . import sqltypes
from . import visitors
from .. import exc
from .. import util
RESERVED_WORDS = set(
[
"all",
"analyse",
"analyze",
"and",
"any",
"array",
"as",
"asc",
"asymmetric",
"authorization",
"between",
"binary",
"both",
"case",
"cast",
"check",
"collate",
"column",
"constraint",
"create",
"cross",
"current_date",
"current_role",
"current_time",
"current_timestamp",
"current_user",
"default",
"deferrable",
"desc",
"distinct",
"do",
"else",
"end",
"except",
"false",
"for",
"foreign",
"freeze",
"from",
"full",
"grant",
"group",
"having",
"ilike",
"in",
"initially",
"inner",
"intersect",
"into",
"is",
"isnull",
"join",
"leading",
"left",
"like",
"limit",
"localtime",
"localtimestamp",
"natural",
"new",
"not",
"notnull",
"null",
"off",
"offset",
"old",
"on",
"only",
"or",
"order",
"outer",
"overlaps",
"placing",
"primary",
"references",
"right",
"select",
"session_user",
"set",
"similar",
"some",
"symmetric",
"table",
"then",
"to",
"trailing",
"true",
"union",
"unique",
"user",
"using",
"verbose",
"when",
"where",
]
)
LEGAL_CHARACTERS = re.compile(r"^[A-Z0-9_$]+$", re.I)
LEGAL_CHARACTERS_PLUS_SPACE = re.compile(r"^[A-Z0-9_ $]+$", re.I)
ILLEGAL_INITIAL_CHARACTERS = {str(x) for x in range(0, 10)}.union(["$"])
FK_ON_DELETE = re.compile(
r"^(?:RESTRICT|CASCADE|SET NULL|NO ACTION|SET DEFAULT)$", re.I
)
FK_ON_UPDATE = re.compile(
r"^(?:RESTRICT|CASCADE|SET NULL|NO ACTION|SET DEFAULT)$", re.I
)
FK_INITIALLY = re.compile(r"^(?:DEFERRED|IMMEDIATE)$", re.I)
BIND_PARAMS = re.compile(r"(?<![:\w\$\x5c]):([\w\$]+)(?![:\w\$])", re.UNICODE)
BIND_PARAMS_ESC = re.compile(r"\x5c(:[\w\$]*)(?![:\w\$])", re.UNICODE)
BIND_TEMPLATES = {
"pyformat": "%%(%(name)s)s",
"qmark": "?",
"format": "%%s",
"numeric": ":[_POSITION]",
"named": ":%(name)s",
}
OPERATORS = {
# binary
operators.and_: " AND ",
operators.or_: " OR ",
operators.add: " + ",
operators.mul: " * ",
operators.sub: " - ",
operators.div: " / ",
operators.mod: " % ",
operators.truediv: " / ",
operators.neg: "-",
operators.lt: " < ",
operators.le: " <= ",
operators.ne: " != ",
operators.gt: " > ",
operators.ge: " >= ",
operators.eq: " = ",
operators.is_distinct_from: " IS DISTINCT FROM ",
operators.isnot_distinct_from: " IS NOT DISTINCT FROM ",
operators.concat_op: " || ",
operators.match_op: " MATCH ",
operators.notmatch_op: " NOT MATCH ",
operators.in_op: " IN ",
operators.notin_op: " NOT IN ",
operators.comma_op: ", ",
operators.from_: " FROM ",
operators.as_: " AS ",
operators.is_: " IS ",
operators.isnot: " IS NOT ",
operators.collate: " COLLATE ",
# unary
operators.exists: "EXISTS ",
operators.distinct_op: "DISTINCT ",
operators.inv: "NOT ",
operators.any_op: "ANY ",
operators.all_op: "ALL ",
# modifiers
operators.desc_op: " DESC",
operators.asc_op: " ASC",
operators.nullsfirst_op: " NULLS FIRST",
operators.nullslast_op: " NULLS LAST",
}
FUNCTIONS = {
functions.coalesce: "coalesce",
functions.current_date: "CURRENT_DATE",
functions.current_time: "CURRENT_TIME",
functions.current_timestamp: "CURRENT_TIMESTAMP",
functions.current_user: "CURRENT_USER",
functions.localtime: "LOCALTIME",
functions.localtimestamp: "LOCALTIMESTAMP",
functions.random: "random",
functions.sysdate: "sysdate",
functions.session_user: "SESSION_USER",
functions.user: "USER",
functions.cube: "CUBE",
functions.rollup: "ROLLUP",
functions.grouping_sets: "GROUPING SETS",
}
EXTRACT_MAP = {
"month": "month",
"day": "day",
"year": "year",
"second": "second",
"hour": "hour",
"doy": "doy",
"minute": "minute",
"quarter": "quarter",
"dow": "dow",
"week": "week",
"epoch": "epoch",
"milliseconds": "milliseconds",
"microseconds": "microseconds",
"timezone_hour": "timezone_hour",
"timezone_minute": "timezone_minute",
}
COMPOUND_KEYWORDS = {
selectable.CompoundSelect.UNION: "UNION",
selectable.CompoundSelect.UNION_ALL: "UNION ALL",
selectable.CompoundSelect.EXCEPT: "EXCEPT",
selectable.CompoundSelect.EXCEPT_ALL: "EXCEPT ALL",
selectable.CompoundSelect.INTERSECT: "INTERSECT",
selectable.CompoundSelect.INTERSECT_ALL: "INTERSECT ALL",
}
class Compiled(object):
"""Represent a compiled SQL or DDL expression.
The ``__str__`` method of the ``Compiled`` object should produce
the actual text of the statement. ``Compiled`` objects are
specific to their underlying database dialect, and also may
or may not be specific to the columns referenced within a
particular set of bind parameters. In no case should the
``Compiled`` object be dependent on the actual values of those
bind parameters, even though it may reference those values as
defaults.
"""
_cached_metadata = None
execution_options = util.immutabledict()
"""
Execution options propagated from the statement. In some cases,
sub-elements of the statement can modify these.
"""
def __init__(
self,
dialect,
statement,
bind=None,
schema_translate_map=None,
compile_kwargs=util.immutabledict(),
):
"""Construct a new :class:`.Compiled` object.
:param dialect: :class:`.Dialect` to compile against.
:param statement: :class:`_expression.ClauseElement` to be compiled.
:param bind: Optional Engine or Connection to compile this
statement against.
:param schema_translate_map: dictionary of schema names to be
translated when forming the resultant SQL
.. versionadded:: 1.1
.. seealso::
:ref:`schema_translating`
:param compile_kwargs: additional kwargs that will be
passed to the initial call to :meth:`.Compiled.process`.
"""
self.dialect = dialect
self.bind = bind
self.preparer = self.dialect.identifier_preparer
if schema_translate_map:
self.preparer = self.preparer._with_schema_translate(
schema_translate_map
)
if statement is not None:
self.statement = statement
self.can_execute = statement.supports_execution
if self.can_execute:
self.execution_options = statement._execution_options
self.string = self.process(self.statement, **compile_kwargs)
@util.deprecated(
"0.7",
"The :meth:`.Compiled.compile` method is deprecated and will be "
"removed in a future release. The :class:`.Compiled` object "
"now runs its compilation within the constructor, and this method "
"does nothing.",
)
def compile(self):
"""Produce the internal string representation of this element.
"""
pass
def _execute_on_connection(self, connection, multiparams, params):
if self.can_execute:
return connection._execute_compiled(self, multiparams, params)
else:
raise exc.ObjectNotExecutableError(self.statement)
@property
def sql_compiler(self):
"""Return a Compiled that is capable of processing SQL expressions.
If this compiler is one, it would likely just return 'self'.
"""
raise NotImplementedError()
def process(self, obj, **kwargs):
return obj._compiler_dispatch(self, **kwargs)
def __str__(self):
"""Return the string text of the generated SQL or DDL."""
return self.string or ""
def construct_params(self, params=None):
"""Return the bind params for this compiled object.
:param params: a dict of string/object pairs whose values will
override bind values compiled in to the
statement.
"""
raise NotImplementedError()
@property
def params(self):
"""Return the bind params for this compiled object."""
return self.construct_params()
def execute(self, *multiparams, **params):
"""Execute this compiled object."""
e = self.bind
if e is None:
raise exc.UnboundExecutionError(
"This Compiled object is not bound to any Engine "
"or Connection.",
code="2afi",
)
return e._execute_compiled(self, multiparams, params)
def scalar(self, *multiparams, **params):
"""Execute this compiled object and return the result's
scalar value."""
return self.execute(*multiparams, **params).scalar()
class TypeCompiler(util.with_metaclass(util.EnsureKWArgType, object)):
"""Produces DDL specification for TypeEngine objects."""
ensure_kwarg = r"visit_\w+"
def __init__(self, dialect):
self.dialect = dialect
def process(self, type_, **kw):
return type_._compiler_dispatch(self, **kw)
class _CompileLabel(visitors.Visitable):
"""lightweight label object which acts as an expression.Label."""
__visit_name__ = "label"
__slots__ = "element", "name"
def __init__(self, col, name, alt_names=()):
self.element = col
self.name = name
self._alt_names = (col,) + alt_names
@property
def proxy_set(self):
return self.element.proxy_set
@property
def type(self):
return self.element.type
def self_group(self, **kw):
return self
class prefix_anon_map(dict):
"""A map that creates new keys for missing key access.
Considers keys of the form "<ident> <name>" to produce
new symbols "<name>_<index>", where "index" is an incrementing integer
corresponding to <name>.
Inlines the approach taken by :class:`sqlalchemy.util.PopulateDict` which
is otherwise usually used for this type of operation.
"""
def __missing__(self, key):
(ident, derived) = key.split(" ", 1)
anonymous_counter = self.get(derived, 1)
self[derived] = anonymous_counter + 1
value = derived + "_" + str(anonymous_counter)
self[key] = value
return value
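# Illustration (hypothetical usage): successive lookups against a
# prefix_anon_map derive names from the "<ident> <name>" key, keeping a
# per-<name> counter:
#
#     amap = prefix_anon_map()
#     amap["12 tbl"]     # -> "tbl_1"
#     amap["13 tbl"]     # -> "tbl_2"
#     amap["14 other"]   # -> "other_1"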
class SQLCompiler(Compiled):
"""Default implementation of :class:`.Compiled`.
Compiles :class:`_expression.ClauseElement` objects into SQL strings.
"""
extract_map = EXTRACT_MAP
compound_keywords = COMPOUND_KEYWORDS
isdelete = isinsert = isupdate = False
"""class-level defaults which can be set at the instance
level to define if this Compiled instance represents
INSERT/UPDATE/DELETE
"""
isplaintext = False
returning = None
"""holds the "returning" collection of columns if
the statement is CRUD and defines returning columns
either implicitly or explicitly
"""
returning_precedes_values = False
"""set to True classwide to generate RETURNING
clauses before the VALUES or WHERE clause (e.g. MSSQL)
"""
render_table_with_column_in_update_from = False
"""set to True classwide to indicate the SET clause
in a multi-table UPDATE statement should qualify
columns with the table name (i.e. MySQL only)
"""
contains_expanding_parameters = False
"""True if we've encountered bindparam(..., expanding=True).
These need to be converted before execution time against the
string statement.
"""
ansi_bind_rules = False
"""SQL 92 doesn't allow bind parameters to be used
in the columns clause of a SELECT, nor does it allow
ambiguous expressions like "? = ?". A compiler
subclass can set this flag to True if the target
driver/DB enforces this restriction.
"""
_textual_ordered_columns = False
"""tell the result object that the column names as rendered are important,
but they are also "ordered" vs. what is in the compiled object here.
"""
_ordered_columns = True
"""
if False, means we can't be sure the list of entries
in _result_columns is actually the rendered order. Usually
True unless using an unordered TextAsFrom.
"""
_numeric_binds = False
"""
True if paramstyle is "numeric". This paramstyle is trickier than
all the others.
"""
insert_single_values_expr = None
"""When an INSERT is compiled with a single set of parameters inside
a VALUES expression, the string is assigned here, where it can be
used for insert batching schemes to rewrite the VALUES expression.
.. versionadded:: 1.3.8
"""
insert_prefetch = update_prefetch = ()
def __init__(
self, dialect, statement, column_keys=None, inline=False, **kwargs
):
"""Construct a new :class:`.SQLCompiler` object.
:param dialect: :class:`.Dialect` to be used
:param statement: :class:`_expression.ClauseElement` to be compiled
:param column_keys: a list of column names to be compiled into an
INSERT or UPDATE statement.
:param inline: whether to generate INSERT statements as "inline", e.g.
not formatted to return any generated defaults
:param kwargs: additional keyword arguments to be consumed by the
superclass.
"""
self.column_keys = column_keys
# compile INSERT/UPDATE defaults/sequences inlined (no pre-
# execute)
self.inline = inline or getattr(statement, "inline", False)
# a dictionary of bind parameter keys to BindParameter
# instances.
self.binds = {}
# a dictionary of BindParameter instances to "compiled" names
# that are actually present in the generated SQL
self.bind_names = util.column_dict()
# stack which keeps track of nested SELECT statements
self.stack = []
# relates label names in the final SQL to a tuple of local
# column/label name, ColumnElement object (if any) and
# TypeEngine. ResultProxy uses this for type processing and
# column targeting
self._result_columns = []
# true if the paramstyle is positional
self.positional = dialect.positional
if self.positional:
self.positiontup = []
self._numeric_binds = dialect.paramstyle == "numeric"
self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle]
self.ctes = None
self.label_length = (
dialect.label_length or dialect.max_identifier_length
)
# a map which tracks "anonymous" identifiers that are created on
# the fly here
self.anon_map = prefix_anon_map()
# a map which tracks "truncated" names based on
# dialect.label_length or dialect.max_identifier_length
self.truncated_names = {}
Compiled.__init__(self, dialect, statement, **kwargs)
if (
self.isinsert or self.isupdate or self.isdelete
) and statement._returning:
self.returning = statement._returning
if self.positional and self._numeric_binds:
self._apply_numbered_params()
@property
def prefetch(self):
return list(self.insert_prefetch + self.update_prefetch)
@util.memoized_instancemethod
def _init_cte_state(self):
"""Initialize collections related to CTEs only if
a CTE is located, to save on the overhead of
these collections otherwise.
"""
# collect CTEs to tack on top of a SELECT
self.ctes = util.OrderedDict()
self.ctes_by_name = {}
self.ctes_recursive = False
if self.positional:
self.cte_positional = {}
@contextlib.contextmanager
def _nested_result(self):
"""special API to support the use case of 'nested result sets'"""
result_columns, ordered_columns = (
self._result_columns,
self._ordered_columns,
)
self._result_columns, self._ordered_columns = [], False
try:
if self.stack:
entry = self.stack[-1]
entry["need_result_map_for_nested"] = True
else:
entry = None
yield self._result_columns, self._ordered_columns
finally:
if entry:
entry.pop("need_result_map_for_nested")
self._result_columns, self._ordered_columns = (
result_columns,
ordered_columns,
)
def _apply_numbered_params(self):
poscount = itertools.count(1)
self.string = re.sub(
r"\[_POSITION\]", lambda m: str(util.next(poscount)), self.string
)
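# Illustration (a sketch; the exact ":[_POSITION]" template is taken from
# BIND_TEMPLATES for the "numeric" paramstyle and is an assumption here): with
# that paramstyle, bindparam_string() first emits the "[_POSITION]" token for
# every parameter, and once the full string is assembled this method rewrites
# the tokens in order of appearance, e.g.
#
#     "SELECT x FROM t WHERE a = :[_POSITION] AND b = :[_POSITION]"
#         -> "SELECT x FROM t WHERE a = :1 AND b = :2"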
@util.memoized_property
def _bind_processors(self):
return dict(
(key, value)
for key, value in (
(
self.bind_names[bindparam],
bindparam.type._cached_bind_processor(self.dialect),
)
for bindparam in self.bind_names
)
if value is not None
)
def is_subquery(self):
return len(self.stack) > 1
@property
def sql_compiler(self):
return self
def construct_params(self, params=None, _group_number=None, _check=True):
"""return a dictionary of bind parameter keys and values"""
if params:
pd = {}
for bindparam in self.bind_names:
name = self.bind_names[bindparam]
if bindparam.key in params:
pd[name] = params[bindparam.key]
elif name in params:
pd[name] = params[name]
elif _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d"
% (bindparam.key, _group_number),
code="cd3x",
)
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key,
code="cd3x",
)
elif bindparam.callable:
pd[name] = bindparam.effective_value
else:
pd[name] = bindparam.value
return pd
else:
pd = {}
for bindparam in self.bind_names:
if _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d"
% (bindparam.key, _group_number),
code="cd3x",
)
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key,
code="cd3x",
)
if bindparam.callable:
pd[self.bind_names[bindparam]] = bindparam.effective_value
else:
pd[self.bind_names[bindparam]] = bindparam.value
return pd
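# Illustration (hypothetical example): for a statement compiled as
# "SELECT t.x FROM t WHERE t.x = :x_1", construct_params({"x_1": 5}) returns
# {"x_1": 5}.  Values supplied under the original bindparam .key take
# precedence over the compiled name, missing values fall back to the
# bindparam's stored value or callable, and InvalidRequestError is raised only
# for "required" parameters left unfilled.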
@property
def params(self):
"""Return the bind param dictionary embedded into this
compiled object, for those values that are present."""
return self.construct_params(_check=False)
@util.dependencies("sqlalchemy.engine.result")
def _create_result_map(self, result):
"""utility method used for unit tests only."""
return result.ResultMetaData._create_result_map(self._result_columns)
def default_from(self):
"""Called when a SELECT statement has no froms, and no FROM clause is
to be appended.
Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output.
"""
return ""
def visit_grouping(self, grouping, asfrom=False, **kwargs):
return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")"
def visit_label_reference(
self, element, within_columns_clause=False, **kwargs
):
if self.stack and self.dialect.supports_simple_order_by_label:
selectable = self.stack[-1]["selectable"]
with_cols, only_froms, only_cols = selectable._label_resolve_dict
if within_columns_clause:
resolve_dict = only_froms
else:
resolve_dict = only_cols
# this can be None in the case that a _label_reference()
# was subject to a replacement operation, in which case

# the replacement of the Label element may have changed
# to something else like a ColumnClause expression.
order_by_elem = element.element._order_by_label_element
if (
order_by_elem is not None
and order_by_elem.name in resolve_dict
and order_by_elem.shares_lineage(
resolve_dict[order_by_elem.name]
)
):
kwargs[
"render_label_as_label"
] = element.element._order_by_label_element
return self.process(
element.element,
within_columns_clause=within_columns_clause,
**kwargs
)
def visit_textual_label_reference(
self, element, within_columns_clause=False, **kwargs
):
if not self.stack:
# compiling the element outside of the context of a SELECT
return self.process(element._text_clause)
selectable = self.stack[-1]["selectable"]
with_cols, only_froms, only_cols = selectable._label_resolve_dict
try:
if within_columns_clause:
col = only_froms[element.element]
else:
col = with_cols[element.element]
except KeyError as ke:
elements._no_text_coercion(
element.element,
exc.CompileError,
"Can't resolve label reference for ORDER BY / "
"GROUP BY / DISTINCT etc.",
err=ke,
)
else:
kwargs["render_label_as_label"] = col
return self.process(
col, within_columns_clause=within_columns_clause, **kwargs
)
def visit_label(
self,
label,
add_to_result_map=None,
within_label_clause=False,
within_columns_clause=False,
render_label_as_label=None,
**kw
):
# only render labels within the columns clause
# or ORDER BY clause of a select. dialect-specific compilers
# can modify this behavior.
render_label_with_as = (
within_columns_clause and not within_label_clause
)
render_label_only = render_label_as_label is label
if render_label_only or render_label_with_as:
if isinstance(label.name, elements._truncated_label):
labelname = self._truncated_identifier("colident", label.name)
else:
labelname = label.name
if render_label_with_as:
if add_to_result_map is not None:
add_to_result_map(
labelname,
label.name,
(label, labelname) + label._alt_names,
label.type,
)
return (
label.element._compiler_dispatch(
self,
within_columns_clause=True,
within_label_clause=True,
**kw
)
+ OPERATORS[operators.as_]
+ self.preparer.format_label(label, labelname)
)
elif render_label_only:
return self.preparer.format_label(label, labelname)
else:
return label.element._compiler_dispatch(
self, within_columns_clause=False, **kw
)
def _fallback_column_name(self, column):
raise exc.CompileError(
"Cannot compile Column object until " "its 'name' is assigned."
)
def visit_column(
self, column, add_to_result_map=None, include_table=True, **kwargs
):
name = orig_name = column.name
if name is None:
name = self._fallback_column_name(column)
is_literal = column.is_literal
if not is_literal and isinstance(name, elements._truncated_label):
name = self._truncated_identifier("colident", name)
if add_to_result_map is not None:
add_to_result_map(
name, orig_name, (column, name, column.key), column.type
)
if is_literal:
# note we are not currently accommodating for
# literal_column(quoted_name('ident', True)) here
name = self.escape_literal_column(name)
else:
name = self.preparer.quote(name)
table = column.table
if table is None or not include_table or not table.named_with_column:
return name
else:
effective_schema = self.preparer.schema_for_object(table)
if effective_schema:
schema_prefix = (
self.preparer.quote_schema(effective_schema) + "."
)
else:
schema_prefix = ""
tablename = table.name
if isinstance(tablename, elements._truncated_label):
tablename = self._truncated_identifier("alias", tablename)
return schema_prefix + self.preparer.quote(tablename) + "." + name
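# Illustration (rough sketch): visit_column() qualifies the column with its
# table, and with the schema when schema_for_object() yields one, so column "q"
# of Table("t", ..., schema="s") renders roughly as "s.t.q" (identifiers are
# quoted only when the preparer requires it).  include_table=False, as used for
# the SET clause of a single-table UPDATE, drops the prefix and renders just "q".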
def visit_collation(self, element, **kw):
return self.preparer.format_collation(element.collation)
def visit_fromclause(self, fromclause, **kwargs):
return fromclause.name
def visit_index(self, index, **kwargs):
return index.name
def visit_typeclause(self, typeclause, **kw):
kw["type_expression"] = typeclause
return self.dialect.type_compiler.process(typeclause.type, **kw)
def post_process_text(self, text):
if self.preparer._double_percents:
text = text.replace("%", "%%")
return text
def escape_literal_column(self, text):
if self.preparer._double_percents:
text = text.replace("%", "%%")
return text
def visit_textclause(self, textclause, **kw):
def do_bindparam(m):
name = m.group(1)
if name in textclause._bindparams:
return self.process(textclause._bindparams[name], **kw)
else:
return self.bindparam_string(name, **kw)
if not self.stack:
self.isplaintext = True
# un-escape any \:params
return BIND_PARAMS_ESC.sub(
lambda m: m.group(1),
BIND_PARAMS.sub(
do_bindparam, self.post_process_text(textclause.text)
),
)
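# Illustration: given text("x = :x"), the BIND_PARAMS regex replaces ":x" with
# a rendered bind parameter (or with the matching entry in
# textclause._bindparams when one was supplied via .bindparams()), while a
# backslash-escaped "\:x" is passed through as the literal text ":x" by
# BIND_PARAMS_ESC without generating a bind.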
def visit_text_as_from(
self, taf, compound_index=None, asfrom=False, parens=True, **kw
):
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = (
toplevel
or (
compound_index == 0
and entry.get("need_result_map_for_compound", False)
)
or entry.get("need_result_map_for_nested", False)
)
if populate_result_map:
self._ordered_columns = (
self._textual_ordered_columns
) = taf.positional
for c in taf.column_args:
self.process(
c,
within_columns_clause=True,
add_to_result_map=self._add_to_result_map,
)
text = self.process(taf.element, **kw)
if asfrom and parens:
text = "(%s)" % text
return text
def visit_null(self, expr, **kw):
return "NULL"
def visit_true(self, expr, **kw):
if self.dialect.supports_native_boolean:
return "true"
else:
return "1"
def visit_false(self, expr, **kw):
if self.dialect.supports_native_boolean:
return "false"
else:
return "0"
def visit_clauselist(self, clauselist, **kw):
sep = clauselist.operator
if sep is None:
sep = " "
else:
sep = OPERATORS[clauselist.operator]
text = sep.join(
s
for s in (
c._compiler_dispatch(self, **kw) for c in clauselist.clauses
)
if s
)
if clauselist._tuple_values and self.dialect.tuple_in_values:
text = "VALUES " + text
return text
def visit_case(self, clause, **kwargs):
x = "CASE "
if clause.value is not None:
x += clause.value._compiler_dispatch(self, **kwargs) + " "
for cond, result in clause.whens:
x += (
"WHEN "
+ cond._compiler_dispatch(self, **kwargs)
+ " THEN "
+ result._compiler_dispatch(self, **kwargs)
+ " "
)
if clause.else_ is not None:
x += (
"ELSE " + clause.else_._compiler_dispatch(self, **kwargs) + " "
)
x += "END"
return x
def visit_type_coerce(self, type_coerce, **kw):
return type_coerce.typed_expression._compiler_dispatch(self, **kw)
def visit_cast(self, cast, **kwargs):
return "CAST(%s AS %s)" % (
cast.clause._compiler_dispatch(self, **kwargs),
cast.typeclause._compiler_dispatch(self, **kwargs),
)
def _format_frame_clause(self, range_, **kw):
return "%s AND %s" % (
"UNBOUNDED PRECEDING"
if range_[0] is elements.RANGE_UNBOUNDED
else "CURRENT ROW"
if range_[0] is elements.RANGE_CURRENT
else "%s PRECEDING"
% (self.process(elements.literal(abs(range_[0])), **kw),)
if range_[0] < 0
else "%s FOLLOWING"
% (self.process(elements.literal(range_[0]), **kw),),
"UNBOUNDED FOLLOWING"
if range_[1] is elements.RANGE_UNBOUNDED
else "CURRENT ROW"
if range_[1] is elements.RANGE_CURRENT
else "%s PRECEDING"
% (self.process(elements.literal(abs(range_[1])), **kw),)
if range_[1] < 0
else "%s FOLLOWING"
% (self.process(elements.literal(range_[1]), **kw),),
)
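# Illustration (a sketch of one case): for over(func.row_number(),
# order_by=t.c.x, rows=(-2, 0)), the frame tuple becomes
# "<bound value 2> PRECEDING AND CURRENT ROW" -- the integer renders as a bind
# parameter via elements.literal() -- and visit_over() wraps it as
# "row_number() OVER (ORDER BY t.x ROWS BETWEEN ... )".  The RANGE_UNBOUNDED and
# RANGE_CURRENT sentinels map to "UNBOUNDED PRECEDING/FOLLOWING" and
# "CURRENT ROW".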
def visit_over(self, over, **kwargs):
if over.range_:
range_ = "RANGE BETWEEN %s" % self._format_frame_clause(
over.range_, **kwargs
)
elif over.rows:
range_ = "ROWS BETWEEN %s" % self._format_frame_clause(
over.rows, **kwargs
)
else:
range_ = None
return "%s OVER (%s)" % (
over.element._compiler_dispatch(self, **kwargs),
" ".join(
[
"%s BY %s"
% (word, clause._compiler_dispatch(self, **kwargs))
for word, clause in (
("PARTITION", over.partition_by),
("ORDER", over.order_by),
)
if clause is not None and len(clause)
]
+ ([range_] if range_ else [])
),
)
def visit_withingroup(self, withingroup, **kwargs):
return "%s WITHIN GROUP (ORDER BY %s)" % (
withingroup.element._compiler_dispatch(self, **kwargs),
withingroup.order_by._compiler_dispatch(self, **kwargs),
)
def visit_funcfilter(self, funcfilter, **kwargs):
return "%s FILTER (WHERE %s)" % (
funcfilter.func._compiler_dispatch(self, **kwargs),
funcfilter.criterion._compiler_dispatch(self, **kwargs),
)
def visit_extract(self, extract, **kwargs):
field = self.extract_map.get(extract.field, extract.field)
return "EXTRACT(%s FROM %s)" % (
field,
extract.expr._compiler_dispatch(self, **kwargs),
)
def visit_function(self, func, add_to_result_map=None, **kwargs):
if add_to_result_map is not None:
add_to_result_map(func.name, func.name, (), func.type)
disp = getattr(self, "visit_%s_func" % func.name.lower(), None)
if disp:
return disp(func, **kwargs)
else:
name = FUNCTIONS.get(func.__class__, None)
if name:
if func._has_args:
name += "%(expr)s"
else:
name = func.name
name = (
self.preparer.quote(name)
if self.preparer._requires_quotes_illegal_chars(name)
or isinstance(name, elements.quoted_name)
else name
)
name = name + "%(expr)s"
return ".".join(
[
(
self.preparer.quote(tok)
if self.preparer._requires_quotes_illegal_chars(tok)
or isinstance(name, elements.quoted_name)
else tok
)
for tok in func.packagenames
]
+ [name]
) % {"expr": self.function_argspec(func, **kwargs)}
def visit_next_value_func(self, next_value, **kw):
return self.visit_sequence(next_value.sequence)
def visit_sequence(self, sequence, **kw):
raise NotImplementedError(
"Dialect '%s' does not support sequence increments."
% self.dialect.name
)
def function_argspec(self, func, **kwargs):
return func.clause_expr._compiler_dispatch(self, **kwargs)
def visit_compound_select(
self, cs, asfrom=False, parens=True, compound_index=0, **kwargs
):
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
need_result_map = toplevel or (
compound_index == 0
and entry.get("need_result_map_for_compound", False)
)
self.stack.append(
{
"correlate_froms": entry["correlate_froms"],
"asfrom_froms": entry["asfrom_froms"],
"selectable": cs,
"need_result_map_for_compound": need_result_map,
}
)
keyword = self.compound_keywords.get(cs.keyword)
text = (" " + keyword + " ").join(
(
c._compiler_dispatch(
self,
asfrom=asfrom,
parens=False,
compound_index=i,
**kwargs
)
for i, c in enumerate(cs.selects)
)
)
text += self.group_by_clause(cs, **dict(asfrom=asfrom, **kwargs))
text += self.order_by_clause(cs, **kwargs)
text += (
(cs._limit_clause is not None or cs._offset_clause is not None)
and self.limit_clause(cs, **kwargs)
or ""
)
if self.ctes and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom and parens:
return "(" + text + ")"
else:
return text
def _get_operator_dispatch(self, operator_, qualifier1, qualifier2):
attrname = "visit_%s_%s%s" % (
operator_.__name__,
qualifier1,
"_" + qualifier2 if qualifier2 else "",
)
return getattr(self, attrname, None)
def visit_unary(self, unary, **kw):
if unary.operator:
if unary.modifier:
raise exc.CompileError(
"Unary expression does not support operator "
"and modifier simultaneously"
)
disp = self._get_operator_dispatch(
unary.operator, "unary", "operator"
)
if disp:
return disp(unary, unary.operator, **kw)
else:
return self._generate_generic_unary_operator(
unary, OPERATORS[unary.operator], **kw
)
elif unary.modifier:
disp = self._get_operator_dispatch(
unary.modifier, "unary", "modifier"
)
if disp:
return disp(unary, unary.modifier, **kw)
else:
return self._generate_generic_unary_modifier(
unary, OPERATORS[unary.modifier], **kw
)
else:
raise exc.CompileError(
"Unary expression has no operator or modifier"
)
def visit_istrue_unary_operator(self, element, operator, **kw):
if (
element._is_implicitly_boolean
or self.dialect.supports_native_boolean
):
return self.process(element.element, **kw)
else:
return "%s = 1" % self.process(element.element, **kw)
def visit_isfalse_unary_operator(self, element, operator, **kw):
if (
element._is_implicitly_boolean
or self.dialect.supports_native_boolean
):
return "NOT %s" % self.process(element.element, **kw)
else:
return "%s = 0" % self.process(element.element, **kw)
def visit_notmatch_op_binary(self, binary, operator, **kw):
return "NOT %s" % self.visit_binary(
binary, override_operator=operators.match_op
)
def _emit_empty_in_warning(self):
util.warn(
"The IN-predicate was invoked with an "
"empty sequence. This results in a "
"contradiction, which nonetheless can be "
"expensive to evaluate. Consider alternative "
"strategies for improved performance."
)
def visit_empty_in_op_binary(self, binary, operator, **kw):
if self.dialect._use_static_in:
return "1 != 1"
else:
if self.dialect._warn_on_empty_in:
self._emit_empty_in_warning()
return self.process(binary.left != binary.left)
def visit_empty_notin_op_binary(self, binary, operator, **kw):
if self.dialect._use_static_in:
return "1 = 1"
else:
if self.dialect._warn_on_empty_in:
self._emit_empty_in_warning()
return self.process(binary.left == binary.left)
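# Illustration: col.in_([]) dispatches to visit_empty_in_op_binary(); dialects
# that set _use_static_in get the constant "1 != 1", everything else gets the
# self-contradiction "col != col" (optionally preceded by the empty-sequence
# warning above).  The NOT IN case is the mirror image, producing "1 = 1" or
# "col = col".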
def visit_empty_set_expr(self, element_types):
raise NotImplementedError(
"Dialect '%s' does not support empty set expression."
% self.dialect.name
)
def visit_binary(
self, binary, override_operator=None, eager_grouping=False, **kw
):
# don't allow "? = ?" to render
if (
self.ansi_bind_rules
and isinstance(binary.left, elements.BindParameter)
and isinstance(binary.right, elements.BindParameter)
):
kw["literal_binds"] = True
operator_ = override_operator or binary.operator
disp = self._get_operator_dispatch(operator_, "binary", None)
if disp:
return disp(binary, operator_, **kw)
else:
try:
opstring = OPERATORS[operator_]
except KeyError as err:
util.raise_(
exc.UnsupportedCompilationError(self, operator_),
replace_context=err,
)
else:
return self._generate_generic_binary(binary, opstring, **kw)
def visit_function_as_comparison_op_binary(self, element, operator, **kw):
return self.process(element.sql_function, **kw)
def visit_mod_binary(self, binary, operator, **kw):
if self.preparer._double_percents:
return (
self.process(binary.left, **kw)
+ " %% "
+ self.process(binary.right, **kw)
)
else:
return (
self.process(binary.left, **kw)
+ " % "
+ self.process(binary.right, **kw)
)
def visit_custom_op_binary(self, element, operator, **kw):
kw["eager_grouping"] = operator.eager_grouping
return self._generate_generic_binary(
element, " " + operator.opstring + " ", **kw
)
def visit_custom_op_unary_operator(self, element, operator, **kw):
return self._generate_generic_unary_operator(
element, operator.opstring + " ", **kw
)
def visit_custom_op_unary_modifier(self, element, operator, **kw):
return self._generate_generic_unary_modifier(
element, " " + operator.opstring, **kw
)
def _generate_generic_binary(
self, binary, opstring, eager_grouping=False, **kw
):
_in_binary = kw.get("_in_binary", False)
kw["_in_binary"] = True
text = (
binary.left._compiler_dispatch(
self, eager_grouping=eager_grouping, **kw
)
+ opstring
+ binary.right._compiler_dispatch(
self, eager_grouping=eager_grouping, **kw
)
)
if _in_binary and eager_grouping:
text = "(%s)" % text
return text
def _generate_generic_unary_operator(self, unary, opstring, **kw):
return opstring + unary.element._compiler_dispatch(self, **kw)
def _generate_generic_unary_modifier(self, unary, opstring, **kw):
return unary.element._compiler_dispatch(self, **kw) + opstring
@util.memoized_property
def _like_percent_literal(self):
return elements.literal_column("'%'", type_=sqltypes.STRINGTYPE)
def visit_contains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notcontains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_startswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(binary.right)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notstartswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(binary.right)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_endswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notendswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_notlike_op_binary(binary, operator, **kw)
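# Illustration: the contains/startswith/endswith visitors above rewrite the
# comparison into a LIKE against a concatenation with the '%' literal, so
# col.startswith("ab") compiles roughly to
#     "col LIKE :param_1 || '%'"
# (with "ab" passed as the bind value), and col.contains("ab") to
#     "col LIKE '%' || :param_1 || '%'".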
def visit_like_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
# TODO: use ternary here, not "and"/ "or"
return "%s LIKE %s" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape
else ""
)
def visit_notlike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return "%s NOT LIKE %s" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape
else ""
)
def visit_ilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return "lower(%s) LIKE lower(%s)" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape
else ""
)
def visit_notilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return "lower(%s) NOT LIKE lower(%s)" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape
else ""
)
def visit_between_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary, " BETWEEN SYMMETRIC " if symmetric else " BETWEEN ", **kw
)
def visit_notbetween_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary,
" NOT BETWEEN SYMMETRIC " if symmetric else " NOT BETWEEN ",
**kw
)
def visit_bindparam(
self,
bindparam,
within_columns_clause=False,
literal_binds=False,
skip_bind_expression=False,
**kwargs
):
if not skip_bind_expression:
impl = bindparam.type.dialect_impl(self.dialect)
if impl._has_bind_expression:
bind_expression = impl.bind_expression(bindparam)
return self.process(
bind_expression,
skip_bind_expression=True,
within_columns_clause=within_columns_clause,
literal_binds=literal_binds,
**kwargs
)
if literal_binds or (within_columns_clause and self.ansi_bind_rules):
if bindparam.value is None and bindparam.callable is None:
raise exc.CompileError(
"Bind parameter '%s' without a "
"renderable value not allowed here." % bindparam.key
)
return self.render_literal_bindparam(
bindparam, within_columns_clause=True, **kwargs
)
name = self._truncate_bindparam(bindparam)
if name in self.binds:
existing = self.binds[name]
if existing is not bindparam:
if (
existing.unique or bindparam.unique
) and not existing.proxy_set.intersection(bindparam.proxy_set):
raise exc.CompileError(
"Bind parameter '%s' conflicts with "
"unique bind parameter of the same name"
% bindparam.key
)
elif existing._is_crud or bindparam._is_crud:
raise exc.CompileError(
"bindparam() name '%s' is reserved "
"for automatic usage in the VALUES or SET "
"clause of this "
"insert/update statement. Please use a "
"name other than column name when using bindparam() "
"with insert() or update() (for example, 'b_%s')."
% (bindparam.key, bindparam.key)
)
self.binds[bindparam.key] = self.binds[name] = bindparam
return self.bindparam_string(
name, expanding=bindparam.expanding, **kwargs
)
def render_literal_bindparam(self, bindparam, **kw):
value = bindparam.effective_value
return self.render_literal_value(value, bindparam.type)
def render_literal_value(self, value, type_):
"""Render the value of a bind parameter as a quoted literal.
This is used for statement sections that do not accept bind parameters
on the target driver/database.
This should be implemented by subclasses using the quoting services
of the DBAPI.
"""
processor = type_._cached_literal_processor(self.dialect)
if processor:
return processor(value)
else:
raise NotImplementedError(
"Don't know how to literal-quote value %r" % value
)
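# Illustration (assuming the default String literal processor):
# render_literal_value("O'Reilly", String()) produces the quoted SQL literal
# "'O''Reilly'"; a type without a literal processor raises NotImplementedError,
# which is why literal_binds compilation fails for such types.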
def _truncate_bindparam(self, bindparam):
if bindparam in self.bind_names:
return self.bind_names[bindparam]
bind_name = bindparam.key
if isinstance(bind_name, elements._truncated_label):
bind_name = self._truncated_identifier("bindparam", bind_name)
# add to bind_names for translation
self.bind_names[bindparam] = bind_name
return bind_name
def _truncated_identifier(self, ident_class, name):
if (ident_class, name) in self.truncated_names:
return self.truncated_names[(ident_class, name)]
anonname = name.apply_map(self.anon_map)
if len(anonname) > self.label_length - 6:
counter = self.truncated_names.get(ident_class, 1)
truncname = (
anonname[0 : max(self.label_length - 6, 0)]
+ "_"
+ hex(counter)[2:]
)
self.truncated_names[ident_class] = counter + 1
else:
truncname = anonname
self.truncated_names[(ident_class, name)] = truncname
return truncname
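# Illustration: with label_length == 30, an anonymized name longer than 24
# characters (label_length - 6) is cut down to its first 24 characters plus "_"
# and an incrementing per-ident-class hex counter, so two distinct long labels
# might come out as "<first 24 chars>_1" and "<first 24 chars>_2".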
def _anonymize(self, name):
return name % self.anon_map
def bindparam_string(
self, name, positional_names=None, expanding=False, **kw
):
if self.positional:
if positional_names is not None:
positional_names.append(name)
else:
self.positiontup.append(name)
if expanding:
self.contains_expanding_parameters = True
return "([EXPANDING_%s])" % name
else:
return self.bindtemplate % {"name": name}
def visit_cte(
self,
cte,
asfrom=False,
ashint=False,
fromhints=None,
visiting_cte=None,
**kwargs
):
self._init_cte_state()
kwargs["visiting_cte"] = cte
if isinstance(cte.name, elements._truncated_label):
cte_name = self._truncated_identifier("alias", cte.name)
else:
cte_name = cte.name
is_new_cte = True
embedded_in_current_named_cte = False
if cte_name in self.ctes_by_name:
existing_cte = self.ctes_by_name[cte_name]
embedded_in_current_named_cte = visiting_cte is existing_cte
# we've generated a same-named CTE that we are enclosed in,
# or this is the same CTE. just return the name.
if cte in existing_cte._restates or cte is existing_cte:
is_new_cte = False
elif existing_cte in cte._restates:
# we've generated a same-named CTE that is
# enclosed in us - we take precedence, so
# discard the text for the "inner".
del self.ctes[existing_cte]
else:
raise exc.CompileError(
"Multiple, unrelated CTEs found with "
"the same name: %r" % cte_name
)
if asfrom or is_new_cte:
if cte._cte_alias is not None:
pre_alias_cte = cte._cte_alias
cte_pre_alias_name = cte._cte_alias.name
if isinstance(cte_pre_alias_name, elements._truncated_label):
cte_pre_alias_name = self._truncated_identifier(
"alias", cte_pre_alias_name
)
else:
pre_alias_cte = cte
cte_pre_alias_name = None
if is_new_cte:
self.ctes_by_name[cte_name] = cte
# look for embedded DML ctes and propagate autocommit
if (
"autocommit" in cte.element._execution_options
and "autocommit" not in self.execution_options
):
self.execution_options = self.execution_options.union(
{
"autocommit": cte.element._execution_options[
"autocommit"
]
}
)
if pre_alias_cte not in self.ctes:
self.visit_cte(pre_alias_cte, **kwargs)
if not cte_pre_alias_name and cte not in self.ctes:
if cte.recursive:
self.ctes_recursive = True
text = self.preparer.format_alias(cte, cte_name)
if cte.recursive:
if isinstance(cte.original, selectable.Select):
col_source = cte.original
elif isinstance(cte.original, selectable.CompoundSelect):
col_source = cte.original.selects[0]
else:
assert False
recur_cols = [
c
for c in util.unique_list(col_source.inner_columns)
if c is not None
]
text += "(%s)" % (
", ".join(
self.preparer.format_column(ident)
for ident in recur_cols
)
)
if self.positional:
kwargs["positional_names"] = self.cte_positional[cte] = []
text += " AS %s\n%s" % (
self._generate_prefixes(cte, cte._prefixes, **kwargs),
cte.original._compiler_dispatch(
self, asfrom=True, **kwargs
),
)
if cte._suffixes:
text += " " + self._generate_prefixes(
cte, cte._suffixes, **kwargs
)
self.ctes[cte] = text
if asfrom:
if not is_new_cte and embedded_in_current_named_cte:
return self.preparer.format_alias(cte, cte_name)
if cte_pre_alias_name:
text = self.preparer.format_alias(cte, cte_pre_alias_name)
if self.preparer._requires_quotes(cte_name):
cte_name = self.preparer.quote(cte_name)
text += self.get_render_as_alias_suffix(cte_name)
return text
else:
return self.preparer.format_alias(cte, cte_name)
def visit_alias(
self,
alias,
asfrom=False,
ashint=False,
iscrud=False,
fromhints=None,
**kwargs
):
if asfrom or ashint:
if isinstance(alias.name, elements._truncated_label):
alias_name = self._truncated_identifier("alias", alias.name)
else:
alias_name = alias.name
if ashint:
return self.preparer.format_alias(alias, alias_name)
elif asfrom:
ret = alias.original._compiler_dispatch(
self, asfrom=True, **kwargs
) + self.get_render_as_alias_suffix(
self.preparer.format_alias(alias, alias_name)
)
if fromhints and alias in fromhints:
ret = self.format_from_hint_text(
ret, alias, fromhints[alias], iscrud
)
return ret
else:
return alias.original._compiler_dispatch(self, **kwargs)
def visit_lateral(self, lateral, **kw):
kw["lateral"] = True
return "LATERAL %s" % self.visit_alias(lateral, **kw)
def visit_tablesample(self, tablesample, asfrom=False, **kw):
text = "%s TABLESAMPLE %s" % (
self.visit_alias(tablesample, asfrom=True, **kw),
tablesample._get_method()._compiler_dispatch(self, **kw),
)
if tablesample.seed is not None:
text += " REPEATABLE (%s)" % (
tablesample.seed._compiler_dispatch(self, **kw)
)
return text
def get_render_as_alias_suffix(self, alias_name_text):
return " AS " + alias_name_text
def _add_to_result_map(self, keyname, name, objects, type_):
self._result_columns.append((keyname, name, objects, type_))
def _label_select_column(
self,
select,
column,
populate_result_map,
asfrom,
column_clause_args,
name=None,
within_columns_clause=True,
need_column_expressions=False,
):
"""produce labeled columns present in a select()."""
impl = column.type.dialect_impl(self.dialect)
if impl._has_column_expression and (
need_column_expressions or populate_result_map
):
col_expr = impl.column_expression(column)
if populate_result_map:
def add_to_result_map(keyname, name, objects, type_):
self._add_to_result_map(
keyname, name, (column,) + objects, type_
)
else:
add_to_result_map = None
else:
col_expr = column
if populate_result_map:
add_to_result_map = self._add_to_result_map
else:
add_to_result_map = None
if not within_columns_clause:
result_expr = col_expr
elif isinstance(column, elements.Label):
if col_expr is not column:
result_expr = _CompileLabel(
col_expr, column.name, alt_names=(column.element,)
)
else:
result_expr = col_expr
elif select is not None and name:
result_expr = _CompileLabel(
col_expr, name, alt_names=(column._key_label,)
)
elif (
asfrom
and isinstance(column, elements.ColumnClause)
and not column.is_literal
and column.table is not None
and not isinstance(column.table, selectable.Select)
):
result_expr = _CompileLabel(
col_expr,
elements._as_truncated(column.name),
alt_names=(column.key,),
)
elif (
not isinstance(column, elements.TextClause)
and (
not isinstance(column, elements.UnaryExpression)
or column.wraps_column_expression
)
and (
not hasattr(column, "name")
or isinstance(column, functions.Function)
)
):
result_expr = _CompileLabel(col_expr, column.anon_label)
elif col_expr is not column:
# TODO: are we sure "column" has a .name and .key here ?
# assert isinstance(column, elements.ColumnClause)
result_expr = _CompileLabel(
col_expr,
elements._as_truncated(column.name),
alt_names=(column.key,),
)
else:
result_expr = col_expr
column_clause_args.update(
within_columns_clause=within_columns_clause,
add_to_result_map=add_to_result_map,
)
return result_expr._compiler_dispatch(self, **column_clause_args)
def format_from_hint_text(self, sqltext, table, hint, iscrud):
hinttext = self.get_from_hint_text(table, hint)
if hinttext:
sqltext += " " + hinttext
return sqltext
def get_select_hint_text(self, byfroms):
return None
def get_from_hint_text(self, table, text):
return None
def get_crud_hint_text(self, table, text):
return None
def get_statement_hint_text(self, hint_texts):
return " ".join(hint_texts)
def _transform_select_for_nested_joins(self, select):
"""Rewrite any "a JOIN (b JOIN c)" expression as
"a JOIN (select * from b JOIN c) AS anon", to support
databases that can't parse a parenthesized join correctly
(i.e. sqlite < 3.7.16).
"""
cloned = {}
column_translate = [{}]
def visit(element, **kw):
if element in column_translate[-1]:
return column_translate[-1][element]
elif element in cloned:
return cloned[element]
newelem = cloned[element] = element._clone()
if (
newelem.is_selectable
and newelem._is_join
and isinstance(newelem.right, selectable.FromGrouping)
):
newelem._reset_exported()
newelem.left = visit(newelem.left, **kw)
right = visit(newelem.right, **kw)
selectable_ = selectable.Select(
[right.element], use_labels=True
).alias()
for c in selectable_.c:
c._key_label = c.key
c._label = c.name
translate_dict = dict(
zip(newelem.right.element.c, selectable_.c)
)
# translating from both the old and the new
# because different select() structures will lead us
# to traverse differently
translate_dict[right.element.left] = selectable_
translate_dict[right.element.right] = selectable_
translate_dict[newelem.right.element.left] = selectable_
translate_dict[newelem.right.element.right] = selectable_
# propagate translations that we've gained
# from nested visit(newelem.right) outwards
# to the enclosing select here. this happens
# only when we have more than one level of right
# join nesting, i.e. "a JOIN (b JOIN (c JOIN d))"
for k, v in list(column_translate[-1].items()):
if v in translate_dict:
# remarkably, no current ORM tests (May 2013)
# hit this condition, only test_join_rewriting
# does.
column_translate[-1][k] = translate_dict[v]
column_translate[-1].update(translate_dict)
newelem.right = selectable_
newelem.onclause = visit(newelem.onclause, **kw)
elif newelem._is_from_container:
# if we hit an Alias, CompoundSelect or ScalarSelect, put a
# marker in the stack.
kw["transform_clue"] = "select_container"
newelem._copy_internals(clone=visit, **kw)
elif newelem.is_selectable and newelem._is_select:
barrier_select = (
kw.get("transform_clue", None) == "select_container"
)
# if we're still descended from an
# Alias/CompoundSelect/ScalarSelect, we're
# in a FROM clause, so start with a new translate collection
if barrier_select:
column_translate.append({})
kw["transform_clue"] = "inside_select"
newelem._copy_internals(clone=visit, **kw)
if barrier_select:
del column_translate[-1]
else:
newelem._copy_internals(clone=visit, **kw)
return newelem
return visit(select)
def _transform_result_map_for_nested_joins(
self, select, transformed_select
):
inner_col = dict(
(c._key_label, c) for c in transformed_select.inner_columns
)
d = dict((inner_col[c._key_label], c) for c in select.inner_columns)
self._result_columns = [
(key, name, tuple([d.get(col, col) for col in objs]), typ)
for key, name, objs, typ in self._result_columns
]
_default_stack_entry = util.immutabledict(
[("correlate_froms", frozenset()), ("asfrom_froms", frozenset())]
)
def _display_froms_for_select(self, select, asfrom, lateral=False):
# utility method to help external dialects
# get the correct from list for a select.
# specifically the oracle dialect needs this feature
# right now.
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
correlate_froms = entry["correlate_froms"]
asfrom_froms = entry["asfrom_froms"]
if asfrom and not lateral:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms.difference(
asfrom_froms
),
implicit_correlate_froms=(),
)
else:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms,
implicit_correlate_froms=asfrom_froms,
)
return froms
def visit_select(
self,
select,
asfrom=False,
parens=True,
fromhints=None,
compound_index=0,
nested_join_translation=False,
select_wraps_for=None,
lateral=False,
**kwargs
):
needs_nested_translation = (
select.use_labels
and not nested_join_translation
and not self.stack
and not self.dialect.supports_right_nested_joins
)
if needs_nested_translation:
transformed_select = self._transform_select_for_nested_joins(
select
)
text = self.visit_select(
transformed_select,
asfrom=asfrom,
parens=parens,
fromhints=fromhints,
compound_index=compound_index,
nested_join_translation=True,
**kwargs
)
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = need_column_expressions = (
toplevel
or entry.get("need_result_map_for_compound", False)
or entry.get("need_result_map_for_nested", False)
)
if compound_index > 0:
populate_result_map = False
# this was first proposed as part of #3372; however, it is not
# reached in current tests and could possibly be an assertion
# instead.
if not populate_result_map and "add_to_result_map" in kwargs:
del kwargs["add_to_result_map"]
if needs_nested_translation:
if populate_result_map:
self._transform_result_map_for_nested_joins(
select, transformed_select
)
return text
froms = self._setup_select_stack(select, entry, asfrom, lateral)
column_clause_args = kwargs.copy()
column_clause_args.update(
{"within_label_clause": False, "within_columns_clause": False}
)
text = "SELECT " # we're off to a good start !
if select._hints:
hint_text, byfrom = self._setup_select_hints(select)
if hint_text:
text += hint_text + " "
else:
byfrom = None
if select._prefixes:
text += self._generate_prefixes(select, select._prefixes, **kwargs)
text += self.get_select_precolumns(select, **kwargs)
# the actual list of columns to print in the SELECT column list.
inner_columns = [
c
for c in [
self._label_select_column(
select,
column,
populate_result_map,
asfrom,
column_clause_args,
name=name,
need_column_expressions=need_column_expressions,
)
for name, column in select._columns_plus_names
]
if c is not None
]
if populate_result_map and select_wraps_for is not None:
# if this select is a compiler-generated wrapper,
# rewrite the targeted columns in the result map
translate = dict(
zip(
[name for (key, name) in select._columns_plus_names],
[
name
for (key, name) in select_wraps_for._columns_plus_names
],
)
)
self._result_columns = [
(key, name, tuple(translate.get(o, o) for o in obj), type_)
for key, name, obj, type_ in self._result_columns
]
text = self._compose_select_body(
text, select, inner_columns, froms, byfrom, kwargs
)
if select._statement_hints:
per_dialect = [
ht
for (dialect_name, ht) in select._statement_hints
if dialect_name in ("*", self.dialect.name)
]
if per_dialect:
text += " " + self.get_statement_hint_text(per_dialect)
if self.ctes and toplevel:
text = self._render_cte_clause() + text
if select._suffixes:
text += " " + self._generate_prefixes(
select, select._suffixes, **kwargs
)
self.stack.pop(-1)
if (asfrom or lateral) and parens:
return "(" + text + ")"
else:
return text
def _setup_select_hints(self, select):
byfrom = dict(
[
(
from_,
hinttext
% {"name": from_._compiler_dispatch(self, ashint=True)},
)
for (from_, dialect), hinttext in select._hints.items()
if dialect in ("*", self.dialect.name)
]
)
hint_text = self.get_select_hint_text(byfrom)
return hint_text, byfrom
def _setup_select_stack(self, select, entry, asfrom, lateral):
correlate_froms = entry["correlate_froms"]
asfrom_froms = entry["asfrom_froms"]
if asfrom and not lateral:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms.difference(
asfrom_froms
),
implicit_correlate_froms=(),
)
else:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms,
implicit_correlate_froms=asfrom_froms,
)
new_correlate_froms = set(selectable._from_objects(*froms))
all_correlate_froms = new_correlate_froms.union(correlate_froms)
new_entry = {
"asfrom_froms": new_correlate_froms,
"correlate_froms": all_correlate_froms,
"selectable": select,
}
self.stack.append(new_entry)
return froms
def _compose_select_body(
self, text, select, inner_columns, froms, byfrom, kwargs
):
text += ", ".join(inner_columns)
if froms:
text += " \nFROM "
if select._hints:
text += ", ".join(
[
f._compiler_dispatch(
self, asfrom=True, fromhints=byfrom, **kwargs
)
for f in froms
]
)
else:
text += ", ".join(
[
f._compiler_dispatch(self, asfrom=True, **kwargs)
for f in froms
]
)
else:
text += self.default_from()
if select._whereclause is not None:
t = select._whereclause._compiler_dispatch(self, **kwargs)
if t:
text += " \nWHERE " + t
if select._group_by_clause.clauses:
text += self.group_by_clause(select, **kwargs)
if select._having is not None:
t = select._having._compiler_dispatch(self, **kwargs)
if t:
text += " \nHAVING " + t
if select._order_by_clause.clauses:
text += self.order_by_clause(select, **kwargs)
if (
select._limit_clause is not None
or select._offset_clause is not None
):
text += self.limit_clause(select, **kwargs)
if select._for_update_arg is not None:
text += self.for_update_clause(select, **kwargs)
return text
def _generate_prefixes(self, stmt, prefixes, **kw):
clause = " ".join(
prefix._compiler_dispatch(self, **kw)
for prefix, dialect_name in prefixes
if dialect_name is None or dialect_name == self.dialect.name
)
if clause:
clause += " "
return clause
def _render_cte_clause(self):
if self.positional:
self.positiontup = (
sum([self.cte_positional[cte] for cte in self.ctes], [])
+ self.positiontup
)
cte_text = self.get_cte_preamble(self.ctes_recursive) + " "
cte_text += ", \n".join([txt for txt in self.ctes.values()])
cte_text += "\n "
return cte_text
def get_cte_preamble(self, recursive):
if recursive:
return "WITH RECURSIVE"
else:
return "WITH"
def get_select_precolumns(self, select, **kw):
"""Called when building a ``SELECT`` statement, position is just
before column list.
"""
return select._distinct and "DISTINCT " or ""
def group_by_clause(self, select, **kw):
"""allow dialects to customize how GROUP BY is rendered."""
group_by = select._group_by_clause._compiler_dispatch(self, **kw)
if group_by:
return " GROUP BY " + group_by
else:
return ""
def order_by_clause(self, select, **kw):
"""allow dialects to customize how ORDER BY is rendered."""
order_by = select._order_by_clause._compiler_dispatch(self, **kw)
if order_by:
return " ORDER BY " + order_by
else:
return ""
def for_update_clause(self, select, **kw):
return " FOR UPDATE"
def returning_clause(self, stmt, returning_cols):
raise exc.CompileError(
"RETURNING is not supported by this "
"dialect's statement compiler."
)
def limit_clause(self, select, **kw):
text = ""
if select._limit_clause is not None:
text += "\n LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
text += "\n LIMIT -1"
text += " OFFSET " + self.process(select._offset_clause, **kw)
return text
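# Illustration: this default limit_clause() follows the SQLite-style rule that
# OFFSET requires a preceding LIMIT, so an offset-only select renders roughly as
# "... LIMIT -1 OFFSET :param_1" (the offset value renders as a bind parameter);
# dialects with their own syntax (LIMIT ALL, TOP, FETCH FIRST, etc.) override
# this method.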
def visit_table(
self,
table,
asfrom=False,
iscrud=False,
ashint=False,
fromhints=None,
use_schema=True,
**kwargs
):
if asfrom or ashint:
effective_schema = self.preparer.schema_for_object(table)
if use_schema and effective_schema:
ret = (
self.preparer.quote_schema(effective_schema)
+ "."
+ self.preparer.quote(table.name)
)
else:
ret = self.preparer.quote(table.name)
if fromhints and table in fromhints:
ret = self.format_from_hint_text(
ret, table, fromhints[table], iscrud
)
return ret
else:
return ""
def visit_join(self, join, asfrom=False, **kwargs):
if join.full:
join_type = " FULL OUTER JOIN "
elif join.isouter:
join_type = " LEFT OUTER JOIN "
else:
join_type = " JOIN "
return (
join.left._compiler_dispatch(self, asfrom=True, **kwargs)
+ join_type
+ join.right._compiler_dispatch(self, asfrom=True, **kwargs)
+ " ON "
+ join.onclause._compiler_dispatch(self, **kwargs)
)
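# Illustration: join(a, b, a.c.id == b.c.a_id) renders as
# "a JOIN b ON a.id = b.a_id"; outerjoin() selects " LEFT OUTER JOIN " and
# full=True selects " FULL OUTER JOIN ", exactly per the branching above.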
def _setup_crud_hints(self, stmt, table_text):
dialect_hints = dict(
[
(table, hint_text)
for (table, dialect), hint_text in stmt._hints.items()
if dialect in ("*", self.dialect.name)
]
)
if stmt.table in dialect_hints:
table_text = self.format_from_hint_text(
table_text, stmt.table, dialect_hints[stmt.table], True
)
return dialect_hints, table_text
def visit_insert(self, insert_stmt, asfrom=False, **kw):
toplevel = not self.stack
self.stack.append(
{
"correlate_froms": set(),
"asfrom_froms": set(),
"selectable": insert_stmt,
}
)
crud_params = crud._setup_crud_params(
self, insert_stmt, crud.ISINSERT, **kw
)
if (
not crud_params
and not self.dialect.supports_default_values
and not self.dialect.supports_empty_insert
):
raise exc.CompileError(
"The '%s' dialect with current database "
"version settings does not support empty "
"inserts." % self.dialect.name
)
if insert_stmt._has_multi_parameters:
if not self.dialect.supports_multivalues_insert:
raise exc.CompileError(
"The '%s' dialect with current database "
"version settings does not support "
"in-place multirow inserts." % self.dialect.name
)
crud_params_single = crud_params[0]
else:
crud_params_single = crud_params
preparer = self.preparer
supports_default_values = self.dialect.supports_default_values
text = "INSERT "
if insert_stmt._prefixes:
text += self._generate_prefixes(
insert_stmt, insert_stmt._prefixes, **kw
)
text += "INTO "
table_text = preparer.format_table(insert_stmt.table)
if insert_stmt._hints:
_, table_text = self._setup_crud_hints(insert_stmt, table_text)
text += table_text
if crud_params_single or not supports_default_values:
text += " (%s)" % ", ".join(
[preparer.format_column(c[0]) for c in crud_params_single]
)
if self.returning or insert_stmt._returning:
returning_clause = self.returning_clause(
insert_stmt, self.returning or insert_stmt._returning
)
if self.returning_precedes_values:
text += " " + returning_clause
else:
returning_clause = None
if insert_stmt.select is not None:
select_text = self.process(self._insert_from_select, **kw)
if self.ctes and toplevel and self.dialect.cte_follows_insert:
text += " %s%s" % (self._render_cte_clause(), select_text)
else:
text += " %s" % select_text
elif not crud_params and supports_default_values:
text += " DEFAULT VALUES"
elif insert_stmt._has_multi_parameters:
text += " VALUES %s" % (
", ".join(
"(%s)" % (", ".join(c[1] for c in crud_param_set))
for crud_param_set in crud_params
)
)
else:
insert_single_values_expr = ", ".join([c[1] for c in crud_params])
text += " VALUES (%s)" % insert_single_values_expr
if toplevel:
self.insert_single_values_expr = insert_single_values_expr
if insert_stmt._post_values_clause is not None:
post_values_clause = self.process(
insert_stmt._post_values_clause, **kw
)
if post_values_clause:
text += " " + post_values_clause
if returning_clause and not self.returning_precedes_values:
text += " " + returning_clause
if self.ctes and toplevel and not self.dialect.cte_follows_insert:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom:
return "(" + text + ")"
else:
return text
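# Illustration (a sketch; the batching hook named here is an example, not the
# only consumer): for a single-row INSERT compiled at the top level, e.g.
# "INSERT INTO t (name, value) VALUES (:name, :value)", the VALUES fragment
# ":name, :value" is also stored in self.insert_single_values_expr, which
# driver-level batching schemes (such as the psycopg2 dialect's
# executemany_mode="values" handling) can use to rewrite the statement into a
# multi-row VALUES list.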
def update_limit_clause(self, update_stmt):
"""Provide a hook for MySQL to add LIMIT to the UPDATE"""
return None
def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw):
"""Provide a hook to override the initial table clause
in an UPDATE statement.
MySQL overrides this.
"""
kw["asfrom"] = True
return from_table._compiler_dispatch(self, iscrud=True, **kw)
def update_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
"""Provide a hook to override the generation of an
UPDATE..FROM clause.
MySQL and MSSQL override this.
"""
raise NotImplementedError(
"This backend does not support multiple-table "
"criteria within UPDATE"
)
def visit_update(self, update_stmt, asfrom=False, **kw):
toplevel = not self.stack
extra_froms = update_stmt._extra_froms
is_multitable = bool(extra_froms)
if is_multitable:
# main table might be a JOIN
main_froms = set(selectable._from_objects(update_stmt.table))
render_extra_froms = [
f for f in extra_froms if f not in main_froms
]
correlate_froms = main_froms.union(extra_froms)
else:
render_extra_froms = []
correlate_froms = {update_stmt.table}
self.stack.append(
{
"correlate_froms": correlate_froms,
"asfrom_froms": correlate_froms,
"selectable": update_stmt,
}
)
text = "UPDATE "
if update_stmt._prefixes:
text += self._generate_prefixes(
update_stmt, update_stmt._prefixes, **kw
)
table_text = self.update_tables_clause(
update_stmt, update_stmt.table, render_extra_froms, **kw
)
crud_params = crud._setup_crud_params(
self, update_stmt, crud.ISUPDATE, **kw
)
if update_stmt._hints:
dialect_hints, table_text = self._setup_crud_hints(
update_stmt, table_text
)
else:
dialect_hints = None
text += table_text
text += " SET "
include_table = (
is_multitable and self.render_table_with_column_in_update_from
)
text += ", ".join(
c[0]._compiler_dispatch(self, include_table=include_table)
+ "="
+ c[1]
for c in crud_params
)
if self.returning or update_stmt._returning:
if self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning or update_stmt._returning
)
if extra_froms:
extra_from_text = self.update_from_clause(
update_stmt,
update_stmt.table,
render_extra_froms,
dialect_hints,
**kw
)
if extra_from_text:
text += " " + extra_from_text
if update_stmt._whereclause is not None:
t = self.process(update_stmt._whereclause, **kw)
if t:
text += " WHERE " + t
limit_clause = self.update_limit_clause(update_stmt)
if limit_clause:
text += " " + limit_clause
if (
self.returning or update_stmt._returning
) and not self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning or update_stmt._returning
)
if self.ctes and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom:
return "(" + text + ")"
else:
return text
@util.memoized_property
def _key_getters_for_crud_column(self):
return crud._key_getters_for_crud_column(self, self.statement)
def delete_extra_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
"""Provide a hook to override the generation of an
DELETE..FROM clause.
This can be used to implement DELETE..USING for example.
MySQL and MSSQL override this.
"""
raise NotImplementedError(
"This backend does not support multiple-table "
"criteria within DELETE"
)
def delete_table_clause(self, delete_stmt, from_table, extra_froms):
return from_table._compiler_dispatch(self, asfrom=True, iscrud=True)
def visit_delete(self, delete_stmt, asfrom=False, **kw):
toplevel = not self.stack
crud._setup_crud_params(self, delete_stmt, crud.ISDELETE, **kw)
extra_froms = delete_stmt._extra_froms
correlate_froms = {delete_stmt.table}.union(extra_froms)
self.stack.append(
{
"correlate_froms": correlate_froms,
"asfrom_froms": correlate_froms,
"selectable": delete_stmt,
}
)
text = "DELETE "
if delete_stmt._prefixes:
text += self._generate_prefixes(
delete_stmt, delete_stmt._prefixes, **kw
)
text += "FROM "
table_text = self.delete_table_clause(
delete_stmt, delete_stmt.table, extra_froms
)
if delete_stmt._hints:
dialect_hints, table_text = self._setup_crud_hints(
delete_stmt, table_text
)
else:
dialect_hints = None
text += table_text
if delete_stmt._returning:
if self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt, delete_stmt._returning
)
if extra_froms:
extra_from_text = self.delete_extra_from_clause(
delete_stmt,
delete_stmt.table,
extra_froms,
dialect_hints,
**kw
)
if extra_from_text:
text += " " + extra_from_text
if delete_stmt._whereclause is not None:
t = delete_stmt._whereclause._compiler_dispatch(self, **kw)
if t:
text += " WHERE " + t
if delete_stmt._returning and not self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt, delete_stmt._returning
)
if self.ctes and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom:
return "(" + text + ")"
else:
return text
def visit_savepoint(self, savepoint_stmt):
return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt)
def visit_rollback_to_savepoint(self, savepoint_stmt):
return "ROLLBACK TO SAVEPOINT %s" % self.preparer.format_savepoint(
savepoint_stmt
)
def visit_release_savepoint(self, savepoint_stmt):
return "RELEASE SAVEPOINT %s" % self.preparer.format_savepoint(
savepoint_stmt
)
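# Illustrative sketch (not part of the library): the visit_insert(),
# visit_update() and visit_delete() methods above are what produce strings
# like the following when a statement is stringified on the default
# compilation rules.  The table and column names are throwaway examples.
from sqlalchemy import column, table

_t = table("t", column("id"), column("x"))
print(str(_t.insert().values(x=5)))
# INSERT INTO t (x) VALUES (:x)
print(str(_t.update().values(x=5).where(_t.c.id == 1)))
# UPDATE t SET x=:x WHERE t.id = :id_1
print(str(_t.delete().where(_t.c.id == 1)))
# DELETE FROM t WHERE t.id = :id_1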
class StrSQLCompiler(SQLCompiler):
"""A :class:`.SQLCompiler` subclass which allows a small selection
of non-standard SQL features to render into a string value.
The :class:`.StrSQLCompiler` is invoked whenever a Core expression
element is directly stringified without calling upon the
:meth:`_expression.ClauseElement.compile` method.
It can render a limited set
of non-standard SQL constructs to assist in basic stringification,
however for more substantial custom or dialect-specific SQL constructs,
it will be necessary to make use of
:meth:`_expression.ClauseElement.compile`
directly.
.. seealso::
:ref:`faq_sql_expression_string`
"""
def _fallback_column_name(self, column):
return "<name unknown>"
def visit_getitem_binary(self, binary, operator, **kw):
return "%s[%s]" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_json_getitem_op_binary(self, binary, operator, **kw):
return self.visit_getitem_binary(binary, operator, **kw)
def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
return self.visit_getitem_binary(binary, operator, **kw)
def visit_sequence(self, seq, **kw):
return "<next sequence value: %s>" % self.preparer.format_sequence(seq)
def returning_clause(self, stmt, returning_cols):
columns = [
self._label_select_column(None, c, True, False, {})
for c in elements._select_iterables(returning_cols)
]
return "RETURNING " + ", ".join(columns)
def update_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
return "FROM " + ", ".join(
t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
for t in extra_froms
)
def delete_extra_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
return ", " + ", ".join(
t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
for t in extra_froms
)
def get_from_hint_text(self, table, text):
return "[%s]" % text
class DDLCompiler(Compiled):
@util.memoized_property
def sql_compiler(self):
return self.dialect.statement_compiler(self.dialect, None)
@util.memoized_property
def type_compiler(self):
return self.dialect.type_compiler
def construct_params(self, params=None):
return None
def visit_ddl(self, ddl, **kwargs):
# table events can substitute table and schema name
context = ddl.context
if isinstance(ddl.target, schema.Table):
context = context.copy()
preparer = self.preparer
path = preparer.format_table_seq(ddl.target)
if len(path) == 1:
table, sch = path[0], ""
else:
table, sch = path[-1], path[0]
context.setdefault("table", table)
context.setdefault("schema", sch)
context.setdefault("fullname", preparer.format_table(ddl.target))
return self.sql_compiler.post_process_text(ddl.statement % context)
def visit_create_schema(self, create):
schema = self.preparer.format_schema(create.element)
return "CREATE SCHEMA " + schema
def visit_drop_schema(self, drop):
schema = self.preparer.format_schema(drop.element)
text = "DROP SCHEMA " + schema
if drop.cascade:
text += " CASCADE"
return text
def visit_create_table(self, create):
table = create.element
preparer = self.preparer
text = "\nCREATE "
if table._prefixes:
text += " ".join(table._prefixes) + " "
text += "TABLE " + preparer.format_table(table) + " "
create_table_suffix = self.create_table_suffix(table)
if create_table_suffix:
text += create_table_suffix + " "
text += "("
separator = "\n"
# if only one primary key, specify it along with the column
first_pk = False
for create_column in create.columns:
column = create_column.element
try:
processed = self.process(
create_column, first_pk=column.primary_key and not first_pk
)
if processed is not None:
text += separator
separator = ", \n"
text += "\t" + processed
if column.primary_key:
first_pk = True
except exc.CompileError as ce:
util.raise_(
exc.CompileError(
util.u("(in table '%s', column '%s'): %s")
% (table.description, column.name, ce.args[0])
),
from_=ce,
)
const = self.create_table_constraints(
table,
_include_foreign_key_constraints=create.include_foreign_key_constraints, # noqa
)
if const:
text += separator + "\t" + const
text += "\n)%s\n\n" % self.post_create_table(table)
return text
def visit_create_column(self, create, first_pk=False):
column = create.element
if column.system:
return None
text = self.get_column_specification(column, first_pk=first_pk)
const = " ".join(
self.process(constraint) for constraint in column.constraints
)
if const:
text += " " + const
return text
def create_table_constraints(
self, table, _include_foreign_key_constraints=None
):
# On some DB order is significant: visit PK first, then the
# other constraints (engine.ReflectionTest.testbasic failed on FB2)
constraints = []
if table.primary_key:
constraints.append(table.primary_key)
all_fkcs = table.foreign_key_constraints
if _include_foreign_key_constraints is not None:
omit_fkcs = all_fkcs.difference(_include_foreign_key_constraints)
else:
omit_fkcs = set()
constraints.extend(
[
c
for c in table._sorted_constraints
if c is not table.primary_key and c not in omit_fkcs
]
)
return ", \n\t".join(
p
for p in (
self.process(constraint)
for constraint in constraints
if (
constraint._create_rule is None
or constraint._create_rule(self)
)
and (
not self.dialect.supports_alter
or not getattr(constraint, "use_alter", False)
)
)
if p is not None
)
def visit_drop_table(self, drop):
return "\nDROP TABLE " + self.preparer.format_table(drop.element)
def visit_drop_view(self, drop):
return "\nDROP VIEW " + self.preparer.format_table(drop.element)
def _verify_index_table(self, index):
if index.table is None:
raise exc.CompileError(
"Index '%s' is not associated " "with any table." % index.name
)
def visit_create_index(
self, create, include_schema=False, include_table_schema=True
):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
if index.name is None:
raise exc.CompileError(
"CREATE INDEX requires that the index have a name"
)
text += "INDEX %s ON %s (%s)" % (
self._prepared_index_name(index, include_schema=include_schema),
preparer.format_table(
index.table, use_schema=include_table_schema
),
", ".join(
self.sql_compiler.process(
expr, include_table=False, literal_binds=True
)
for expr in index.expressions
),
)
return text
def visit_drop_index(self, drop):
index = drop.element
if index.name is None:
raise exc.CompileError(
"DROP INDEX requires that the index have a name"
)
return "\nDROP INDEX " + self._prepared_index_name(
index, include_schema=True
)
def _prepared_index_name(self, index, include_schema=False):
if index.table is not None:
effective_schema = self.preparer.schema_for_object(index.table)
else:
effective_schema = None
if include_schema and effective_schema:
schema_name = self.preparer.quote_schema(effective_schema)
else:
schema_name = None
index_name = self.preparer.format_index(index)
if schema_name:
index_name = schema_name + "." + index_name
return index_name
def visit_add_constraint(self, create):
return "ALTER TABLE %s ADD %s" % (
self.preparer.format_table(create.element.table),
self.process(create.element),
)
def visit_set_table_comment(self, create):
return "COMMENT ON TABLE %s IS %s" % (
self.preparer.format_table(create.element),
self.sql_compiler.render_literal_value(
create.element.comment, sqltypes.String()
),
)
def visit_drop_table_comment(self, drop):
return "COMMENT ON TABLE %s IS NULL" % self.preparer.format_table(
drop.element
)
def visit_set_column_comment(self, create):
return "COMMENT ON COLUMN %s IS %s" % (
self.preparer.format_column(
create.element, use_table=True, use_schema=True
),
self.sql_compiler.render_literal_value(
create.element.comment, sqltypes.String()
),
)
def visit_drop_column_comment(self, drop):
return "COMMENT ON COLUMN %s IS NULL" % self.preparer.format_column(
drop.element, use_table=True
)
def visit_create_sequence(self, create):
text = "CREATE SEQUENCE %s" % self.preparer.format_sequence(
create.element
)
if create.element.increment is not None:
text += " INCREMENT BY %d" % create.element.increment
if create.element.start is not None:
text += " START WITH %d" % create.element.start
if create.element.minvalue is not None:
text += " MINVALUE %d" % create.element.minvalue
if create.element.maxvalue is not None:
text += " MAXVALUE %d" % create.element.maxvalue
if create.element.nominvalue is not None:
text += " NO MINVALUE"
if create.element.nomaxvalue is not None:
text += " NO MAXVALUE"
if create.element.cache is not None:
text += " CACHE %d" % create.element.cache
if create.element.order is True:
text += " ORDER"
if create.element.cycle is not None:
text += " CYCLE"
return text
def visit_drop_sequence(self, drop):
return "DROP SEQUENCE %s" % self.preparer.format_sequence(drop.element)
def visit_drop_constraint(self, drop):
constraint = drop.element
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
else:
formatted_name = None
if formatted_name is None:
raise exc.CompileError(
"Can't emit DROP CONSTRAINT for constraint %r; "
"it has no name" % drop.element
)
return "ALTER TABLE %s DROP CONSTRAINT %s%s" % (
self.preparer.format_table(drop.element.table),
formatted_name,
drop.cascade and " CASCADE" or "",
)
def get_column_specification(self, column, **kwargs):
colspec = (
self.preparer.format_column(column)
+ " "
+ self.dialect.type_compiler.process(
column.type, type_expression=column
)
)
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if column.computed is not None:
colspec += " " + self.process(column.computed)
if not column.nullable:
colspec += " NOT NULL"
return colspec
def create_table_suffix(self, table):
return ""
def post_create_table(self, table):
return ""
def get_column_default_string(self, column):
if isinstance(column.server_default, schema.DefaultClause):
if isinstance(column.server_default.arg, util.string_types):
return self.sql_compiler.render_literal_value(
column.server_default.arg, sqltypes.STRINGTYPE
)
else:
return self.sql_compiler.process(
column.server_default.arg, literal_binds=True
)
else:
return None
def visit_check_constraint(self, constraint):
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "CHECK (%s)" % self.sql_compiler.process(
constraint.sqltext, include_table=False, literal_binds=True
)
text += self.define_constraint_deferrability(constraint)
return text
def visit_column_check_constraint(self, constraint):
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "CHECK (%s)" % self.sql_compiler.process(
constraint.sqltext, include_table=False, literal_binds=True
)
text += self.define_constraint_deferrability(constraint)
return text
def visit_primary_key_constraint(self, constraint):
if len(constraint) == 0:
return ""
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "PRIMARY KEY "
text += "(%s)" % ", ".join(
self.preparer.quote(c.name)
for c in (
constraint.columns_autoinc_first
if constraint._implicit_generated
else constraint.columns
)
)
text += self.define_constraint_deferrability(constraint)
return text
def visit_foreign_key_constraint(self, constraint):
preparer = self.preparer
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
remote_table = list(constraint.elements)[0].column.table
text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % (
", ".join(
preparer.quote(f.parent.name) for f in constraint.elements
),
self.define_constraint_remote_table(
constraint, remote_table, preparer
),
", ".join(
preparer.quote(f.column.name) for f in constraint.elements
),
)
text += self.define_constraint_match(constraint)
text += self.define_constraint_cascades(constraint)
text += self.define_constraint_deferrability(constraint)
return text
def define_constraint_remote_table(self, constraint, table, preparer):
"""Format the remote table clause of a CREATE CONSTRAINT clause."""
return preparer.format_table(table)
def visit_unique_constraint(self, constraint):
if len(constraint) == 0:
return ""
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "UNIQUE (%s)" % (
", ".join(self.preparer.quote(c.name) for c in constraint)
)
text += self.define_constraint_deferrability(constraint)
return text
def define_constraint_cascades(self, constraint):
text = ""
if constraint.ondelete is not None:
text += " ON DELETE %s" % self.preparer.validate_sql_phrase(
constraint.ondelete, FK_ON_DELETE
)
if constraint.onupdate is not None:
text += " ON UPDATE %s" % self.preparer.validate_sql_phrase(
constraint.onupdate, FK_ON_UPDATE
)
return text
def define_constraint_deferrability(self, constraint):
text = ""
if constraint.deferrable is not None:
if constraint.deferrable:
text += " DEFERRABLE"
else:
text += " NOT DEFERRABLE"
if constraint.initially is not None:
text += " INITIALLY %s" % self.preparer.validate_sql_phrase(
constraint.initially, FK_INITIALLY
)
return text
def define_constraint_match(self, constraint):
text = ""
if constraint.match is not None:
text += " MATCH %s" % constraint.match
return text
def visit_computed_column(self, generated):
text = "GENERATED ALWAYS AS (%s)" % self.sql_compiler.process(
generated.sqltext, include_table=False, literal_binds=True
)
if generated.persisted is True:
text += " STORED"
elif generated.persisted is False:
text += " VIRTUAL"
return text
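# Hedged usage sketch: the DDLCompiler visit_* methods above run when DDL
# constructs are stringified; the MetaData/Table here are throwaway
# examples and the shown whitespace is approximate.
from sqlalchemy import Column, Index, Integer, MetaData, String, Table
from sqlalchemy.schema import CreateIndex, CreateTable

_meta = MetaData()
_user = Table(
    "user",
    _meta,
    Column("id", Integer, primary_key=True),
    Column("name", String(50)),
)
print(CreateTable(_user))
# CREATE TABLE "user" (
#     id INTEGER NOT NULL,
#     name VARCHAR(50),
#     PRIMARY KEY (id)
# )
print(CreateIndex(Index("ix_user_name", _user.c.name)))
# CREATE INDEX ix_user_name ON "user" (name)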
class GenericTypeCompiler(TypeCompiler):
def visit_FLOAT(self, type_, **kw):
return "FLOAT"
def visit_REAL(self, type_, **kw):
return "REAL"
def visit_NUMERIC(self, type_, **kw):
if type_.precision is None:
return "NUMERIC"
elif type_.scale is None:
return "NUMERIC(%(precision)s)" % {"precision": type_.precision}
else:
return "NUMERIC(%(precision)s, %(scale)s)" % {
"precision": type_.precision,
"scale": type_.scale,
}
def visit_DECIMAL(self, type_, **kw):
if type_.precision is None:
return "DECIMAL"
elif type_.scale is None:
return "DECIMAL(%(precision)s)" % {"precision": type_.precision}
else:
return "DECIMAL(%(precision)s, %(scale)s)" % {
"precision": type_.precision,
"scale": type_.scale,
}
def visit_INTEGER(self, type_, **kw):
return "INTEGER"
def visit_SMALLINT(self, type_, **kw):
return "SMALLINT"
def visit_BIGINT(self, type_, **kw):
return "BIGINT"
def visit_TIMESTAMP(self, type_, **kw):
return "TIMESTAMP"
def visit_DATETIME(self, type_, **kw):
return "DATETIME"
def visit_DATE(self, type_, **kw):
return "DATE"
def visit_TIME(self, type_, **kw):
return "TIME"
def visit_CLOB(self, type_, **kw):
return "CLOB"
def visit_NCLOB(self, type_, **kw):
return "NCLOB"
def _render_string_type(self, type_, name):
text = name
if type_.length:
text += "(%d)" % type_.length
if type_.collation:
text += ' COLLATE "%s"' % type_.collation
return text
def visit_CHAR(self, type_, **kw):
return self._render_string_type(type_, "CHAR")
def visit_NCHAR(self, type_, **kw):
return self._render_string_type(type_, "NCHAR")
def visit_VARCHAR(self, type_, **kw):
return self._render_string_type(type_, "VARCHAR")
def visit_NVARCHAR(self, type_, **kw):
return self._render_string_type(type_, "NVARCHAR")
def visit_TEXT(self, type_, **kw):
return self._render_string_type(type_, "TEXT")
def visit_BLOB(self, type_, **kw):
return "BLOB"
def visit_BINARY(self, type_, **kw):
return "BINARY" + (type_.length and "(%d)" % type_.length or "")
def visit_VARBINARY(self, type_, **kw):
return "VARBINARY" + (type_.length and "(%d)" % type_.length or "")
def visit_BOOLEAN(self, type_, **kw):
return "BOOLEAN"
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_, **kw)
def visit_boolean(self, type_, **kw):
return self.visit_BOOLEAN(type_, **kw)
def visit_time(self, type_, **kw):
return self.visit_TIME(type_, **kw)
def visit_datetime(self, type_, **kw):
return self.visit_DATETIME(type_, **kw)
def visit_date(self, type_, **kw):
return self.visit_DATE(type_, **kw)
def visit_big_integer(self, type_, **kw):
return self.visit_BIGINT(type_, **kw)
def visit_small_integer(self, type_, **kw):
return self.visit_SMALLINT(type_, **kw)
def visit_integer(self, type_, **kw):
return self.visit_INTEGER(type_, **kw)
def visit_real(self, type_, **kw):
return self.visit_REAL(type_, **kw)
def visit_float(self, type_, **kw):
return self.visit_FLOAT(type_, **kw)
def visit_numeric(self, type_, **kw):
return self.visit_NUMERIC(type_, **kw)
def visit_string(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_unicode(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_text(self, type_, **kw):
return self.visit_TEXT(type_, **kw)
def visit_unicode_text(self, type_, **kw):
return self.visit_TEXT(type_, **kw)
def visit_enum(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_null(self, type_, **kw):
raise exc.CompileError(
"Can't generate DDL for %r; "
"did you forget to specify a "
"type on this Column?" % type_
)
def visit_type_decorator(self, type_, **kw):
return self.process(type_.type_engine(self.dialect), **kw)
def visit_user_defined(self, type_, **kw):
return type_.get_col_spec(**kw)
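# Hedged sketch: a dialect's type_compiler dispatches into the visit_*
# methods above to produce the DDL name of a type.
from sqlalchemy import Numeric, String
from sqlalchemy.engine import default

_type_compiler = default.DefaultDialect().type_compiler
print(_type_compiler.process(Numeric(10, 2)))  # NUMERIC(10, 2)
print(_type_compiler.process(String(40)))      # VARCHAR(40)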
class StrSQLTypeCompiler(GenericTypeCompiler):
def __getattr__(self, key):
if key.startswith("visit_"):
return self._visit_unknown
else:
raise AttributeError(key)
def _visit_unknown(self, type_, **kw):
return "%s" % type_.__class__.__name__
class IdentifierPreparer(object):
"""Handle quoting and case-folding of identifiers based on options."""
reserved_words = RESERVED_WORDS
legal_characters = LEGAL_CHARACTERS
illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS
schema_for_object = schema._schema_getter(None)
def __init__(
self,
dialect,
initial_quote='"',
final_quote=None,
escape_quote='"',
quote_case_sensitive_collations=True,
omit_schema=False,
):
"""Construct a new ``IdentifierPreparer`` object.
initial_quote
Character that begins a delimited identifier.
final_quote
Character that ends a delimited identifier. Defaults to
`initial_quote`.
omit_schema
Prevent prepending schema name. Useful for databases that do
not support schemas.
"""
self.dialect = dialect
self.initial_quote = initial_quote
self.final_quote = final_quote or self.initial_quote
self.escape_quote = escape_quote
self.escape_to_quote = self.escape_quote * 2
self.omit_schema = omit_schema
self.quote_case_sensitive_collations = quote_case_sensitive_collations
self._strings = {}
self._double_percents = self.dialect.paramstyle in (
"format",
"pyformat",
)
def _with_schema_translate(self, schema_translate_map):
prep = self.__class__.__new__(self.__class__)
prep.__dict__.update(self.__dict__)
prep.schema_for_object = schema._schema_getter(schema_translate_map)
return prep
def _escape_identifier(self, value):
"""Escape an identifier.
Subclasses should override this to provide database-dependent
escaping behavior.
"""
value = value.replace(self.escape_quote, self.escape_to_quote)
if self._double_percents:
value = value.replace("%", "%%")
return value
def _unescape_identifier(self, value):
"""Canonicalize an escaped identifier.
Subclasses should override this to provide database-dependent
unescaping behavior that reverses _escape_identifier.
"""
return value.replace(self.escape_to_quote, self.escape_quote)
def validate_sql_phrase(self, element, reg):
"""keyword sequence filter.
a filter for elements that are intended to represent keyword sequences,
such as "INITIALLY", "INITIALLY DEFERRED", etc. no special characters
should be present.
.. versionadded:: 1.3
"""
if element is not None and not reg.match(element):
raise exc.CompileError(
"Unexpected SQL phrase: %r (matching against %r)"
% (element, reg.pattern)
)
return element
def quote_identifier(self, value):
"""Quote an identifier.
Subclasses should override this to provide database-dependent
quoting behavior.
"""
return (
self.initial_quote
+ self._escape_identifier(value)
+ self.final_quote
)
def _requires_quotes(self, value):
"""Return True if the given identifier requires quoting."""
lc_value = value.lower()
return (
lc_value in self.reserved_words
or value[0] in self.illegal_initial_characters
or not self.legal_characters.match(util.text_type(value))
or (lc_value != value)
)
def _requires_quotes_illegal_chars(self, value):
"""Return True if the given identifier requires quoting, but
not taking case convention into account."""
return not self.legal_characters.match(util.text_type(value))
def quote_schema(self, schema, force=None):
"""Conditionally quote a schema name.
The name is quoted if it is a reserved word, contains quote-necessary
characters, or is an instance of :class:`.quoted_name` which includes
``quote`` set to ``True``.
Subclasses can override this to provide database-dependent
quoting behavior for schema names.
:param schema: string schema name
:param force: unused
.. deprecated:: 0.9
The :paramref:`.IdentifierPreparer.quote_schema.force`
parameter is deprecated and will be removed in a future
release. This flag has no effect on the behavior of the
:meth:`.IdentifierPreparer.quote` method; please refer to
:class:`.quoted_name`.
"""
if force is not None:
# not using the util.deprecated_params() decorator in this
# case because of the additional function call overhead on this
# very performance-critical spot.
util.warn_deprecated(
"The IdentifierPreparer.quote_schema.force parameter is "
"deprecated and will be removed in a future release. This "
"flag has no effect on the behavior of the "
"IdentifierPreparer.quote method; please refer to "
"quoted_name()."
)
return self.quote(schema)
def quote(self, ident, force=None):
"""Conditionally quote an identfier.
The identifier is quoted if it is a reserved word, contains
quote-necessary characters, or is an instance of
:class:`.quoted_name` which includes ``quote`` set to ``True``.
Subclasses can override this to provide database-dependent
quoting behavior for identifier names.
:param ident: string identifier
:param force: unused
.. deprecated:: 0.9
The :paramref:`.IdentifierPreparer.quote.force`
parameter is deprecated and will be removed in a future
release. This flag has no effect on the behavior of the
:meth:`.IdentifierPreparer.quote` method; please refer to
:class:`.quoted_name`.
"""
if force is not None:
# not using the util.deprecated_params() decorator in this
# case because of the additional function call overhead on this
# very performance-critical spot.
util.warn_deprecated(
"The IdentifierPreparer.quote.force parameter is "
"deprecated and will be removed in a future release. This "
"flag has no effect on the behavior of the "
"IdentifierPreparer.quote method; please refer to "
"quoted_name()."
)
force = getattr(ident, "quote", None)
if force is None:
if ident in self._strings:
return self._strings[ident]
else:
if self._requires_quotes(ident):
self._strings[ident] = self.quote_identifier(ident)
else:
self._strings[ident] = ident
return self._strings[ident]
elif force:
return self.quote_identifier(ident)
else:
return ident
def format_collation(self, collation_name):
if self.quote_case_sensitive_collations:
return self.quote(collation_name)
else:
return collation_name
def format_sequence(self, sequence, use_schema=True):
name = self.quote(sequence.name)
effective_schema = self.schema_for_object(sequence)
if (
not self.omit_schema
and use_schema
and effective_schema is not None
):
name = self.quote_schema(effective_schema) + "." + name
return name
def format_label(self, label, name=None):
return self.quote(name or label.name)
def format_alias(self, alias, name=None):
return self.quote(name or alias.name)
def format_savepoint(self, savepoint, name=None):
# Running the savepoint name through quoting is unnecessary
# for all known dialects. This is here to support potential
# third party use cases
ident = name or savepoint.ident
if self._requires_quotes(ident):
ident = self.quote_identifier(ident)
return ident
@util.dependencies("sqlalchemy.sql.naming")
def format_constraint(self, naming, constraint):
if isinstance(constraint.name, elements._defer_name):
name = naming._constraint_name_for_table(
constraint, constraint.table
)
if name is None:
if isinstance(constraint.name, elements._defer_none_name):
return None
else:
name = constraint.name
else:
name = constraint.name
if isinstance(name, elements._truncated_label):
if constraint.__visit_name__ == "index":
max_ = (
self.dialect.max_index_name_length
or self.dialect.max_identifier_length
)
else:
max_ = self.dialect.max_identifier_length
if len(name) > max_:
name = name[0 : max_ - 8] + "_" + util.md5_hex(name)[-4:]
else:
self.dialect.validate_identifier(name)
return self.quote(name)
def format_index(self, index):
return self.format_constraint(index)
def format_table(self, table, use_schema=True, name=None):
"""Prepare a quoted table and schema name."""
if name is None:
name = table.name
result = self.quote(name)
effective_schema = self.schema_for_object(table)
if not self.omit_schema and use_schema and effective_schema:
result = self.quote_schema(effective_schema) + "." + result
return result
def format_schema(self, name):
"""Prepare a quoted schema name."""
return self.quote(name)
def format_column(
self,
column,
use_table=False,
name=None,
table_name=None,
use_schema=False,
):
"""Prepare a quoted column name."""
if name is None:
name = column.name
if not getattr(column, "is_literal", False):
if use_table:
return (
self.format_table(
column.table, use_schema=use_schema, name=table_name
)
+ "."
+ self.quote(name)
)
else:
return self.quote(name)
else:
# literal textual elements get stuck into ColumnClause a lot,
# which shouldn't get quoted
if use_table:
return (
self.format_table(
column.table, use_schema=use_schema, name=table_name
)
+ "."
+ name
)
else:
return name
def format_table_seq(self, table, use_schema=True):
"""Format table name and schema as a tuple."""
# Dialects with more levels in their fully qualified references
# ('database', 'owner', etc.) could override this and return
# a longer sequence.
effective_schema = self.schema_for_object(table)
if not self.omit_schema and use_schema and effective_schema:
return (
self.quote_schema(effective_schema),
self.format_table(table, use_schema=False),
)
else:
return (self.format_table(table, use_schema=False),)
@util.memoized_property
def _r_identifiers(self):
initial, final, escaped_final = [
re.escape(s)
for s in (
self.initial_quote,
self.final_quote,
self._escape_identifier(self.final_quote),
)
]
r = re.compile(
r"(?:"
r"(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s"
r"|([^\.]+))(?=\.|$))+"
% {"initial": initial, "final": final, "escaped": escaped_final}
)
return r
def unformat_identifiers(self, identifiers):
"""Unpack 'schema.table.column'-like strings into components."""
r = self._r_identifiers
return [
self._unescape_identifier(i)
for i in [a or b for a, b in r.findall(identifiers)]
]
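# Hedged sketch of the quoting rules above, using the default dialect's
# preparer: reserved words, illegal characters and case-sensitive names are
# delimited, everything else passes through unchanged.
from sqlalchemy.engine import default

_preparer = default.DefaultDialect().identifier_preparer
print(_preparer.quote("select"))    # "select"   (reserved word)
print(_preparer.quote("MyTable"))   # "MyTable"  (case sensitive)
print(_preparer.quote("plain"))     # plain
print(_preparer.unformat_identifiers('"my schema"."My Table".col'))
# ['my schema', 'My Table', 'col']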
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/sql/naming.py
|
# sqlalchemy/naming.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Establish constraint and index naming conventions.
"""
import re
from .elements import _defer_name
from .elements import _defer_none_name
from .elements import conv
from .schema import CheckConstraint
from .schema import Column
from .schema import Constraint
from .schema import ForeignKeyConstraint
from .schema import Index
from .schema import PrimaryKeyConstraint
from .schema import Table
from .schema import UniqueConstraint
from .. import event
from .. import events # noqa
from .. import exc
class ConventionDict(object):
def __init__(self, const, table, convention):
self.const = const
self._is_fk = isinstance(const, ForeignKeyConstraint)
self.table = table
self.convention = convention
self._const_name = const.name
def _key_table_name(self):
return self.table.name
def _column_X(self, idx):
if self._is_fk:
fk = self.const.elements[idx]
return fk.parent
else:
return list(self.const.columns)[idx]
def _key_constraint_name(self):
if isinstance(self._const_name, (type(None), _defer_none_name)):
raise exc.InvalidRequestError(
"Naming convention including "
"%(constraint_name)s token requires that "
"constraint is explicitly named."
)
if not isinstance(self._const_name, conv):
self.const.name = None
return self._const_name
def _key_column_X_key(self, idx):
# note this method was missing before
# [ticket:3989], meaning tokens like ``%(column_0_key)s`` weren't
# working even though documented.
return self._column_X(idx).key
def _key_column_X_name(self, idx):
return self._column_X(idx).name
def _key_column_X_label(self, idx):
return self._column_X(idx)._ddl_label
def _key_referred_table_name(self):
fk = self.const.elements[0]
refs = fk.target_fullname.split(".")
if len(refs) == 3:
refschema, reftable, refcol = refs
else:
reftable, refcol = refs
return reftable
def _key_referred_column_X_name(self, idx):
fk = self.const.elements[idx]
# note that before [ticket:3989], this method was returning
# the specification for the :class:`.ForeignKey` itself, which normally
# would be using the ``.key`` of the column, not the name.
return fk.column.name
def __getitem__(self, key):
if key in self.convention:
return self.convention[key](self.const, self.table)
elif hasattr(self, "_key_%s" % key):
return getattr(self, "_key_%s" % key)()
else:
col_template = re.match(r".*_?column_(\d+)(_?N)?_.+", key)
if col_template:
idx = col_template.group(1)
multiples = col_template.group(2)
if multiples:
if self._is_fk:
elems = self.const.elements
else:
elems = list(self.const.columns)
tokens = []
for idx, elem in enumerate(elems):
attr = "_key_" + key.replace("0" + multiples, "X")
try:
tokens.append(getattr(self, attr)(idx))
except AttributeError:
raise KeyError(key)
sep = "_" if multiples.startswith("_") else ""
return sep.join(tokens)
else:
attr = "_key_" + key.replace(idx, "X")
idx = int(idx)
if hasattr(self, attr):
return getattr(self, attr)(idx)
raise KeyError(key)
_prefix_dict = {
Index: "ix",
PrimaryKeyConstraint: "pk",
CheckConstraint: "ck",
UniqueConstraint: "uq",
ForeignKeyConstraint: "fk",
}
def _get_convention(dict_, key):
for super_ in key.__mro__:
if super_ in _prefix_dict and _prefix_dict[super_] in dict_:
return dict_[_prefix_dict[super_]]
elif super_ in dict_:
return dict_[super_]
else:
return None
def _constraint_name_for_table(const, table):
metadata = table.metadata
convention = _get_convention(metadata.naming_convention, type(const))
if isinstance(const.name, conv):
return const.name
elif (
convention is not None
and not isinstance(const.name, conv)
and (
const.name is None
or "constraint_name" in convention
or isinstance(const.name, _defer_name)
)
):
return conv(
convention
% ConventionDict(const, table, metadata.naming_convention)
)
elif isinstance(convention, _defer_none_name):
return None
@event.listens_for(Constraint, "after_parent_attach")
@event.listens_for(Index, "after_parent_attach")
def _constraint_name(const, table):
if isinstance(table, Column):
# for column-attached constraint, set another event
# to link the column attached to the table as this constraint
# associated with the table.
event.listen(
table,
"after_parent_attach",
lambda col, table: _constraint_name(const, table),
)
elif isinstance(table, Table):
if isinstance(const.name, (conv, _defer_name)):
return
newname = _constraint_name_for_table(const, table)
if newname is not None:
const.name = newname
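# Hedged usage sketch: the tokens resolved by ConventionDict above are what
# MetaData(naming_convention=...) templates refer to; the table and the
# convention below are throwaway examples.
from sqlalchemy import Column, Integer, MetaData, Table, UniqueConstraint

_md = MetaData(
    naming_convention={"uq": "uq_%(table_name)s_%(column_0_name)s"}
)
_acct = Table(
    "user_account",
    _md,
    Column("id", Integer, primary_key=True),
    Column("email", Integer),
    UniqueConstraint("email"),
)
_uq = next(c for c in _acct.constraints if isinstance(c, UniqueConstraint))
print(_uq.name)  # uq_user_account_email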
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/sql/util.py
|
# sql/util.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""High level utilities which build upon other modules here.
"""
from collections import deque
from itertools import chain
from . import operators
from . import visitors
from .annotation import _deep_annotate # noqa
from .annotation import _deep_deannotate # noqa
from .annotation import _shallow_annotate # noqa
from .base import _from_objects
from .base import ColumnSet
from .ddl import sort_tables # noqa
from .elements import _expand_cloned
from .elements import _find_columns # noqa
from .elements import _label_reference
from .elements import _textual_label_reference
from .elements import BindParameter
from .elements import ColumnClause
from .elements import ColumnElement
from .elements import Null
from .elements import UnaryExpression
from .schema import Column
from .selectable import Alias
from .selectable import FromClause
from .selectable import FromGrouping
from .selectable import Join
from .selectable import ScalarSelect
from .selectable import SelectBase
from .selectable import TableClause
from .. import exc
from .. import util
join_condition = util.langhelpers.public_factory(
Join._join_condition, ".sql.util.join_condition"
)
def find_join_source(clauses, join_to):
"""Given a list of FROM clauses and a selectable,
return the indexes from the list of
clauses which can be joined against the selectable.  Returns
an empty list if no match is found.
e.g.::
clause1 = table1.join(table2)
clause2 = table4.join(table5)
join_to = table2.join(table3)
find_join_source([clause1, clause2], join_to) == [0]
"""
selectables = list(_from_objects(join_to))
idx = []
for i, f in enumerate(clauses):
for s in selectables:
if f.is_derived_from(s):
idx.append(i)
return idx
def find_left_clause_that_matches_given(clauses, join_from):
"""Given a list of FROM clauses and a selectable,
return the indexes from the list of
clauses which is derived from the selectable.
"""
selectables = list(_from_objects(join_from))
liberal_idx = []
for i, f in enumerate(clauses):
for s in selectables:
# basic check, if f is derived from s.
# this can be joins containing a table, or an aliased table
# or select statement matching to a table. This check
# will match a table to a selectable that is adapted from
# that table. With Query, this suits the case where a join
# is being made to an adapted entity
if f.is_derived_from(s):
liberal_idx.append(i)
break
# in an extremely small set of use cases, a join is being made where
# there are multiple FROM clauses where our target table is represented
# in more than one, such as embedded or similar. in this case, do
# another pass where we try to get a more exact match where we aren't
# looking at adaption relationships.
if len(liberal_idx) > 1:
conservative_idx = []
for idx in liberal_idx:
f = clauses[idx]
for s in selectables:
if set(surface_selectables(f)).intersection(
surface_selectables(s)
):
conservative_idx.append(idx)
break
if conservative_idx:
return conservative_idx
return liberal_idx
def find_left_clause_to_join_from(clauses, join_to, onclause):
"""Given a list of FROM clauses, a selectable,
and optional ON clause, return a list of integer indexes from the
clauses list indicating the clauses that can be joined from.
The presence of an "onclause" indicates that at least one clause can
definitely be joined from; if the list of clauses is of length one
and the onclause is given, returns that index. If the list of clauses
is more than length one, and the onclause is given, attempts to locate
which clauses contain the same columns.
"""
idx = []
selectables = set(_from_objects(join_to))
# if we are given more than one target clause to join
# from, use the onclause to provide a more specific answer.
# otherwise, don't try to limit, after all, "ON TRUE" is a valid
# on clause
if len(clauses) > 1 and onclause is not None:
resolve_ambiguity = True
cols_in_onclause = _find_columns(onclause)
else:
resolve_ambiguity = False
cols_in_onclause = None
for i, f in enumerate(clauses):
for s in selectables.difference([f]):
if resolve_ambiguity:
if set(f.c).union(s.c).issuperset(cols_in_onclause):
idx.append(i)
break
elif Join._can_join(f, s) or onclause is not None:
idx.append(i)
break
if len(idx) > 1:
# this is the same "hide froms" logic from
# Selectable._get_display_froms
toremove = set(
chain(*[_expand_cloned(f._hide_froms) for f in clauses])
)
idx = [i for i in idx if clauses[i] not in toremove]
# onclause was given and none of them resolved, so assume
# all indexes can match
if not idx and onclause is not None:
return range(len(clauses))
else:
return idx
def visit_binary_product(fn, expr):
"""Produce a traversal of the given expression, delivering
column comparisons to the given function.
The function is of the form::
def my_fn(binary, left, right)
For each binary expression located which has a
comparison operator, the product of "left" and
"right" will be delivered to that function,
in terms of that binary.
Hence an expression like::
and_(
(a + b) == q + func.sum(e + f),
j == r
)
would have the traversal::
a <eq> q
a <eq> e
a <eq> f
b <eq> q
b <eq> e
b <eq> f
j <eq> r
That is, every combination of "left" and
"right" that doesn't further contain
a binary comparison is passed as pairs.
"""
stack = []
def visit(element):
if isinstance(element, ScalarSelect):
# we don't want to dig into correlated subqueries,
# those are just column elements by themselves
yield element
elif element.__visit_name__ == "binary" and operators.is_comparison(
element.operator
):
stack.insert(0, element)
for l in visit(element.left):
for r in visit(element.right):
fn(stack[0], l, r)
stack.pop(0)
for elem in element.get_children():
visit(elem)
else:
if isinstance(element, ColumnClause):
yield element
for elem in element.get_children():
for e in visit(elem):
yield e
list(visit(expr))
visit = None # remove gc cycles
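# Runnable form of the traversal described in the docstring above (hedged;
# the column names are throwaway examples).
from sqlalchemy import and_, column, func

_a, _b, _q, _e, _f, _j, _r = [column(name) for name in "abqefjr"]
_expr = and_((_a + _b) == _q + func.sum(_e + _f), _j == _r)
visit_binary_product(
    lambda binary, left, right: print(left, "<eq>", right), _expr
)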
def find_tables(
clause,
check_columns=False,
include_aliases=False,
include_joins=False,
include_selects=False,
include_crud=False,
):
"""locate Table objects within the given expression."""
tables = []
_visitors = {}
if include_selects:
_visitors["select"] = _visitors["compound_select"] = tables.append
if include_joins:
_visitors["join"] = tables.append
if include_aliases:
_visitors["alias"] = tables.append
if include_crud:
_visitors["insert"] = _visitors["update"] = _visitors[
"delete"
] = lambda ent: tables.append(ent.table)
if check_columns:
def visit_column(column):
tables.append(column.table)
_visitors["column"] = visit_column
_visitors["table"] = tables.append
visitors.traverse(clause, {"column_collections": False}, _visitors)
return tables
def unwrap_order_by(clause):
"""Break up an 'order by' expression into individual column-expressions,
without DESC/ASC/NULLS FIRST/NULLS LAST"""
cols = util.column_set()
result = []
stack = deque([clause])
while stack:
t = stack.popleft()
if isinstance(t, ColumnElement) and (
not isinstance(t, UnaryExpression)
or not operators.is_ordering_modifier(t.modifier)
):
if isinstance(t, _label_reference):
t = t.element
if isinstance(t, (_textual_label_reference)):
continue
if t not in cols:
cols.add(t)
result.append(t)
else:
for c in t.get_children():
stack.append(c)
return result
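# Hedged sketch: unwrap_order_by() above strips the ordering modifiers,
# leaving only the underlying column expressions.
from sqlalchemy import column, desc, nullslast

_unwrapped = unwrap_order_by(nullslast(desc(column("x"))))
# a one-element list containing the bare column('x') expression,
# without the DESC / NULLS LAST wrappers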
def unwrap_label_reference(element):
def replace(elem):
if isinstance(elem, (_label_reference, _textual_label_reference)):
return elem.element
return visitors.replacement_traverse(element, {}, replace)
def expand_column_list_from_order_by(collist, order_by):
"""Given the columns clause and ORDER BY of a selectable,
return a list of column expressions that can be added to the collist
corresponding to the ORDER BY, without repeating those already
in the collist.
"""
cols_already_present = set(
[
col.element if col._order_by_label_element is not None else col
for col in collist
]
)
return [
col
for col in chain(*[unwrap_order_by(o) for o in order_by])
if col not in cols_already_present
]
def clause_is_present(clause, search):
"""Given a target clause and a second to search within, return True
if the target is plainly present in the search without any
subqueries or aliases involved.
Basically descends through Joins.
"""
for elem in surface_selectables(search):
if clause == elem: # use == here so that Annotated's compare
return True
else:
return False
def surface_selectables(clause):
stack = [clause]
while stack:
elem = stack.pop()
yield elem
if isinstance(elem, Join):
stack.extend((elem.left, elem.right))
elif isinstance(elem, FromGrouping):
stack.append(elem.element)
def surface_selectables_only(clause):
stack = [clause]
while stack:
elem = stack.pop()
if isinstance(elem, (TableClause, Alias)):
yield elem
if isinstance(elem, Join):
stack.extend((elem.left, elem.right))
elif isinstance(elem, FromGrouping):
stack.append(elem.element)
elif isinstance(elem, ColumnClause):
stack.append(elem.table)
def surface_column_elements(clause, include_scalar_selects=True):
"""traverse and yield only outer-exposed column elements, such as would
be addressable in the WHERE clause of a SELECT if this element were
in the columns clause."""
filter_ = (FromGrouping,)
if not include_scalar_selects:
filter_ += (SelectBase,)
stack = deque([clause])
while stack:
elem = stack.popleft()
yield elem
for sub in elem.get_children():
if isinstance(sub, filter_):
continue
stack.append(sub)
def selectables_overlap(left, right):
"""Return True if left/right have some overlapping selectable"""
return bool(
set(surface_selectables(left)).intersection(surface_selectables(right))
)
def bind_values(clause):
"""Return an ordered list of "bound" values in the given clause.
E.g.::
>>> expr = and_(
... table.c.foo==5, table.c.foo==7
... )
>>> bind_values(expr)
[5, 7]
"""
v = []
def visit_bindparam(bind):
v.append(bind.effective_value)
visitors.traverse(clause, {}, {"bindparam": visit_bindparam})
return v
def _quote_ddl_expr(element):
if isinstance(element, util.string_types):
element = element.replace("'", "''")
return "'%s'" % element
else:
return repr(element)
class _repr_base(object):
_LIST = 0
_TUPLE = 1
_DICT = 2
__slots__ = ("max_chars",)
def trunc(self, value):
rep = repr(value)
lenrep = len(rep)
if lenrep > self.max_chars:
segment_length = self.max_chars // 2
rep = (
rep[0:segment_length]
+ (
" ... (%d characters truncated) ... "
% (lenrep - self.max_chars)
)
+ rep[-segment_length:]
)
return rep
class _repr_row(_repr_base):
"""Provide a string view of a row."""
__slots__ = ("row",)
def __init__(self, row, max_chars=300):
self.row = row
self.max_chars = max_chars
def __repr__(self):
trunc = self.trunc
return "(%s%s)" % (
", ".join(trunc(value) for value in self.row),
"," if len(self.row) == 1 else "",
)
class _repr_params(_repr_base):
"""Provide a string view of bound parameters.
Truncates display to a given number of 'multi' parameter sets,
as well as long values to a given number of characters.
"""
__slots__ = "params", "batches", "ismulti"
def __init__(self, params, batches, max_chars=300, ismulti=None):
self.params = params
self.ismulti = ismulti
self.batches = batches
self.max_chars = max_chars
def __repr__(self):
if self.ismulti is None:
return self.trunc(self.params)
if isinstance(self.params, list):
typ = self._LIST
elif isinstance(self.params, tuple):
typ = self._TUPLE
elif isinstance(self.params, dict):
typ = self._DICT
else:
return self.trunc(self.params)
if self.ismulti and len(self.params) > self.batches:
msg = " ... displaying %i of %i total bound parameter sets ... "
return " ".join(
(
self._repr_multi(self.params[: self.batches - 2], typ)[
0:-1
],
msg % (self.batches, len(self.params)),
self._repr_multi(self.params[-2:], typ)[1:],
)
)
elif self.ismulti:
return self._repr_multi(self.params, typ)
else:
return self._repr_params(self.params, typ)
def _repr_multi(self, multi_params, typ):
if multi_params:
if isinstance(multi_params[0], list):
elem_type = self._LIST
elif isinstance(multi_params[0], tuple):
elem_type = self._TUPLE
elif isinstance(multi_params[0], dict):
elem_type = self._DICT
else:
assert False, "Unknown parameter type %s" % (
type(multi_params[0])
)
elements = ", ".join(
self._repr_params(params, elem_type) for params in multi_params
)
else:
elements = ""
if typ == self._LIST:
return "[%s]" % elements
else:
return "(%s)" % elements
def _repr_params(self, params, typ):
trunc = self.trunc
if typ is self._DICT:
return "{%s}" % (
", ".join(
"%r: %s" % (key, trunc(value))
for key, value in params.items()
)
)
elif typ is self._TUPLE:
return "(%s%s)" % (
", ".join(trunc(value) for value in params),
"," if len(params) == 1 else "",
)
else:
return "[%s]" % (", ".join(trunc(value) for value in params))
def adapt_criterion_to_null(crit, nulls):
"""given criterion containing bind params, convert selected elements
to IS NULL.
"""
def visit_binary(binary):
if (
isinstance(binary.left, BindParameter)
and binary.left._identifying_key in nulls
):
# reverse order if the NULL is on the left side
binary.left = binary.right
binary.right = Null()
binary.operator = operators.is_
binary.negate = operators.isnot
elif (
isinstance(binary.right, BindParameter)
and binary.right._identifying_key in nulls
):
binary.right = Null()
binary.operator = operators.is_
binary.negate = operators.isnot
return visitors.cloned_traverse(crit, {}, {"binary": visit_binary})
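# Hedged sketch: bind parameters whose identifying keys appear in 'nulls'
# are rewritten by adapt_criterion_to_null() above as IS NULL comparisons.
from sqlalchemy import bindparam, column

_crit = column("x") == bindparam("xval")
print(adapt_criterion_to_null(_crit, {"xval"}))  # x IS NULL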
def splice_joins(left, right, stop_on=None):
if left is None:
return right
stack = [(right, None)]
adapter = ClauseAdapter(left)
ret = None
while stack:
(right, prevright) = stack.pop()
if isinstance(right, Join) and right is not stop_on:
right = right._clone()
right._reset_exported()
right.onclause = adapter.traverse(right.onclause)
stack.append((right.left, right))
else:
right = adapter.traverse(right)
if prevright is not None:
prevright.left = right
if ret is None:
ret = right
return ret
def reduce_columns(columns, *clauses, **kw):
r"""given a list of columns, return a 'reduced' set based on natural
equivalents.
the set is reduced to the smallest list of columns which have no natural
equivalent present in the list. A "natural equivalent" means that two
columns will ultimately represent the same value because they are related
by a foreign key.
\*clauses is an optional list of join clauses which will be traversed
to further identify columns that are "equivalent".
\**kw may specify 'ignore_nonexistent_tables' to ignore foreign keys
whose tables are not yet configured, or columns that aren't yet present.
This function is primarily used to determine the most minimal "primary
key" from a selectable, by reducing the set of primary key columns present
in the selectable to just those that are not repeated.
"""
ignore_nonexistent_tables = kw.pop("ignore_nonexistent_tables", False)
only_synonyms = kw.pop("only_synonyms", False)
columns = util.ordered_column_set(columns)
omit = util.column_set()
for col in columns:
for fk in chain(*[c.foreign_keys for c in col.proxy_set]):
for c in columns:
if c is col:
continue
try:
fk_col = fk.column
except exc.NoReferencedColumnError:
# TODO: add specific coverage here
# to test/sql/test_selectable ReduceTest
if ignore_nonexistent_tables:
continue
else:
raise
except exc.NoReferencedTableError:
# TODO: add specific coverage here
# to test/sql/test_selectable ReduceTest
if ignore_nonexistent_tables:
continue
else:
raise
if fk_col.shares_lineage(c) and (
not only_synonyms or c.name == col.name
):
omit.add(col)
break
if clauses:
def visit_binary(binary):
if binary.operator == operators.eq:
cols = util.column_set(
chain(*[c.proxy_set for c in columns.difference(omit)])
)
if binary.left in cols and binary.right in cols:
for c in reversed(columns):
if c.shares_lineage(binary.right) and (
not only_synonyms or c.name == binary.left.name
):
omit.add(c)
break
for clause in clauses:
if clause is not None:
visitors.traverse(clause, {}, {"binary": visit_binary})
return ColumnSet(columns.difference(omit))
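# Hedged sketch: with a foreign key making the two "id" columns natural
# equivalents, reduce_columns() above keeps only the referenced column.
from sqlalchemy import Column, ForeignKey, Integer, MetaData, Table

_meta2 = MetaData()
_parent = Table("parent", _meta2, Column("id", Integer, primary_key=True))
_child = Table(
    "child",
    _meta2,
    Column("id", Integer, ForeignKey("parent.id"), primary_key=True),
)
print(list(reduce_columns([_parent.c.id, _child.c.id])))
# only parent.c.id remains; child.c.id is omitted as an equivalent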
def criterion_as_pairs(
expression,
consider_as_foreign_keys=None,
consider_as_referenced_keys=None,
any_operator=False,
):
"""traverse an expression and locate binary criterion pairs."""
if consider_as_foreign_keys and consider_as_referenced_keys:
raise exc.ArgumentError(
"Can only specify one of "
"'consider_as_foreign_keys' or "
"'consider_as_referenced_keys'"
)
def col_is(a, b):
# return a is b
return a.compare(b)
def visit_binary(binary):
if not any_operator and binary.operator is not operators.eq:
return
if not isinstance(binary.left, ColumnElement) or not isinstance(
binary.right, ColumnElement
):
return
if consider_as_foreign_keys:
if binary.left in consider_as_foreign_keys and (
col_is(binary.right, binary.left)
or binary.right not in consider_as_foreign_keys
):
pairs.append((binary.right, binary.left))
elif binary.right in consider_as_foreign_keys and (
col_is(binary.left, binary.right)
or binary.left not in consider_as_foreign_keys
):
pairs.append((binary.left, binary.right))
elif consider_as_referenced_keys:
if binary.left in consider_as_referenced_keys and (
col_is(binary.right, binary.left)
or binary.right not in consider_as_referenced_keys
):
pairs.append((binary.left, binary.right))
elif binary.right in consider_as_referenced_keys and (
col_is(binary.left, binary.right)
or binary.left not in consider_as_referenced_keys
):
pairs.append((binary.right, binary.left))
else:
if isinstance(binary.left, Column) and isinstance(
binary.right, Column
):
if binary.left.references(binary.right):
pairs.append((binary.right, binary.left))
elif binary.right.references(binary.left):
pairs.append((binary.left, binary.right))
pairs = []
visitors.traverse(expression, {}, {"binary": visit_binary})
return pairs
class ClauseAdapter(visitors.ReplacingCloningVisitor):
"""Clones and modifies clauses based on column correspondence.
E.g.::
table1 = Table('sometable', metadata,
Column('col1', Integer),
Column('col2', Integer)
)
table2 = Table('someothertable', metadata,
Column('col1', Integer),
Column('col2', Integer)
)
condition = table1.c.col1 == table2.c.col1
make an alias of table1::
s = table1.alias('foo')
calling ``ClauseAdapter(s).traverse(condition)`` converts
condition to read::
s.c.col1 == table2.c.col1
"""
def __init__(
self,
selectable,
equivalents=None,
include_fn=None,
exclude_fn=None,
adapt_on_names=False,
anonymize_labels=False,
):
self.__traverse_options__ = {
"stop_on": [selectable],
"anonymize_labels": anonymize_labels,
}
self.selectable = selectable
self.include_fn = include_fn
self.exclude_fn = exclude_fn
self.equivalents = util.column_dict(equivalents or {})
self.adapt_on_names = adapt_on_names
def _corresponding_column(
self, col, require_embedded, _seen=util.EMPTY_SET
):
newcol = self.selectable.corresponding_column(
col, require_embedded=require_embedded
)
if newcol is None and col in self.equivalents and col not in _seen:
for equiv in self.equivalents[col]:
newcol = self._corresponding_column(
equiv,
require_embedded=require_embedded,
_seen=_seen.union([col]),
)
if newcol is not None:
return newcol
if self.adapt_on_names and newcol is None:
newcol = self.selectable.c.get(col.name)
return newcol
def replace(self, col):
if isinstance(col, FromClause) and self.selectable.is_derived_from(
col
):
return self.selectable
elif not isinstance(col, ColumnElement):
return None
elif self.include_fn and not self.include_fn(col):
return None
elif self.exclude_fn and self.exclude_fn(col):
return None
else:
return self._corresponding_column(col, True)
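# Runnable form of the docstring example above (hedged; a throwaway
# MetaData is created here purely for illustration).
from sqlalchemy import Column, Integer, MetaData, Table

_meta3 = MetaData()
_table1 = Table(
    "sometable", _meta3, Column("col1", Integer), Column("col2", Integer)
)
_table2 = Table(
    "someothertable", _meta3, Column("col1", Integer), Column("col2", Integer)
)
_condition = _table1.c.col1 == _table2.c.col1
_aliased = _table1.alias("foo")
print(ClauseAdapter(_aliased).traverse(_condition))
# foo.col1 = someothertable.col1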
class ColumnAdapter(ClauseAdapter):
"""Extends ClauseAdapter with extra utility functions.
Key aspects of ColumnAdapter include:
* Expressions that are adapted are stored in a persistent
.columns collection; so that an expression E adapted into
an expression E1, will return the same object E1 when adapted
a second time. This is important in particular for things like
Label objects that are anonymized, so that the ColumnAdapter can
be used to present a consistent "adapted" view of things.
* Exclusion of items from the persistent collection based on
include/exclude rules, but also independent of hash identity.
This because "annotated" items all have the same hash identity as their
parent.
* "wrapping" capability is added, so that the replacement of an expression
E can proceed through a series of adapters. This differs from the
visitor's "chaining" feature in that the resulting object is passed
through all replacing functions unconditionally, rather than stopping
at the first one that returns non-None.
* An adapt_required option, used by eager loading to indicate that
we don't trust a result row column that is not translated.
This is to prevent a column from being interpreted as that
of the child row in a self-referential scenario, see
inheritance/test_basic.py->EagerTargetingTest.test_adapt_stringency
"""
def __init__(
self,
selectable,
equivalents=None,
adapt_required=False,
include_fn=None,
exclude_fn=None,
adapt_on_names=False,
allow_label_resolve=True,
anonymize_labels=False,
):
ClauseAdapter.__init__(
self,
selectable,
equivalents,
include_fn=include_fn,
exclude_fn=exclude_fn,
adapt_on_names=adapt_on_names,
anonymize_labels=anonymize_labels,
)
self.columns = util.WeakPopulateDict(self._locate_col)
if self.include_fn or self.exclude_fn:
self.columns = self._IncludeExcludeMapping(self, self.columns)
self.adapt_required = adapt_required
self.allow_label_resolve = allow_label_resolve
self._wrap = None
class _IncludeExcludeMapping(object):
def __init__(self, parent, columns):
self.parent = parent
self.columns = columns
def __getitem__(self, key):
if (
self.parent.include_fn and not self.parent.include_fn(key)
) or (self.parent.exclude_fn and self.parent.exclude_fn(key)):
if self.parent._wrap:
return self.parent._wrap.columns[key]
else:
return key
return self.columns[key]
def wrap(self, adapter):
ac = self.__class__.__new__(self.__class__)
ac.__dict__.update(self.__dict__)
ac._wrap = adapter
ac.columns = util.WeakPopulateDict(ac._locate_col)
if ac.include_fn or ac.exclude_fn:
ac.columns = self._IncludeExcludeMapping(ac, ac.columns)
return ac
def traverse(self, obj):
return self.columns[obj]
adapt_clause = traverse
adapt_list = ClauseAdapter.copy_and_process
def _locate_col(self, col):
c = ClauseAdapter.traverse(self, col)
if self._wrap:
c2 = self._wrap._locate_col(c)
if c2 is not None:
c = c2
if self.adapt_required and c is col:
return None
c._allow_label_resolve = self.allow_label_resolve
return c
def __getstate__(self):
d = self.__dict__.copy()
del d["columns"]
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.columns = util.WeakPopulateDict(self._locate_col)
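# Illustrative sketch, not part of SQLAlchemy: shows the persistent
# ``.columns`` collection described in the ColumnAdapter docstring above.
# The table and alias names below are hypothetical.
def _example_column_adapter_usage():
    from sqlalchemy import Column, Integer, MetaData, Table

    metadata = MetaData()
    table1 = Table("table1", metadata, Column("col1", Integer))
    alias = table1.alias("t1_alias")

    adapter = ColumnAdapter(alias)

    # adapting the same column twice returns the same adapted object,
    # courtesy of the persistent .columns mapping
    adapted_first = adapter.columns[table1.c.col1]
    adapted_second = adapter.columns[table1.c.col1]
    assert adapted_first is adapted_second
    return adapted_first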
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/sql/visitors.py
|
# sql/visitors.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Visitor/traversal interface and library functions.
SQLAlchemy schema and expression constructs rely on a Python-centric
version of the classic "visitor" pattern as the primary way in which
they apply functionality. The most common use of this pattern
is statement compilation, where individual expression classes match
up to rendering methods that produce a string result. Beyond this,
the visitor system is also used to inspect expressions for various
information and patterns, as well as for the purposes of applying
transformations to expressions.
Examples of how the visit system is used can be seen in the source code
of, for example, the ``sqlalchemy.sql.util`` and ``sqlalchemy.sql.compiler``
modules. Some background on clause adaptation is also at
http://techspot.zzzeek.org/2008/01/23/expression-transformations/ .
"""
from collections import deque
import operator
from .. import exc
from .. import util
__all__ = [
"VisitableType",
"Visitable",
"ClauseVisitor",
"CloningVisitor",
"ReplacingCloningVisitor",
"iterate",
"iterate_depthfirst",
"traverse_using",
"traverse",
"traverse_depthfirst",
"cloned_traverse",
"replacement_traverse",
]
class VisitableType(type):
"""Metaclass which assigns a ``_compiler_dispatch`` method to classes
having a ``__visit_name__`` attribute.
The ``_compiler_dispatch`` attribute becomes an instance method which
looks approximately like the following::
def _compiler_dispatch (self, visitor, **kw):
'''Look for an attribute named "visit_" + self.__visit_name__
on the visitor, and call it with the same kw params.'''
visit_attr = 'visit_%s' % self.__visit_name__
return getattr(visitor, visit_attr)(self, **kw)
Classes having no ``__visit_name__`` attribute will remain unaffected.
"""
def __init__(cls, clsname, bases, clsdict):
if clsname != "Visitable" and hasattr(cls, "__visit_name__"):
_generate_dispatch(cls)
super(VisitableType, cls).__init__(clsname, bases, clsdict)
def _generate_dispatch(cls):
"""Return an optimized visit dispatch function for the cls
for use by the compiler.
"""
if "__visit_name__" in cls.__dict__:
visit_name = cls.__visit_name__
if isinstance(visit_name, util.compat.string_types):
# There is an optimization opportunity here because the
# string name of the class's __visit_name__ is known at
# this early stage (import time) so it can be pre-constructed.
getter = operator.attrgetter("visit_%s" % visit_name)
def _compiler_dispatch(self, visitor, **kw):
try:
meth = getter(visitor)
except AttributeError as err:
util.raise_(
exc.UnsupportedCompilationError(visitor, cls),
replace_context=err,
)
else:
return meth(self, **kw)
else:
# The optimization opportunity is lost for this case because the
# __visit_name__ is not yet a string. As a result, the visit
# string has to be recalculated with each compilation.
def _compiler_dispatch(self, visitor, **kw):
visit_attr = "visit_%s" % self.__visit_name__
try:
meth = getattr(visitor, visit_attr)
except AttributeError as err:
util.raise_(
exc.UnsupportedCompilationError(visitor, cls),
replace_context=err,
)
else:
return meth(self, **kw)
_compiler_dispatch.__doc__ = """Look for an attribute named "visit_" + self.__visit_name__
on the visitor, and call it with the same kw params.
"""
cls._compiler_dispatch = _compiler_dispatch
class Visitable(util.with_metaclass(VisitableType, object)):
"""Base class for visitable objects, applies the
:class:`.visitors.VisitableType` metaclass.
The :class:`.Visitable` class is essentially at the base of the
:class:`_expression.ClauseElement` hierarchy.
"""
class ClauseVisitor(object):
"""Base class for visitor objects which can traverse using
the :func:`.visitors.traverse` function.
Direct usage of the :func:`.visitors.traverse` function is usually
preferred.
"""
__traverse_options__ = {}
def traverse_single(self, obj, **kw):
for v in self.visitor_iterator:
meth = getattr(v, "visit_%s" % obj.__visit_name__, None)
if meth:
return meth(obj, **kw)
def iterate(self, obj):
"""traverse the given expression structure, returning an iterator
of all elements.
"""
return iterate(obj, self.__traverse_options__)
def traverse(self, obj):
"""traverse and visit the given expression structure."""
return traverse(obj, self.__traverse_options__, self._visitor_dict)
@util.memoized_property
def _visitor_dict(self):
visitors = {}
for name in dir(self):
if name.startswith("visit_"):
visitors[name[6:]] = getattr(self, name)
return visitors
@property
def visitor_iterator(self):
"""iterate through this visitor and each 'chained' visitor."""
v = self
while v:
yield v
v = getattr(v, "_next", None)
def chain(self, visitor):
"""'chain' an additional ClauseVisitor onto this ClauseVisitor.
the chained visitor will receive all visit events after this one.
"""
tail = list(self.visitor_iterator)[-1]
tail._next = visitor
return self
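# Illustrative sketch, not part of SQLAlchemy: a minimal ClauseVisitor
# subclass.  The column names and literal values below are hypothetical.
def _example_clause_visitor():
    from sqlalchemy import column

    class BindCollector(ClauseVisitor):
        def __init__(self):
            self.values = []

        def visit_bindparam(self, bind):
            self.values.append(bind.value)

    # traverse a small expression and collect its bound parameter values
    expr = (column("x") == 5) & (column("y") == 10)
    collector = BindCollector()
    collector.traverse(expr)
    return collector.values  # [5, 10]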
class CloningVisitor(ClauseVisitor):
"""Base class for visitor objects which can traverse using
the :func:`.visitors.cloned_traverse` function.
Direct usage of the :func:`.visitors.cloned_traverse` function is usually
preferred.
"""
def copy_and_process(self, list_):
"""Apply cloned traversal to the given list of elements, and return
the new list.
"""
return [self.traverse(x) for x in list_]
def traverse(self, obj):
"""traverse and visit the given expression structure."""
return cloned_traverse(
obj, self.__traverse_options__, self._visitor_dict
)
class ReplacingCloningVisitor(CloningVisitor):
"""Base class for visitor objects which can traverse using
the :func:`.visitors.replacement_traverse` function.
Direct usage of the :func:`.visitors.replacement_traverse` function is
usually preferred.
"""
def replace(self, elem):
"""receive pre-copied elements during a cloning traversal.
If the method returns a new element, the element is used
instead of creating a simple copy of the element. Traversal
will halt on the newly returned element if it is re-encountered.
"""
return None
def traverse(self, obj):
"""traverse and visit the given expression structure."""
def replace(elem):
for v in self.visitor_iterator:
e = v.replace(elem)
if e is not None:
return e
return replacement_traverse(obj, self.__traverse_options__, replace)
def iterate(obj, opts):
r"""traverse the given expression structure, returning an iterator.
traversal is configured to be breadth-first.
The central API feature used by the :func:`.visitors.iterate` and
:func:`.visitors.iterate_depthfirst` functions is the
:meth:`_expression.ClauseElement.get_children` method of
:class:`_expression.ClauseElement`
objects. This method should return all the
:class:`_expression.ClauseElement` objects
which are associated with a particular :class:`_expression.ClauseElement`
object.
For example, a :class:`.Case` structure will refer to a series of
:class:`_expression.ColumnElement`
objects within its "whens" and "else\_" member
variables.
:param obj: :class:`_expression.ClauseElement` structure to be traversed
:param opts: dictionary of iteration options. This dictionary is usually
empty in modern usage.
"""
# fasttrack for atomic elements like columns
children = obj.get_children(**opts)
if not children:
return [obj]
traversal = deque()
stack = deque([obj])
while stack:
t = stack.popleft()
traversal.append(t)
for c in t.get_children(**opts):
stack.append(c)
return iter(traversal)
def iterate_depthfirst(obj, opts):
"""traverse the given expression structure, returning an iterator.
traversal is configured to be depth-first.
:param obj: :class:`_expression.ClauseElement` structure to be traversed
:param opts: dictionary of iteration options. This dictionary is usually
empty in modern usage.
.. seealso::
:func:`.visitors.iterate` - includes a general overview of iteration.
"""
# fasttrack for atomic elements like columns
children = obj.get_children(**opts)
if not children:
return [obj]
stack = deque([obj])
traversal = deque()
while stack:
t = stack.pop()
traversal.appendleft(t)
for c in t.get_children(**opts):
stack.append(c)
return iter(traversal)
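# Illustrative sketch, not part of SQLAlchemy: contrasts the breadth-first
# and depth-first iterators on a small, hypothetical expression.
def _example_iterate_orders():
    from sqlalchemy import column

    expr = (column("x") == 5) & (column("y") == 10)

    # breadth-first: the root clause list comes out first, followed by its
    # two binary expressions, then the columns and bound parameters
    breadth_first = [elem.__class__.__name__ for elem in iterate(expr, {})]

    # depth-first: leaf elements are yielded before their parents, with the
    # root clause list coming out last
    depth_first = [
        elem.__class__.__name__ for elem in iterate_depthfirst(expr, {})
    ]
    return breadth_first, depth_first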
def traverse_using(iterator, obj, visitors):
"""visit the given expression structure using the given iterator of
objects.
:func:`.visitors.traverse_using` is usually called internally as the result
of the :func:`.visitors.traverse` or :func:`.visitors.traverse_depthfirst`
functions.
:param iterator: an iterable or sequence which will yield
:class:`_expression.ClauseElement`
structures; the iterator is assumed to be the
product of the :func:`.visitors.iterate` or
:func:`.visitors.iterate_depthfirst` functions.
:param obj: the :class:`_expression.ClauseElement`
that was used as the target of the
:func:`.iterate` or :func:`.iterate_depthfirst` function.
:param visitors: dictionary of visit functions. See :func:`.traverse`
for details on this dictionary.
.. seealso::
:func:`.traverse`
:func:`.traverse_depthfirst`
"""
for target in iterator:
meth = visitors.get(target.__visit_name__, None)
if meth:
meth(target)
return obj
def traverse(obj, opts, visitors):
"""traverse and visit the given expression structure using the default
iterator.
e.g.::
from sqlalchemy.sql import visitors
stmt = select([some_table]).where(some_table.c.foo == 'bar')
def visit_bindparam(bind_param):
print("found bound value: %s" % bind_param.value)
visitors.traverse(stmt, {}, {"bindparam": visit_bindparam})
The iteration of objects uses the :func:`.visitors.iterate` function,
which performs a breadth-first traversal using a queue.
:param obj: :class:`_expression.ClauseElement` structure to be traversed
:param opts: dictionary of iteration options. This dictionary is usually
empty in modern usage.
:param visitors: dictionary of visit functions. The dictionary should
have strings as keys, each of which would correspond to the
``__visit_name__`` of a particular kind of SQL expression object, and
callable functions as values, each of which represents a visitor function
for that kind of object.
"""
return traverse_using(iterate(obj, opts), obj, visitors)
def traverse_depthfirst(obj, opts, visitors):
"""traverse and visit the given expression structure using the
depth-first iterator.
The iteration of objects uses the :func:`.visitors.iterate_depthfirst`
function, which does a depth-first traversal using a stack.
Usage is the same as that of :func:`.visitors.traverse` function.
"""
return traverse_using(iterate_depthfirst(obj, opts), obj, visitors)
def cloned_traverse(obj, opts, visitors):
"""clone the given expression structure, allowing modifications by
visitors.
Traversal usage is the same as that of :func:`.visitors.traverse`.
The visitor functions present in the ``visitors`` dictionary may also
modify the internals of the given structure as the traversal proceeds.
The central API feature used by the :func:`.visitors.cloned_traverse`
and :func:`.visitors.replacement_traverse` functions, in addition to the
:meth:`_expression.ClauseElement.get_children`
function that is used to achieve
the iteration, is the :meth:`_expression.ClauseElement._copy_internals`
method.
For a :class:`_expression.ClauseElement`
structure to support cloning and replacement
traversals correctly, it needs to be able to pass a cloning function into
its internal members in order to make copies of them.
.. seealso::
:func:`.visitors.traverse`
:func:`.visitors.replacement_traverse`
"""
cloned = {}
stop_on = set(opts.get("stop_on", []))
def clone(elem, **kw):
if elem in stop_on:
return elem
else:
if id(elem) not in cloned:
cloned[id(elem)] = newelem = elem._clone()
newelem._copy_internals(clone=clone, **kw)
meth = visitors.get(newelem.__visit_name__, None)
if meth:
meth(newelem)
return cloned[id(elem)]
if obj is not None:
obj = clone(obj)
clone = None # remove gc cycles
return obj
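# Illustrative sketch, not part of SQLAlchemy: shows cloned_traverse
# mutating the copy while leaving the original expression untouched.  The
# column and values below are hypothetical.
def _example_cloned_traverse():
    from sqlalchemy import column

    original = column("x") == 5

    def visit_bindparam(bind):
        # mutate the *cloned* bind parameter only
        bind.value = 10

    copied = cloned_traverse(original, {}, {"bindparam": visit_bindparam})

    # the original expression still carries the value 5
    return original.right.value, copied.right.value  # (5, 10)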
def replacement_traverse(obj, opts, replace):
"""clone the given expression structure, allowing element
replacement by a given replacement function.
This function is very similar to the :func:`.visitors.cloned_traverse`
function, except instead of being passed a dictionary of visitors, all
elements are unconditionally passed into the given replace function.
The replace function then has the option to return an entirely new object
which will replace the one given. If it returns ``None``, then the object
is kept in place.
The difference in usage between :func:`.visitors.cloned_traverse` and
:func:`.visitors.replacement_traverse` is that in the former case, an
already-cloned object is passed to the visitor function, and the visitor
function can then manipulate the internal state of the object.
In the case of the latter, the visitor function should only return an
entirely different object, or do nothing.
The use case for :func:`.visitors.replacement_traverse` is that of
replacing a FROM clause inside of a SQL structure with a different one,
as is a common use case within the ORM.
"""
cloned = {}
stop_on = {id(x) for x in opts.get("stop_on", [])}
def clone(elem, **kw):
if (
id(elem) in stop_on
or "no_replacement_traverse" in elem._annotations
):
return elem
else:
newelem = replace(elem)
if newelem is not None:
stop_on.add(id(newelem))
return newelem
else:
if elem not in cloned:
cloned[elem] = newelem = elem._clone()
newelem._copy_internals(clone=clone, **kw)
return cloned[elem]
if obj is not None:
obj = clone(obj, **opts)
clone = None # remove gc cycles
return obj
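# Illustrative sketch, not part of SQLAlchemy: swaps one hypothetical
# column for another via replacement_traverse.
def _example_replacement_traverse():
    from sqlalchemy import column

    x, y = column("x"), column("y")
    expr = x == 5

    def replace(elem):
        # return a replacement to swap the element, or None to keep it
        if elem is x:
            return y
        return None

    replaced = replacement_traverse(expr, {}, replace)
    # "replaced" now reads as a comparison against y rather than x
    return replaced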
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/sql/default_comparator.py
|
# sql/default_comparator.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Default implementation of SQL comparison operations.
"""
from . import operators
from . import type_api
from .elements import _clause_element_as_expr
from .elements import _const_expr
from .elements import _is_literal
from .elements import _literal_as_text
from .elements import and_
from .elements import BinaryExpression
from .elements import BindParameter
from .elements import ClauseElement
from .elements import ClauseList
from .elements import collate
from .elements import CollectionAggregate
from .elements import ColumnElement
from .elements import False_
from .elements import Null
from .elements import or_
from .elements import TextClause
from .elements import True_
from .elements import Tuple
from .elements import UnaryExpression
from .elements import Visitable
from .selectable import Alias
from .selectable import ScalarSelect
from .selectable import Selectable
from .selectable import SelectBase
from .. import exc
from .. import util
def _boolean_compare(
expr,
op,
obj,
negate=None,
reverse=False,
_python_is_types=(util.NoneType, bool),
result_type=None,
**kwargs
):
if result_type is None:
result_type = type_api.BOOLEANTYPE
if isinstance(obj, _python_is_types + (Null, True_, False_)):
# allow x ==/!= True/False to be treated as a literal.
# this comes out to "== / != true/false" or "1/0" if those
# constants aren't supported and works on all platforms
if op in (operators.eq, operators.ne) and isinstance(
obj, (bool, True_, False_)
):
return BinaryExpression(
expr,
_literal_as_text(obj),
op,
type_=result_type,
negate=negate,
modifiers=kwargs,
)
elif op in (operators.is_distinct_from, operators.isnot_distinct_from):
return BinaryExpression(
expr,
_literal_as_text(obj),
op,
type_=result_type,
negate=negate,
modifiers=kwargs,
)
else:
# all other None/True/False uses IS, IS NOT
if op in (operators.eq, operators.is_):
return BinaryExpression(
expr,
_const_expr(obj),
operators.is_,
negate=operators.isnot,
type_=result_type,
)
elif op in (operators.ne, operators.isnot):
return BinaryExpression(
expr,
_const_expr(obj),
operators.isnot,
negate=operators.is_,
type_=result_type,
)
else:
raise exc.ArgumentError(
"Only '=', '!=', 'is_()', 'isnot()', "
"'is_distinct_from()', 'isnot_distinct_from()' "
"operators can be used with None/True/False"
)
else:
obj = _check_literal(expr, op, obj)
if reverse:
return BinaryExpression(
obj, expr, op, type_=result_type, negate=negate, modifiers=kwargs
)
else:
return BinaryExpression(
expr, obj, op, type_=result_type, negate=negate, modifiers=kwargs
)
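# Illustrative sketch, not part of SQLAlchemy: shows the None handling
# above, where an equality comparison against None is rewritten as IS NULL.
# The column used here is hypothetical.
def _example_boolean_compare_none():
    from sqlalchemy import column

    expr = _boolean_compare(column("x"), operators.eq, None)
    # str(expr) renders approximately "x IS NULL"; its negation is IS NOT NULL
    return expr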
def _custom_op_operate(expr, op, obj, reverse=False, result_type=None, **kw):
if result_type is None:
if op.return_type:
result_type = op.return_type
elif op.is_comparison:
result_type = type_api.BOOLEANTYPE
return _binary_operate(
expr, op, obj, reverse=reverse, result_type=result_type, **kw
)
def _binary_operate(expr, op, obj, reverse=False, result_type=None, **kw):
obj = _check_literal(expr, op, obj)
if reverse:
left, right = obj, expr
else:
left, right = expr, obj
if result_type is None:
op, result_type = left.comparator._adapt_expression(
op, right.comparator
)
return BinaryExpression(left, right, op, type_=result_type, modifiers=kw)
def _conjunction_operate(expr, op, other, **kw):
if op is operators.and_:
return and_(expr, other)
elif op is operators.or_:
return or_(expr, other)
else:
raise NotImplementedError()
def _scalar(expr, op, fn, **kw):
return fn(expr)
def _in_impl(expr, op, seq_or_selectable, negate_op, **kw):
seq_or_selectable = _clause_element_as_expr(seq_or_selectable)
if isinstance(seq_or_selectable, ScalarSelect):
return _boolean_compare(expr, op, seq_or_selectable, negate=negate_op)
elif isinstance(seq_or_selectable, SelectBase):
# TODO: if we ever want to support (x, y, z) IN (select x,
# y, z from table), we would need a multi-column version of
# as_scalar() to produce a multi-column selectable that
# does not export itself as a FROM clause
return _boolean_compare(
expr, op, seq_or_selectable.as_scalar(), negate=negate_op, **kw
)
elif isinstance(seq_or_selectable, (Selectable, TextClause)):
return _boolean_compare(
expr, op, seq_or_selectable, negate=negate_op, **kw
)
elif isinstance(seq_or_selectable, ClauseElement):
if (
isinstance(seq_or_selectable, BindParameter)
and seq_or_selectable.expanding
):
if isinstance(expr, Tuple):
seq_or_selectable = seq_or_selectable._with_expanding_in_types(
[elem.type for elem in expr]
)
return _boolean_compare(
expr, op, seq_or_selectable, negate=negate_op
)
else:
raise exc.InvalidRequestError(
"in_() accepts"
" either a list of expressions, "
'a selectable, or an "expanding" bound parameter: %r'
% seq_or_selectable
)
# Handle non selectable arguments as sequences
args = []
for o in seq_or_selectable:
if not _is_literal(o):
if not isinstance(o, operators.ColumnOperators):
raise exc.InvalidRequestError(
"in_() accepts"
" either a list of expressions, "
'a selectable, or an "expanding" bound parameter: %r' % o
)
elif o is None:
o = Null()
else:
o = expr._bind_param(op, o)
args.append(o)
if len(args) == 0:
op, negate_op = (
(operators.empty_in_op, operators.empty_notin_op)
if op is operators.in_op
else (operators.empty_notin_op, operators.empty_in_op)
)
return _boolean_compare(
expr,
op,
ClauseList(_tuple_values=isinstance(expr, Tuple), *args).self_group(
against=op
),
negate=negate_op,
)
def _getitem_impl(expr, op, other, **kw):
if isinstance(expr.type, type_api.INDEXABLE):
other = _check_literal(expr, op, other)
return _binary_operate(expr, op, other, **kw)
else:
_unsupported_impl(expr, op, other, **kw)
def _unsupported_impl(expr, op, *arg, **kw):
raise NotImplementedError(
"Operator '%s' is not supported on " "this expression" % op.__name__
)
def _inv_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.__inv__`."""
if hasattr(expr, "negation_clause"):
return expr.negation_clause
else:
return expr._negate()
def _neg_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.__neg__`."""
return UnaryExpression(expr, operator=operators.neg, type_=expr.type)
def _match_impl(expr, op, other, **kw):
"""See :meth:`.ColumnOperators.match`."""
return _boolean_compare(
expr,
operators.match_op,
_check_literal(expr, operators.match_op, other),
result_type=type_api.MATCHTYPE,
negate=operators.notmatch_op
if op is operators.match_op
else operators.match_op,
**kw
)
def _distinct_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.distinct`."""
return UnaryExpression(
expr, operator=operators.distinct_op, type_=expr.type
)
def _between_impl(expr, op, cleft, cright, **kw):
"""See :meth:`.ColumnOperators.between`."""
return BinaryExpression(
expr,
ClauseList(
_check_literal(expr, operators.and_, cleft),
_check_literal(expr, operators.and_, cright),
operator=operators.and_,
group=False,
group_contents=False,
),
op,
negate=operators.notbetween_op
if op is operators.between_op
else operators.between_op,
modifiers=kw,
)
def _collate_impl(expr, op, other, **kw):
return collate(expr, other)
# a mapping of operators with the method they use, along with
# their negated operator for comparison operators
operator_lookup = {
"and_": (_conjunction_operate,),
"or_": (_conjunction_operate,),
"inv": (_inv_impl,),
"add": (_binary_operate,),
"mul": (_binary_operate,),
"sub": (_binary_operate,),
"div": (_binary_operate,),
"mod": (_binary_operate,),
"truediv": (_binary_operate,),
"custom_op": (_custom_op_operate,),
"json_path_getitem_op": (_binary_operate,),
"json_getitem_op": (_binary_operate,),
"concat_op": (_binary_operate,),
"any_op": (_scalar, CollectionAggregate._create_any),
"all_op": (_scalar, CollectionAggregate._create_all),
"lt": (_boolean_compare, operators.ge),
"le": (_boolean_compare, operators.gt),
"ne": (_boolean_compare, operators.eq),
"gt": (_boolean_compare, operators.le),
"ge": (_boolean_compare, operators.lt),
"eq": (_boolean_compare, operators.ne),
"is_distinct_from": (_boolean_compare, operators.isnot_distinct_from),
"isnot_distinct_from": (_boolean_compare, operators.is_distinct_from),
"like_op": (_boolean_compare, operators.notlike_op),
"ilike_op": (_boolean_compare, operators.notilike_op),
"notlike_op": (_boolean_compare, operators.like_op),
"notilike_op": (_boolean_compare, operators.ilike_op),
"contains_op": (_boolean_compare, operators.notcontains_op),
"startswith_op": (_boolean_compare, operators.notstartswith_op),
"endswith_op": (_boolean_compare, operators.notendswith_op),
"desc_op": (_scalar, UnaryExpression._create_desc),
"asc_op": (_scalar, UnaryExpression._create_asc),
"nullsfirst_op": (_scalar, UnaryExpression._create_nullsfirst),
"nullslast_op": (_scalar, UnaryExpression._create_nullslast),
"in_op": (_in_impl, operators.notin_op),
"notin_op": (_in_impl, operators.in_op),
"is_": (_boolean_compare, operators.is_),
"isnot": (_boolean_compare, operators.isnot),
"collate": (_collate_impl,),
"match_op": (_match_impl,),
"notmatch_op": (_match_impl,),
"distinct_op": (_distinct_impl,),
"between_op": (_between_impl,),
"notbetween_op": (_between_impl,),
"neg": (_neg_impl,),
"getitem": (_getitem_impl,),
"lshift": (_unsupported_impl,),
"rshift": (_unsupported_impl,),
"contains": (_unsupported_impl,),
}
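# Illustrative sketch, not part of SQLAlchemy: shows, approximately, how a
# comparison operator is routed through operator_lookup.  The column and
# value used here are hypothetical.
def _example_operator_lookup_dispatch():
    from sqlalchemy import column

    expr = column("x")

    # for "eq" the entry is (_boolean_compare, operators.ne); the second
    # element is supplied as the negation operator
    impl, negate = operator_lookup[operators.eq.__name__]

    # roughly what ``column("x") == 5`` produces
    return impl(expr, operators.eq, 5, negate)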
def _check_literal(expr, operator, other, bindparam_type=None):
if isinstance(other, (ColumnElement, TextClause)):
if isinstance(other, BindParameter) and other.type._isnull:
other = other._clone()
other.type = expr.type
return other
elif hasattr(other, "__clause_element__"):
other = other.__clause_element__()
elif isinstance(other, type_api.TypeEngine.Comparator):
other = other.expr
if isinstance(other, (SelectBase, Alias)):
return other.as_scalar()
elif not isinstance(other, Visitable):
return expr._bind_param(operator, other, type_=bindparam_type)
else:
return other
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/sql/elements.py
|
# sql/elements.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core SQL expression elements, including :class:`_expression.ClauseElement`,
:class:`_expression.ColumnElement`, and derived classes.
"""
from __future__ import unicode_literals
import itertools
import numbers
import operator
import re
from . import operators
from . import type_api
from .annotation import Annotated
from .base import _generative
from .base import Executable
from .base import Immutable
from .base import NO_ARG
from .base import PARSE_AUTOCOMMIT
from .visitors import cloned_traverse
from .visitors import traverse
from .visitors import Visitable
from .. import exc
from .. import inspection
from .. import util
def _clone(element, **kw):
return element._clone()
def _document_text_coercion(paramname, meth_rst, param_rst):
return util.add_parameter_text(
paramname,
(
".. warning:: "
"The %s argument to %s can be passed as a Python string argument, "
"which will be treated "
"as **trusted SQL text** and rendered as given. **DO NOT PASS "
"UNTRUSTED INPUT TO THIS PARAMETER**."
)
% (param_rst, meth_rst),
)
def collate(expression, collation):
"""Return the clause ``expression COLLATE collation``.
e.g.::
collate(mycolumn, 'utf8_bin')
produces::
mycolumn COLLATE utf8_bin
The collation expression is also quoted if it is a case sensitive
identifier, e.g. contains uppercase characters.
.. versionchanged:: 1.2 quoting is automatically applied to COLLATE
expressions if they are case sensitive.
"""
expr = _literal_as_binds(expression)
return BinaryExpression(
expr, CollationClause(collation), operators.collate, type_=expr.type
)
def between(expr, lower_bound, upper_bound, symmetric=False):
"""Produce a ``BETWEEN`` predicate clause.
E.g.::
from sqlalchemy import between
stmt = select([users_table]).where(between(users_table.c.id, 5, 7))
Would produce SQL resembling::
SELECT id, name FROM user WHERE id BETWEEN :id_1 AND :id_2
The :func:`.between` function is a standalone version of the
:meth:`_expression.ColumnElement.between` method available on all
SQL expressions, as in::
stmt = select([users_table]).where(users_table.c.id.between(5, 7))
All arguments passed to :func:`.between`, including the left side
column expression, are coerced from Python scalar values if the
value is not a :class:`_expression.ColumnElement` subclass.
For example,
three fixed values can be compared as in::
print(between(5, 3, 7))
Which would produce::
:param_1 BETWEEN :param_2 AND :param_3
:param expr: a column expression, typically a
:class:`_expression.ColumnElement`
instance or alternatively a Python scalar expression to be coerced
into a column expression, serving as the left side of the ``BETWEEN``
expression.
:param lower_bound: a column or Python scalar expression serving as the
lower bound of the right side of the ``BETWEEN`` expression.
:param upper_bound: a column or Python scalar expression serving as the
upper bound of the right side of the ``BETWEEN`` expression.
:param symmetric: if True, will render " BETWEEN SYMMETRIC ". Note
that not all databases support this syntax.
.. versionadded:: 0.9.5
.. seealso::
:meth:`_expression.ColumnElement.between`
"""
expr = _literal_as_binds(expr)
return expr.between(lower_bound, upper_bound, symmetric=symmetric)
def literal(value, type_=None):
r"""Return a literal clause, bound to a bind parameter.
Literal clauses are created automatically when non-
:class:`_expression.ClauseElement` objects (such as strings, ints, dates,
etc.) are
used in a comparison operation with a :class:`_expression.ColumnElement`
subclass,
such as a :class:`~sqlalchemy.schema.Column` object. Use this function
to force the generation of a literal clause, which will be created as a
:class:`BindParameter` with a bound value.
:param value: the value to be bound. Can be any Python object supported by
the underlying DB-API, or is translatable via the given type argument.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which
will provide bind-parameter translation for this literal.
"""
return BindParameter(None, value, type_=type_, unique=True)
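# Illustrative sketch, not part of SQLAlchemy: contrasts literal() with
# plain Python values.  The strings used here are hypothetical.
def _example_literal_usage():
    from sqlalchemy import String

    # force "some string" to be rendered as a bound parameter with an
    # explicit String type, rather than relying on type inference
    bound = literal("some string", type_=String)

    # composing it with another literal yields a SQL concatenation such as
    # :param_1 || :param_2 (the exact operator depends on the dialect)
    return bound + literal(" and more")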
def outparam(key, type_=None):
"""Create an 'OUT' parameter for usage in functions (stored procedures),
for databases which support them.
The ``outparam`` can be used like a regular function parameter.
The "output" value will be available from the
:class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters``
attribute, which returns a dictionary containing the values.
"""
return BindParameter(key, None, type_=type_, unique=False, isoutparam=True)
def not_(clause):
"""Return a negation of the given clause, i.e. ``NOT(clause)``.
The ``~`` operator is also overloaded on all
:class:`_expression.ColumnElement` subclasses to produce the
same result.
"""
return operators.inv(_literal_as_binds(clause))
@inspection._self_inspects
class ClauseElement(Visitable):
"""Base class for elements of a programmatically constructed SQL
expression.
"""
__visit_name__ = "clause"
_annotations = {}
supports_execution = False
_from_objects = []
bind = None
_is_clone_of = None
is_selectable = False
is_clause_element = True
description = None
_order_by_label_element = None
_is_from_container = False
def _clone(self):
"""Create a shallow copy of this ClauseElement.
This method may be used by a generative API. It's also used as
part of the "deep" copy afforded by a traversal combined with
the _copy_internals() method.
"""
c = self.__class__.__new__(self.__class__)
c.__dict__ = self.__dict__.copy()
ClauseElement._cloned_set._reset(c)
ColumnElement.comparator._reset(c)
# this is a marker that helps to "equate" clauses to each other
# when a Select returns its list of FROM clauses. the cloning
# process leaves around a lot of remnants of the previous clause
# typically in the form of column expressions still attached to the
# old table.
c._is_clone_of = self
return c
@property
def _constructor(self):
"""return the 'constructor' for this ClauseElement.
This is for the purpose of creating a new object of
this type. Usually, it's just the element's __class__.
However, the "Annotated" version of the object overrides
to return the class of its proxied element.
"""
return self.__class__
@util.memoized_property
def _cloned_set(self):
"""Return the set consisting of all cloned ancestors of this
ClauseElement.
Includes this ClauseElement. This accessor tends to be used for
FromClause objects to identify 'equivalent' FROM clauses, regardless
of transformative operations.
"""
s = util.column_set()
f = self
# note this creates a cycle, asserted in test_memusage. however,
# turning this into a plain @property adds tens of thousands of method
# calls to Core / ORM performance tests, so the small overhead
# introduced by the relatively small amount of short term cycles
# produced here is preferable
while f is not None:
s.add(f)
f = f._is_clone_of
return s
def __getstate__(self):
d = self.__dict__.copy()
d.pop("_is_clone_of", None)
return d
def _annotate(self, values):
"""return a copy of this ClauseElement with annotations
updated by the given dictionary.
"""
return Annotated(self, values)
def _with_annotations(self, values):
"""return a copy of this ClauseElement with annotations
replaced by the given dictionary.
"""
return Annotated(self, values)
def _deannotate(self, values=None, clone=False):
"""return a copy of this :class:`_expression.ClauseElement`
with annotations
removed.
:param values: optional tuple of individual values
to remove.
"""
if clone:
# clone is used when we are also copying
# the expression for a deep deannotation
return self._clone()
else:
# if no clone, since we have no annotations we return
# self
return self
def _execute_on_connection(self, connection, multiparams, params):
if self.supports_execution:
return connection._execute_clauseelement(self, multiparams, params)
else:
raise exc.ObjectNotExecutableError(self)
def unique_params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elements replaced.
Same functionality as ``params()``, except adds `unique=True`
to affected bind parameters so that multiple statements can be
used.
"""
return self._params(True, optionaldict, kwargs)
def params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elements replaced.
Returns a copy of this ClauseElement with :func:`bindparam()`
elements replaced with values taken from the given dictionary::
>>> clause = column('x') + bindparam('foo')
>>> print(clause.compile().params)
{'foo':None}
>>> print(clause.params({'foo':7}).compile().params)
{'foo':7}
"""
return self._params(False, optionaldict, kwargs)
def _params(self, unique, optionaldict, kwargs):
if len(optionaldict) == 1:
kwargs.update(optionaldict[0])
elif len(optionaldict) > 1:
raise exc.ArgumentError(
"params() takes zero or one positional dictionary argument"
)
def visit_bindparam(bind):
if bind.key in kwargs:
bind.value = kwargs[bind.key]
bind.required = False
if unique:
bind._convert_to_unique()
return cloned_traverse(self, {}, {"bindparam": visit_bindparam})
def compare(self, other, **kw):
r"""Compare this ClauseElement to the given ClauseElement.
Subclasses should override the default behavior, which is a
straight identity comparison.
\**kw are arguments consumed by subclass compare() methods and
may be used to modify the criteria for comparison.
(see :class:`_expression.ColumnElement`)
"""
return self is other
def _copy_internals(self, clone=_clone, **kw):
"""Reassign internal elements to be clones of themselves.
Called during a copy-and-traverse operation on newly
shallow-copied elements to create a deep copy.
The given clone function should be used, which may be applying
additional transformations to the element (i.e. replacement
traversal, cloned traversal, annotations).
"""
pass
def get_children(self, **kwargs):
r"""Return immediate child elements of this
:class:`_expression.ClauseElement`.
This is used for visit traversal.
\**kwargs may contain flags that change the collection that is
returned, for example to return a subset of items in order to
cut down on larger traversals, or to return child items from a
different context (such as schema-level collections instead of
clause-level).
"""
return []
def self_group(self, against=None):
"""Apply a 'grouping' to this :class:`_expression.ClauseElement`.
This method is overridden by subclasses to return a
"grouping" construct, i.e. parenthesis. In particular
it's used by "binary" expressions to provide a grouping
around themselves when placed into a larger expression,
as well as by :func:`_expression.select` constructs when placed into
the FROM clause of another :func:`_expression.select`. (Note that
subqueries should normally be created using the
:meth:`_expression.Select.alias` method, as many platforms require
nested SELECT statements to be named).
As expressions are composed together, the application of
:meth:`self_group` is automatic - end-user code should never
need to use this method directly. Note that SQLAlchemy's
clause constructs take operator precedence into account -
so parenthesis might not be needed, for example, in
an expression like ``x OR (y AND z)`` - AND takes precedence
over OR.
The base :meth:`self_group` method of
:class:`_expression.ClauseElement`
just returns self.
"""
return self
@util.dependencies("sqlalchemy.engine.default")
def compile(self, default, bind=None, dialect=None, **kw):
"""Compile this SQL expression.
The return value is a :class:`~.Compiled` object.
Calling ``str()`` or ``unicode()`` on the returned value will yield a
string representation of the result. The
:class:`~.Compiled` object also can return a
dictionary of bind parameter names and values
using the ``params`` accessor.
:param bind: An ``Engine`` or ``Connection`` from which a
``Compiled`` will be acquired. This argument takes precedence over
this :class:`_expression.ClauseElement`'s bound engine, if any.
:param column_keys: Used for INSERT and UPDATE statements, a list of
column names which should be present in the VALUES clause of the
compiled statement. If ``None``, all columns from the target table
object are rendered.
:param dialect: A ``Dialect`` instance from which a ``Compiled``
will be acquired. This argument takes precedence over the `bind`
argument as well as this :class:`_expression.ClauseElement`
's bound engine,
if any.
:param inline: Used for INSERT statements, for a dialect which does
not support inline retrieval of newly generated primary key
columns, will force the expression used to create the new primary
key value to be rendered inline within the INSERT statement's
VALUES clause. This typically refers to Sequence execution but may
also refer to any server-side default generation function
associated with a primary key `Column`.
:param compile_kwargs: optional dictionary of additional parameters
that will be passed through to the compiler within all "visit"
methods. This allows any custom flag to be passed through to
a custom compilation construct, for example. It is also used
for the case of passing the ``literal_binds`` flag through::
from sqlalchemy.sql import table, column, select
t = table('t', column('x'))
s = select([t]).where(t.c.x == 5)
print(s.compile(compile_kwargs={"literal_binds": True}))
.. versionadded:: 0.9.0
.. seealso::
:ref:`faq_sql_expression_string`
"""
if not dialect:
if bind:
dialect = bind.dialect
elif self.bind:
dialect = self.bind.dialect
bind = self.bind
else:
dialect = default.StrCompileDialect()
return self._compiler(dialect, bind=bind, **kw)
def _compiler(self, dialect, **kw):
"""Return a compiler appropriate for this ClauseElement, given a
Dialect."""
return dialect.statement_compiler(dialect, self, **kw)
def __str__(self):
if util.py3k:
return str(self.compile())
else:
return unicode(self.compile()).encode( # noqa
"ascii", "backslashreplace"
) # noqa
@util.deprecated(
"0.9",
"The :meth:`_expression.ClauseElement.__and__` "
"method is deprecated and will "
"be removed in a future release. Conjunctions should only be "
"used from a :class:`_expression.ColumnElement` subclass, e.g. "
":meth:`_expression.ColumnElement.__and__`.",
)
def __and__(self, other):
"""'and' at the ClauseElement level.
"""
return and_(self, other)
@util.deprecated(
"0.9",
"The :meth:`_expression.ClauseElement.__or__` "
"method is deprecated and will "
"be removed in a future release. Conjunctions should only be "
"used from a :class:`_expression.ColumnElement` subclass, e.g. "
":meth:`_expression.ColumnElement.__or__`.",
)
def __or__(self, other):
"""'or' at the ClauseElement level.
"""
return or_(self, other)
def __invert__(self):
if hasattr(self, "negation_clause"):
return self.negation_clause
else:
return self._negate()
def _negate(self):
return UnaryExpression(
self.self_group(against=operators.inv),
operator=operators.inv,
negate=None,
)
def __bool__(self):
raise TypeError("Boolean value of this clause is not defined")
__nonzero__ = __bool__
def __repr__(self):
friendly = self.description
if friendly is None:
return object.__repr__(self)
else:
return "<%s.%s at 0x%x; %s>" % (
self.__module__,
self.__class__.__name__,
id(self),
friendly,
)
class ColumnElement(operators.ColumnOperators, ClauseElement):
"""Represent a column-oriented SQL expression suitable for usage in the
"columns" clause, WHERE clause etc. of a statement.
While the most familiar kind of :class:`_expression.ColumnElement` is the
:class:`_schema.Column` object, :class:`_expression.ColumnElement`
serves as the basis
for any unit that may be present in a SQL expression, including
the expressions themselves, SQL functions, bound parameters,
literal expressions, keywords such as ``NULL``, etc.
:class:`_expression.ColumnElement`
is the ultimate base class for all such elements.
A wide variety of SQLAlchemy Core functions work at the SQL expression
level, and are intended to accept instances of
:class:`_expression.ColumnElement` as
arguments. These functions will typically document that they accept a
"SQL expression" as an argument. What this means in terms of SQLAlchemy
usually refers to an input which is either already in the form of a
:class:`_expression.ColumnElement` object,
or a value which can be **coerced** into
one. The coercion rules followed by most, but not all, SQLAlchemy Core
functions with regards to SQL expressions are as follows:
* a literal Python value, such as a string, integer or floating
point value, boolean, datetime, ``Decimal`` object, or virtually
any other Python object, will be coerced into a "literal bound
value". This generally means that a :func:`.bindparam` will be
produced featuring the given value embedded into the construct; the
resulting :class:`.BindParameter` object is an instance of
:class:`_expression.ColumnElement`.
The Python value will ultimately be sent
to the DBAPI at execution time as a parameterized argument to the
``execute()`` or ``executemany()`` methods, after SQLAlchemy
type-specific converters (e.g. those provided by any associated
:class:`.TypeEngine` objects) are applied to the value.
* any special object value, typically ORM-level constructs, which
feature a method called ``__clause_element__()``. The Core
expression system looks for this method when an object of otherwise
unknown type is passed to a function that is looking to coerce the
argument into a :class:`_expression.ColumnElement` expression. The
``__clause_element__()`` method, if present, should return a
:class:`_expression.ColumnElement` instance. The primary use of
``__clause_element__()`` within SQLAlchemy is that of class-bound
attributes on ORM-mapped classes; a ``User`` class which contains a
mapped attribute named ``.name`` will have a method
``User.name.__clause_element__()`` which when invoked returns the
:class:`_schema.Column`
called ``name`` associated with the mapped table.
* The Python ``None`` value is typically interpreted as ``NULL``,
which in SQLAlchemy Core produces an instance of :func:`.null`.
A :class:`_expression.ColumnElement` provides the ability to generate new
:class:`_expression.ColumnElement`
objects using Python expressions. This means that Python operators
such as ``==``, ``!=`` and ``<`` are overloaded to mimic SQL operations,
and allow the instantiation of further :class:`_expression.ColumnElement`
instances
which are composed from other, more fundamental
:class:`_expression.ColumnElement`
objects. For example, two :class:`.ColumnClause` objects can be added
together with the addition operator ``+`` to produce
a :class:`.BinaryExpression`.
Both :class:`.ColumnClause` and :class:`.BinaryExpression` are subclasses
of :class:`_expression.ColumnElement`::
>>> from sqlalchemy.sql import column
>>> column('a') + column('b')
<sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
>>> print(column('a') + column('b'))
a + b
.. seealso::
:class:`_schema.Column`
:func:`_expression.column`
"""
__visit_name__ = "column_element"
primary_key = False
foreign_keys = []
_proxies = ()
_label = None
"""The named label that can be used to target
this column in a result set.
This label is almost always the label used when
rendering <expr> AS <label> in a SELECT statement. It also
refers to a name that this column expression can be located from
in a result set.
For a regular Column bound to a Table, this is typically the label
<tablename>_<columnname>. For other constructs, different rules
may apply, such as anonymized labels and others.
"""
key = None
"""the 'key' that in some circumstances refers to this object in a
Python namespace.
This typically refers to the "key" of the column as present in the
``.c`` collection of a selectable, e.g. sometable.c["somekey"] would
return a Column with a .key of "somekey".
"""
_key_label = None
"""A label-based version of 'key' that in some circumstances refers
to this object in a Python namespace.
_key_label comes into play when a select() statement is constructed with
apply_labels(); in this case, all Column objects in the ``.c`` collection
are rendered as <tablename>_<columnname> in SQL; this is essentially the
value of ._label. But to locate those columns in the ``.c`` collection,
the name is along the lines of <tablename>_<key>; that's the typical
value of ._key_label.
"""
_render_label_in_columns_clause = True
"""A flag used by select._columns_plus_names that helps to determine
whether we are actually going to render in terms of "SELECT <col> AS <label>".
This flag can be returned as False for some Column objects that want
to be rendered as simple "SELECT <col>"; typically columns that don't have
any parent table and are named the same as what the label would be
in any case.
"""
_resolve_label = None
"""The name that should be used to identify this ColumnElement in a
select() object when "label resolution" logic is used; this refers
to using a string name in an expression like order_by() or group_by()
that wishes to target a labeled expression in the columns clause.
The name is distinct from that of .name or ._label to account for the case
where anonymizing logic may be used to change the name that's actually
rendered at compile time; this attribute should hold onto the original
name that was user-assigned when producing a .label() construct.
"""
_allow_label_resolve = True
"""A flag that can be flipped to prevent a column from being resolvable
by string label name."""
_is_implicitly_boolean = False
_alt_names = ()
def self_group(self, against=None):
if (
against in (operators.and_, operators.or_, operators._asbool)
and self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity
):
return AsBoolean(self, operators.istrue, operators.isfalse)
elif against in (operators.any_op, operators.all_op):
return Grouping(self)
else:
return self
def _negate(self):
if self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity:
return AsBoolean(self, operators.isfalse, operators.istrue)
else:
return super(ColumnElement, self)._negate()
@util.memoized_property
def type(self):
return type_api.NULLTYPE
@util.memoized_property
def comparator(self):
try:
comparator_factory = self.type.comparator_factory
except AttributeError as err:
util.raise_(
TypeError(
"Object %r associated with '.type' attribute "
"is not a TypeEngine class or object" % self.type
),
replace_context=err,
)
else:
return comparator_factory(self)
def __getattr__(self, key):
try:
return getattr(self.comparator, key)
except AttributeError as err:
util.raise_(
AttributeError(
"Neither %r object nor %r object has an attribute %r"
% (
type(self).__name__,
type(self.comparator).__name__,
key,
)
),
replace_context=err,
)
def operate(self, op, *other, **kwargs):
return op(self.comparator, *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
return op(other, self.comparator, **kwargs)
def _bind_param(self, operator, obj, type_=None):
return BindParameter(
None,
obj,
_compared_to_operator=operator,
type_=type_,
_compared_to_type=self.type,
unique=True,
)
@property
def expression(self):
"""Return a column expression.
Part of the inspection interface; returns self.
"""
return self
@property
def _select_iterable(self):
return (self,)
@util.memoized_property
def base_columns(self):
return util.column_set(c for c in self.proxy_set if not c._proxies)
@util.memoized_property
def proxy_set(self):
s = util.column_set([self])
for c in self._proxies:
s.update(c.proxy_set)
return s
def _uncached_proxy_set(self):
"""An 'uncached' version of proxy set.
This is so that we can read annotations from the list of columns
without breaking the caching of the above proxy_set.
"""
s = util.column_set([self])
for c in self._proxies:
s.update(c._uncached_proxy_set())
return s
def shares_lineage(self, othercolumn):
"""Return True if the given :class:`_expression.ColumnElement`
has a common ancestor to this :class:`_expression.ColumnElement`."""
return bool(self.proxy_set.intersection(othercolumn.proxy_set))
def _compare_name_for_result(self, other):
"""Return True if the given column element compares to this one
when targeting within a result row."""
return (
hasattr(other, "name")
and hasattr(self, "name")
and other.name == self.name
)
def _make_proxy(
self, selectable, name=None, name_is_truncatable=False, **kw
):
"""Create a new :class:`_expression.ColumnElement` representing this
:class:`_expression.ColumnElement`
as it appears in the select list of a
descending selectable.
"""
if name is None:
name = self.anon_label
if self.key:
key = self.key
else:
try:
key = str(self)
except exc.UnsupportedCompilationError:
key = self.anon_label
else:
key = name
co = ColumnClause(
_as_truncated(name) if name_is_truncatable else name,
type_=getattr(self, "type", None),
_selectable=selectable,
)
co._proxies = [self]
if selectable._is_clone_of is not None:
co._is_clone_of = selectable._is_clone_of.columns.get(key)
selectable._columns[key] = co
return co
def compare(self, other, use_proxies=False, equivalents=None, **kw):
"""Compare this ColumnElement to another.
Special arguments understood:
:param use_proxies: when True, consider two columns that
share a common base column as equivalent (i.e. shares_lineage())
:param equivalents: a dictionary of columns as keys mapped to sets
of columns. If the given "other" column is present in this
dictionary, if any of the columns in the corresponding set() pass
the comparison test, the result is True. This is used to expand the
comparison to other columns that may be known to be equivalent to
this one via foreign key or other criterion.
"""
to_compare = (other,)
if equivalents and other in equivalents:
to_compare = equivalents[other].union(to_compare)
for oth in to_compare:
if use_proxies and self.shares_lineage(oth):
return True
elif hash(oth) == hash(self):
return True
else:
return False
def cast(self, type_):
"""Produce a type cast, i.e. ``CAST(<expression> AS <type>)``.
This is a shortcut to the :func:`_expression.cast` function.
.. seealso::
:ref:`coretutorial_casts`
:func:`_expression.cast`
:func:`_expression.type_coerce`
.. versionadded:: 1.0.7
"""
return Cast(self, type_)
def label(self, name):
"""Produce a column label, i.e. ``<columnname> AS <name>``.
This is a shortcut to the :func:`_expression.label` function.
if 'name' is None, an anonymous label name will be generated.
"""
return Label(name, self, self.type)
@util.memoized_property
def anon_label(self):
"""provides a constant 'anonymous label' for this ColumnElement.
This is a label() expression which will be named at compile time.
The same label() is returned each time anon_label is called so
that expressions can reference anon_label multiple times, producing
the same label name at compile time.
the compiler uses this function automatically at compile time
for expressions that are known to be 'unnamed' like binary
expressions and function calls.
"""
while self._is_clone_of is not None:
self = self._is_clone_of
return _anonymous_label(
"%%(%d %s)s" % (id(self), getattr(self, "name", "anon"))
)
class BindParameter(ColumnElement):
r"""Represent a "bound expression".
:class:`.BindParameter` is invoked explicitly using the
:func:`.bindparam` function, as in::
from sqlalchemy import bindparam
stmt = select([users_table]).\
where(users_table.c.name == bindparam('username'))
Detailed discussion of how :class:`.BindParameter` is used is
at :func:`.bindparam`.
.. seealso::
:func:`.bindparam`
"""
__visit_name__ = "bindparam"
_is_crud = False
_expanding_in_types = ()
def __init__(
self,
key,
value=NO_ARG,
type_=None,
unique=False,
required=NO_ARG,
quote=None,
callable_=None,
expanding=False,
isoutparam=False,
_compared_to_operator=None,
_compared_to_type=None,
):
r"""Produce a "bound expression".
The return value is an instance of :class:`.BindParameter`; this
is a :class:`_expression.ColumnElement`
subclass which represents a so-called
"placeholder" value in a SQL expression, the value of which is
supplied at the point at which the statement is executed against a
database connection.
In SQLAlchemy, the :func:`.bindparam` construct has
the ability to carry along the actual value that will be ultimately
used at expression time. In this way, it serves not just as
a "placeholder" for eventual population, but also as a means of
representing so-called "unsafe" values which should not be rendered
directly in a SQL statement, but rather should be passed along
to the :term:`DBAPI` as values which need to be correctly escaped
and potentially handled for type-safety.
When using :func:`.bindparam` explicitly, the use case is typically
one of traditional deferment of parameters; the :func:`.bindparam`
construct accepts a name which can then be referred to at execution
time::
from sqlalchemy import bindparam
stmt = select([users_table]).\
where(users_table.c.name == bindparam('username'))
The above statement, when rendered, will produce SQL similar to::
SELECT id, name FROM user WHERE name = :username
In order to populate the value of ``:username`` above, the value
would typically be applied at execution time to a method
like :meth:`_engine.Connection.execute`::
result = connection.execute(stmt, username='wendy')
Explicit use of :func:`.bindparam` is also common when producing
UPDATE or DELETE statements that are to be invoked multiple times,
where the WHERE criterion of the statement is to change on each
invocation, such as::
stmt = (users_table.update().
where(user_table.c.name == bindparam('username')).
values(fullname=bindparam('fullname'))
)
connection.execute(
stmt, [{"username": "wendy", "fullname": "Wendy Smith"},
{"username": "jack", "fullname": "Jack Jones"},
]
)
SQLAlchemy's Core expression system makes wide use of
:func:`.bindparam` in an implicit sense. It is typical that Python
literal values passed to virtually all SQL expression functions are
coerced into fixed :func:`.bindparam` constructs. For example, given
a comparison operation such as::
expr = users_table.c.name == 'Wendy'
The above expression will produce a :class:`.BinaryExpression`
construct, where the left side is the :class:`_schema.Column` object
representing the ``name`` column, and the right side is a
:class:`.BindParameter` representing the literal value::
print(repr(expr.right))
BindParameter('%(4327771088 name)s', 'Wendy', type_=String())
The expression above will render SQL such as::
user.name = :name_1
Where the ``:name_1`` parameter name is an anonymous name. The
actual string ``Wendy`` is not in the rendered string, but is carried
along where it is later used within statement execution. If we
invoke a statement like the following::
stmt = select([users_table]).where(users_table.c.name == 'Wendy')
result = connection.execute(stmt)
We would see SQL logging output as::
SELECT "user".id, "user".name
FROM "user"
WHERE "user".name = %(name_1)s
{'name_1': 'Wendy'}
Above, we see that ``Wendy`` is passed as a parameter to the database,
while the placeholder ``:name_1`` is rendered in the appropriate form
for the target database, in this case the PostgreSQL database.
Similarly, :func:`.bindparam` is invoked automatically
when working with :term:`CRUD` statements as far as the "VALUES"
portion is concerned. The :func:`_expression.insert`
construct produces an
``INSERT`` expression which will, at statement execution time,
generate bound placeholders based on the arguments passed, as in::
stmt = users_table.insert()
result = connection.execute(stmt, name='Wendy')
The above will produce SQL output as::
INSERT INTO "user" (name) VALUES (%(name)s)
{'name': 'Wendy'}
The :class:`_expression.Insert` construct,
at compilation/execution time,
rendered a single :func:`.bindparam` mirroring the column
name ``name`` as a result of the single ``name`` parameter
we passed to the :meth:`_engine.Connection.execute` method.
:param key:
the key (e.g. the name) for this bind param.
Will be used in the generated
SQL statement for dialects that use named parameters. This
value may be modified when part of a compilation operation,
if other :class:`BindParameter` objects exist with the same
key, or if its length is too long and truncation is
required.
:param value:
Initial value for this bind param. Will be used at statement
execution time as the value for this parameter passed to the
DBAPI, if no other value is indicated to the statement execution
method for this particular parameter name. Defaults to ``None``.
:param callable\_:
A callable function that takes the place of "value". The function
will be called at statement execution time to determine the
ultimate value. Used for scenarios where the actual bind
value cannot be determined at the point at which the clause
construct is created, but embedded bind values are still desirable.
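          For example, a minimal sketch in which the bind value is resolved
          only when the statement executes; ``datetime.utcnow`` here is just
          an illustrative callable::
              from datetime import datetime
              from sqlalchemy import bindparam
              created_param = bindparam('created_at', callable_=datetime.utcnow)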
:param type\_:
A :class:`.TypeEngine` class or instance representing an optional
datatype for this :func:`.bindparam`. If not passed, a type
may be determined automatically for the bind, based on the given
value; for example, trivial Python types such as ``str``,
``int``, ``bool``
may result in the :class:`.String`, :class:`.Integer` or
:class:`.Boolean` types being automatically selected.
The type of a :func:`.bindparam` is significant especially in that
the type will apply pre-processing to the value before it is
passed to the database. For example, a :func:`.bindparam` which
refers to a datetime value, and is specified as holding the
:class:`.DateTime` type, may apply conversion needed to the
value (such as stringification on SQLite) before passing the value
to the database.
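          For example, a minimal sketch associating the :class:`.DateTime`
          type with a bound parameter, so that date coercion is applied
          before the value reaches the DBAPI::
              from sqlalchemy import bindparam, DateTime
              ts_param = bindparam('ts', type_=DateTime)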
:param unique:
if True, the key name of this :class:`.BindParameter` will be
modified if another :class:`.BindParameter` of the same name
already has been located within the containing
expression. This flag is used generally by the internals
          when producing so-called "anonymous" bound expressions; it
isn't generally applicable to explicitly-named :func:`.bindparam`
constructs.
:param required:
If ``True``, a value is required at execution time. If not passed,
it defaults to ``True`` if neither :paramref:`.bindparam.value`
          nor :paramref:`.bindparam.callable` was passed. If either of these
          parameters is present, then :paramref:`.bindparam.required`
defaults to ``False``.
:param quote:
True if this parameter name requires quoting and is not
currently known as a SQLAlchemy reserved word; this currently
only applies to the Oracle backend, where bound names must
sometimes be quoted.
:param isoutparam:
if True, the parameter should be treated like a stored procedure
"OUT" parameter. This applies to backends such as Oracle which
support OUT parameters.
:param expanding:
if True, this parameter will be treated as an "expanding" parameter
at execution time; the parameter value is expected to be a sequence,
rather than a scalar value, and the string SQL statement will
be transformed on a per-execution basis to accommodate the sequence
with a variable number of parameter slots passed to the DBAPI.
This is to allow statement caching to be used in conjunction with
an IN clause.
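          For example, a minimal sketch of an "expanding" IN parameter,
          assuming the same ``users_table`` :class:`_schema.Table` used in
          the examples above::
              from sqlalchemy import bindparam, select
              stmt = select([users_table]).where(
                  users_table.c.id.in_(bindparam('ids', expanding=True))
              )
              result = connection.execute(stmt, {"ids": [1, 2, 3]})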
.. seealso::
:meth:`.ColumnOperators.in_`
:ref:`baked_in` - with baked queries
.. note:: The "expanding" feature does not support "executemany"-
style parameter sets.
.. versionadded:: 1.2
.. versionchanged:: 1.3 the "expanding" bound parameter feature now
supports empty lists.
.. seealso::
:ref:`coretutorial_bind_param`
:ref:`coretutorial_insert_expressions`
:func:`.outparam`
"""
if isinstance(key, ColumnClause):
type_ = key.type
key = key.key
if required is NO_ARG:
required = value is NO_ARG and callable_ is None
if value is NO_ARG:
value = None
if quote is not None:
key = quoted_name(key, quote)
if unique:
self.key = _anonymous_label(
"%%(%d %s)s"
% (
id(self),
re.sub(r"[%\(\) \$]+", "_", key).strip("_")
if key is not None
else "param",
)
)
else:
self.key = key or _anonymous_label("%%(%d param)s" % id(self))
# identifying key that won't change across
# clones, used to identify the bind's logical
# identity
self._identifying_key = self.key
# key that was passed in the first place, used to
# generate new keys
self._orig_key = key or "param"
self.unique = unique
self.value = value
self.callable = callable_
self.isoutparam = isoutparam
self.required = required
self.expanding = expanding
if type_ is None:
if _compared_to_type is not None:
self.type = _compared_to_type.coerce_compared_value(
_compared_to_operator, value
)
else:
self.type = type_api._resolve_value_to_type(value)
elif isinstance(type_, type):
self.type = type_()
else:
self.type = type_
def _with_expanding_in_types(self, types):
"""Return a copy of this :class:`.BindParameter` in
the context of an expanding IN against a tuple.
"""
cloned = self._clone()
cloned._expanding_in_types = types
return cloned
def _with_value(self, value):
"""Return a copy of this :class:`.BindParameter` with the given value
set.
"""
cloned = self._clone()
cloned.value = value
cloned.callable = None
cloned.required = False
if cloned.type is type_api.NULLTYPE:
cloned.type = type_api._resolve_value_to_type(value)
return cloned
@property
def effective_value(self):
"""Return the value of this bound parameter,
taking into account if the ``callable`` parameter
was set.
The ``callable`` value will be evaluated
and returned if present, else ``value``.
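        E.g., a minimal sketch::
            from sqlalchemy import bindparam
            p = bindparam('x', callable_=lambda: 42)
            assert p.effective_value == 42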
"""
if self.callable:
return self.callable()
else:
return self.value
def _clone(self):
c = ClauseElement._clone(self)
if self.unique:
c.key = _anonymous_label(
"%%(%d %s)s" % (id(c), c._orig_key or "param")
)
return c
def _convert_to_unique(self):
if not self.unique:
self.unique = True
self.key = _anonymous_label(
"%%(%d %s)s" % (id(self), self._orig_key or "param")
)
def compare(self, other, **kw):
"""Compare this :class:`BindParameter` to the given
clause."""
return (
isinstance(other, BindParameter)
and self.type._compare_type_affinity(other.type)
and self.value == other.value
and self.callable == other.callable
)
def __getstate__(self):
"""execute a deferred value for serialization purposes."""
d = self.__dict__.copy()
v = self.value
if self.callable:
v = self.callable()
d["callable"] = None
d["value"] = v
return d
def __setstate__(self, state):
if state.get("unique", False):
state["key"] = _anonymous_label(
"%%(%d %s)s" % (id(self), state.get("_orig_key", "param"))
)
self.__dict__.update(state)
def __repr__(self):
return "BindParameter(%r, %r, type_=%r)" % (
self.key,
self.value,
self.type,
)
class TypeClause(ClauseElement):
"""Handle a type keyword in a SQL statement.
Used by the ``Case`` statement.
"""
__visit_name__ = "typeclause"
def __init__(self, type_):
self.type = type_
class TextClause(Executable, ClauseElement):
"""Represent a literal SQL text fragment.
E.g.::
from sqlalchemy import text
t = text("SELECT * FROM users")
result = connection.execute(t)
The :class:`_expression.TextClause` construct is produced using the
:func:`_expression.text`
function; see that function for full documentation.
.. seealso::
:func:`_expression.text`
"""
__visit_name__ = "textclause"
_bind_params_regex = re.compile(r"(?<![:\w\x5c]):(\w+)(?!:)", re.UNICODE)
_execution_options = Executable._execution_options.union(
{"autocommit": PARSE_AUTOCOMMIT}
)
_is_implicitly_boolean = False
def __and__(self, other):
# support use in select.where(), query.filter()
return and_(self, other)
@property
def _select_iterable(self):
return (self,)
@property
def selectable(self):
# allows text() to be considered by
# _interpret_as_from
return self
_hide_froms = []
# help in those cases where text() is
# interpreted in a column expression situation
key = _label = _resolve_label = None
_allow_label_resolve = False
def __init__(self, text, bind=None):
self._bind = bind
self._bindparams = {}
def repl(m):
self._bindparams[m.group(1)] = BindParameter(m.group(1))
return ":%s" % m.group(1)
# scan the string and search for bind parameter names, add them
# to the list of bindparams
self.text = self._bind_params_regex.sub(repl, text)
@classmethod
@util.deprecated_params(
autocommit=(
"0.6",
"The :paramref:`_expression.text.autocommit` "
"parameter is deprecated and "
"will be removed in a future release. Please use the "
":paramref:`.Connection.execution_options.autocommit` parameter "
"in conjunction with the :meth:`.Executable.execution_options` "
"method.",
),
bindparams=(
"0.9",
"The :paramref:`_expression.text.bindparams` parameter "
"is deprecated and will be removed in a future release. Please "
"refer to the :meth:`_expression.TextClause.bindparams` method.",
),
typemap=(
"0.9",
"The :paramref:`_expression.text.typemap` parameter is "
"deprecated and will be removed in a future release. Please "
"refer to the :meth:`_expression.TextClause.columns` method.",
),
)
@_document_text_coercion("text", ":func:`.text`", ":paramref:`.text.text`")
def _create_text(
self, text, bind=None, bindparams=None, typemap=None, autocommit=None
):
r"""Construct a new :class:`_expression.TextClause` clause,
representing
a textual SQL string directly.
E.g.::
from sqlalchemy import text
t = text("SELECT * FROM users")
result = connection.execute(t)
The advantages :func:`_expression.text`
provides over a plain string are
backend-neutral support for bind parameters, per-statement
execution options, as well as
bind parameter and result-column typing behavior, allowing
SQLAlchemy type constructs to play a role when executing
a statement that is specified literally. The construct can also
be provided with a ``.c`` collection of column elements, allowing
it to be embedded in other SQL expression constructs as a subquery.
Bind parameters are specified by name, using the format ``:name``.
E.g.::
t = text("SELECT * FROM users WHERE id=:user_id")
result = connection.execute(t, user_id=12)
For SQL statements where a colon is required verbatim, as within
an inline string, use a backslash to escape::
t = text("SELECT * FROM users WHERE name='\:username'")
The :class:`_expression.TextClause`
construct includes methods which can
provide information about the bound parameters as well as the column
values which would be returned from the textual statement, assuming
it's an executable SELECT type of statement. The
:meth:`_expression.TextClause.bindparams`
method is used to provide bound
parameter detail, and :meth:`_expression.TextClause.columns`
method allows
specification of return columns including names and types::
t = text("SELECT * FROM users WHERE id=:user_id").\
bindparams(user_id=7).\
columns(id=Integer, name=String)
for id, name in connection.execute(t):
print(id, name)
The :func:`_expression.text` construct is used in cases when
a literal string SQL fragment is specified as part of a larger query,
such as for the WHERE clause of a SELECT statement::
s = select([users.c.id, users.c.name]).where(text("id=:user_id"))
result = connection.execute(s, user_id=12)
:func:`_expression.text` is also used for the construction
of a full, standalone statement using plain text.
As such, SQLAlchemy refers
to it as an :class:`.Executable` object, and it supports
the :meth:`Executable.execution_options` method. For example,
a :func:`_expression.text`
construct that should be subject to "autocommit"
can be set explicitly so using the
:paramref:`.Connection.execution_options.autocommit` option::
t = text("EXEC my_procedural_thing()").\
execution_options(autocommit=True)
Note that SQLAlchemy's usual "autocommit" behavior applies to
:func:`_expression.text` constructs implicitly - that is,
statements which begin
with a phrase such as ``INSERT``, ``UPDATE``, ``DELETE``,
or a variety of other phrases specific to certain backends, will
be eligible for autocommit if no transaction is in progress.
:param text:
the text of the SQL statement to be created. use ``:<param>``
to specify bind parameters; they will be compiled to their
engine-specific format.
:param autocommit: whether or not to set the "autocommit" execution
option for this :class:`_expression.TextClause` object.
:param bind:
an optional connection or engine to be used for this text query.
:param bindparams:
A list of :func:`.bindparam` instances used to
provide information about parameters embedded in the statement.
E.g.::
stmt = text("SELECT * FROM table WHERE id=:id",
bindparams=[bindparam('id', value=5, type_=Integer)])
:param typemap:
A dictionary mapping the names of columns represented in the columns
clause of a ``SELECT`` statement to type objects.
E.g.::
stmt = text("SELECT * FROM table",
typemap={'id': Integer, 'name': String},
)
.. seealso::
:ref:`sqlexpression_text` - in the Core tutorial
:ref:`orm_tutorial_literal_sql` - in the ORM tutorial
"""
stmt = TextClause(text, bind=bind)
if bindparams:
stmt = stmt.bindparams(*bindparams)
if typemap:
stmt = stmt.columns(**typemap)
if autocommit is not None:
stmt = stmt.execution_options(autocommit=autocommit)
return stmt
@_generative
def bindparams(self, *binds, **names_to_values):
"""Establish the values and/or types of bound parameters within
this :class:`_expression.TextClause` construct.
Given a text construct such as::
from sqlalchemy import text
stmt = text("SELECT id, name FROM user WHERE name=:name "
"AND timestamp=:timestamp")
the :meth:`_expression.TextClause.bindparams`
method can be used to establish
the initial value of ``:name`` and ``:timestamp``,
using simple keyword arguments::
stmt = stmt.bindparams(name='jack',
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))
Where above, new :class:`.BindParameter` objects
will be generated with the names ``name`` and ``timestamp``, and
values of ``jack`` and ``datetime.datetime(2012, 10, 8, 15, 12, 5)``,
respectively. The types will be
inferred from the values given, in this case :class:`.String` and
:class:`.DateTime`.
When specific typing behavior is needed, the positional ``*binds``
argument can be used in which to specify :func:`.bindparam` constructs
directly. These constructs must include at least the ``key``
argument, then an optional value and type::
from sqlalchemy import bindparam
stmt = stmt.bindparams(
bindparam('name', value='jack', type_=String),
bindparam('timestamp', type_=DateTime)
)
Above, we specified the type of :class:`.DateTime` for the
``timestamp`` bind, and the type of :class:`.String` for the ``name``
bind. In the case of ``name`` we also set the default value of
``"jack"``.
Additional bound parameters can be supplied at statement execution
time, e.g.::
result = connection.execute(stmt,
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))
The :meth:`_expression.TextClause.bindparams`
method can be called repeatedly,
where it will re-use existing :class:`.BindParameter` objects to add
new information. For example, we can call
:meth:`_expression.TextClause.bindparams`
first with typing information, and a
second time with value information, and it will be combined::
stmt = text("SELECT id, name FROM user WHERE name=:name "
"AND timestamp=:timestamp")
stmt = stmt.bindparams(
bindparam('name', type_=String),
bindparam('timestamp', type_=DateTime)
)
stmt = stmt.bindparams(
name='jack',
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)
)
The :meth:`_expression.TextClause.bindparams`
method also supports the concept of
**unique** bound parameters. These are parameters that are
"uniquified" on name at statement compilation time, so that multiple
:func:`_expression.text`
constructs may be combined together without the names
conflicting. To use this feature, specify the
:paramref:`.BindParameter.unique` flag on each :func:`.bindparam`
object::
stmt1 = text("select id from table where name=:name").bindparams(
bindparam("name", value='name1', unique=True)
)
stmt2 = text("select id from table where name=:name").bindparams(
bindparam("name", value='name2', unique=True)
)
union = union_all(
stmt1.columns(column("id")),
stmt2.columns(column("id"))
)
The above statement will render as::
select id from table where name=:name_1
UNION ALL select id from table where name=:name_2
.. versionadded:: 1.3.11 Added support for the
:paramref:`.BindParameter.unique` flag to work with
:func:`_expression.text`
constructs.
"""
self._bindparams = new_params = self._bindparams.copy()
for bind in binds:
try:
# the regex used for text() currently will not match
# a unique/anonymous key in any case, so use the _orig_key
# so that a text() construct can support unique parameters
existing = new_params[bind._orig_key]
except KeyError as err:
util.raise_(
exc.ArgumentError(
"This text() construct doesn't define a "
"bound parameter named %r" % bind._orig_key
),
replace_context=err,
)
else:
new_params[existing._orig_key] = bind
for key, value in names_to_values.items():
try:
existing = new_params[key]
except KeyError as err:
util.raise_(
exc.ArgumentError(
"This text() construct doesn't define a "
"bound parameter named %r" % key
),
replace_context=err,
)
else:
new_params[key] = existing._with_value(value)
@util.dependencies("sqlalchemy.sql.selectable")
def columns(self, selectable, *cols, **types):
"""Turn this :class:`_expression.TextClause` object into a
:class:`.TextAsFrom`
object that can be embedded into another statement.
This function essentially bridges the gap between an entirely
textual SELECT statement and the SQL expression language concept
of a "selectable"::
from sqlalchemy.sql import column, text
stmt = text("SELECT id, name FROM some_table")
stmt = stmt.columns(column('id'), column('name')).alias('st')
stmt = select([mytable]).\
select_from(
mytable.join(stmt, mytable.c.name == stmt.c.name)
).where(stmt.c.id > 5)
Above, we pass a series of :func:`_expression.column` elements to the
:meth:`_expression.TextClause.columns` method positionally. These
:func:`_expression.column`
elements now become first class elements upon the :attr:`.TextAsFrom.c`
column collection, just like any other selectable.
The column expressions we pass to
:meth:`_expression.TextClause.columns` may
also be typed; when we do so, these :class:`.TypeEngine` objects become
the effective return type of the column, so that SQLAlchemy's
result-set-processing systems may be used on the return values.
This is often needed for types such as date or boolean types, as well
as for unicode processing on some dialect configurations::
stmt = text("SELECT id, name, timestamp FROM some_table")
stmt = stmt.columns(
column('id', Integer),
column('name', Unicode),
column('timestamp', DateTime)
)
for id, name, timestamp in connection.execute(stmt):
print(id, name, timestamp)
As a shortcut to the above syntax, keyword arguments referring to
types alone may be used, if only type conversion is needed::
stmt = text("SELECT id, name, timestamp FROM some_table")
stmt = stmt.columns(
id=Integer,
name=Unicode,
timestamp=DateTime
)
for id, name, timestamp in connection.execute(stmt):
print(id, name, timestamp)
The positional form of :meth:`_expression.TextClause.columns`
also provides the
unique feature of **positional column targeting**, which is
particularly useful when using the ORM with complex textual queries. If
we specify the columns from our model to
:meth:`_expression.TextClause.columns`,
the result set will match to those columns positionally, meaning the
name or origin of the column in the textual SQL doesn't matter::
stmt = text("SELECT users.id, addresses.id, users.id, "
"users.name, addresses.email_address AS email "
"FROM users JOIN addresses ON users.id=addresses.user_id "
"WHERE users.id = 1").columns(
User.id,
Address.id,
Address.user_id,
User.name,
Address.email_address
)
query = session.query(User).from_statement(stmt).options(
contains_eager(User.addresses))
.. versionadded:: 1.1 the :meth:`_expression.TextClause.columns`
method now
offers positional column targeting in the result set when
the column expressions are passed purely positionally.
The :meth:`_expression.TextClause.columns` method provides a direct
route to calling :meth:`_expression.FromClause.alias` as well as
:meth:`_expression.SelectBase.cte`
against a textual SELECT statement::
stmt = stmt.columns(id=Integer, name=String).cte('st')
stmt = select([sometable]).where(sometable.c.id == stmt.c.id)
.. versionadded:: 0.9.0 :func:`_expression.text`
can now be converted into a
fully featured "selectable" construct using the
:meth:`_expression.TextClause.columns` method.
"""
positional_input_cols = [
ColumnClause(col.key, types.pop(col.key))
if col.key in types
else col
for col in cols
]
keyed_input_cols = [
ColumnClause(key, type_) for key, type_ in types.items()
]
return selectable.TextAsFrom(
self,
positional_input_cols + keyed_input_cols,
positional=bool(positional_input_cols) and not keyed_input_cols,
)
@property
def type(self):
return type_api.NULLTYPE
@property
def comparator(self):
return self.type.comparator_factory(self)
def self_group(self, against=None):
if against is operators.in_op:
return Grouping(self)
else:
return self
def _copy_internals(self, clone=_clone, **kw):
self._bindparams = dict(
(b.key, clone(b, **kw)) for b in self._bindparams.values()
)
def get_children(self, **kwargs):
return list(self._bindparams.values())
def compare(self, other):
return isinstance(other, TextClause) and other.text == self.text
class Null(ColumnElement):
"""Represent the NULL keyword in a SQL statement.
:class:`.Null` is accessed as a constant via the
:func:`.null` function.
"""
__visit_name__ = "null"
@util.memoized_property
def type(self):
return type_api.NULLTYPE
@classmethod
def _instance(cls):
"""Return a constant :class:`.Null` construct."""
return Null()
def compare(self, other):
return isinstance(other, Null)
class False_(ColumnElement):
"""Represent the ``false`` keyword, or equivalent, in a SQL statement.
:class:`.False_` is accessed as a constant via the
:func:`.false` function.
"""
__visit_name__ = "false"
@util.memoized_property
def type(self):
return type_api.BOOLEANTYPE
def _negate(self):
return True_()
@classmethod
def _instance(cls):
"""Return a :class:`.False_` construct.
E.g.::
>>> from sqlalchemy import false
>>> print(select([t.c.x]).where(false()))
SELECT x FROM t WHERE false
A backend which does not support true/false constants will render as
an expression against 1 or 0::
>>> print(select([t.c.x]).where(false()))
SELECT x FROM t WHERE 0 = 1
The :func:`.true` and :func:`.false` constants also feature
"short circuit" operation within an :func:`.and_` or :func:`.or_`
conjunction::
>>> print(select([t.c.x]).where(or_(t.c.x > 5, true())))
SELECT x FROM t WHERE true
>>> print(select([t.c.x]).where(and_(t.c.x > 5, false())))
SELECT x FROM t WHERE false
.. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature
better integrated behavior within conjunctions and on dialects
that don't support true/false constants.
.. seealso::
:func:`.true`
"""
return False_()
def compare(self, other):
return isinstance(other, False_)
class True_(ColumnElement):
"""Represent the ``true`` keyword, or equivalent, in a SQL statement.
:class:`.True_` is accessed as a constant via the
:func:`.true` function.
"""
__visit_name__ = "true"
@util.memoized_property
def type(self):
return type_api.BOOLEANTYPE
def _negate(self):
return False_()
@classmethod
def _ifnone(cls, other):
if other is None:
return cls._instance()
else:
return other
@classmethod
def _instance(cls):
"""Return a constant :class:`.True_` construct.
E.g.::
>>> from sqlalchemy import true
>>> print(select([t.c.x]).where(true()))
SELECT x FROM t WHERE true
A backend which does not support true/false constants will render as
an expression against 1 or 0::
>>> print(select([t.c.x]).where(true()))
SELECT x FROM t WHERE 1 = 1
The :func:`.true` and :func:`.false` constants also feature
"short circuit" operation within an :func:`.and_` or :func:`.or_`
conjunction::
>>> print(select([t.c.x]).where(or_(t.c.x > 5, true())))
SELECT x FROM t WHERE true
>>> print(select([t.c.x]).where(and_(t.c.x > 5, false())))
SELECT x FROM t WHERE false
.. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature
better integrated behavior within conjunctions and on dialects
that don't support true/false constants.
.. seealso::
:func:`.false`
"""
return True_()
def compare(self, other):
return isinstance(other, True_)
class ClauseList(ClauseElement):
"""Describe a list of clauses, separated by an operator.
    By default, the list is comma-separated, such as a column listing.
"""
__visit_name__ = "clauselist"
def __init__(self, *clauses, **kwargs):
self.operator = kwargs.pop("operator", operators.comma_op)
self.group = kwargs.pop("group", True)
self.group_contents = kwargs.pop("group_contents", True)
self._tuple_values = kwargs.pop("_tuple_values", False)
text_converter = kwargs.pop(
"_literal_as_text", _expression_literal_as_text
)
if self.group_contents:
self.clauses = [
text_converter(clause).self_group(against=self.operator)
for clause in clauses
]
else:
self.clauses = [text_converter(clause) for clause in clauses]
self._is_implicitly_boolean = operators.is_boolean(self.operator)
def __iter__(self):
return iter(self.clauses)
def __len__(self):
return len(self.clauses)
@property
def _select_iterable(self):
return iter(self)
def append(self, clause):
if self.group_contents:
self.clauses.append(
_literal_as_text(clause).self_group(against=self.operator)
)
else:
self.clauses.append(_literal_as_text(clause))
def _copy_internals(self, clone=_clone, **kw):
self.clauses = [clone(clause, **kw) for clause in self.clauses]
def get_children(self, **kwargs):
return self.clauses
@property
def _from_objects(self):
return list(itertools.chain(*[c._from_objects for c in self.clauses]))
def self_group(self, against=None):
if self.group and operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
def compare(self, other, **kw):
"""Compare this :class:`.ClauseList` to the given :class:`.ClauseList`,
including a comparison of all the clause items.
"""
if not isinstance(other, ClauseList) and len(self.clauses) == 1:
return self.clauses[0].compare(other, **kw)
elif (
isinstance(other, ClauseList)
and len(self.clauses) == len(other.clauses)
and self.operator is other.operator
):
if self.operator in (operators.and_, operators.or_):
completed = set()
for clause in self.clauses:
for other_clause in set(other.clauses).difference(
completed
):
if clause.compare(other_clause, **kw):
completed.add(other_clause)
break
return len(completed) == len(other.clauses)
else:
for i in range(0, len(self.clauses)):
if not self.clauses[i].compare(other.clauses[i], **kw):
return False
else:
return True
else:
return False
class BooleanClauseList(ClauseList, ColumnElement):
__visit_name__ = "clauselist"
_tuple_values = False
def __init__(self, *arg, **kw):
raise NotImplementedError(
"BooleanClauseList has a private constructor"
)
@classmethod
def _construct(cls, operator, continue_on, skip_on, *clauses, **kw):
convert_clauses = []
clauses = [
_expression_literal_as_text(clause)
for clause in util.coerce_generator_arg(clauses)
]
for clause in clauses:
if isinstance(clause, continue_on):
continue
elif isinstance(clause, skip_on):
return clause.self_group(against=operators._asbool)
convert_clauses.append(clause)
if len(convert_clauses) == 1:
return convert_clauses[0].self_group(against=operators._asbool)
elif not convert_clauses and clauses:
return clauses[0].self_group(against=operators._asbool)
convert_clauses = [
c.self_group(against=operator) for c in convert_clauses
]
self = cls.__new__(cls)
self.clauses = convert_clauses
self.group = True
self.operator = operator
self.group_contents = True
self.type = type_api.BOOLEANTYPE
self._is_implicitly_boolean = True
return self
@classmethod
def and_(cls, *clauses):
"""Produce a conjunction of expressions joined by ``AND``.
E.g.::
from sqlalchemy import and_
stmt = select([users_table]).where(
and_(
users_table.c.name == 'wendy',
users_table.c.enrolled == True
)
)
The :func:`.and_` conjunction is also available using the
Python ``&`` operator (though note that compound expressions
need to be parenthesized in order to function with Python
operator precedence behavior)::
stmt = select([users_table]).where(
(users_table.c.name == 'wendy') &
(users_table.c.enrolled == True)
)
The :func:`.and_` operation is also implicit in some cases;
the :meth:`_expression.Select.where`
method for example can be invoked multiple
times against a statement, which will have the effect of each
clause being combined using :func:`.and_`::
stmt = select([users_table]).\
where(users_table.c.name == 'wendy').\
where(users_table.c.enrolled == True)
.. seealso::
:func:`.or_`
"""
return cls._construct(operators.and_, True_, False_, *clauses)
@classmethod
def or_(cls, *clauses):
"""Produce a conjunction of expressions joined by ``OR``.
E.g.::
from sqlalchemy import or_
stmt = select([users_table]).where(
or_(
users_table.c.name == 'wendy',
users_table.c.name == 'jack'
)
)
        The :func:`.or_` construct is also available using the
Python ``|`` operator (though note that compound expressions
need to be parenthesized in order to function with Python
operator precedence behavior)::
stmt = select([users_table]).where(
(users_table.c.name == 'wendy') |
(users_table.c.name == 'jack')
)
.. seealso::
:func:`.and_`
"""
return cls._construct(operators.or_, False_, True_, *clauses)
@property
def _select_iterable(self):
return (self,)
def self_group(self, against=None):
if not self.clauses:
return self
else:
return super(BooleanClauseList, self).self_group(against=against)
def _negate(self):
return ClauseList._negate(self)
and_ = BooleanClauseList.and_
or_ = BooleanClauseList.or_
class Tuple(ClauseList, ColumnElement):
"""Represent a SQL tuple."""
def __init__(self, *clauses, **kw):
"""Return a :class:`.Tuple`.
Main usage is to produce a composite IN construct using
:meth:`.ColumnOperators.in_` ::
from sqlalchemy import tuple_
tuple_(table.c.col1, table.c.col2).in_(
[(1, 2), (5, 12), (10, 19)]
)
.. versionchanged:: 1.3.6 Added support for SQLite IN tuples.
.. warning::
The composite IN construct is not supported by all backends, and is
currently known to work on PostgreSQL, MySQL, and SQLite.
Unsupported backends will raise a subclass of
:class:`~sqlalchemy.exc.DBAPIError` when such an expression is
invoked.
"""
clauses = [_literal_as_binds(c) for c in clauses]
self._type_tuple = [arg.type for arg in clauses]
self.type = kw.pop(
"type_",
self._type_tuple[0] if self._type_tuple else type_api.NULLTYPE,
)
super(Tuple, self).__init__(*clauses, **kw)
@property
def _select_iterable(self):
return (self,)
def _bind_param(self, operator, obj, type_=None):
return Tuple(
*[
BindParameter(
None,
o,
_compared_to_operator=operator,
_compared_to_type=compared_to_type,
unique=True,
type_=type_,
)
for o, compared_to_type in zip(obj, self._type_tuple)
]
).self_group()
class Case(ColumnElement):
"""Represent a ``CASE`` expression.
:class:`.Case` is produced using the :func:`.case` factory function,
as in::
from sqlalchemy import case
stmt = select([users_table]).\
where(
case(
[
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
],
else_='E'
)
)
    Details on :class:`.Case` usage are at :func:`.case`.
.. seealso::
:func:`.case`
"""
__visit_name__ = "case"
def __init__(self, whens, value=None, else_=None):
r"""Produce a ``CASE`` expression.
The ``CASE`` construct in SQL is a conditional object that
acts somewhat analogously to an "if/then" construct in other
languages. It returns an instance of :class:`.Case`.
:func:`.case` in its usual form is passed a list of "when"
constructs, that is, a list of conditions and results as tuples::
from sqlalchemy import case
stmt = select([users_table]).\
where(
case(
[
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
],
else_='E'
)
)
The above statement will produce SQL resembling::
SELECT id, name FROM user
WHERE CASE
WHEN (name = :name_1) THEN :param_1
WHEN (name = :name_2) THEN :param_2
ELSE :param_3
END
When simple equality expressions of several values against a single
parent column are needed, :func:`.case` also has a "shorthand" format
used via the
:paramref:`.case.value` parameter, which is passed a column
expression to be compared. In this form, the :paramref:`.case.whens`
parameter is passed as a dictionary containing expressions to be
compared against keyed to result expressions. The statement below is
equivalent to the preceding statement::
stmt = select([users_table]).\
where(
case(
{"wendy": "W", "jack": "J"},
value=users_table.c.name,
else_='E'
)
)
The values which are accepted as result values in
:paramref:`.case.whens` as well as with :paramref:`.case.else_` are
coerced from Python literals into :func:`.bindparam` constructs.
SQL expressions, e.g. :class:`_expression.ColumnElement` constructs,
are accepted
as well. To coerce a literal string expression into a constant
expression rendered inline, use the :func:`_expression.literal_column`
construct,
as in::
from sqlalchemy import case, literal_column
case(
[
(
orderline.c.qty > 100,
literal_column("'greaterthan100'")
),
(
orderline.c.qty > 10,
literal_column("'greaterthan10'")
)
],
else_=literal_column("'lessthan10'")
)
The above will render the given constants without using bound
parameters for the result values (but still for the comparison
values), as in::
CASE
WHEN (orderline.qty > :qty_1) THEN 'greaterthan100'
WHEN (orderline.qty > :qty_2) THEN 'greaterthan10'
ELSE 'lessthan10'
END
:param whens: The criteria to be compared against,
:paramref:`.case.whens` accepts two different forms, based on
whether or not :paramref:`.case.value` is used.
In the first form, it accepts a list of 2-tuples; each 2-tuple
consists of ``(<sql expression>, <value>)``, where the SQL
expression is a boolean expression and "value" is a resulting value,
e.g.::
case([
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
])
In the second form, it accepts a Python dictionary of comparison
values mapped to a resulting value; this form requires
:paramref:`.case.value` to be present, and values will be compared
using the ``==`` operator, e.g.::
case(
{"wendy": "W", "jack": "J"},
value=users_table.c.name
)
:param value: An optional SQL expression which will be used as a
fixed "comparison point" for candidate values within a dictionary
passed to :paramref:`.case.whens`.
:param else\_: An optional SQL expression which will be the evaluated
result of the ``CASE`` construct if all expressions within
:paramref:`.case.whens` evaluate to false. When omitted, most
databases will produce a result of NULL if none of the "when"
expressions evaluate to true.
"""
try:
whens = util.dictlike_iteritems(whens)
except TypeError:
pass
if value is not None:
whenlist = [
(_literal_as_binds(c).self_group(), _literal_as_binds(r))
for (c, r) in whens
]
else:
whenlist = [
(_no_literals(c).self_group(), _literal_as_binds(r))
for (c, r) in whens
]
if whenlist:
type_ = list(whenlist[-1])[-1].type
else:
type_ = None
if value is None:
self.value = None
else:
self.value = _literal_as_binds(value)
self.type = type_
self.whens = whenlist
if else_ is not None:
self.else_ = _literal_as_binds(else_)
else:
self.else_ = None
def _copy_internals(self, clone=_clone, **kw):
if self.value is not None:
self.value = clone(self.value, **kw)
self.whens = [(clone(x, **kw), clone(y, **kw)) for x, y in self.whens]
if self.else_ is not None:
self.else_ = clone(self.else_, **kw)
def get_children(self, **kwargs):
if self.value is not None:
yield self.value
for x, y in self.whens:
yield x
yield y
if self.else_ is not None:
yield self.else_
@property
def _from_objects(self):
return list(
itertools.chain(*[x._from_objects for x in self.get_children()])
)
def literal_column(text, type_=None):
r"""Produce a :class:`.ColumnClause` object that has the
:paramref:`_expression.column.is_literal` flag set to True.
:func:`_expression.literal_column` is similar to
:func:`_expression.column`, except that
it is more often used as a "standalone" column expression that renders
exactly as stated; while :func:`_expression.column`
stores a string name that
will be assumed to be part of a table and may be quoted as such,
:func:`_expression.literal_column` can be that,
or any other arbitrary column-oriented
expression.
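    E.g., a minimal sketch rendering an exact SQL fragment as a labeled
    column expression::
        from sqlalchemy import select, literal_column
        stmt = select([literal_column("x + y").label("total")])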
:param text: the text of the expression; can be any SQL expression.
Quoting rules will not be applied. To specify a column-name expression
which should be subject to quoting rules, use the :func:`column`
function.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine`
object which will
provide result-set translation and additional expression semantics for
this column. If left as None the type will be NullType.
.. seealso::
:func:`_expression.column`
:func:`_expression.text`
:ref:`sqlexpression_literal_column`
"""
return ColumnClause(text, type_=type_, is_literal=True)
class Cast(ColumnElement):
"""Represent a ``CAST`` expression.
:class:`.Cast` is produced using the :func:`.cast` factory function,
as in::
from sqlalchemy import cast, Numeric
stmt = select([
cast(product_table.c.unit_price, Numeric(10, 4))
])
    Details on :class:`.Cast` usage are at :func:`.cast`.
.. seealso::
:ref:`coretutorial_casts`
:func:`.cast`
:func:`.type_coerce` - an alternative to CAST that coerces the type
on the Python side only, which is often sufficient to generate the
correct SQL and data coercion.
"""
__visit_name__ = "cast"
def __init__(self, expression, type_):
r"""Produce a ``CAST`` expression.
:func:`.cast` returns an instance of :class:`.Cast`.
E.g.::
from sqlalchemy import cast, Numeric
stmt = select([
cast(product_table.c.unit_price, Numeric(10, 4))
])
The above statement will produce SQL resembling::
SELECT CAST(unit_price AS NUMERIC(10, 4)) FROM product
        The :func:`.cast` function performs two distinct operations when
used. The first is that it renders the ``CAST`` expression within
the resulting SQL string. The second is that it associates the given
type (e.g. :class:`.TypeEngine` class or instance) with the column
expression on the Python side, which means the expression will take
on the expression operator behavior associated with that type,
as well as the bound-value handling and result-row-handling behavior
of the type.
.. versionchanged:: 0.9.0 :func:`.cast` now applies the given type
to the expression such that it takes effect on the bound-value,
e.g. the Python-to-database direction, in addition to the
result handling, e.g. database-to-Python, direction.
An alternative to :func:`.cast` is the :func:`.type_coerce` function.
This function performs the second task of associating an expression
with a specific type, but does not render the ``CAST`` expression
in SQL.
:param expression: A SQL expression, such as a
:class:`_expression.ColumnElement`
expression or a Python string which will be coerced into a bound
literal value.
:param type\_: A :class:`.TypeEngine` class or instance indicating
the type to which the ``CAST`` should apply.
.. seealso::
:ref:`coretutorial_casts`
:func:`.type_coerce` - an alternative to CAST that coerces the type
on the Python side only, which is often sufficient to generate the
correct SQL and data coercion.
"""
self.type = type_api.to_instance(type_)
self.clause = _literal_as_binds(expression, type_=self.type)
self.typeclause = TypeClause(self.type)
def _copy_internals(self, clone=_clone, **kw):
self.clause = clone(self.clause, **kw)
self.typeclause = clone(self.typeclause, **kw)
def get_children(self, **kwargs):
return self.clause, self.typeclause
@property
def _from_objects(self):
return self.clause._from_objects
class TypeCoerce(ColumnElement):
"""Represent a Python-side type-coercion wrapper.
:class:`.TypeCoerce` supplies the :func:`_expression.type_coerce`
function; see that function for usage details.
.. versionchanged:: 1.1 The :func:`.type_coerce` function now produces
a persistent :class:`.TypeCoerce` wrapper object rather than
translating the given object in place.
.. seealso::
:func:`_expression.type_coerce`
:func:`.cast`
"""
__visit_name__ = "type_coerce"
def __init__(self, expression, type_):
r"""Associate a SQL expression with a particular type, without rendering
``CAST``.
E.g.::
from sqlalchemy import type_coerce
stmt = select([type_coerce(log_table.date_string, StringDateTime())])
The above construct will produce a :class:`.TypeCoerce` object, which
does not modify the rendering in any way on the SQL side, with the
possible exception of a generated label if used in a columns clause
context::
SELECT date_string AS anon_1 FROM log
When result rows are fetched, the ``StringDateTime`` type processor
will be applied to result rows on behalf of the ``date_string`` column.
.. note:: the :func:`.type_coerce` construct does not render any
SQL syntax of its own, including that it does not imply
parenthesization. Please use :meth:`.TypeCoerce.self_group`
if explicit parenthesization is required.
In order to provide a named label for the expression, use
:meth:`_expression.ColumnElement.label`::
stmt = select([
type_coerce(log_table.date_string, StringDateTime()).label('date')
])
A type that features bound-value handling will also have that behavior
take effect when literal values or :func:`.bindparam` constructs are
passed to :func:`.type_coerce` as targets.
For example, if a type implements the
:meth:`.TypeEngine.bind_expression`
method or :meth:`.TypeEngine.bind_processor` method or equivalent,
these functions will take effect at statement compilation/execution
time when a literal value is passed, as in::
# bound-value handling of MyStringType will be applied to the
# literal value "some string"
stmt = select([type_coerce("some string", MyStringType)])
When using :func:`.type_coerce` with composed expressions, note that
        **parentheses are not applied**. If :func:`.type_coerce` is being
        used in an operator context where the parentheses normally present from
CAST are necessary, use the :meth:`.TypeCoerce.self_group` method::
>>> some_integer = column("someint", Integer)
>>> some_string = column("somestr", String)
>>> expr = type_coerce(some_integer + 5, String) + some_string
>>> print(expr)
someint + :someint_1 || somestr
>>> expr = type_coerce(some_integer + 5, String).self_group() + some_string
>>> print(expr)
(someint + :someint_1) || somestr
:param expression: A SQL expression, such as a
:class:`_expression.ColumnElement`
expression or a Python string which will be coerced into a bound
literal value.
:param type\_: A :class:`.TypeEngine` class or instance indicating
the type to which the expression is coerced.
.. seealso::
:ref:`coretutorial_casts`
:func:`.cast`
""" # noqa
self.type = type_api.to_instance(type_)
self.clause = _literal_as_binds(expression, type_=self.type)
def _copy_internals(self, clone=_clone, **kw):
self.clause = clone(self.clause, **kw)
self.__dict__.pop("typed_expression", None)
def get_children(self, **kwargs):
return (self.clause,)
@property
def _from_objects(self):
return self.clause._from_objects
@util.memoized_property
def typed_expression(self):
if isinstance(self.clause, BindParameter):
bp = self.clause._clone()
bp.type = self.type
return bp
else:
return self.clause
def self_group(self, against=None):
grouped = self.clause.self_group(against=against)
if grouped is not self.clause:
return TypeCoerce(grouped, self.type)
else:
return self
class Extract(ColumnElement):
"""Represent a SQL EXTRACT clause, ``extract(field FROM expr)``."""
__visit_name__ = "extract"
def __init__(self, field, expr, **kwargs):
"""Return a :class:`.Extract` construct.
This is typically available as :func:`.extract`
as well as ``func.extract`` from the
:data:`.func` namespace.
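        E.g., a minimal sketch, assuming a hypothetical ``some_table`` with
        a ``timestamp`` column::
            from sqlalchemy import extract
            # renders approximately as EXTRACT(year FROM some_table.timestamp)
            expr = extract('year', some_table.c.timestamp)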
"""
self.type = type_api.INTEGERTYPE
self.field = field
self.expr = _literal_as_binds(expr, None)
def _copy_internals(self, clone=_clone, **kw):
self.expr = clone(self.expr, **kw)
def get_children(self, **kwargs):
return (self.expr,)
@property
def _from_objects(self):
return self.expr._from_objects
class _label_reference(ColumnElement):
"""Wrap a column expression as it appears in a 'reference' context.
This expression is any that includes an _order_by_label_element,
which is a Label, or a DESC / ASC construct wrapping a Label.
The production of _label_reference() should occur when an expression
is added to this context; this includes the ORDER BY or GROUP BY of a
SELECT statement, as well as a few other places, such as the ORDER BY
within an OVER clause.
"""
__visit_name__ = "label_reference"
def __init__(self, element):
self.element = element
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
@property
def _from_objects(self):
return ()
class _textual_label_reference(ColumnElement):
__visit_name__ = "textual_label_reference"
def __init__(self, element):
self.element = element
@util.memoized_property
def _text_clause(self):
return TextClause._create_text(self.element)
class UnaryExpression(ColumnElement):
"""Define a 'unary' expression.
A unary expression has a single column expression
and an operator. The operator can be placed on the left
(where it is called the 'operator') or right (where it is called the
'modifier') of the column expression.
:class:`.UnaryExpression` is the basis for several unary operators
including those used by :func:`.desc`, :func:`.asc`, :func:`.distinct`,
:func:`.nullsfirst` and :func:`.nullslast`.
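    E.g., a minimal sketch contrasting the two placements::
        from sqlalchemy import column, desc, distinct
        print(desc(column('x')))      # renders "x DESC" - modifier on the right
        print(distinct(column('x')))  # renders "DISTINCT x" - operator on the left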
"""
__visit_name__ = "unary"
def __init__(
self,
element,
operator=None,
modifier=None,
type_=None,
negate=None,
wraps_column_expression=False,
):
self.operator = operator
self.modifier = modifier
self.element = element.self_group(
against=self.operator or self.modifier
)
self.type = type_api.to_instance(type_)
self.negate = negate
self.wraps_column_expression = wraps_column_expression
@classmethod
def _create_nullsfirst(cls, column):
"""Produce the ``NULLS FIRST`` modifier for an ``ORDER BY`` expression.
:func:`.nullsfirst` is intended to modify the expression produced
by :func:`.asc` or :func:`.desc`, and indicates how NULL values
should be handled when they are encountered during ordering::
from sqlalchemy import desc, nullsfirst
stmt = select([users_table]).\
order_by(nullsfirst(desc(users_table.c.name)))
The SQL expression from the above would resemble::
SELECT id, name FROM user ORDER BY name DESC NULLS FIRST
Like :func:`.asc` and :func:`.desc`, :func:`.nullsfirst` is typically
invoked from the column expression itself using
:meth:`_expression.ColumnElement.nullsfirst`,
rather than as its standalone
function version, as in::
stmt = (select([users_table]).
order_by(users_table.c.name.desc().nullsfirst())
)
.. seealso::
:func:`.asc`
:func:`.desc`
:func:`.nullslast`
:meth:`_expression.Select.order_by`
"""
return UnaryExpression(
_literal_as_label_reference(column),
modifier=operators.nullsfirst_op,
wraps_column_expression=False,
)
@classmethod
def _create_nullslast(cls, column):
"""Produce the ``NULLS LAST`` modifier for an ``ORDER BY`` expression.
:func:`.nullslast` is intended to modify the expression produced
by :func:`.asc` or :func:`.desc`, and indicates how NULL values
should be handled when they are encountered during ordering::
from sqlalchemy import desc, nullslast
stmt = select([users_table]).\
order_by(nullslast(desc(users_table.c.name)))
The SQL expression from the above would resemble::
SELECT id, name FROM user ORDER BY name DESC NULLS LAST
Like :func:`.asc` and :func:`.desc`, :func:`.nullslast` is typically
invoked from the column expression itself using
:meth:`_expression.ColumnElement.nullslast`,
rather than as its standalone
function version, as in::
stmt = select([users_table]).\
order_by(users_table.c.name.desc().nullslast())
.. seealso::
:func:`.asc`
:func:`.desc`
:func:`.nullsfirst`
:meth:`_expression.Select.order_by`
"""
return UnaryExpression(
_literal_as_label_reference(column),
modifier=operators.nullslast_op,
wraps_column_expression=False,
)
@classmethod
def _create_desc(cls, column):
"""Produce a descending ``ORDER BY`` clause element.
e.g.::
from sqlalchemy import desc
stmt = select([users_table]).order_by(desc(users_table.c.name))
will produce SQL as::
SELECT id, name FROM user ORDER BY name DESC
The :func:`.desc` function is a standalone version of the
:meth:`_expression.ColumnElement.desc`
method available on all SQL expressions,
e.g.::
stmt = select([users_table]).order_by(users_table.c.name.desc())
:param column: A :class:`_expression.ColumnElement` (e.g.
scalar SQL expression)
with which to apply the :func:`.desc` operation.
.. seealso::
:func:`.asc`
:func:`.nullsfirst`
:func:`.nullslast`
:meth:`_expression.Select.order_by`
"""
return UnaryExpression(
_literal_as_label_reference(column),
modifier=operators.desc_op,
wraps_column_expression=False,
)
@classmethod
def _create_asc(cls, column):
"""Produce an ascending ``ORDER BY`` clause element.
e.g.::
from sqlalchemy import asc
stmt = select([users_table]).order_by(asc(users_table.c.name))
will produce SQL as::
SELECT id, name FROM user ORDER BY name ASC
The :func:`.asc` function is a standalone version of the
:meth:`_expression.ColumnElement.asc`
method available on all SQL expressions,
e.g.::
stmt = select([users_table]).order_by(users_table.c.name.asc())
:param column: A :class:`_expression.ColumnElement` (e.g.
scalar SQL expression)
with which to apply the :func:`.asc` operation.
.. seealso::
:func:`.desc`
:func:`.nullsfirst`
:func:`.nullslast`
:meth:`_expression.Select.order_by`
"""
return UnaryExpression(
_literal_as_label_reference(column),
modifier=operators.asc_op,
wraps_column_expression=False,
)
@classmethod
def _create_distinct(cls, expr):
"""Produce an column-expression-level unary ``DISTINCT`` clause.
This applies the ``DISTINCT`` keyword to an individual column
expression, and is typically contained within an aggregate function,
as in::
from sqlalchemy import distinct, func
stmt = select([func.count(distinct(users_table.c.name))])
The above would produce an expression resembling::
SELECT COUNT(DISTINCT name) FROM user
The :func:`.distinct` function is also available as a column-level
method, e.g. :meth:`_expression.ColumnElement.distinct`, as in::
stmt = select([func.count(users_table.c.name.distinct())])
The :func:`.distinct` operator is different from the
:meth:`_expression.Select.distinct` method of
:class:`_expression.Select`,
which produces a ``SELECT`` statement
with ``DISTINCT`` applied to the result set as a whole,
e.g. a ``SELECT DISTINCT`` expression. See that method for further
information.
.. seealso::
:meth:`_expression.ColumnElement.distinct`
:meth:`_expression.Select.distinct`
:data:`.func`
"""
expr = _literal_as_binds(expr)
return UnaryExpression(
expr,
operator=operators.distinct_op,
type_=expr.type,
wraps_column_expression=False,
)
@property
def _order_by_label_element(self):
if self.modifier in (operators.desc_op, operators.asc_op):
return self.element._order_by_label_element
else:
return None
@property
def _from_objects(self):
return self.element._from_objects
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
def get_children(self, **kwargs):
return (self.element,)
def compare(self, other, **kw):
"""Compare this :class:`UnaryExpression` against the given
:class:`_expression.ClauseElement`."""
return (
isinstance(other, UnaryExpression)
and self.operator == other.operator
and self.modifier == other.modifier
and self.element.compare(other.element, **kw)
)
def _negate(self):
if self.negate is not None:
return UnaryExpression(
self.element,
operator=self.negate,
negate=self.operator,
modifier=self.modifier,
type_=self.type,
wraps_column_expression=self.wraps_column_expression,
)
elif self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity:
return UnaryExpression(
self.self_group(against=operators.inv),
operator=operators.inv,
type_=type_api.BOOLEANTYPE,
wraps_column_expression=self.wraps_column_expression,
negate=None,
)
else:
return ClauseElement._negate(self)
def self_group(self, against=None):
if self.operator and operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
class CollectionAggregate(UnaryExpression):
"""Forms the basis for right-hand collection operator modifiers
ANY and ALL.
The ANY and ALL keywords are available in different ways on different
backends. On PostgreSQL, they only work for an ARRAY type. On
MySQL, they only work for subqueries.
"""
@classmethod
def _create_any(cls, expr):
"""Produce an ANY expression.
This may apply to an array type for some dialects (e.g. postgresql),
or to a subquery for others (e.g. mysql). e.g.::
# postgresql '5 = ANY (somearray)'
expr = 5 == any_(mytable.c.somearray)
# mysql '5 = ANY (SELECT value FROM table)'
expr = 5 == any_(select([table.c.value]))
.. versionadded:: 1.1
.. seealso::
:func:`_expression.all_`
"""
expr = _literal_as_binds(expr)
if expr.is_selectable and hasattr(expr, "as_scalar"):
expr = expr.as_scalar()
expr = expr.self_group()
return CollectionAggregate(
expr,
operator=operators.any_op,
type_=type_api.NULLTYPE,
wraps_column_expression=False,
)
@classmethod
def _create_all(cls, expr):
"""Produce an ALL expression.
This may apply to an array type for some dialects (e.g. postgresql),
or to a subquery for others (e.g. mysql). e.g.::
# postgresql '5 = ALL (somearray)'
expr = 5 == all_(mytable.c.somearray)
# mysql '5 = ALL (SELECT value FROM table)'
expr = 5 == all_(select([table.c.value]))
.. versionadded:: 1.1
.. seealso::
:func:`_expression.any_`
"""
expr = _literal_as_binds(expr)
if expr.is_selectable and hasattr(expr, "as_scalar"):
expr = expr.as_scalar()
expr = expr.self_group()
return CollectionAggregate(
expr,
operator=operators.all_op,
type_=type_api.NULLTYPE,
wraps_column_expression=False,
)
# operate and reverse_operate are hardwired to
# dispatch onto the type comparator directly, so that we can
# ensure "reversed" behavior.
def operate(self, op, *other, **kwargs):
if not operators.is_comparison(op):
raise exc.ArgumentError(
"Only comparison operators may be used with ANY/ALL"
)
kwargs["reverse"] = True
return self.comparator.operate(operators.mirror(op), *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
# comparison operators should never call reverse_operate
assert not operators.is_comparison(op)
raise exc.ArgumentError(
"Only comparison operators may be used with ANY/ALL"
)
class AsBoolean(UnaryExpression):
def __init__(self, element, operator, negate):
self.element = element
self.type = type_api.BOOLEANTYPE
self.operator = operator
self.negate = negate
self.modifier = None
self.wraps_column_expression = True
self._is_implicitly_boolean = element._is_implicitly_boolean
def self_group(self, against=None):
return self
def _negate(self):
if isinstance(self.element, (True_, False_)):
return self.element._negate()
else:
return AsBoolean(self.element, self.negate, self.operator)
class BinaryExpression(ColumnElement):
"""Represent an expression that is ``LEFT <operator> RIGHT``.
A :class:`.BinaryExpression` is generated automatically
whenever two column expressions are used in a Python binary expression::
>>> from sqlalchemy.sql import column
>>> column('a') + column('b')
<sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
>>> print(column('a') + column('b'))
a + b
"""
__visit_name__ = "binary"
_is_implicitly_boolean = True
"""Indicates that any database will know this is a boolean expression
even if the database does not have an explicit boolean datatype.
"""
def __init__(
self, left, right, operator, type_=None, negate=None, modifiers=None
):
# allow compatibility with libraries that
# refer to BinaryExpression directly and pass strings
if isinstance(operator, util.string_types):
operator = operators.custom_op(operator)
self._orig = (left, right)
self.left = left.self_group(against=operator)
self.right = right.self_group(against=operator)
self.operator = operator
self.type = type_api.to_instance(type_)
self.negate = negate
self._is_implicitly_boolean = operators.is_boolean(operator)
if modifiers is None:
self.modifiers = {}
else:
self.modifiers = modifiers
def __bool__(self):
if self.operator in (operator.eq, operator.ne):
return self.operator(hash(self._orig[0]), hash(self._orig[1]))
else:
raise TypeError("Boolean value of this clause is not defined")
__nonzero__ = __bool__
@property
def is_comparison(self):
return operators.is_comparison(self.operator)
@property
def _from_objects(self):
return self.left._from_objects + self.right._from_objects
def _copy_internals(self, clone=_clone, **kw):
self.left = clone(self.left, **kw)
self.right = clone(self.right, **kw)
def get_children(self, **kwargs):
return self.left, self.right
def compare(self, other, **kw):
"""Compare this :class:`BinaryExpression` against the
given :class:`BinaryExpression`."""
return (
isinstance(other, BinaryExpression)
and self.operator == other.operator
and (
self.left.compare(other.left, **kw)
and self.right.compare(other.right, **kw)
or (
operators.is_commutative(self.operator)
and self.left.compare(other.right, **kw)
and self.right.compare(other.left, **kw)
)
)
)
def self_group(self, against=None):
if operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
def _negate(self):
if self.negate is not None:
return BinaryExpression(
self.left,
self.right,
self.negate,
negate=self.operator,
type_=self.type,
modifiers=self.modifiers,
)
else:
return super(BinaryExpression, self)._negate()
class Slice(ColumnElement):
"""Represent SQL for a Python array-slice object.
This is not a specific SQL construct at this level, but
may be interpreted by specific dialects, e.g. PostgreSQL.
"""
__visit_name__ = "slice"
def __init__(self, start, stop, step):
self.start = start
self.stop = stop
self.step = step
self.type = type_api.NULLTYPE
def self_group(self, against=None):
assert against is operator.getitem
return self
class IndexExpression(BinaryExpression):
"""Represent the class of expressions that are like an "index" operation.
"""
pass
class Grouping(ColumnElement):
"""Represent a grouping within a column expression"""
__visit_name__ = "grouping"
def __init__(self, element):
self.element = element
self.type = getattr(element, "type", type_api.NULLTYPE)
def self_group(self, against=None):
return self
@util.memoized_property
def _is_implicitly_boolean(self):
return self.element._is_implicitly_boolean
@property
def _key_label(self):
return self._label
@property
def _label(self):
return getattr(self.element, "_label", None) or self.anon_label
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
def get_children(self, **kwargs):
return (self.element,)
@property
def _from_objects(self):
return self.element._from_objects
def __getattr__(self, attr):
return getattr(self.element, attr)
def __getstate__(self):
return {"element": self.element, "type": self.type}
def __setstate__(self, state):
self.element = state["element"]
self.type = state["type"]
def compare(self, other, **kw):
return isinstance(other, Grouping) and self.element.compare(
other.element
)
RANGE_UNBOUNDED = util.symbol("RANGE_UNBOUNDED")
RANGE_CURRENT = util.symbol("RANGE_CURRENT")
class Over(ColumnElement):
"""Represent an OVER clause.
This is a special operator against a so-called
"window" function, as well as any aggregate function,
which produces results relative to the result set
itself. It's supported only by certain database
backends.
"""
__visit_name__ = "over"
order_by = None
partition_by = None
element = None
"""The underlying expression object to which this :class:`.Over`
object refers towards."""
def __init__(
self, element, partition_by=None, order_by=None, range_=None, rows=None
):
r"""Produce an :class:`.Over` object against a function.
Used against aggregate or so-called "window" functions,
for database backends that support window functions.
:func:`_expression.over` is usually called using
the :meth:`.FunctionElement.over` method, e.g.::
func.row_number().over(order_by=mytable.c.some_column)
Would produce::
ROW_NUMBER() OVER(ORDER BY some_column)
Ranges are also possible using the :paramref:`.expression.over.range_`
and :paramref:`.expression.over.rows` parameters. These
mutually-exclusive parameters each accept a 2-tuple, which contains
a combination of integers and None::
func.row_number().over(
order_by=my_table.c.some_column, range_=(None, 0))
The above would produce::
ROW_NUMBER() OVER(ORDER BY some_column
RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)
A value of None indicates "unbounded", a
value of zero indicates "current row", and negative / positive
integers indicate "preceding" and "following":
* RANGE BETWEEN 5 PRECEDING AND 10 FOLLOWING::
func.row_number().over(order_by='x', range_=(-5, 10))
* ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW::
func.row_number().over(order_by='x', rows=(None, 0))
* RANGE BETWEEN 2 PRECEDING AND UNBOUNDED FOLLOWING::
func.row_number().over(order_by='x', range_=(-2, None))
* RANGE BETWEEN 1 FOLLOWING AND 3 FOLLOWING::
func.row_number().over(order_by='x', range_=(1, 3))
.. versionadded:: 1.1 support for RANGE / ROWS within a window
:param element: a :class:`.FunctionElement`, :class:`.WithinGroup`,
or other compatible construct.
:param partition_by: a column element or string, or a list
of such, that will be used as the PARTITION BY clause
of the OVER construct.
:param order_by: a column element or string, or a list
of such, that will be used as the ORDER BY clause
of the OVER construct.
:param range\_: optional range clause for the window. This is a
tuple value which can contain integer values or None, and will
render a RANGE BETWEEN PRECEDING / FOLLOWING clause
.. versionadded:: 1.1
:param rows: optional rows clause for the window. This is a tuple
value which can contain integer values or None, and will render
a ROWS BETWEEN PRECEDING / FOLLOWING clause.
.. versionadded:: 1.1
This function is also available from the :data:`~.expression.func`
construct itself via the :meth:`.FunctionElement.over` method.
.. seealso::
:data:`.expression.func`
:func:`_expression.within_group`
"""
self.element = element
if order_by is not None:
self.order_by = ClauseList(
*util.to_list(order_by),
_literal_as_text=_literal_as_label_reference
)
if partition_by is not None:
self.partition_by = ClauseList(
*util.to_list(partition_by),
_literal_as_text=_literal_as_label_reference
)
if range_:
self.range_ = self._interpret_range(range_)
if rows:
raise exc.ArgumentError(
"'range_' and 'rows' are mutually exclusive"
)
else:
self.rows = None
elif rows:
self.rows = self._interpret_range(rows)
self.range_ = None
else:
self.rows = self.range_ = None
def _interpret_range(self, range_):
if not isinstance(range_, tuple) or len(range_) != 2:
raise exc.ArgumentError("2-tuple expected for range/rows")
if range_[0] is None:
lower = RANGE_UNBOUNDED
else:
try:
lower = int(range_[0])
except ValueError as err:
util.raise_(
exc.ArgumentError(
"Integer or None expected for range value"
),
replace_context=err,
)
else:
if lower == 0:
lower = RANGE_CURRENT
if range_[1] is None:
upper = RANGE_UNBOUNDED
else:
try:
upper = int(range_[1])
except ValueError as err:
util.raise_(
exc.ArgumentError(
"Integer or None expected for range value"
),
replace_context=err,
)
else:
if upper == 0:
upper = RANGE_CURRENT
return lower, upper
@property
@util.deprecated(
"1.1",
"the :attr:`.Over.func` member of the :class:`.Over` "
"class is deprecated and will be removed in a future release. "
"Please refer to the :attr:`.Over.element` attribute.",
)
def func(self):
"""the element referred to by this :class:`.Over`
clause.
"""
return self.element
@util.memoized_property
def type(self):
return self.element.type
def get_children(self, **kwargs):
return [
c
for c in (self.element, self.partition_by, self.order_by)
if c is not None
]
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
if self.partition_by is not None:
self.partition_by = clone(self.partition_by, **kw)
if self.order_by is not None:
self.order_by = clone(self.order_by, **kw)
@property
def _from_objects(self):
return list(
itertools.chain(
*[
c._from_objects
for c in (self.element, self.partition_by, self.order_by)
if c is not None
]
)
)
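# Editor's note: the following is an illustrative sketch, not part of the
# original SQLAlchemy module.  It shows how the OVER clause described in the
# docstring above is typically built and rendered; the column name "x" is a
# hypothetical placeholder.  Wrapped in a function so nothing runs at import.
def _example_over_usage():
    from sqlalchemy import column, func, select
    stmt = select(
        [func.row_number().over(order_by=column("x"), range_=(None, 0))]
    )
    # Renders approximately:
    #   SELECT row_number() OVER (ORDER BY x
    #   RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS anon_1
    print(stmt)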
class WithinGroup(ColumnElement):
"""Represent a WITHIN GROUP (ORDER BY) clause.
This is a special operator against so-called
"ordered set aggregate" and "hypothetical
set aggregate" functions, including ``percentile_cont()``,
``rank()``, ``dense_rank()``, etc.
It's supported only by certain database backends, such as PostgreSQL,
Oracle and MS SQL Server.
The :class:`.WithinGroup` construct extracts its type from the
method :meth:`.FunctionElement.within_group_type`. If this returns
``None``, the function's ``.type`` is used.
"""
__visit_name__ = "withingroup"
order_by = None
def __init__(self, element, *order_by):
r"""Produce a :class:`.WithinGroup` object against a function.
Used against so-called "ordered set aggregate" and "hypothetical
set aggregate" functions, including :class:`.percentile_cont`,
:class:`.rank`, :class:`.dense_rank`, etc.
:func:`_expression.within_group` is usually called using
the :meth:`.FunctionElement.within_group` method, e.g.::
from sqlalchemy import within_group
stmt = select([
department.c.id,
func.percentile_cont(0.5).within_group(
department.c.salary.desc()
)
])
The above statement would produce SQL similar to
``SELECT department.id, percentile_cont(0.5)
WITHIN GROUP (ORDER BY department.salary DESC)``.
:param element: a :class:`.FunctionElement` construct, typically
generated by :data:`~.expression.func`.
:param \*order_by: one or more column elements that will be used
as the ORDER BY clause of the WITHIN GROUP construct.
.. versionadded:: 1.1
.. seealso::
:data:`.expression.func`
:func:`_expression.over`
"""
self.element = element
if order_by is not None:
self.order_by = ClauseList(
*util.to_list(order_by),
_literal_as_text=_literal_as_label_reference
)
def over(self, partition_by=None, order_by=None, range_=None, rows=None):
"""Produce an OVER clause against this :class:`.WithinGroup`
construct.
This function has the same signature as that of
:meth:`.FunctionElement.over`.
"""
return Over(
self,
partition_by=partition_by,
order_by=order_by,
range_=range_,
rows=rows,
)
@util.memoized_property
def type(self):
wgt = self.element.within_group_type(self)
if wgt is not None:
return wgt
else:
return self.element.type
def get_children(self, **kwargs):
return [c for c in (self.element, self.order_by) if c is not None]
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
if self.order_by is not None:
self.order_by = clone(self.order_by, **kw)
@property
def _from_objects(self):
return list(
itertools.chain(
*[
c._from_objects
for c in (self.element, self.order_by)
if c is not None
]
)
)
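# Editor's note: illustrative sketch only, not part of the original module.
# It demonstrates the WITHIN GROUP construct explained above using the
# ordered-set aggregate percentile_cont; "salary" is a hypothetical column.
def _example_within_group_usage():
    from sqlalchemy import column, func, select
    stmt = select(
        [func.percentile_cont(0.5).within_group(column("salary").desc())]
    )
    # Renders approximately:
    #   SELECT percentile_cont(:percentile_cont_1)
    #   WITHIN GROUP (ORDER BY salary DESC) AS anon_1
    print(stmt)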
class FunctionFilter(ColumnElement):
"""Represent a function FILTER clause.
This is a special operator against aggregate and window functions,
which controls which rows are passed to it.
It's supported only by certain database backends.
Invocation of :class:`.FunctionFilter` is via
:meth:`.FunctionElement.filter`::
func.count(1).filter(True)
.. versionadded:: 1.0.0
.. seealso::
:meth:`.FunctionElement.filter`
"""
__visit_name__ = "funcfilter"
criterion = None
def __init__(self, func, *criterion):
"""Produce a :class:`.FunctionFilter` object against a function.
Used against aggregate and window functions,
for database backends that support the "FILTER" clause.
E.g.::
from sqlalchemy import funcfilter
funcfilter(func.count(1), MyClass.name == 'some name')
Would produce "COUNT(1) FILTER (WHERE myclass.name = 'some name')".
This function is also available from the :data:`~.expression.func`
construct itself via the :meth:`.FunctionElement.filter` method.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.FunctionElement.filter`
"""
self.func = func
self.filter(*criterion)
def filter(self, *criterion):
"""Produce an additional FILTER against the function.
This method adds additional criteria to the initial criteria
set up by :meth:`.FunctionElement.filter`.
Multiple criteria are joined together at SQL render time
via ``AND``.
"""
for criterion in list(criterion):
criterion = _expression_literal_as_text(criterion)
if self.criterion is not None:
self.criterion = self.criterion & criterion
else:
self.criterion = criterion
return self
def over(self, partition_by=None, order_by=None, range_=None, rows=None):
"""Produce an OVER clause against this filtered function.
Used against aggregate or so-called "window" functions,
for database backends that support window functions.
The expression::
func.rank().filter(MyClass.y > 5).over(order_by='x')
is shorthand for::
from sqlalchemy import over, funcfilter
over(funcfilter(func.rank(), MyClass.y > 5), order_by='x')
See :func:`_expression.over` for a full description.
"""
return Over(
self,
partition_by=partition_by,
order_by=order_by,
range_=range_,
rows=rows,
)
def self_group(self, against=None):
if operators.is_precedent(operators.filter_op, against):
return Grouping(self)
else:
return self
@util.memoized_property
def type(self):
return self.func.type
def get_children(self, **kwargs):
return [c for c in (self.func, self.criterion) if c is not None]
def _copy_internals(self, clone=_clone, **kw):
self.func = clone(self.func, **kw)
if self.criterion is not None:
self.criterion = clone(self.criterion, **kw)
@property
def _from_objects(self):
return list(
itertools.chain(
*[
c._from_objects
for c in (self.func, self.criterion)
if c is not None
]
)
)
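# Editor's note: illustrative sketch only, not part of the original module.
# It shows the aggregate FILTER clause described above; the "status" column
# and its comparison value are hypothetical placeholders.
def _example_function_filter_usage():
    from sqlalchemy import column, func, select
    stmt = select([func.count(1).filter(column("status") == "active")])
    # Renders approximately:
    #   SELECT count(:count_2) FILTER (WHERE status = :status_1) AS anon_1
    print(stmt)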
class Label(ColumnElement):
"""Represents a column label (AS).
Represent a label, as typically applied to any column-level
element using the ``AS`` sql keyword.
"""
__visit_name__ = "label"
def __init__(self, name, element, type_=None):
"""Return a :class:`Label` object for the
given :class:`_expression.ColumnElement`.
A label changes the name of an element in the columns clause of a
``SELECT`` statement, typically via the ``AS`` SQL keyword.
This functionality is more conveniently available via the
:meth:`_expression.ColumnElement.label` method on
:class:`_expression.ColumnElement`.
:param name: label name
:param obj: a :class:`_expression.ColumnElement`.
"""
if isinstance(element, Label):
self._resolve_label = element._label
while isinstance(element, Label):
element = element.element
if name:
self.name = name
self._resolve_label = self.name
else:
self.name = _anonymous_label(
"%%(%d %s)s" % (id(self), getattr(element, "name", "anon"))
)
self.key = self._label = self._key_label = self.name
self._element = element
self._type = type_
self._proxies = [element]
def __reduce__(self):
return self.__class__, (self.name, self._element, self._type)
@util.memoized_property
def _is_implicitly_boolean(self):
return self.element._is_implicitly_boolean
@util.memoized_property
def _allow_label_resolve(self):
return self.element._allow_label_resolve
@property
def _order_by_label_element(self):
return self
@util.memoized_property
def type(self):
return type_api.to_instance(
self._type or getattr(self._element, "type", None)
)
@util.memoized_property
def element(self):
return self._element.self_group(against=operators.as_)
def self_group(self, against=None):
return self._apply_to_inner(self._element.self_group, against=against)
def _negate(self):
return self._apply_to_inner(self._element._negate)
def _apply_to_inner(self, fn, *arg, **kw):
sub_element = fn(*arg, **kw)
if sub_element is not self._element:
return Label(self.name, sub_element, type_=self._type)
else:
return self
@property
def primary_key(self):
return self.element.primary_key
@property
def foreign_keys(self):
return self.element.foreign_keys
def get_children(self, **kwargs):
return (self.element,)
def _copy_internals(self, clone=_clone, anonymize_labels=False, **kw):
self._element = clone(self._element, **kw)
self.__dict__.pop("element", None)
self.__dict__.pop("_allow_label_resolve", None)
if anonymize_labels:
self.name = self._resolve_label = _anonymous_label(
"%%(%d %s)s"
% (id(self), getattr(self.element, "name", "anon"))
)
self.key = self._label = self._key_label = self.name
@property
def _from_objects(self):
return self.element._from_objects
def _make_proxy(self, selectable, name=None, **kw):
e = self.element._make_proxy(
selectable,
name=name if name else self.name,
disallow_is_literal=True,
)
e._proxies.append(self)
if self._type is not None:
e.type = self._type
return e
class ColumnClause(Immutable, ColumnElement):
"""Represents a column expression from any textual string.
The :class:`.ColumnClause`, a lightweight analogue to the
:class:`_schema.Column` class, is typically invoked using the
:func:`_expression.column` function, as in::
from sqlalchemy import column
id, name = column("id"), column("name")
stmt = select([id, name]).select_from("user")
The above statement would produce SQL like::
SELECT id, name FROM user
:class:`.ColumnClause` is the immediate superclass of the schema-specific
:class:`_schema.Column` object. While the :class:`_schema.Column`
class has all the
same capabilities as :class:`.ColumnClause`, the :class:`.ColumnClause`
class is usable by itself in those cases where behavioral requirements
are limited to simple SQL expression generation. The object has none of
the associations with schema-level metadata or with execution-time
behavior that :class:`_schema.Column` does,
so in that sense is a "lightweight"
version of :class:`_schema.Column`.
    Full details on :class:`.ColumnClause` usage are at
:func:`_expression.column`.
.. seealso::
:func:`_expression.column`
:class:`_schema.Column`
"""
__visit_name__ = "column"
onupdate = default = server_default = server_onupdate = None
_is_multiparam_column = False
_memoized_property = util.group_expirable_memoized_property()
def __init__(self, text, type_=None, is_literal=False, _selectable=None):
"""Produce a :class:`.ColumnClause` object.
The :class:`.ColumnClause` is a lightweight analogue to the
:class:`_schema.Column` class. The :func:`_expression.column`
function can
be invoked with just a name alone, as in::
from sqlalchemy import column
id, name = column("id"), column("name")
stmt = select([id, name]).select_from("user")
The above statement would produce SQL like::
SELECT id, name FROM user
Once constructed, :func:`_expression.column`
may be used like any other SQL
expression element such as within :func:`_expression.select`
constructs::
from sqlalchemy.sql import column
id, name = column("id"), column("name")
stmt = select([id, name]).select_from("user")
The text handled by :func:`_expression.column`
is assumed to be handled
like the name of a database column; if the string contains mixed case,
special characters, or matches a known reserved word on the target
backend, the column expression will render using the quoting
behavior determined by the backend. To produce a textual SQL
expression that is rendered exactly without any quoting,
use :func:`_expression.literal_column` instead,
or pass ``True`` as the
value of :paramref:`_expression.column.is_literal`. Additionally,
full SQL
statements are best handled using the :func:`_expression.text`
construct.
:func:`_expression.column` can be used in a table-like
fashion by combining it with the :func:`.table` function
(which is the lightweight analogue to :class:`_schema.Table`
) to produce
a working table construct with minimal boilerplate::
from sqlalchemy import table, column, select
user = table("user",
column("id"),
column("name"),
column("description"),
)
stmt = select([user.c.description]).where(user.c.name == 'wendy')
A :func:`_expression.column` / :func:`.table`
construct like that illustrated
above can be created in an
ad-hoc fashion and is not associated with any
:class:`_schema.MetaData`, DDL, or events, unlike its
:class:`_schema.Table` counterpart.
.. versionchanged:: 1.0.0 :func:`_expression.column` can now
be imported from the plain ``sqlalchemy`` namespace like any
other SQL element.
:param text: the text of the element.
:param type: :class:`_types.TypeEngine` object which can associate
this :class:`.ColumnClause` with a type.
:param is_literal: if True, the :class:`.ColumnClause` is assumed to
be an exact expression that will be delivered to the output with no
quoting rules applied regardless of case sensitive settings. the
:func:`_expression.literal_column()` function essentially invokes
:func:`_expression.column` while passing ``is_literal=True``.
.. seealso::
:class:`_schema.Column`
:func:`_expression.literal_column`
:func:`.table`
:func:`_expression.text`
:ref:`sqlexpression_literal_column`
"""
self.key = self.name = text
self.table = _selectable
self.type = type_api.to_instance(type_)
self.is_literal = is_literal
def _compare_name_for_result(self, other):
if (
self.is_literal
or self.table is None
or self.table._textual
or not hasattr(other, "proxy_set")
or (
isinstance(other, ColumnClause)
and (
other.is_literal
or other.table is None
or other.table._textual
)
)
):
return (hasattr(other, "name") and self.name == other.name) or (
hasattr(other, "_label") and self._label == other._label
)
else:
return other.proxy_set.intersection(self.proxy_set)
def _get_table(self):
return self.__dict__["table"]
def _set_table(self, table):
self._memoized_property.expire_instance(self)
self.__dict__["table"] = table
table = property(_get_table, _set_table)
@_memoized_property
def _from_objects(self):
t = self.table
if t is not None:
return [t]
else:
return []
@util.memoized_property
def description(self):
if util.py3k:
return self.name
else:
return self.name.encode("ascii", "backslashreplace")
@_memoized_property
def _key_label(self):
if self.key != self.name:
return self._gen_label(self.key)
else:
return self._label
@_memoized_property
def _label(self):
return self._gen_label(self.name)
@_memoized_property
def _render_label_in_columns_clause(self):
return self.table is not None
@property
def _ddl_label(self):
return self._gen_label(self.name, dedupe_on_key=False)
def _gen_label(self, name, dedupe_on_key=True):
t = self.table
if self.is_literal:
return None
elif t is not None and t.named_with_column:
if getattr(t, "schema", None):
label = t.schema.replace(".", "_") + "_" + t.name + "_" + name
else:
label = t.name + "_" + name
# propagate name quoting rules for labels.
if getattr(name, "quote", None) is not None:
if isinstance(label, quoted_name):
label.quote = name.quote
else:
label = quoted_name(label, name.quote)
elif getattr(t.name, "quote", None) is not None:
# can't get this situation to occur, so let's
# assert false on it for now
assert not isinstance(label, quoted_name)
label = quoted_name(label, t.name.quote)
if dedupe_on_key:
# ensure the label name doesn't conflict with that of an
# existing column. note that this implies that any Column
# must **not** set up its _label before its parent table has
# all of its other Column objects set up. There are several
# tables in the test suite which will fail otherwise; example:
# table "owner" has columns "name" and "owner_name". Therefore
# column owner.name cannot use the label "owner_name", it has
# to be "owner_name_1".
if label in t.c:
_label = label
counter = 1
while _label in t.c:
_label = label + "_" + str(counter)
counter += 1
label = _label
return _as_truncated(label)
else:
return name
def _bind_param(self, operator, obj, type_=None):
return BindParameter(
self.key,
obj,
_compared_to_operator=operator,
_compared_to_type=self.type,
type_=type_,
unique=True,
)
def _make_proxy(
self,
selectable,
name=None,
attach=True,
name_is_truncatable=False,
disallow_is_literal=False,
**kw
):
# the "is_literal" flag normally should never be propagated; a proxied
# column is always a SQL identifier and never the actual expression
# being evaluated. however, there is a case where the "is_literal" flag
# might be used to allow the given identifier to have a fixed quoting
# pattern already, so maintain the flag for the proxy unless a
# :class:`.Label` object is creating the proxy. See [ticket:4730].
is_literal = (
not disallow_is_literal
and self.is_literal
and (
# note this does not accommodate for quoted_name differences
# right now
name is None
or name == self.name
)
)
c = self._constructor(
_as_truncated(name or self.name)
if name_is_truncatable
else (name or self.name),
type_=self.type,
_selectable=selectable,
is_literal=is_literal,
)
if name is None:
c.key = self.key
c._proxies = [self]
if selectable._is_clone_of is not None:
c._is_clone_of = selectable._is_clone_of.columns.get(c.key)
if attach:
selectable._columns[c.key] = c
return c
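# Editor's note: illustrative sketch only, not part of the original module.
# It exercises the lightweight column()/table() constructs documented above;
# the "user" table and its columns are hypothetical.
def _example_column_table_usage():
    from sqlalchemy import column, select, table
    user = table(
        "user",
        column("id"),
        column("name"),
        column("description"),
    )
    stmt = select([user.c.description]).where(user.c.name == "wendy")
    # Renders approximately:
    #   SELECT "user".description FROM "user" WHERE "user".name = :name_1
    print(stmt)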
class CollationClause(ColumnElement):
__visit_name__ = "collation"
def __init__(self, collation):
self.collation = collation
class _IdentifiedClause(Executable, ClauseElement):
__visit_name__ = "identified"
_execution_options = Executable._execution_options.union(
{"autocommit": False}
)
def __init__(self, ident):
self.ident = ident
class SavepointClause(_IdentifiedClause):
__visit_name__ = "savepoint"
class RollbackToSavepointClause(_IdentifiedClause):
__visit_name__ = "rollback_to_savepoint"
class ReleaseSavepointClause(_IdentifiedClause):
__visit_name__ = "release_savepoint"
class quoted_name(util.MemoizedSlots, util.text_type):
"""Represent a SQL identifier combined with quoting preferences.
:class:`.quoted_name` is a Python unicode/str subclass which
represents a particular identifier name along with a
``quote`` flag. This ``quote`` flag, when set to
``True`` or ``False``, overrides automatic quoting behavior
for this identifier in order to either unconditionally quote
or to not quote the name. If left at its default of ``None``,
quoting behavior is applied to the identifier on a per-backend basis
based on an examination of the token itself.
A :class:`.quoted_name` object with ``quote=True`` is also
prevented from being modified in the case of a so-called
"name normalize" option. Certain database backends, such as
Oracle, Firebird, and DB2 "normalize" case-insensitive names
as uppercase. The SQLAlchemy dialects for these backends
convert from SQLAlchemy's lower-case-means-insensitive convention
to the upper-case-means-insensitive conventions of those backends.
The ``quote=True`` flag here will prevent this conversion from occurring
to support an identifier that's quoted as all lower case against
such a backend.
The :class:`.quoted_name` object is normally created automatically
when specifying the name for key schema constructs such as
:class:`_schema.Table`, :class:`_schema.Column`, and others.
The class can also be
passed explicitly as the name to any function that receives a name which
can be quoted. Such as to use the :meth:`_engine.Engine.has_table`
method with
an unconditionally quoted name::
from sqlalchemy import create_engine
from sqlalchemy.sql import quoted_name
engine = create_engine("oracle+cx_oracle://some_dsn")
engine.has_table(quoted_name("some_table", True))
    The above will run the "has table" logic against the Oracle backend,
passing the name exactly as ``"some_table"`` without converting to
upper case.
.. versionadded:: 0.9.0
.. versionchanged:: 1.2 The :class:`.quoted_name` construct is now
importable from ``sqlalchemy.sql``, in addition to the previous
location of ``sqlalchemy.sql.elements``.
"""
__slots__ = "quote", "lower", "upper"
def __new__(cls, value, quote):
if value is None:
return None
# experimental - don't bother with quoted_name
# if quote flag is None. doesn't seem to make any dent
# in performance however
# elif not sprcls and quote is None:
# return value
elif isinstance(value, cls) and (
quote is None or value.quote == quote
):
return value
self = super(quoted_name, cls).__new__(cls, value)
self.quote = quote
return self
def __reduce__(self):
return quoted_name, (util.text_type(self), self.quote)
def _memoized_method_lower(self):
if self.quote:
return self
else:
return util.text_type(self).lower()
def _memoized_method_upper(self):
if self.quote:
return self
else:
return util.text_type(self).upper()
def __repr__(self):
if util.py2k:
backslashed = self.encode("ascii", "backslashreplace")
if not util.py2k:
backslashed = backslashed.decode("ascii")
return "'%s'" % backslashed
else:
return str.__repr__(self)
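# Editor's note: illustrative sketch only, not part of the original module.
# It shows the quote=True behavior described above: lower()/upper() leave a
# quoted name untouched, which is what defeats backend "name normalize"
# logic; the table name is a hypothetical placeholder.
def _example_quoted_name_usage():
    from sqlalchemy.sql import quoted_name
    name = quoted_name("some_table", True)
    print(name.lower(), name.upper())  # both print "some_table" unchanged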
class _truncated_label(quoted_name):
"""A unicode subclass used to identify symbolic "
"names that may require truncation."""
__slots__ = ()
def __new__(cls, value, quote=None):
quote = getattr(value, "quote", quote)
# return super(_truncated_label, cls).__new__(cls, value, quote, True)
return super(_truncated_label, cls).__new__(cls, value, quote)
def __reduce__(self):
return self.__class__, (util.text_type(self), self.quote)
def apply_map(self, map_):
return self
class conv(_truncated_label):
"""Mark a string indicating that a name has already been converted
by a naming convention.
This is a string subclass that indicates a name that should not be
subject to any further naming conventions.
E.g. when we create a :class:`.Constraint` using a naming convention
as follows::
m = MetaData(naming_convention={
"ck": "ck_%(table_name)s_%(constraint_name)s"
})
t = Table('t', m, Column('x', Integer),
CheckConstraint('x > 5', name='x5'))
The name of the above constraint will be rendered as ``"ck_t_x5"``.
That is, the existing name ``x5`` is used in the naming convention as the
``constraint_name`` token.
In some situations, such as in migration scripts, we may be rendering
the above :class:`.CheckConstraint` with a name that's already been
converted. In order to make sure the name isn't double-modified, the
new name is applied using the :func:`_schema.conv` marker. We can
use this explicitly as follows::
m = MetaData(naming_convention={
"ck": "ck_%(table_name)s_%(constraint_name)s"
})
t = Table('t', m, Column('x', Integer),
CheckConstraint('x > 5', name=conv('ck_t_x5')))
Where above, the :func:`_schema.conv` marker indicates that the constraint
name here is final, and the name will render as ``"ck_t_x5"`` and not
``"ck_t_ck_t_x5"``
.. versionadded:: 0.9.4
.. seealso::
:ref:`constraint_naming_conventions`
"""
__slots__ = ()
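# Editor's note: illustrative sketch only, not part of the original module.
# It reproduces the conv() example from the docstring above as runnable
# code; the table, column, and constraint names are hypothetical, and the
# import location of conv (sqlalchemy.schema) is assumed for this release.
def _example_conv_usage():
    from sqlalchemy import CheckConstraint, Column, Integer, MetaData, Table
    from sqlalchemy.schema import conv
    m = MetaData(
        naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
    )
    t = Table(
        "t",
        m,
        Column("x", Integer),
        CheckConstraint("x > 5", name=conv("ck_t_x5")),
    )
    ck = next(c for c in t.constraints if isinstance(c, CheckConstraint))
    print(ck.name)  # "ck_t_x5", not "ck_t_ck_t_x5"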
class _defer_name(_truncated_label):
"""mark a name as 'deferred' for the purposes of automated name
generation.
"""
__slots__ = ()
def __new__(cls, value):
if value is None:
return _NONE_NAME
elif isinstance(value, conv):
return value
else:
return super(_defer_name, cls).__new__(cls, value)
def __reduce__(self):
return self.__class__, (util.text_type(self),)
class _defer_none_name(_defer_name):
"""indicate a 'deferred' name that was ultimately the value None."""
__slots__ = ()
_NONE_NAME = _defer_none_name("_unnamed_")
# for backwards compatibility in case
# someone is re-implementing the
# _truncated_identifier() sequence in a custom
# compiler
_generated_label = _truncated_label
class _anonymous_label(_truncated_label):
"""A unicode subclass used to identify anonymously
generated names."""
__slots__ = ()
def __add__(self, other):
return _anonymous_label(
quoted_name(
util.text_type.__add__(self, util.text_type(other)), self.quote
)
)
def __radd__(self, other):
return _anonymous_label(
quoted_name(
util.text_type.__add__(util.text_type(other), self), self.quote
)
)
def apply_map(self, map_):
if self.quote is not None:
# preserve quoting only if necessary
return quoted_name(self % map_, self.quote)
else:
# else skip the constructor call
return self % map_
def _as_truncated(value):
"""coerce the given value to :class:`._truncated_label`.
Existing :class:`._truncated_label` and
:class:`._anonymous_label` objects are passed
unchanged.
"""
if isinstance(value, _truncated_label):
return value
else:
return _truncated_label(value)
def _string_or_unprintable(element):
if isinstance(element, util.string_types):
return element
else:
try:
return str(element)
except Exception:
return "unprintable element %r" % element
def _expand_cloned(elements):
"""expand the given set of ClauseElements to be the set of all 'cloned'
predecessors.
"""
return itertools.chain(*[x._cloned_set for x in elements])
def _select_iterables(elements):
"""expand tables into individual columns in the
given list of column expressions.
"""
return itertools.chain(*[c._select_iterable for c in elements])
def _cloned_intersection(a, b):
"""return the intersection of sets a and b, counting
any overlap between 'cloned' predecessors.
The returned set is in terms of the entities present within 'a'.
"""
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
return set(
elem for elem in a if all_overlap.intersection(elem._cloned_set)
)
def _cloned_difference(a, b):
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
return set(
elem for elem in a if not all_overlap.intersection(elem._cloned_set)
)
@util.dependencies("sqlalchemy.sql.functions")
def _labeled(functions, element):
if not hasattr(element, "name") or isinstance(
element, functions.FunctionElement
):
return element.label(None)
else:
return element
def _is_column(col):
"""True if ``col`` is an instance of
:class:`_expression.ColumnElement`. """
return isinstance(col, ColumnElement)
def _find_columns(clause):
"""locate Column objects within the given expression."""
cols = util.column_set()
traverse(clause, {}, {"column": cols.add})
return cols
# there is some inconsistency here between the usage of
# inspect() vs. checking for Visitable and __clause_element__.
# Ideally all functions here would derive from inspect(),
# however the inspect() versions add significant callcount
# overhead for critical functions like _interpret_as_column_or_from().
# Generally, the column-based functions are more performance critical
# and are fine just checking for __clause_element__(). It is only
# _interpret_as_from() where we'd like to be able to receive ORM entities
# that have no defined namespace, hence inspect() is needed there.
def _column_as_key(element):
if isinstance(element, util.string_types):
return element
if hasattr(element, "__clause_element__"):
element = element.__clause_element__()
try:
return element.key
except AttributeError:
return None
def _clause_element_as_expr(element):
if hasattr(element, "__clause_element__"):
return element.__clause_element__()
else:
return element
def _literal_as_label_reference(element):
if isinstance(element, util.string_types):
return _textual_label_reference(element)
elif hasattr(element, "__clause_element__"):
element = element.__clause_element__()
return _literal_as_text(element)
def _literal_and_labels_as_label_reference(element):
if isinstance(element, util.string_types):
return _textual_label_reference(element)
elif hasattr(element, "__clause_element__"):
element = element.__clause_element__()
if (
isinstance(element, ColumnElement)
and element._order_by_label_element is not None
):
return _label_reference(element)
else:
return _literal_as_text(element)
def _expression_literal_as_text(element):
return _literal_as_text(element)
def _literal_as(element, text_fallback):
if isinstance(element, Visitable):
return element
elif hasattr(element, "__clause_element__"):
return element.__clause_element__()
elif isinstance(element, util.string_types):
return text_fallback(element)
elif isinstance(element, (util.NoneType, bool)):
return _const_expr(element)
else:
raise exc.ArgumentError(
"SQL expression object expected, got object of type %r "
"instead" % type(element)
)
def _literal_as_text(element, allow_coercion_to_text=False):
if allow_coercion_to_text:
return _literal_as(element, TextClause)
else:
return _literal_as(element, _no_text_coercion)
def _literal_as_column(element):
return _literal_as(element, ColumnClause)
def _no_column_coercion(element):
element = str(element)
guess_is_literal = not _guess_straight_column.match(element)
raise exc.ArgumentError(
"Textual column expression %(column)r should be "
"explicitly declared with text(%(column)r), "
"or use %(literal_column)s(%(column)r) "
"for more specificity"
% {
"column": util.ellipses_string(element),
"literal_column": "literal_column"
if guess_is_literal
else "column",
}
)
def _no_text_coercion(
element, exc_cls=exc.ArgumentError, extra=None, err=None
):
util.raise_(
exc_cls(
"%(extra)sTextual SQL expression %(expr)r should be "
"explicitly declared as text(%(expr)r)"
% {
"expr": util.ellipses_string(element),
"extra": "%s " % extra if extra else "",
}
),
replace_context=err,
)
def _no_literals(element):
if hasattr(element, "__clause_element__"):
return element.__clause_element__()
elif not isinstance(element, Visitable):
raise exc.ArgumentError(
"Ambiguous literal: %r. Use the 'text()' "
"function to indicate a SQL expression "
"literal, or 'literal()' to indicate a "
"bound value." % (element,)
)
else:
return element
def _is_literal(element):
return not isinstance(element, Visitable) and not hasattr(
element, "__clause_element__"
)
def _only_column_elements_or_none(element, name):
if element is None:
return None
else:
return _only_column_elements(element, name)
def _only_column_elements(element, name):
if hasattr(element, "__clause_element__"):
element = element.__clause_element__()
if not isinstance(element, ColumnElement):
raise exc.ArgumentError(
"Column-based expression object expected for argument "
"'%s'; got: '%s', type %s" % (name, element, type(element))
)
return element
def _literal_as_binds(element, name=None, type_=None):
if hasattr(element, "__clause_element__"):
return element.__clause_element__()
elif not isinstance(element, Visitable):
if element is None:
return Null()
else:
return BindParameter(name, element, type_=type_, unique=True)
else:
return element
_guess_straight_column = re.compile(r"^\w\S*$", re.I)
def _interpret_as_column_or_from(element):
if isinstance(element, Visitable):
return element
elif hasattr(element, "__clause_element__"):
return element.__clause_element__()
insp = inspection.inspect(element, raiseerr=False)
if insp is None:
if isinstance(element, (util.NoneType, bool)):
return _const_expr(element)
elif hasattr(insp, "selectable"):
return insp.selectable
# be forgiving as this is an extremely common
# and known expression
if element == "*":
guess_is_literal = True
elif isinstance(element, (numbers.Number)):
return ColumnClause(str(element), is_literal=True)
else:
_no_column_coercion(element)
return ColumnClause(element, is_literal=guess_is_literal)
def _const_expr(element):
if isinstance(element, (Null, False_, True_)):
return element
elif element is None:
return Null()
elif element is False:
return False_()
elif element is True:
return True_()
else:
raise exc.ArgumentError("Expected None, False, or True")
def _type_from_args(args):
for a in args:
if not a.type._isnull:
return a.type
else:
return type_api.NULLTYPE
def _corresponding_column_or_error(fromclause, column, require_embedded=False):
c = fromclause.corresponding_column(
column, require_embedded=require_embedded
)
if c is None:
raise exc.InvalidRequestError(
"Given column '%s', attached to table '%s', "
"failed to locate a corresponding column from table '%s'"
% (column, getattr(column, "table", None), fromclause.description)
)
return c
class AnnotatedColumnElement(Annotated):
def __init__(self, element, values):
Annotated.__init__(self, element, values)
ColumnElement.comparator._reset(self)
for attr in ("name", "key", "table"):
if self.__dict__.get(attr, False) is None:
self.__dict__.pop(attr)
def _with_annotations(self, values):
clone = super(AnnotatedColumnElement, self)._with_annotations(values)
ColumnElement.comparator._reset(clone)
return clone
@util.memoized_property
def name(self):
"""pull 'name' from parent, if not present"""
return self._Annotated__element.name
@util.memoized_property
def table(self):
"""pull 'table' from parent, if not present"""
return self._Annotated__element.table
@util.memoized_property
def key(self):
"""pull 'key' from parent, if not present"""
return self._Annotated__element.key
@util.memoized_property
def info(self):
return self._Annotated__element.info
@util.memoized_property
def anon_label(self):
return self._Annotated__element.anon_label
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/sql/__init__.py
|
# sql/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .expression import Alias # noqa
from .expression import alias # noqa
from .expression import all_ # noqa
from .expression import and_ # noqa
from .expression import any_ # noqa
from .expression import asc # noqa
from .expression import between # noqa
from .expression import bindparam # noqa
from .expression import case # noqa
from .expression import cast # noqa
from .expression import ClauseElement # noqa
from .expression import collate # noqa
from .expression import column # noqa
from .expression import ColumnCollection # noqa
from .expression import ColumnElement # noqa
from .expression import CompoundSelect # noqa
from .expression import cte # noqa
from .expression import Delete # noqa
from .expression import delete # noqa
from .expression import desc # noqa
from .expression import distinct # noqa
from .expression import except_ # noqa
from .expression import except_all # noqa
from .expression import exists # noqa
from .expression import extract # noqa
from .expression import false # noqa
from .expression import False_ # noqa
from .expression import FromClause # noqa
from .expression import func # noqa
from .expression import funcfilter # noqa
from .expression import Insert # noqa
from .expression import insert # noqa
from .expression import intersect # noqa
from .expression import intersect_all # noqa
from .expression import Join # noqa
from .expression import join # noqa
from .expression import label # noqa
from .expression import lateral # noqa
from .expression import literal # noqa
from .expression import literal_column # noqa
from .expression import modifier # noqa
from .expression import not_ # noqa
from .expression import null # noqa
from .expression import nullsfirst # noqa
from .expression import nullslast # noqa
from .expression import or_ # noqa
from .expression import outerjoin # noqa
from .expression import outparam # noqa
from .expression import over # noqa
from .expression import quoted_name # noqa
from .expression import Select # noqa
from .expression import select # noqa
from .expression import Selectable # noqa
from .expression import subquery # noqa
from .expression import table # noqa
from .expression import TableClause # noqa
from .expression import TableSample # noqa
from .expression import tablesample # noqa
from .expression import text # noqa
from .expression import true # noqa
from .expression import True_ # noqa
from .expression import tuple_ # noqa
from .expression import type_coerce # noqa
from .expression import union # noqa
from .expression import union_all # noqa
from .expression import Update # noqa
from .expression import update # noqa
from .expression import within_group # noqa
from .visitors import ClauseVisitor # noqa
def __go(lcls):
global __all__
from .. import util as _sa_util
import inspect as _inspect
__all__ = sorted(
name
for name, obj in lcls.items()
if not (name.startswith("_") or _inspect.ismodule(obj))
)
from .annotation import _prepare_annotations
from .annotation import Annotated # noqa
from .elements import AnnotatedColumnElement
from .elements import ClauseList # noqa
from .selectable import AnnotatedFromClause # noqa
_prepare_annotations(ColumnElement, AnnotatedColumnElement)
_prepare_annotations(FromClause, AnnotatedFromClause)
_prepare_annotations(ClauseList, Annotated)
_sa_util.dependencies.resolve_all("sqlalchemy.sql")
from . import naming # noqa
__go(locals())
| 0 |