Dataset schema (per-column dtype and observed value sizes):

    column     dtype   observed sizes
    index      int64   0 to 731k
    package    string  lengths 2 to 98
    name       string  lengths 1 to 76
    docstring  string  lengths 0 to 281k
    code       string  lengths 4 to 1.07M
    signature  string  lengths 2 to 42.8k

Each record below lists its fields in that order: index, package, name, docstring ("null" when absent), code, signature.
721,522
pydal.base
__init__
null
def __init__(
    self,
    uri="sqlite://dummy.db",
    pool_size=0,
    folder=None,
    db_codec="UTF-8",
    check_reserved=None,
    migrate=True,
    fake_migrate=False,
    migrate_enabled=True,
    fake_migrate_all=False,
    decode_credentials=False,
    driver_args=None,
    adapter_args=None,
    attempts=5,
    auto_import=False,
    bigint_id=False,
    debug=False,
    lazy_tables=False,
    db_uid=None,
    after_connection=None,
    tables=None,
    ignore_field_case=True,
    entity_quoting=True,
    table_hash=None,
):
    if uri == "<zombie>" and db_uid is not None:
        return
    super(DAL, self).__init__()
    if not issubclass(self.Rows, Rows):
        raise RuntimeError("`Rows` class must be a subclass of pydal.objects.Rows")
    if not issubclass(self.Row, Row):
        raise RuntimeError("`Row` class must be a subclass of pydal.objects.Row")
    from .drivers import DRIVERS, is_jdbc

    self._drivers_available = DRIVERS
    if not decode_credentials:
        credential_decoder = lambda cred: cred
    else:
        credential_decoder = lambda cred: unquote(cred)
    self._folder = folder
    if folder:
        self.set_folder(folder)
    self._uri = uri
    self._pool_size = pool_size
    self._db_codec = db_codec
    self._pending_references = {}
    self._request_tenant = "request_tenant"
    self._common_fields = []
    self._referee_name = "%(table)s"
    self._bigint_id = bigint_id
    self._debug = debug
    self._migrated = []
    self._LAZY_TABLES = {}
    self._lazy_tables = lazy_tables
    self._tables = SQLCallableList()
    self._aliased_tables = threading.local()
    self._driver_args = driver_args
    self._adapter_args = adapter_args
    self._check_reserved = check_reserved
    self._decode_credentials = decode_credentials
    self._attempts = attempts
    self._ignore_field_case = ignore_field_case
    if not str(attempts).isdigit() or attempts < 0:
        attempts = 5
    if uri:
        uris = isinstance(uri, (list, tuple)) and uri or [uri]
        connected = False
        for k in range(attempts):
            for uri in uris:
                try:
                    from .adapters import adapters

                    if is_jdbc and not uri.startswith("jdbc:"):
                        uri = "jdbc:" + uri
                    self._dbname = REGEX_DBNAME.match(uri).group()
                    # notice that driver args or {} else driver_args
                    # defaults to {} global, not correct
                    kwargs = dict(
                        db=self,
                        uri=uri,
                        pool_size=pool_size,
                        folder=folder,
                        db_codec=db_codec,
                        credential_decoder=credential_decoder,
                        driver_args=driver_args or {},
                        adapter_args=adapter_args or {},
                        after_connection=after_connection,
                        entity_quoting=entity_quoting,
                    )
                    adapter = adapters.get_for(self._dbname)
                    self._adapter = adapter(**kwargs)
                    # self._adapter.ignore_field_case = ignore_field_case
                    if bigint_id:
                        self._adapter.dialect._force_bigints()
                    # if there are multiple URIs to try in sequence, do not defer connection
                    if len(uris) > 1:
                        self._adapter.connector()
                    connected = True
                    break
                except SyntaxError:
                    raise
                except Exception:
                    tb = traceback.format_exc()
                    self.logger.debug(
                        "DEBUG: connect attempt %i, connection error:\n%s" % (k, tb)
                    )
            if connected:
                break
            else:
                time.sleep(1)
        if not connected:
            raise RuntimeError(
                "Failure to connect, tried %d times:\n%s" % (attempts, tb)
            )
    else:
        self._adapter = NullAdapter(
            db=self,
            pool_size=0,
            uri="None",
            folder=folder,
            db_codec=db_codec,
            after_connection=after_connection,
            entity_quoting=entity_quoting,
        )
        migrate = fake_migrate = False
    self.validators_method = None
    self.validators = None
    adapter = self._adapter
    self._uri_hash = table_hash or hashlib_md5(adapter.uri).hexdigest()
    if check_reserved:
        from .contrib.reserved_sql_keywords import ADAPTERS as RSK

        self.RSK = RSK
    self._migrate = migrate
    self._fake_migrate = fake_migrate
    self._migrate_enabled = migrate_enabled
    self._fake_migrate_all = fake_migrate_all
    if self.serializers is not None:
        for k, v in self.serializers.items():
            serializers._custom_[k] = v
    if auto_import or tables:
        self.import_table_definitions(adapter.folder, tables=tables)
(self, uri='sqlite://dummy.db', pool_size=0, folder=None, db_codec='UTF-8', check_reserved=None, migrate=True, fake_migrate=False, migrate_enabled=True, fake_migrate_all=False, decode_credentials=False, driver_args=None, adapter_args=None, attempts=5, auto_import=False, bigint_id=False, debug=False, lazy_tables=False, db_uid=None, after_connection=None, tables=None, ignore_field_case=True, entity_quoting=True, table_hash=None)
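A minimal connection sketch based on the signature above; the SQLite URI, folder, and pool size are illustrative, not taken from the dataset::

    from pydal import DAL

    db = DAL("sqlite://storage.db", folder="databases", pool_size=1)  # folder must exist
    dummy = DAL(None)  # uri=None takes the NullAdapter branch above; no real connection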
721,570
pydal.objects
Field
null
class Field(Expression, Serializable):
    Virtual = FieldVirtual
    Method = FieldMethod
    Lazy = FieldMethod  # for backward compatibility

    """
    Represents a database field

    Example:
        Usage::

            a = Field(name, 'string', length=32, default=None, required=False,
                requires=IS_NOT_EMPTY(), ondelete='CASCADE',
                notnull=False, unique=False, regex=None, options=None,
                uploadfield=True, widget=None, label=None, comment=None,
                uploadfield=True,     # True means store on disk,
                                      # 'a_field_name' means store in this field in db
                                      # False means file content will be discarded.
                writable=True, readable=True, searchable=True, listable=True,
                update=None, authorize=None, autodelete=False, represent=None,
                uploadfolder=None,
                uploadseparate=False  # upload to separate directories by uuid_keys
                                      # first 2 character and tablename.fieldname
                                      # False - old behavior
                                      # True - put uploaded file in
                                      # <uploaddir>/<tablename>.<fieldname>/uuid_key[:2]
                                      # directory)
                uploadfs=None         # a pyfilesystem where to store upload
                )

    to be used as argument of `DAL.define_table`
    """

    def __init__(
        self,
        fieldname,
        type="string",
        length=None,
        default=DEFAULT,
        required=False,
        requires=DEFAULT,
        ondelete="CASCADE",
        onupdate="CASCADE",
        notnull=False,
        unique=False,
        uploadfield=True,
        widget=None,
        label=None,
        comment=None,
        writable=True,
        readable=True,
        searchable=True,
        listable=True,
        regex=None,
        options=None,
        update=None,
        authorize=None,
        autodelete=False,
        represent=None,
        uploadfolder=None,
        uploadseparate=False,
        uploadfs=None,
        compute=None,
        custom_store=None,
        custom_retrieve=None,
        custom_retrieve_file_properties=None,
        custom_delete=None,
        filter_in=None,
        filter_out=None,
        custom_qualifier=None,
        map_none=None,
        rname=None,
        **others,
    ):
        self._db = self.db = None  # both for backward compatibility
        self.table = self._table = None
        self.op = None
        self.first = None
        self.second = None
        if PY2 and isinstance(fieldname, unicode):
            try:
                fieldname = str(fieldname)
            except UnicodeEncodeError:
                raise SyntaxError("Field: invalid unicode field name")
        self.name = fieldname = cleanup(fieldname)
        if (
            not isinstance(fieldname, str)
            or hasattr(Table, fieldname)
            or not REGEX_VALID_TB_FLD.match(fieldname)
            or REGEX_PYTHON_KEYWORDS.match(fieldname)
        ):
            raise SyntaxError(
                "Field: invalid field name: %s, "
                'use rname for "funny" names' % fieldname
            )
        if not isinstance(type, (Table, Field)):
            self.type = type
        else:
            self.type = "reference %s" % type
        self.length = (
            length if length is not None else DEFAULTLENGTH.get(self.type, 512)
        )
        self.default = default if default is not DEFAULT else (update or None)
        self.required = required  # is this field required
        self.ondelete = ondelete.upper()  # this is for reference fields only
        self.onupdate = onupdate.upper()  # this is for reference fields only
        self.notnull = notnull
        self.unique = unique
        # split to deal with decimal(,)
        self.regex = regex
        if not regex and isinstance(self.type, str):
            self.regex = DEFAULT_REGEX.get(self.type.split("(")[0])
        self.options = options
        self.uploadfield = uploadfield
        self.uploadfolder = uploadfolder
        self.uploadseparate = uploadseparate
        self.uploadfs = uploadfs
        self.widget = widget
        self.comment = comment
        self.writable = writable
        self.readable = readable
        self.searchable = searchable
        self.listable = listable
        self.update = update
        self.authorize = authorize
        self.autodelete = autodelete
        self.represent = (
            list_represent
            if represent is None and type in ("list:integer", "list:string")
            else represent
        )
        self.compute = compute
        self.isattachment = True
        self.custom_store = custom_store
        self.custom_retrieve = custom_retrieve
        self.custom_retrieve_file_properties = custom_retrieve_file_properties
        self.custom_delete = custom_delete
        self.filter_in = filter_in
        self.filter_out = filter_out
        self.custom_qualifier = custom_qualifier
        self.label = label if label is not None else fieldname.replace("_", " ").title()
        self.requires = requires if requires is not None else []
        self.map_none = map_none
        self._rname = self._raw_rname = rname
        stype = self.type
        if isinstance(self.type, SQLCustomType):
            stype = self.type.type
        self._itype = REGEX_TYPE.match(stype).group(0) if stype else None
        for key in others:
            setattr(self, key, others[key])

    def bind(self, table):
        if self._table is not None:
            raise ValueError("Field %s is already bound to a table" % self.longname)
        self.db = self._db = table._db
        self.table = self._table = table
        self.tablename = self._tablename = table._tablename
        if self._db and self._rname is None:
            self._rname = self._db._adapter.sqlsafe_field(self.name)
            self._raw_rname = self.name

    def set_attributes(self, *args, **attributes):
        self.__dict__.update(*args, **attributes)
        return self

    def clone(self, point_self_references_to=False, **args):
        field = copy.copy(self)
        if point_self_references_to and self.type == "reference %s" % self._tablename:
            field.type = "reference %s" % point_self_references_to
        field.__dict__.update(args)
        field.db = field._db = None
        field.table = field._table = None
        field.tablename = field._tablename = None
        if self._db and self._rname == self._db._adapter.sqlsafe_field(self.name):
            # Reset the name because it may need to be requoted by bind()
            field._rname = field._raw_rname = None
        return field

    def store(self, file, filename=None, path=None):
        # make sure filename is a str sequence
        filename = "{}".format(filename)
        if self.custom_store:
            return self.custom_store(file, filename, path)
        if isinstance(file, cgi.FieldStorage):
            filename = filename or file.filename
            file = file.file
        elif not filename:
            filename = file.name
        filename = os.path.basename(
            filename.replace("/", os.sep).replace("\\", os.sep)
        )
        m = re.search(REGEX_UPLOAD_EXTENSION, filename)
        extension = m and m.group(1) or "txt"
        uuid_key = self._db.uuid().replace("-", "")[-16:] if self._db else uuidstr()
        encoded_filename = to_native(base64.urlsafe_b64encode(to_bytes(filename)))
        newfilename = "%s.%s.%s.%s" % (
            getattr(self, "_tablename", "no_table"),
            self.name,
            uuid_key,
            encoded_filename,
        )
        newfilename = (
            newfilename[: (self.length - 1 - len(extension))] + "." + extension
        )
        self_uploadfield = self.uploadfield
        if isinstance(self_uploadfield, Field):
            blob_uploadfield_name = self_uploadfield.uploadfield
            keys = {
                self_uploadfield.name: newfilename,
                blob_uploadfield_name: file.read(),
            }
            self_uploadfield.table.insert(**keys)
        elif self_uploadfield is True:
            if self.uploadfs:
                dest_file = self.uploadfs.open(text_type(newfilename), "wb")
            else:
                if path:
                    pass
                elif self.uploadfolder:
                    path = self.uploadfolder
                elif self.db._adapter.folder:
                    path = pjoin(self.db._adapter.folder, "..", "uploads")
                else:
                    raise RuntimeError(
                        "you must specify a Field(..., uploadfolder=...)"
                    )
                if self.uploadseparate:
                    if self.uploadfs:
                        raise RuntimeError("not supported")
                    path = pjoin(
                        path, "%s.%s" % (self._tablename, self.name), uuid_key[:2]
                    )
                if not exists(path):
                    os.makedirs(path)
                pathfilename = pjoin(path, newfilename)
                dest_file = open(pathfilename, "wb")
            try:
                shutil.copyfileobj(file, dest_file)
            except IOError:
                raise IOError(
                    'Unable to store file "%s" because invalid permissions, '
                    "readonly file system, or filename too long" % pathfilename
                )
            dest_file.close()
        return newfilename

    def retrieve(self, name, path=None, nameonly=False):
        """
        If `nameonly==True` return (filename, fullfilename) instead of
        (filename, stream)
        """
        self_uploadfield = self.uploadfield
        if self.custom_retrieve:
            return self.custom_retrieve(name, path)
        if self.authorize or isinstance(self_uploadfield, str):
            row = self.db(self == name).select().first()
            if not row:
                raise NotFoundException
            if self.authorize and not self.authorize(row):
                raise NotAuthorizedException
        file_properties = self.retrieve_file_properties(name, path)
        filename = file_properties["filename"]
        if isinstance(self_uploadfield, str):  # ## if file is in DB
            stream = BytesIO(to_bytes(row[self_uploadfield] or ""))
        elif isinstance(self_uploadfield, Field):
            blob_uploadfield_name = self_uploadfield.uploadfield
            query = self_uploadfield == name
            data = self_uploadfield.table(query)[blob_uploadfield_name]
            stream = BytesIO(to_bytes(data))
        elif self.uploadfs:  # ## if file is on pyfilesystem
            stream = self.uploadfs.open(text_type(name), "rb")
        else:
            # ## if file is on regular filesystem
            # this is intentionally a string with filename and not a stream
            # this propagates and allows stream_file_or_304_or_206 to be called
            fullname = pjoin(file_properties["path"], name)
            if nameonly:
                return (filename, fullname)
            stream = open(fullname, "rb")
        return (filename, stream)

    def retrieve_file_properties(self, name, path=None):
        m = re.match(REGEX_UPLOAD_PATTERN, name)
        if not m or not self.isattachment:
            raise TypeError("Can't retrieve %s file properties" % name)
        self_uploadfield = self.uploadfield
        if self.custom_retrieve_file_properties:
            return self.custom_retrieve_file_properties(name, path)
        try:
            try:
                filename = to_unicode(
                    base64.b16decode(m.group("name"), True)
                )  # Legacy file encoding is base 16 lowercase
            except (binascii.Error, TypeError):
                filename = to_unicode(
                    base64.urlsafe_b64decode(m.group("name"))
                )  # New encoding is base 64
            filename = re.sub(REGEX_UPLOAD_CLEANUP, "_", filename)
        except (TypeError, AttributeError):
            filename = name
        # ## if file is in DB
        if isinstance(self_uploadfield, (str, Field)):
            return dict(path=None, filename=filename)
        # ## if file is on filesystem
        if not path:
            if self.uploadfolder:
                path = self.uploadfolder
            else:
                path = pjoin(self.db._adapter.folder, "..", "uploads")
            path = os.path.abspath(path)
        if self.uploadseparate:
            t = m.group("table")
            f = m.group("field")
            u = m.group("uuidkey")
            path = pjoin(path, "%s.%s" % (t, f), u[:2])
        return dict(path=path, filename=filename)

    def formatter(self, value):
        if value is None:
            return self.map_none
        requires = self.requires
        if not requires or requires is DEFAULT:
            return value
        if not isinstance(requires, (list, tuple)):
            requires = [requires]
        elif isinstance(requires, tuple):
            requires = list(requires)
        else:
            requires = copy.copy(requires)
        requires.reverse()
        for item in requires:
            if hasattr(item, "formatter"):
                value = item.formatter(value)
        return value

    def validate(self, value, record_id=None):
        requires = self.requires
        if not requires or requires is DEFAULT:
            return ((value if value != self.map_none else None), None)
        if not isinstance(requires, (list, tuple)):
            requires = [requires]
        for validator in requires:
            # notice that some validator may have different behavior
            # depending on the record id, for example
            # IS_NOT_IN_DB should exclude the current record_id from check
            (value, error) = validator(value, record_id)
            if error:
                return (value, error)
        return ((value if value != self.map_none else None), None)

    def count(self, distinct=None):
        return Expression(self.db, self._dialect.count, self, distinct, "integer")

    def as_dict(self, flat=False, sanitize=True):
        attrs = (
            "name", "authorize", "represent", "ondelete", "onupdate",
            "custom_store", "autodelete", "custom_retrieve", "filter_out",
            "uploadseparate", "widget", "uploadfs", "update", "custom_delete",
            "uploadfield", "uploadfolder", "custom_qualifier", "unique",
            "writable", "compute", "map_none", "default", "type", "required",
            "readable", "requires", "comment", "label", "length", "notnull",
            "custom_retrieve_file_properties", "filter_in",
        )
        serializable = (int, long, basestring, float, tuple, bool, type(None))

        def flatten(obj):
            if isinstance(obj, dict):
                return dict((flatten(k), flatten(v)) for k, v in obj.items())
            elif isinstance(obj, (tuple, list, set)):
                return [flatten(v) for v in obj]
            elif isinstance(obj, serializable):
                return obj
            elif isinstance(obj, (datetime.datetime, datetime.date, datetime.time)):
                return str(obj)
            else:
                return None

        d = dict()
        if not (sanitize and not (self.readable or self.writable)):
            for attr in attrs:
                if flat:
                    d.update({attr: flatten(getattr(self, attr))})
                else:
                    d.update({attr: getattr(self, attr)})
            d["fieldname"] = d.pop("name")
        return d

    def __bool__(self):
        return True

    def __str__(self):
        if self._table:
            return "%s.%s" % (self.tablename, self.name)
        return "<no table>.%s" % self.name

    def __hash__(self):
        return id(self)

    @property
    def sqlsafe(self):
        if self._table is None:
            raise SyntaxError("Field %s is not bound to any table" % self.name)
        return self._table.sql_shortref + "." + self._rname

    @property
    @deprecated("sqlsafe_name", "_rname", "Field")
    def sqlsafe_name(self):
        return self._rname

    @property
    def longname(self):
        if self._table is None:
            raise SyntaxError("Field %s is not bound to any table" % self.name)
        return self._table._tablename + "." + self.name
(fieldname, type='string', length=None, default=<function <lambda> at 0x7fbac9f848b0>, required=False, requires=<function <lambda> at 0x7fbac9f848b0>, ondelete='CASCADE', onupdate='CASCADE', notnull=False, unique=False, uploadfield=True, widget=None, label=None, comment=None, writable=True, readable=True, searchable=True, listable=True, regex=None, options=None, update=None, authorize=None, autodelete=False, represent=None, uploadfolder=None, uploadseparate=False, uploadfs=None, compute=None, custom_store=None, custom_retrieve=None, custom_retrieve_file_properties=None, custom_delete=None, filter_in=None, filter_out=None, custom_qualifier=None, map_none=None, rname=None, **others)
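A short sketch of passing Field instances to `DAL.define_table`, as the docstring above describes; the table and field names are invented for illustration::

    db.define_table(
        "person",
        Field("name", "string", length=64, notnull=True),
        Field("avatar", "upload"),  # upload fields use store()/retrieve() shown above
    )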
721,576
pydal.objects
__getitem__
null
def __getitem__(self, i):
    if isinstance(i, slice):
        start = i.start or 0
        stop = i.stop
        db = self.db
        if start < 0:
            pos0 = "(%s - %d)" % (self.len(), abs(start) - 1)
        else:
            pos0 = start + 1
        maxint = sys.maxint if PY2 else sys.maxsize
        if stop is None or stop == maxint:
            length = self.len()
        elif stop < 0:
            length = "(%s - %d - %s)" % (self.len(), abs(stop) - 1, pos0)
        else:
            length = "(%s - %s)" % (stop + 1, pos0)
        return Expression(
            db, self._dialect.substring, self, (pos0, length), self.type
        )
    else:
        return self[i : i + 1]
(self, i)
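The slice support above turns Python indexing into a SQL substring expression. A hedged sketch, assuming the illustrative `db.person.name` field from earlier::

    first3 = db.person.name[:3]          # becomes a SQL SUBSTR expression
    rows = db(db.person).select(first3)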
721,579
pydal.objects
__init__
null
def __init__(
    self,
    fieldname,
    type="string",
    length=None,
    default=DEFAULT,
    required=False,
    requires=DEFAULT,
    ondelete="CASCADE",
    onupdate="CASCADE",
    notnull=False,
    unique=False,
    uploadfield=True,
    widget=None,
    label=None,
    comment=None,
    writable=True,
    readable=True,
    searchable=True,
    listable=True,
    regex=None,
    options=None,
    update=None,
    authorize=None,
    autodelete=False,
    represent=None,
    uploadfolder=None,
    uploadseparate=False,
    uploadfs=None,
    compute=None,
    custom_store=None,
    custom_retrieve=None,
    custom_retrieve_file_properties=None,
    custom_delete=None,
    filter_in=None,
    filter_out=None,
    custom_qualifier=None,
    map_none=None,
    rname=None,
    **others,
):
    self._db = self.db = None  # both for backward compatibility
    self.table = self._table = None
    self.op = None
    self.first = None
    self.second = None
    if PY2 and isinstance(fieldname, unicode):
        try:
            fieldname = str(fieldname)
        except UnicodeEncodeError:
            raise SyntaxError("Field: invalid unicode field name")
    self.name = fieldname = cleanup(fieldname)
    if (
        not isinstance(fieldname, str)
        or hasattr(Table, fieldname)
        or not REGEX_VALID_TB_FLD.match(fieldname)
        or REGEX_PYTHON_KEYWORDS.match(fieldname)
    ):
        raise SyntaxError(
            "Field: invalid field name: %s, "
            'use rname for "funny" names' % fieldname
        )
    if not isinstance(type, (Table, Field)):
        self.type = type
    else:
        self.type = "reference %s" % type
    self.length = (
        length if length is not None else DEFAULTLENGTH.get(self.type, 512)
    )
    self.default = default if default is not DEFAULT else (update or None)
    self.required = required  # is this field required
    self.ondelete = ondelete.upper()  # this is for reference fields only
    self.onupdate = onupdate.upper()  # this is for reference fields only
    self.notnull = notnull
    self.unique = unique
    # split to deal with decimal(,)
    self.regex = regex
    if not regex and isinstance(self.type, str):
        self.regex = DEFAULT_REGEX.get(self.type.split("(")[0])
    self.options = options
    self.uploadfield = uploadfield
    self.uploadfolder = uploadfolder
    self.uploadseparate = uploadseparate
    self.uploadfs = uploadfs
    self.widget = widget
    self.comment = comment
    self.writable = writable
    self.readable = readable
    self.searchable = searchable
    self.listable = listable
    self.update = update
    self.authorize = authorize
    self.autodelete = autodelete
    self.represent = (
        list_represent
        if represent is None and type in ("list:integer", "list:string")
        else represent
    )
    self.compute = compute
    self.isattachment = True
    self.custom_store = custom_store
    self.custom_retrieve = custom_retrieve
    self.custom_retrieve_file_properties = custom_retrieve_file_properties
    self.custom_delete = custom_delete
    self.filter_in = filter_in
    self.filter_out = filter_out
    self.custom_qualifier = custom_qualifier
    self.label = label if label is not None else fieldname.replace("_", " ").title()
    self.requires = requires if requires is not None else []
    self.map_none = map_none
    self._rname = self._raw_rname = rname
    stype = self.type
    if isinstance(self.type, SQLCustomType):
        stype = self.type.type
    self._itype = REGEX_TYPE.match(stype).group(0) if stype else None
    for key in others:
        setattr(self, key, others[key])
(self, fieldname, type='string', length=None, default=<function <lambda> at 0x7fbac9f848b0>, required=False, requires=<function <lambda> at 0x7fbac9f848b0>, ondelete='CASCADE', onupdate='CASCADE', notnull=False, unique=False, uploadfield=True, widget=None, label=None, comment=None, writable=True, readable=True, searchable=True, listable=True, regex=None, options=None, update=None, authorize=None, autodelete=False, represent=None, uploadfolder=None, uploadseparate=False, uploadfs=None, compute=None, custom_store=None, custom_retrieve=None, custom_retrieve_file_properties=None, custom_delete=None, filter_in=None, filter_out=None, custom_qualifier=None, map_none=None, rname=None, **others)
721,580
pydal.objects
__invert__
null
def __invert__(self):
    if hasattr(self, "_op") and self.op == self._dialect.invert:
        return self.first
    return Expression(self.db, self._dialect.invert, self, type=self.type)
(self)
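In pyDAL the unary invert on a field is conventionally used for descending sort order; a sketch assuming the illustrative table from earlier::

    rows = db(db.person).select(orderby=~db.person.id)  # ORDER BY person.id DESC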
721,588
pydal.objects
__radd__
null
def __radd__(self, other):
    if not hasattr(other, "type"):
        if isinstance(other, str):
            other = self._dialect.quote(other)
        other = Expression(self.db, other, type=self.type)
    return Expression(self.db, self._dialect.add, other, self, self.type)
(self, other)
721,589
pydal.objects
__str__
null
def __str__(self):
    if self._table:
        return "%s.%s" % (self.tablename, self.name)
    return "<no table>.%s" % self.name
(self)
721,590
pydal.objects
__sub__
null
def __sub__(self, other):
    if self.type in ("integer", "bigint"):
        result_type = "integer"
    elif self.type in ["date", "time", "datetime", "double", "float"]:
        result_type = "double"
    elif self.type.startswith("decimal("):
        result_type = self.type
    else:
        raise SyntaxError("subtraction operation not supported for type")
    return Expression(self.db, self._dialect.sub, self, other, result_type)
(self, other)
721,592
pydal.objects
abs
null
def abs(self): return Expression(self.db, self._dialect.aggregate, self, "ABS", self.type)
(self)
721,593
pydal.objects
as_dict
null
def as_dict(self, flat=False, sanitize=True):
    attrs = (
        "name", "authorize", "represent", "ondelete", "onupdate",
        "custom_store", "autodelete", "custom_retrieve", "filter_out",
        "uploadseparate", "widget", "uploadfs", "update", "custom_delete",
        "uploadfield", "uploadfolder", "custom_qualifier", "unique",
        "writable", "compute", "map_none", "default", "type", "required",
        "readable", "requires", "comment", "label", "length", "notnull",
        "custom_retrieve_file_properties", "filter_in",
    )
    serializable = (int, long, basestring, float, tuple, bool, type(None))

    def flatten(obj):
        if isinstance(obj, dict):
            return dict((flatten(k), flatten(v)) for k, v in obj.items())
        elif isinstance(obj, (tuple, list, set)):
            return [flatten(v) for v in obj]
        elif isinstance(obj, serializable):
            return obj
        elif isinstance(obj, (datetime.datetime, datetime.date, datetime.time)):
            return str(obj)
        else:
            return None

    d = dict()
    if not (sanitize and not (self.readable or self.writable)):
        for attr in attrs:
            if flat:
                d.update({attr: flatten(getattr(self, attr))})
            else:
                d.update({attr: getattr(self, attr)})
        d["fieldname"] = d.pop("name")
    return d
(self, flat=False, sanitize=True)
721,597
pydal.objects
avg
null
def avg(self): return Expression(self.db, self._dialect.aggregate, self, "AVG", self.type)
(self)
721,598
pydal.objects
belongs
Accepts the following inputs:: field.belongs(1, 2) field.belongs((1, 2)) field.belongs(query) Does NOT accept: field.belongs(1) If the set you want back includes `None` values, you can do:: field.belongs((1, None), null=True)
def belongs(self, *value, **kwattr):
    """
    Accepts the following inputs::

        field.belongs(1, 2)
        field.belongs((1, 2))
        field.belongs(query)

    Does NOT accept:

        field.belongs(1)

    If the set you want back includes `None` values, you can do::

        field.belongs((1, None), null=True)
    """
    db = self.db
    if len(value) == 1:
        value = value[0]
    if isinstance(value, Query):
        value = db(value)._select(value.first._table._id)
    elif not isinstance(value, (Select, basestring)):
        value = set(value)
        if kwattr.get("null") and None in value:
            value.remove(None)
            return (self == None) | Query(
                self.db, self._dialect.belongs, self, value
            )
    return Query(self.db, self._dialect.belongs, self, value)
(self, *value, **kwattr)
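A sketch of the input forms the docstring describes, with illustrative ids::

    q1 = db.person.id.belongs(1, 2)                  # ... IN (1, 2)
    q2 = db.person.id.belongs((1, None), null=True)  # IN (1) OR IS NULL
    rows = db(q1).select()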
721,599
pydal.objects
bind
null
def bind(self, table):
    if self._table is not None:
        raise ValueError("Field %s is already bound to a table" % self.longname)
    self.db = self._db = table._db
    self.table = self._table = table
    self.tablename = self._tablename = table._tablename
    if self._db and self._rname is None:
        self._rname = self._db._adapter.sqlsafe_field(self.name)
        self._raw_rname = self.name
(self, table)
721,600
pydal.objects
cast
null
def cast(self, cast_as, **kwargs): return Expression( self.db, self._dialect.cast, self, self._dialect.types[cast_as] % kwargs, cast_as, )
(self, cast_as, **kwargs)
721,601
pydal.objects
clone
null
def clone(self, point_self_references_to=False, **args):
    field = copy.copy(self)
    if point_self_references_to and self.type == "reference %s" % self._tablename:
        field.type = "reference %s" % point_self_references_to
    field.__dict__.update(args)
    field.db = field._db = None
    field.table = field._table = None
    field.tablename = field._tablename = None
    if self._db and self._rname == self._db._adapter.sqlsafe_field(self.name):
        # Reset the name because it may need to be requoted by bind()
        field._rname = field._raw_rname = None
    return field
(self, point_self_references_to=False, **args)
721,602
pydal.objects
coalesce
null
def coalesce(self, *others): return Expression(self.db, self._dialect.coalesce, self, others, self.type)
(self, *others)
721,603
pydal.objects
coalesce_zero
null
def coalesce_zero(self): return Expression(self.db, self._dialect.coalesce_zero, self, None, self.type)
(self)
721,604
pydal.objects
contains
For GAE contains() is always case sensitive
def contains(self, value, all=False, case_sensitive=False):
    """
    For GAE contains() is always case sensitive
    """
    if isinstance(value, (list, tuple)):
        subqueries = [
            self.contains(str(v), case_sensitive=case_sensitive)
            for v in value
            if str(v)
        ]
        if not subqueries:
            return self.contains("")
        else:
            return reduce(all and AND or OR, subqueries)
    if self.type not in (
        "string",
        "text",
        "json",
        "jsonb",
        "upload",
    ) and not self.type.startswith("list:"):
        raise SyntaxError("contains used with incompatible field type")
    return Query(
        self.db, self._dialect.contains, self, value, case_sensitive=case_sensitive
    )
(self, value, all=False, case_sensitive=False)
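A sketch of `contains` on a string field, and on a list of values (reduced with AND when `all=True`), assuming the illustrative table::

    db(db.person.name.contains("an", case_sensitive=False)).select()
    db(db.person.name.contains(["an", "ob"], all=True)).select()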
721,605
pydal.objects
count
null
def count(self, distinct=None): return Expression(self.db, self._dialect.count, self, distinct, "integer")
(self, distinct=None)
721,606
pydal.objects
day
null
def day(self): return Expression(self.db, self._dialect.extract, self, "day", "integer")
(self)
721,607
pydal.objects
endswith
null
def endswith(self, value):
    if self.type not in ("string", "text", "json", "jsonb", "upload"):
        raise SyntaxError("endswith used with incompatible field type")
    return Query(self.db, self._dialect.endswith, self, value)
(self, value)
721,608
pydal.objects
epoch
null
def epoch(self): return Expression(self.db, self._dialect.epoch, self, None, "integer")
(self)
721,609
pydal.objects
formatter
null
def formatter(self, value):
    if value is None:
        return self.map_none
    requires = self.requires
    if not requires or requires is DEFAULT:
        return value
    if not isinstance(requires, (list, tuple)):
        requires = [requires]
    elif isinstance(requires, tuple):
        requires = list(requires)
    else:
        requires = copy.copy(requires)
    requires.reverse()
    for item in requires:
        if hasattr(item, "formatter"):
            value = item.formatter(value)
    return value
(self, value)
721,610
pydal.objects
hour
null
def hour(self): return Expression(self.db, self._dialect.extract, self, "hour", "integer")
(self)
721,612
pydal.objects
json_contains
Containment operator, jsonvalue parameter must be a json string e.g. '{"country": "Peru"}' Example: Usage:: >>> tj = db.define_table('tj', Field('testjson', 'json')) >>> tj.insert(testjson={u'a': {u'a1': 2, u'a0': 1}, u'b': 3, u'c': {u'c0': {u'c01': [2, 4]}}}) >>> db(db.tj.testjson.json_contains('{"c": {"c0":{"c01": [2]}}}')).select().first() <Row {'testjson': {u'a': {u'a1': 2, u'a0': 1}, u'c': {u'c0': {u'c01': [2, 4]}}, u'b': 3}, 'id': 1L}>
def json_contains(self, jsonvalue):
    """
    Containment operator, jsonvalue parameter must be a json string
    e.g. '{"country": "Peru"}'

    Example:
        Usage::

            >>> tj = db.define_table('tj', Field('testjson', 'json'))
            >>> tj.insert(testjson={u'a': {u'a1': 2, u'a0': 1}, u'b': 3, u'c': {u'c0': {u'c01': [2, 4]}}})
            >>> db(db.tj.testjson.json_contains('{"c": {"c0":{"c01": [2]}}}')).select().first()
            <Row {'testjson': {u'a': {u'a1': 2, u'a0': 1}, u'c': {u'c0': {u'c01': [2, 4]}}, u'b': 3}, 'id': 1L}>
    """
    return Query(self.db, self._dialect.json_contains, self, jsonvalue)
(self, jsonvalue)
721,613
pydal.objects
json_key
Get the json in key which you can use to build queries or as one of the fields you want to get in a select. Example: Usage:: To use as one of the fields you want to get in a select >>> tj = db.define_table('tj', Field('testjson', 'json')) >>> tj.insert(testjson={u'a': {u'a1': 2, u'a0': 1}, u'b': 3, u'c': {u'c0': {u'c01': [2, 4]}}}) >>> row = db(db.tj).select(db.tj.testjson.json_key('a').with_alias('a')).first() >>> row.a {u'a1': 2, u'a0': 1} Using it as part of building a query >>> row = db(tj.testjson.json_key('a').json_key_value('a0') == 1).select().first() >>> row <Row {'testjson': {u'a': {u'a1': 2, u'a0': 1}, u'c': {u'c0': {u'c01': [2, 4]}}, u'b': 3}, 'id': 1L}>
def json_key(self, key):
    """
    Get the json in key which you can use to build queries or as one of the
    fields you want to get in a select.

    Example:
        Usage::

            To use as one of the fields you want to get in a select

            >>> tj = db.define_table('tj', Field('testjson', 'json'))
            >>> tj.insert(testjson={u'a': {u'a1': 2, u'a0': 1}, u'b': 3, u'c': {u'c0': {u'c01': [2, 4]}}})
            >>> row = db(db.tj).select(db.tj.testjson.json_key('a').with_alias('a')).first()
            >>> row.a
            {u'a1': 2, u'a0': 1}

            Using it as part of building a query

            >>> row = db(tj.testjson.json_key('a').json_key_value('a0') == 1).select().first()
            >>> row
            <Row {'testjson': {u'a': {u'a1': 2, u'a0': 1}, u'c': {u'c0': {u'c01': [2, 4]}}, u'b': 3}, 'id': 1L}>
    """
    return Expression(self.db, self._dialect.json_key, self, key)
(self, key)
721,614
pydal.objects
json_key_value
Get the value int or text in key Example: Usage:: To use as one of the fields you want to get in a select >>> tj = db.define_table('tj', Field('testjson', 'json')) >>> tj.insert(testjson={u'a': {u'a1': 2, u'a0': 1}, u'b': 3, u'c': {u'c0': {u'c01': [2, 4]}}}) >>> row = db(db.tj).select(db.tj.testjson.json_key_value('b').with_alias('b')).first() >>> row.b '3' Using it as part of building a query >>> row = db(db.tj.testjson.json_key('a').json_key_value('a0') == 1).select().first() >>> row <Row {'testjson': {u'a': {u'a1': 2, u'a0': 1}, u'c': {u'c0': {u'c01': [2, 4]}}, u'b': 3}, 'id': 1L}>
def json_key_value(self, key):
    """
    Get the value int or text in key

    Example:
        Usage::

            To use as one of the fields you want to get in a select

            >>> tj = db.define_table('tj', Field('testjson', 'json'))
            >>> tj.insert(testjson={u'a': {u'a1': 2, u'a0': 1}, u'b': 3, u'c': {u'c0': {u'c01': [2, 4]}}})
            >>> row = db(db.tj).select(db.tj.testjson.json_key_value('b').with_alias('b')).first()
            >>> row.b
            '3'

            Using it as part of building a query

            >>> row = db(db.tj.testjson.json_key('a').json_key_value('a0') == 1).select().first()
            >>> row
            <Row {'testjson': {u'a': {u'a1': 2, u'a0': 1}, u'c': {u'c0': {u'c01': [2, 4]}}, u'b': 3}, 'id': 1L}>
    """
    return Expression(self.db, self._dialect.json_key_value, self, key)
(self, key)
721,615
pydal.objects
json_path
Get the json in path which you can use for more queries Example: Usage:: >>> tj = db.define_table('tj', Field('testjson', 'json')) >>> tj.insert(testjson={u'a': {u'a1': 2, u'a0': 1}, u'b': 3, u'c': {u'c0': {u'c01': [2, 4]}}}) >>> row = db(db.tj.id > 0).select(db.tj.testjson.json_path('{c, c0, c01, 0}').with_alias('firstc01')).first() >>> row.firstc01 2
def json_path(self, path):
    """
    Get the json in path which you can use for more queries

    Example:
        Usage::

            >>> tj = db.define_table('tj', Field('testjson', 'json'))
            >>> tj.insert(testjson={u'a': {u'a1': 2, u'a0': 1}, u'b': 3, u'c': {u'c0': {u'c01': [2, 4]}}})
            >>> row = db(db.tj.id > 0).select(db.tj.testjson.json_path('{c, c0, c01, 0}').with_alias('firstc01')).first()
            >>> row.firstc01
            2
    """
    return Expression(self.db, self._dialect.json_path, self, path)
(self, path)
721,616
pydal.objects
json_path_value
Get the value in path which you can use for more queries Example: Usage:: >>> tj = db.define_table('tj', Field('testjson', 'json')) >>> tj.insert(testjson={u'a': {u'a1': 2, u'a0': 1}, u'b': 3, u'c': {u'c0': {u'c01': [2, 4]}}}) >>> db(db.tj.testjson.json_path_value('{a, a1}') == 2).select().first() <Row {'testjson': {u'a': {u'a1': 2, u'a0': 1}, u'c': {u'c0': {u'c01': [2, 4]}}, u'b': 3}, 'id': 1L}>
def json_path_value(self, path):
    """
    Get the value in path which you can use for more queries

    Example:
        Usage::

            >>> tj = db.define_table('tj', Field('testjson', 'json'))
            >>> tj.insert(testjson={u'a': {u'a1': 2, u'a0': 1}, u'b': 3, u'c': {u'c0': {u'c01': [2, 4]}}})
            >>> db(db.tj.testjson.json_path_value('{a, a1}') == 2).select().first()
            <Row {'testjson': {u'a': {u'a1': 2, u'a0': 1}, u'c': {u'c0': {u'c01': [2, 4]}}, u'b': 3}, 'id': 1L}>
    """
    return Expression(self.db, self._dialect.json_path_value, self, path)
(self, path)
721,617
pydal.objects
len
null
def len(self): return Expression(self.db, self._dialect.length, self, None, "integer")
(self)
721,619
pydal.objects
lower
null
def lower(self): return Expression(self.db, self._dialect.lower, self, None, self.type)
(self)
721,620
pydal.objects
max
null
def max(self): return Expression(self.db, self._dialect.aggregate, self, "MAX", self.type)
(self)
721,621
pydal.objects
min
null
def min(self): return Expression(self.db, self._dialect.aggregate, self, "MIN", self.type)
(self)
721,622
pydal.objects
minutes
null
def minutes(self): return Expression(self.db, self._dialect.extract, self, "minute", "integer")
(self)
721,623
pydal.objects
month
null
def month(self): return Expression(self.db, self._dialect.extract, self, "month", "integer")
(self)
721,624
pydal.objects
regexp
null
def regexp(self, value, match_parameter=None): return Query( self.db, self._dialect.regexp, self, value, match_parameter=match_parameter )
(self, value, match_parameter=None)
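A sketch of a regexp query; backend support and regex dialect vary by adapter::

    db(db.person.name.regexp("^A")).select()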
721,625
pydal.objects
replace
null
def replace(self, a, b): return Expression(self.db, self._dialect.replace, self, (a, b), self.type)
(self, a, b)
721,626
pydal.objects
retrieve
If `nameonly==True` return (filename, fullfilename) instead of (filename, stream)
def retrieve(self, name, path=None, nameonly=False):
    """
    If `nameonly==True` return (filename, fullfilename) instead of
    (filename, stream)
    """
    self_uploadfield = self.uploadfield
    if self.custom_retrieve:
        return self.custom_retrieve(name, path)
    if self.authorize or isinstance(self_uploadfield, str):
        row = self.db(self == name).select().first()
        if not row:
            raise NotFoundException
        if self.authorize and not self.authorize(row):
            raise NotAuthorizedException
    file_properties = self.retrieve_file_properties(name, path)
    filename = file_properties["filename"]
    if isinstance(self_uploadfield, str):  # ## if file is in DB
        stream = BytesIO(to_bytes(row[self_uploadfield] or ""))
    elif isinstance(self_uploadfield, Field):
        blob_uploadfield_name = self_uploadfield.uploadfield
        query = self_uploadfield == name
        data = self_uploadfield.table(query)[blob_uploadfield_name]
        stream = BytesIO(to_bytes(data))
    elif self.uploadfs:  # ## if file is on pyfilesystem
        stream = self.uploadfs.open(text_type(name), "rb")
    else:
        # ## if file is on regular filesystem
        # this is intentionally a string with filename and not a stream
        # this propagates and allows stream_file_or_304_or_206 to be called
        fullname = pjoin(file_properties["path"], name)
        if nameonly:
            return (filename, fullname)
        stream = open(fullname, "rb")
    return (filename, stream)
(self, name, path=None, nameonly=False)
721,627
pydal.objects
retrieve_file_properties
null
def retrieve_file_properties(self, name, path=None):
    m = re.match(REGEX_UPLOAD_PATTERN, name)
    if not m or not self.isattachment:
        raise TypeError("Can't retrieve %s file properties" % name)
    self_uploadfield = self.uploadfield
    if self.custom_retrieve_file_properties:
        return self.custom_retrieve_file_properties(name, path)
    try:
        try:
            filename = to_unicode(
                base64.b16decode(m.group("name"), True)
            )  # Legacy file encoding is base 16 lowercase
        except (binascii.Error, TypeError):
            filename = to_unicode(
                base64.urlsafe_b64decode(m.group("name"))
            )  # New encoding is base 64
        filename = re.sub(REGEX_UPLOAD_CLEANUP, "_", filename)
    except (TypeError, AttributeError):
        filename = name
    # ## if file is in DB
    if isinstance(self_uploadfield, (str, Field)):
        return dict(path=None, filename=filename)
    # ## if file is on filesystem
    if not path:
        if self.uploadfolder:
            path = self.uploadfolder
        else:
            path = pjoin(self.db._adapter.folder, "..", "uploads")
        path = os.path.abspath(path)
    if self.uploadseparate:
        t = m.group("table")
        f = m.group("field")
        u = m.group("uuidkey")
        path = pjoin(path, "%s.%s" % (t, f), u[:2])
    return dict(path=path, filename=filename)
(self, name, path=None)
721,628
pydal.objects
seconds
null
def seconds(self): return Expression(self.db, self._dialect.extract, self, "second", "integer")
(self)
721,629
pydal.objects
set_attributes
null
def set_attributes(self, *args, **attributes):
    self.__dict__.update(*args, **attributes)
    return self
(self, *args, **attributes)
721,630
pydal.objects
st_asgeojson
null
def st_asgeojson(self, precision=15, options=0): return Expression( self.db, self._dialect.st_asgeojson, self, dict(precision=precision, options=options), "string", )
(self, precision=15, options=0)
721,631
pydal.objects
st_astext
null
def st_astext(self): return Expression(self.db, self._dialect.st_astext, self, type="string")
(self)
721,632
pydal.objects
st_aswkb
null
def st_aswkb(self): return Expression(self.db, self._dialect.st_aswkb, self, type="string")
(self)
721,634
pydal.objects
st_distance
null
def st_distance(self, other): return Expression(self.db, self._dialect.st_distance, self, other, "double")
(self, other)
721,635
pydal.objects
st_dwithin
null
def st_dwithin(self, value, distance): return Query(self.db, self._dialect.st_dwithin, self, (value, distance))
(self, value, distance)
721,639
pydal.objects
st_simplify
null
def st_simplify(self, value): return Expression(self.db, self._dialect.st_simplify, self, value, self.type)
(self, value)
721,640
pydal.objects
st_simplifypreservetopology
null
def st_simplifypreservetopology(self, value): return Expression( self.db, self._dialect.st_simplifypreservetopology, self, value, self.type )
(self, value)
721,642
pydal.objects
st_transform
null
def st_transform(self, value): return Expression(self.db, self._dialect.st_transform, self, value, self.type)
(self, value)
721,644
pydal.objects
st_x
null
def st_x(self): return Expression(self.db, self._dialect.st_x, self, type="string")
(self)
721,645
pydal.objects
st_y
null
def st_y(self): return Expression(self.db, self._dialect.st_y, self, type="string")
(self)
721,646
pydal.objects
startswith
null
def startswith(self, value):
    if self.type not in ("string", "text", "json", "jsonb", "upload"):
        raise SyntaxError("startswith used with incompatible field type")
    return Query(self.db, self._dialect.startswith, self, value)
(self, value)
721,647
pydal.objects
store
null
def store(self, file, filename=None, path=None):
    # make sure filename is a str sequence
    filename = "{}".format(filename)
    if self.custom_store:
        return self.custom_store(file, filename, path)
    if isinstance(file, cgi.FieldStorage):
        filename = filename or file.filename
        file = file.file
    elif not filename:
        filename = file.name
    filename = os.path.basename(filename.replace("/", os.sep).replace("\\", os.sep))
    m = re.search(REGEX_UPLOAD_EXTENSION, filename)
    extension = m and m.group(1) or "txt"
    uuid_key = self._db.uuid().replace("-", "")[-16:] if self._db else uuidstr()
    encoded_filename = to_native(base64.urlsafe_b64encode(to_bytes(filename)))
    newfilename = "%s.%s.%s.%s" % (
        getattr(self, "_tablename", "no_table"),
        self.name,
        uuid_key,
        encoded_filename,
    )
    newfilename = (
        newfilename[: (self.length - 1 - len(extension))] + "." + extension
    )
    self_uploadfield = self.uploadfield
    if isinstance(self_uploadfield, Field):
        blob_uploadfield_name = self_uploadfield.uploadfield
        keys = {
            self_uploadfield.name: newfilename,
            blob_uploadfield_name: file.read(),
        }
        self_uploadfield.table.insert(**keys)
    elif self_uploadfield is True:
        if self.uploadfs:
            dest_file = self.uploadfs.open(text_type(newfilename), "wb")
        else:
            if path:
                pass
            elif self.uploadfolder:
                path = self.uploadfolder
            elif self.db._adapter.folder:
                path = pjoin(self.db._adapter.folder, "..", "uploads")
            else:
                raise RuntimeError(
                    "you must specify a Field(..., uploadfolder=...)"
                )
            if self.uploadseparate:
                if self.uploadfs:
                    raise RuntimeError("not supported")
                path = pjoin(
                    path, "%s.%s" % (self._tablename, self.name), uuid_key[:2]
                )
            if not exists(path):
                os.makedirs(path)
            pathfilename = pjoin(path, newfilename)
            dest_file = open(pathfilename, "wb")
        try:
            shutil.copyfileobj(file, dest_file)
        except IOError:
            raise IOError(
                'Unable to store file "%s" because invalid permissions, '
                "readonly file system, or filename too long" % pathfilename
            )
        dest_file.close()
    return newfilename
(self, file, filename=None, path=None)
721,648
pydal.objects
sum
null
def sum(self): return Expression(self.db, self._dialect.aggregate, self, "SUM", self.type)
(self)
721,649
pydal.objects
upper
null
def upper(self): return Expression(self.db, self._dialect.upper, self, None, self.type)
(self)
721,650
pydal.objects
validate
null
def validate(self, value, record_id=None):
    requires = self.requires
    if not requires or requires is DEFAULT:
        return ((value if value != self.map_none else None), None)
    if not isinstance(requires, (list, tuple)):
        requires = [requires]
    for validator in requires:
        # notice that some validator may have different behavior
        # depending on the record id, for example
        # IS_NOT_IN_DB should exclude the current record_id from check
        (value, error) = validator(value, record_id)
        if error:
            return (value, error)
    return ((value if value != self.map_none else None), None)
(self, value, record_id=None)
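A sketch of calling `validate` directly; with no validators attached it returns the value and `None` for the error::

    value, error = db.person.name.validate("Ann")
    if error is not None:
        print("rejected:", error)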
721,652
pydal.objects
year
null
def year(self): return Expression(self.db, self._dialect.extract, self, "year", "integer")
(self)
721,653
pydal.helpers.classes
SQLCustomType
Allows defining of custom SQL types Args: type: the web2py type (default = 'string') native: the backend type encoder: how to encode the value to store it in the backend decoder: how to decode the value retrieved from the backend validator: what validators to use ( default = None, will use the default validator for type) Example:: Define as: decimal = SQLCustomType( type ='double', native ='integer', encoder =(lambda x: int(float(x) * 100)), decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) ) ) db.define_table( 'example', Field('value', type=decimal) )
class SQLCustomType(object):
    """
    Allows defining of custom SQL types

    Args:
        type: the web2py type (default = 'string')
        native: the backend type
        encoder: how to encode the value to store it in the backend
        decoder: how to decode the value retrieved from the backend
        validator: what validators to use (default = None, will use the
            default validator for type)

    Example::

        Define as:

            decimal = SQLCustomType(
                type='double',
                native='integer',
                encoder=(lambda x: int(float(x) * 100)),
                decoder=(lambda x: Decimal("0.00") + Decimal(str(float(x) / 100)))
            )

            db.define_table(
                'example',
                Field('value', type=decimal)
            )
    """

    def __init__(
        self,
        type="string",
        native=None,
        encoder=None,
        decoder=None,
        validator=None,
        _class=None,
        widget=None,
        represent=None,
    ):
        self.type = type
        self.native = native
        self.encoder = encoder or (lambda x: x)
        self.decoder = decoder or (lambda x: x)
        self.validator = validator
        self._class = _class or type
        self.widget = widget
        self.represent = represent

    def startswith(self, text=None):
        try:
            return self.type.startswith(self, text)
        except TypeError:
            return False

    def endswith(self, text=None):
        try:
            return self.type.endswith(self, text)
        except TypeError:
            return False

    def __getslice__(self, a=0, b=100):
        return None

    def __getitem__(self, i):
        return None

    def __str__(self):
        return self._class
(type='string', native=None, encoder=None, decoder=None, validator=None, _class=None, widget=None, represent=None)
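A self-contained version of the docstring's decimal example, with the imports it needs; the table name is illustrative::

    from decimal import Decimal
    from pydal import DAL, Field
    from pydal.helpers.classes import SQLCustomType

    decimal_cents = SQLCustomType(
        type="double",
        native="integer",
        encoder=lambda x: int(float(x) * 100),  # store as integer cents
        decoder=lambda x: Decimal("0.00") + Decimal(str(float(x) / 100)),
    )
    db = DAL("sqlite://dummy.db")
    db.define_table("example", Field("value", type=decimal_cents))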
721,654
pydal.helpers.classes
__getitem__
null
def __getitem__(self, i): return None
(self, i)
721,655
pydal.helpers.classes
__getslice__
null
def __getslice__(self, a=0, b=100): return None
(self, a=0, b=100)
721,656
pydal.helpers.classes
__init__
null
def __init__(
    self,
    type="string",
    native=None,
    encoder=None,
    decoder=None,
    validator=None,
    _class=None,
    widget=None,
    represent=None,
):
    self.type = type
    self.native = native
    self.encoder = encoder or (lambda x: x)
    self.decoder = decoder or (lambda x: x)
    self.validator = validator
    self._class = _class or type
    self.widget = widget
    self.represent = represent
(self, type='string', native=None, encoder=None, decoder=None, validator=None, _class=None, widget=None, represent=None)
721,657
pydal.helpers.classes
__str__
null
def __str__(self): return self._class
(self)
721,658
pydal.helpers.classes
endswith
null
def endswith(self, text=None):
    try:
        return self.type.endswith(self, text)
    except TypeError:
        return False
(self, text=None)
721,659
pydal.helpers.classes
startswith
null
def startswith(self, text=None):
    try:
        return self.type.startswith(self, text)
    except TypeError:
        return False
(self, text=None)
721,671
pydal.helpers.methods
geoLine
null
def geoLine(*line): return "LINESTRING (%s)" % ",".join("%f %f" % item for item in line)
(*line)
721,672
pydal.helpers.methods
geoPoint
null
def geoPoint(x, y): return "POINT (%f %f)" % (x, y)
(x, y)
721,673
pydal.helpers.methods
geoPolygon
null
def geoPolygon(*line): return "POLYGON ((%s))" % ",".join("%f %f" % item for item in line)
(*line)
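These helpers only format WKT strings, so their output is easy to check::

    geoPoint(1, 2)           # 'POINT (1.000000 2.000000)'
    geoLine((0, 0), (1, 1))  # 'LINESTRING (0.000000 0.000000,1.000000 1.000000)'
    geoPolygon((0, 0), (1, 0), (0, 1))  # 'POLYGON ((0.000000 0.000000,...))'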
721,679
json_log_formatter
JSONFormatter
JSON log formatter. Usage example:: import logging import json_log_formatter json_handler = logging.FileHandler(filename='/var/log/my-log.json') json_handler.setFormatter(json_log_formatter.JSONFormatter()) logger = logging.getLogger('my_json') logger.addHandler(json_handler) logger.info('Sign up', extra={'referral_code': '52d6ce'}) The log file will contain the following log record (inline):: { "message": "Sign up", "time": "2015-09-01T06:06:26.524448", "referral_code": "52d6ce" }
class JSONFormatter(logging.Formatter):
    """JSON log formatter.

    Usage example::

        import logging
        import json_log_formatter

        json_handler = logging.FileHandler(filename='/var/log/my-log.json')
        json_handler.setFormatter(json_log_formatter.JSONFormatter())

        logger = logging.getLogger('my_json')
        logger.addHandler(json_handler)

        logger.info('Sign up', extra={'referral_code': '52d6ce'})

    The log file will contain the following log record (inline)::

        {
            "message": "Sign up",
            "time": "2015-09-01T06:06:26.524448",
            "referral_code": "52d6ce"
        }
    """

    json_lib = json

    def format(self, record):
        message = record.getMessage()
        extra = self.extra_from_record(record)
        json_record = self.json_record(message, extra, record)
        mutated_record = self.mutate_json_record(json_record)
        # Backwards compatibility: Functions that overwrite this but don't
        # return a new value will return None because they modified the
        # argument passed in.
        if mutated_record is None:
            mutated_record = json_record
        return self.to_json(mutated_record)

    def to_json(self, record):
        """Converts record dict to a JSON string.

        It makes best effort to serialize a record (represents an object
        as a string) instead of raising TypeError if json library supports
        default argument. Note, ujson doesn't support it.
        ValueError and OverflowError are also caught to avoid crashing an app,
        e.g., due to circular reference.

        Override this method to change the way dict is converted to JSON.
        """
        try:
            return self.json_lib.dumps(record, default=_json_serializable)
        # ujson doesn't support default argument and raises TypeError.
        # "ValueError: Circular reference detected" is raised
        # when there is a reference to object inside the object itself.
        except (TypeError, ValueError, OverflowError):
            try:
                return self.json_lib.dumps(record)
            except (TypeError, ValueError, OverflowError):
                return '{}'

    def extra_from_record(self, record):
        """Returns `extra` dict you passed to logger.

        The `extra` keyword argument is used to populate
        the `__dict__` of the `LogRecord`.
        """
        return {
            attr_name: record.__dict__[attr_name]
            for attr_name in record.__dict__
            if attr_name not in BUILTIN_ATTRS
        }

    def json_record(self, message, extra, record):
        """Prepares a JSON payload which will be logged.

        Override this method to change JSON log format.

        :param message: Log message, e.g., `logger.info(msg='Sign up')`.
        :param extra: Dictionary that was passed as `extra` param
            `logger.info('Sign up', extra={'referral_code': '52d6ce'})`.
        :param record: `LogRecord` we got from `JSONFormatter.format()`.
        :return: Dictionary which will be passed to JSON lib.
        """
        extra['message'] = message
        if 'time' not in extra:
            extra['time'] = datetime.now(timezone.utc)
        if record.exc_info:
            extra['exc_info'] = self.formatException(record.exc_info)
        return extra

    def mutate_json_record(self, json_record):
        """Override it to convert fields of `json_record` to needed types.

        Default implementation converts `datetime` to string in ISO8601 format.
        """
        for attr_name in json_record:
            attr = json_record[attr_name]
            if isinstance(attr, datetime):
                json_record[attr_name] = attr.isoformat()
        return json_record
(fmt=None, datefmt=None, style='%', validate=True, *, defaults=None)
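A variant of the docstring's usage example that logs to stderr instead of a file; `StreamHandler` and `setLevel` are standard-library pieces, not part of this module::

    import logging
    import json_log_formatter

    handler = logging.StreamHandler()
    handler.setFormatter(json_log_formatter.JSONFormatter())
    logger = logging.getLogger("my_json")
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    logger.info("Sign up", extra={"referral_code": "52d6ce"})
    # emits one JSON object per record, e.g.
    # {"referral_code": "52d6ce", "message": "Sign up", "time": "..."}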
721,681
json_log_formatter
extra_from_record
Returns `extra` dict you passed to logger. The `extra` keyword argument is used to populate the `__dict__` of the `LogRecord`.
def extra_from_record(self, record):
    """Returns `extra` dict you passed to logger.

    The `extra` keyword argument is used to populate
    the `__dict__` of the `LogRecord`.
    """
    return {
        attr_name: record.__dict__[attr_name]
        for attr_name in record.__dict__
        if attr_name not in BUILTIN_ATTRS
    }
(self, record)
721,682
json_log_formatter
format
null
def format(self, record):
    message = record.getMessage()
    extra = self.extra_from_record(record)
    json_record = self.json_record(message, extra, record)
    mutated_record = self.mutate_json_record(json_record)
    # Backwards compatibility: Functions that overwrite this but don't
    # return a new value will return None because they modified the
    # argument passed in.
    if mutated_record is None:
        mutated_record = json_record
    return self.to_json(mutated_record)
(self, record)
721,687
json_log_formatter
json_record
Prepares a JSON payload which will be logged. Override this method to change JSON log format. :param message: Log message, e.g., `logger.info(msg='Sign up')`. :param extra: Dictionary that was passed as `extra` param `logger.info('Sign up', extra={'referral_code': '52d6ce'})`. :param record: `LogRecord` we got from `JSONFormatter.format()`. :return: Dictionary which will be passed to JSON lib.
def json_record(self, message, extra, record):
    """Prepares a JSON payload which will be logged.

    Override this method to change JSON log format.

    :param message: Log message, e.g., `logger.info(msg='Sign up')`.
    :param extra: Dictionary that was passed as `extra` param
        `logger.info('Sign up', extra={'referral_code': '52d6ce'})`.
    :param record: `LogRecord` we got from `JSONFormatter.format()`.
    :return: Dictionary which will be passed to JSON lib.
    """
    extra['message'] = message
    if 'time' not in extra:
        extra['time'] = datetime.now(timezone.utc)
    if record.exc_info:
        extra['exc_info'] = self.formatException(record.exc_info)
    return extra
(self, message, extra, record)
721,688
json_log_formatter
mutate_json_record
Override it to convert fields of `json_record` to needed types. Default implementation converts `datetime` to string in ISO8601 format.
def mutate_json_record(self, json_record):
    """Override it to convert fields of `json_record` to needed types.

    Default implementation converts `datetime` to string in ISO8601 format.
    """
    for attr_name in json_record:
        attr = json_record[attr_name]
        if isinstance(attr, datetime):
            json_record[attr_name] = attr.isoformat()
    return json_record
(self, json_record)
721,689
json_log_formatter
to_json
Converts record dict to a JSON string. It makes best effort to serialize a record (represents an object as a string) instead of raising TypeError if json library supports default argument. Note, ujson doesn't support it. ValueError and OverflowError are also caught to avoid crashing an app, e.g., due to circular reference. Override this method to change the way dict is converted to JSON.
def to_json(self, record):
    """Converts record dict to a JSON string.

    It makes best effort to serialize a record (represents an object
    as a string) instead of raising TypeError if json library supports
    default argument. Note, ujson doesn't support it.
    ValueError and OverflowError are also caught to avoid crashing an app,
    e.g., due to circular reference.

    Override this method to change the way dict is converted to JSON.
    """
    try:
        return self.json_lib.dumps(record, default=_json_serializable)
    # ujson doesn't support default argument and raises TypeError.
    # "ValueError: Circular reference detected" is raised
    # when there is a reference to object inside the object itself.
    except (TypeError, ValueError, OverflowError):
        try:
            return self.json_lib.dumps(record)
        except (TypeError, ValueError, OverflowError):
            return '{}'
(self, record)
721,691
json_log_formatter
VerboseJSONFormatter
JSON log formatter with built-in log record attributes such as log level. Usage example:: import logging import json_log_formatter json_handler = logging.FileHandler(filename='/var/log/my-log.json') json_handler.setFormatter(json_log_formatter.VerboseJSONFormatter()) logger = logging.getLogger('my_verbose_json') logger.addHandler(json_handler) logger.error('An error has occured') The log file will contain the following log record (inline):: { "filename": "tests.py", "funcName": "test_file_name_is_testspy", "levelname": "ERROR", "lineno": 276, "module": "tests", "name": "my_verbose_json", "pathname": "/Users/bob/json-log-formatter/tests.py", "process": 3081, "processName": "MainProcess", "stack_info": null, "thread": 4664270272, "threadName": "MainThread", "message": "An error has occured", "time": "2021-07-04T21:05:42.767726" } Read more about the built-in log record attributes https://docs.python.org/3/library/logging.html#logrecord-attributes.
class VerboseJSONFormatter(JSONFormatter):
    """JSON log formatter with built-in log record attributes such as log level.

    Usage example::

        import logging
        import json_log_formatter

        json_handler = logging.FileHandler(filename='/var/log/my-log.json')
        json_handler.setFormatter(json_log_formatter.VerboseJSONFormatter())

        logger = logging.getLogger('my_verbose_json')
        logger.addHandler(json_handler)

        logger.error('An error has occured')

    The log file will contain the following log record (inline)::

        {
            "filename": "tests.py",
            "funcName": "test_file_name_is_testspy",
            "levelname": "ERROR",
            "lineno": 276,
            "module": "tests",
            "name": "my_verbose_json",
            "pathname": "/Users/bob/json-log-formatter/tests.py",
            "process": 3081,
            "processName": "MainProcess",
            "stack_info": null,
            "thread": 4664270272,
            "threadName": "MainThread",
            "message": "An error has occured",
            "time": "2021-07-04T21:05:42.767726"
        }

    Read more about the built-in log record attributes
    https://docs.python.org/3/library/logging.html#logrecord-attributes.
    """

    def json_record(self, message, extra, record):
        extra['filename'] = record.filename
        extra['funcName'] = record.funcName
        extra['levelname'] = record.levelname
        extra['lineno'] = record.lineno
        extra['module'] = record.module
        extra['name'] = record.name
        extra['pathname'] = record.pathname
        extra['process'] = record.process
        extra['processName'] = record.processName
        if hasattr(record, 'stack_info'):
            extra['stack_info'] = record.stack_info
        else:
            extra['stack_info'] = None
        extra['thread'] = record.thread
        extra['threadName'] = record.threadName
        return super(VerboseJSONFormatter, self).json_record(message, extra, record)
(fmt=None, datefmt=None, style='%', validate=True, *, defaults=None)
721,699
json_log_formatter
json_record
null
def json_record(self, message, extra, record):
    extra['filename'] = record.filename
    extra['funcName'] = record.funcName
    extra['levelname'] = record.levelname
    extra['lineno'] = record.lineno
    extra['module'] = record.module
    extra['name'] = record.name
    extra['pathname'] = record.pathname
    extra['process'] = record.process
    extra['processName'] = record.processName
    if hasattr(record, 'stack_info'):
        extra['stack_info'] = record.stack_info
    else:
        extra['stack_info'] = None
    extra['thread'] = record.thread
    extra['threadName'] = record.threadName
    return super(VerboseJSONFormatter, self).json_record(message, extra, record)
(self, message, extra, record)
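A hedged sketch of overriding json_record in a custom formatter; the RequestJSONFormatter name and the chosen attributes are illustrative, while the json_record hook itself is shown in the source above:

import json_log_formatter

class RequestJSONFormatter(json_log_formatter.JSONFormatter):
    def json_record(self, message, extra, record):
        # Copy only the record attributes this application cares about.
        extra['levelname'] = record.levelname
        extra['name'] = record.name
        return super().json_record(message, extra, record)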
721,703
json_log_formatter
_json_serializable
null
def _json_serializable(obj):
    try:
        return obj.__dict__
    except AttributeError:
        return str(obj)
(obj)
721,708
pytest_alembic.config
Config
Pytest-alembic configuration options. - `config_options`: Meant to simplify the creation of ``alembic.config.Config`` objects. Supply keys common to customization in alembic configuration. For example: - file/config_file_name (commonly alembic.ini) - script_location - sqlalchemy.url - target_metadata - process_revision_directives - include_schemas - Both `before_revision_data` and `at_revision_data` are described in detail in :ref:`Custom data`. - :code:`minimum_downgrade_revision` can be used to set a lower bound on the **downgrade** migrations which are run by built-in tests like ``test_up_down_consistency`` and ``test_downgrade_leaves_no_trace``. - :code:`skip_revisions` can be used to avoid executing specific revisions, particularly if they are slow and you can guarantee to yourself that the difference in the resulting migrations history won't have a meaningful effect. Note that skipping migrations can be "dangerous", because either DDL or data differences could lead to migrations which pass in tests, but fail in practice. For example: >>> import pytest >>> @pytest.fixture ... def alembic_config(): ... return Config(minimum_downgrade_revision='abcde12345') This would essentially short-circuit and avoid running the downgrade migrations **including and below** this migration. .. note:: If a downgrade raises a ``NotImplementedError``, it will have the same effect as a ``minimum_downgrade_revision``, but will emit a warning suggesting the use of this feature instead.
class Config: """Pytest-alembic configuration options. - `config_options`: Meant to simplify the creation of ``alembic.config.Config`` objects. Supply keys common to customization in alembic configuration. For example: - file/config_file_name (commonly alembic.ini) - script_location - sqlalchemy.url - target_metadata - process_revision_directives - include_schemas - Both `before_revision_data` and `at_revision_data` are described in detail in :ref:`Custom data`. - :code:`minimum_downgrade_revision` can be used to set a lower bound on the **downgrade** migrations which are run by built-in tests like ``test_up_down_consistency`` and ``test_downgrade_leaves_no_trace``. - :code:`skip_revisions` can be used to avoid executing specific revisions, particularly if they are slow and you can guarantee to yourself that the difference in the resulting migrations history won't have a meaningful effect. Note that skipping migrations can be "dangerous", because either DDL or data differences could lead to migrations which pass in tests, but fail in practice. For example: >>> import pytest >>> @pytest.fixture ... def alembic_config(): ... return Config(minimum_downgrade_revision='abcde12345') This would essentially short-circuit and avoid running the downgrade migrations **including and below** this migration. .. note:: If a downgrade raises a ``NotImplementedError``, it will have the same effect as a ``minimum_downgrade_revision``, but will emit a warning suggesting the use of this feature instead. """ config_options: Dict[str, Any] = field(default_factory=dict) alembic_config: Optional[alembic.config.Config] = None before_revision_data: Optional[Union[Dict, "RevisionSpec"]] = None at_revision_data: Optional[Union[Dict, "RevisionSpec"]] = None minimum_downgrade_revision: Optional[str] = None skip_revisions: Optional[List[str]] = None @classmethod def from_raw_config( cls, raw_config: Union[Dict[str, Any], alembic.config.Config, "Config", None] = None ): """Adapt between pre-produced alembic config and raw config options. Allows one to specify raw pytest-alembic config options through raw dictionary, as well as being flexible enough to allow a literal alembic Config object.
Examples: >>> Config.from_raw_config() Config(config_options={}, alembic_config=None, before_revision_data=None, at_revision_data=None, minimum_downgrade_revision=None, skip_revisions=None) >>> Config.from_raw_config({'minimum_downgrade_revision': 'abc123'}) Config(config_options={}, alembic_config=None, before_revision_data=None, at_revision_data=None, minimum_downgrade_revision='abc123', skip_revisions=None) >>> Config.from_raw_config(Config(minimum_downgrade_revision='abc123')) Config(config_options={}, alembic_config=None, before_revision_data=None, at_revision_data=None, minimum_downgrade_revision='abc123', skip_revisions=None) """ if raw_config is None: return cls() if isinstance(raw_config, alembic.config.Config): return cls(alembic_config=raw_config) if isinstance(raw_config, Config): return raw_config before_data = raw_config.pop("before_revision_data", None) at_data = raw_config.pop("at_revision_data", None) minimum_downgrade_revision = raw_config.pop("minimum_downgrade_revision", None) skip_revisions = raw_config.pop("skip_revisions", None) return cls( config_options=raw_config, alembic_config=None, before_revision_data=before_data, at_revision_data=at_data, minimum_downgrade_revision=minimum_downgrade_revision, skip_revisions=skip_revisions, ) def make_alembic_config(self, stdout): file = ( self.config_options.get("file") or self.config_options.get("config_file_name") or "alembic.ini" ) alembic_config = self.config_options.get("alembic_config") if not alembic_config and self.alembic_config: alembic_config = self.alembic_config alembic_config.stdout = stdout else: alembic_config = alembic.config.Config(file, stdout=stdout) sqlalchemy_url = self.config_options.get("sqlalchemy.url") if sqlalchemy_url: alembic_config.set_main_option("sqlalchemy.url", sqlalchemy_url) # Only set script_location if set. script_location = self.config_options.get("script_location") if script_location: alembic_config.set_section_option("alembic", "script_location", script_location) elif not alembic_config.get_section_option("alembic", "script_location"): # Or in the event that it's not set after already having loaded the config. alembic_config.set_main_option("script_location", "migrations") target_metadata = self.config_options.get("target_metadata") alembic_config.attributes["target_metadata"] = target_metadata process_revision_directives = self.config_options.get("process_revision_directives") alembic_config.attributes["process_revision_directives"] = process_revision_directives include_schemas = self.config_options.get("include_schemas", True) alembic_config.attributes["include_schemas"] = include_schemas return alembic_config
(config_options: Dict[str, Any] = <factory>, alembic_config: Optional[alembic.config.Config] = None, before_revision_data: Union[Dict, ForwardRef('RevisionSpec'), NoneType] = None, at_revision_data: Union[Dict, ForwardRef('RevisionSpec'), NoneType] = None, minimum_downgrade_revision: Optional[str] = None, skip_revisions: Optional[List[str]] = None) -> None
721,709
pytest_alembic.config
__eq__
null
from dataclasses import dataclass, field from typing import Any, cast, Dict, List, Optional, TYPE_CHECKING, Union import alembic.config from alembic.util import immutabledict if TYPE_CHECKING: from pytest_alembic.revision_data import RevisionSpec @dataclass class Config: """Pytest-alembic configuration options. - `config_options`: Meant to simplify the creation of ``alembic.config.Config`` objects. Supply keys common to customization in alembic configuration. For example: - file/config_file_name (commonly alembic.ini) - script_location - sqlalchemy.url - target_metadata - process_revision_directives - include_schemas - Both `before_revision_data` and `at_revision_data` are described in detail in :ref:`Custom data`. - :code:`minimum_downgrade_revision` can be used to set a lower bound on the **downgrade** migrations which are run by built-in tests like ``test_up_down_consistency`` and ``test_downgrade_leaves_no_trace``. - :code:`skip_revisions` can be used to avoid executing specific revisions, particularly if they are slow and you can guarantee to yourself that the difference in the resulting migrations history won't have a meaningful effect. Note that skipping migrations can be "dangerous", because either DDL or data differences could lead to migrations which pass in tests, but fail in practice. For example: >>> import pytest >>> @pytest.fixture ... def alembic_config(): ... return Config(minimum_downgrade_revision='abcde12345') This would essentially short-circuit and avoid running the downgrade migrations **including and below** this migration. .. note:: If a downgrade raises a ``NotImplementedError``, it will have the same effect as a ``minimum_downgrade_revision``, but will emit a warning suggesting the use of this feature instead. """ config_options: Dict[str, Any] = field(default_factory=dict) alembic_config: Optional[alembic.config.Config] = None before_revision_data: Optional[Union[Dict, "RevisionSpec"]] = None at_revision_data: Optional[Union[Dict, "RevisionSpec"]] = None minimum_downgrade_revision: Optional[str] = None skip_revisions: Optional[List[str]] = None @classmethod def from_raw_config( cls, raw_config: Union[Dict[str, Any], alembic.config.Config, "Config", None] = None ): """Adapt between pre-produced alembic config and raw config options. Allows one to specify raw pytest-alembic config options through raw dictionary, as well as being flexible enough to allow a literal alembic Config object.
Examples: >>> Config.from_raw_config() Config(config_options={}, alembic_config=None, before_revision_data=None, at_revision_data=None, minimum_downgrade_revision=None, skip_revisions=None) >>> Config.from_raw_config({'minimum_downgrade_revision': 'abc123'}) Config(config_options={}, alembic_config=None, before_revision_data=None, at_revision_data=None, minimum_downgrade_revision='abc123', skip_revisions=None) >>> Config.from_raw_config(Config(minimum_downgrade_revision='abc123')) Config(config_options={}, alembic_config=None, before_revision_data=None, at_revision_data=None, minimum_downgrade_revision='abc123', skip_revisions=None) """ if raw_config is None: return cls() if isinstance(raw_config, alembic.config.Config): return cls(alembic_config=raw_config) if isinstance(raw_config, Config): return raw_config before_data = raw_config.pop("before_revision_data", None) at_data = raw_config.pop("at_revision_data", None) minimum_downgrade_revision = raw_config.pop("minimum_downgrade_revision", None) skip_revisions = raw_config.pop("skip_revisions", None) return cls( config_options=raw_config, alembic_config=None, before_revision_data=before_data, at_revision_data=at_data, minimum_downgrade_revision=minimum_downgrade_revision, skip_revisions=skip_revisions, ) def make_alembic_config(self, stdout): file = ( self.config_options.get("file") or self.config_options.get("config_file_name") or "alembic.ini" ) alembic_config = self.config_options.get("alembic_config") if not alembic_config and self.alembic_config: alembic_config = self.alembic_config alembic_config.stdout = stdout else: alembic_config = alembic.config.Config(file, stdout=stdout) sqlalchemy_url = self.config_options.get("sqlalchemy.url") if sqlalchemy_url: alembic_config.set_main_option("sqlalchemy.url", sqlalchemy_url) # Only set script_location if set. script_location = self.config_options.get("script_location") if script_location: alembic_config.set_section_option("alembic", "script_location", script_location) elif not alembic_config.get_section_option("alembic", "script_location"): # Or in the event that it's not set after already having loaded the config. alembic_config.set_main_option("script_location", "migrations") target_metadata = self.config_options.get("target_metadata") alembic_config.attributes["target_metadata"] = target_metadata process_revision_directives = self.config_options.get("process_revision_directives") alembic_config.attributes["process_revision_directives"] = process_revision_directives include_schemas = self.config_options.get("include_schemas", True) alembic_config.attributes["include_schemas"] = include_schemas return alembic_config
(self, other)
721,712
pytest_alembic.config
make_alembic_config
null
def make_alembic_config(self, stdout):
    file = (
        self.config_options.get("file")
        or self.config_options.get("config_file_name")
        or "alembic.ini"
    )
    alembic_config = self.config_options.get("alembic_config")
    if not alembic_config and self.alembic_config:
        alembic_config = self.alembic_config
        alembic_config.stdout = stdout
    else:
        alembic_config = alembic.config.Config(file, stdout=stdout)

    sqlalchemy_url = self.config_options.get("sqlalchemy.url")
    if sqlalchemy_url:
        alembic_config.set_main_option("sqlalchemy.url", sqlalchemy_url)

    # Only set script_location if set.
    script_location = self.config_options.get("script_location")
    if script_location:
        alembic_config.set_section_option("alembic", "script_location", script_location)
    elif not alembic_config.get_section_option("alembic", "script_location"):
        # Or in the event that it's not set after already having loaded the config.
        alembic_config.set_main_option("script_location", "migrations")

    target_metadata = self.config_options.get("target_metadata")
    alembic_config.attributes["target_metadata"] = target_metadata

    process_revision_directives = self.config_options.get("process_revision_directives")
    alembic_config.attributes["process_revision_directives"] = process_revision_directives

    include_schemas = self.config_options.get("include_schemas", True)
    alembic_config.attributes["include_schemas"] = include_schemas

    return alembic_config
(self, stdout)
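A sketch of driving make_alembic_config directly; the file path, URL, and script_location values here are placeholders that would need to exist in a real project:

import sys
from pytest_alembic.config import Config

config = Config(config_options={
    "file": "alembic.ini",          # placeholder path
    "sqlalchemy.url": "sqlite://",  # placeholder URL
    "script_location": "migrations",
})
alembic_config = config.make_alembic_config(stdout=sys.stdout)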
721,713
pytest_alembic.runner
MigrationContext
Within a given environment/execution context, executes alembic commands.
class MigrationContext: """Within a given environment/execution context, executes alembic commands.""" command_executor: CommandExecutor revision_data: RevisionData connection_executor: ConnectionExecutor history: AlembicHistory config: Config @classmethod def from_config( cls, config: Config, command_executor: CommandExecutor, connection_executor: ConnectionExecutor, ): history = AlembicHistory.parse(command_executor.script.revision_map) return cls( command_executor=command_executor, revision_data=RevisionData.from_config(config), connection_executor=connection_executor, history=history, config=config, ) @property def heads(self) -> List[str]: """Get the list of revision heads. Result is cached for the lifetime of the `MigrationContext`. """ return self.command_executor.heads() @property def current(self) -> str: """Get the current revision.""" current = "base" def get_current(rev, _): nonlocal current if rev: current = rev[0] return [] self.command_executor.execute_fn(get_current) if current: return current return "base" def refresh_history(self) -> AlembicHistory: """Refresh the context's version of the alembic history. Note this is not done automatically to avoid the expensive reevaluation step which can make long histories take seconds longer to evaluate for each test. """ script = self.command_executor.script script.revision_map = RevisionMap(script._load_revisions) # noqa: SLF001 self.history = AlembicHistory.parse(self.command_executor.script.revision_map) return self.history def generate_revision( self, process_revision_directives=None, *, prevent_file_generation=True, autogenerate=False, **kwargs ): """Generate a test revision. If `prevent_file_generation` is `True`, the final act of this process raises a `RevisionSuccess`, which is used as a sentinel to indicate the revision was generated successfully, while not actually finishing the generation of the revision file on disk. """ alembic_config = self.command_executor.alembic_config config_directive = alembic_config.attributes["process_revision_directives"] directive = _sequence_directives(config_directive, process_revision_directives) if prevent_file_generation: directive = RevisionSuccess.process_revision_directives(directive) try: result = self.command_executor.run_command( "revision", process_revision_directives=directive, autogenerate=autogenerate, **kwargs, ) # The history will only have changed if we didn't artificially prevent it from failing.
if not prevent_file_generation: self.refresh_history() except RevisionSuccess: pass else: return result def raw_command(self, *args, **kwargs): """Execute a raw alembic command.""" return self.command_executor.run_command(*args, **kwargs) def managed_upgrade(self, dest_revision, *, current=None, return_current=True): """Perform an upgrade, one migration at a time, inserting static data at the given points.""" if current is None: current = self.current for current_revision, next_revision in self.history.revision_window(current, dest_revision): before_upgrade_data = self.revision_data.get_before(next_revision) self.insert_into(data=before_upgrade_data, revision=current_revision, table=None) if next_revision in (self.config.skip_revisions or {}): self.set_revision(next_revision) else: self.command_executor.upgrade(next_revision) at_upgrade_data = self.revision_data.get_at(next_revision) self.insert_into(data=at_upgrade_data, revision=next_revision, table=None) if return_current: current = self.current return current return None def managed_downgrade(self, dest_revision, *, current=None, return_current=True): """Perform a downgrade, one migration at a time.""" if current is None: current = self.current for next_revision, current_revision in reversed( self.history.revision_window(dest_revision, current) ): if current_revision in (self.config.skip_revisions or {}): self.set_revision(next_revision) else: try: self.command_executor.downgrade(next_revision) except alembic.util.CommandError as e: if "not a valid downgrade target" in str(e): pass else: raise if return_current: current = self.current return current return None def migrate_up_before(self, revision): """Migrate up to, but not including the given `revision`.""" preceeding_revision = self.history.previous_revision(revision) return self.managed_upgrade(preceeding_revision) def migrate_up_to(self, revision, *, return_current: bool = True): """Migrate up to, and including the given `revision`.""" return self.managed_upgrade(revision, return_current=return_current) def migrate_up_one(self): """Migrate up by exactly one revision.""" current = self.current next_revision = self.history.next_revision(current) new_revision = self.managed_upgrade(next_revision, current=current) if current == new_revision: return None return new_revision def migrate_down_before(self, revision): """Migrate down to, but not including the given `revision`.""" next_revision = self.history.next_revision(revision) return self.migrate_down_to(next_revision) def migrate_down_to(self, revision, *, return_current: bool = True): """Migrate down to, and including the given `revision`.""" self.history.validate_revision(revision) self.managed_downgrade(revision, return_current=return_current) return revision def migrate_down_one(self): """Migrate down by exactly one revision.""" current = self.current previous_revision = self.history.previous_revision(current) self.managed_downgrade(previous_revision, current=current) return previous_revision def roundtrip_next_revision(self): """Upgrade, downgrade then upgrade. This is meant to ensure that the given revision is idempotent. """ next_revision = self.migrate_up_one() if next_revision: self.migrate_down_one() return self.migrate_up_one() return None def insert_into(self, table: Optional[str], data: Union[Dict, List] = None, revision=None): """Insert data into a given table. Args: table: The name of the table to insert data into data: The data to insert.
This is eventually passed through to SQLAlchemy's Table class `values` method, and so should accept either a list of `dict`s representing a list of rows, or a `dict` representing one row. revision: The revision of MetaData to use as the table definition for the insert. """ if data is None: return if revision is None: revision = self.current self.connection_executor.table_insert( revision=revision, tablename=table, data=data, ) def table_at_revision(self, name, *, revision=None, schema=None): """Return a reference to a `sqlalchemy.Table` at the given revision. Args: name: The name of the table to produce a `sqlalchemy.Table` for. revision: The revision of the table to return. schema: The schema of the table. """ revision = revision or self.current return self.connection_executor.table(revision=revision, name=name, schema=schema) def set_revision(self, revision: str): self.command_executor.stamp(revision)
(command_executor: pytest_alembic.executor.CommandExecutor, revision_data: pytest_alembic.revision_data.RevisionData, connection_executor: pytest_alembic.executor.ConnectionExecutor, history: pytest_alembic.history.AlembicHistory, config: pytest_alembic.config.Config) -> None
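A sketch of obtaining a MigrationContext outside the fixture machinery, via the runner context manager whose source appears later in this section; the sqlite URL is a placeholder:

from sqlalchemy import create_engine
import pytest_alembic
from pytest_alembic.config import Config

engine = create_engine("sqlite://")  # placeholder engine
with pytest_alembic.runner(config=Config(), engine=engine) as ctx:
    ctx.migrate_up_to("head")
    print(ctx.current)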
721,714
pytest_alembic.runner
__eq__
null
import contextlib import functools from dataclasses import dataclass from typing import Dict, List, Optional, Union import alembic.command import alembic.migration import alembic.util from alembic.script.revision import RevisionMap from pytest_alembic.config import Config from pytest_alembic.executor import CommandExecutor, ConnectionExecutor from pytest_alembic.history import AlembicHistory from pytest_alembic.revision_data import RevisionData @contextlib.contextmanager def runner(config: Config, engine=None): """Manage the alembic execution context, in a given context. Yields: `MigrationContext` to the caller. """ command_executor = CommandExecutor.from_config(config) migration_context = MigrationContext.from_config( config, command_executor, ConnectionExecutor(engine), ) command_executor.configure(connection=engine) yield migration_context
(self, other)
721,716
pytest_alembic.runner
__repr__
null
def roundtrip_next_revision(self):
    """Upgrade, downgrade then upgrade.

    This is meant to ensure that the given revision is idempotent.
    """
    next_revision = self.migrate_up_one()
    if next_revision:
        self.migrate_down_one()
        return self.migrate_up_one()
    return None
(self)
721,717
pytest_alembic.runner
generate_revision
Generate a test revision. If `prevent_file_generation` is `True`, the final act of this process raises a `RevisionSuccess`, which is used as a sentinel to indicate the revision was generated successfully, while not actually finishing the generation of the revision file on disk.
def generate_revision(
    self,
    process_revision_directives=None,
    *,
    prevent_file_generation=True,
    autogenerate=False,
    **kwargs,
):
    """Generate a test revision.

    If `prevent_file_generation` is `True`, the final act of this process raises a
    `RevisionSuccess`, which is used as a sentinel to indicate the revision was
    generated successfully, while not actually finishing the generation of the
    revision file on disk.
    """
    alembic_config = self.command_executor.alembic_config
    config_directive = alembic_config.attributes["process_revision_directives"]
    directive = _sequence_directives(config_directive, process_revision_directives)

    if prevent_file_generation:
        directive = RevisionSuccess.process_revision_directives(directive)

    try:
        result = self.command_executor.run_command(
            "revision",
            process_revision_directives=directive,
            autogenerate=autogenerate,
            **kwargs,
        )
        # The history will only have changed if we didn't artificially prevent it from failing.
        if not prevent_file_generation:
            self.refresh_history()
    except RevisionSuccess:
        pass
    else:
        return result
(self, process_revision_directives=None, *, prevent_file_generation=True, autogenerate=False, **kwargs)
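A hedged example of calling generate_revision from a test, using pytest-alembic's standard alembic_runner fixture; the test name is illustrative, and whether autogenerate finds schema drift depends on the project's models:

def test_no_unexpected_schema_drift(alembic_runner):
    alembic_runner.migrate_up_to("head")
    # Exercises the 'revision' command without writing a file: with
    # prevent_file_generation=True (the default), RevisionSuccess is
    # raised and swallowed internally once generation would succeed.
    alembic_runner.generate_revision(autogenerate=True)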
721,718
pytest_alembic.runner
insert_into
Insert data into a given table. Args: table: The name of the table to insert data into data: The data to insert. This is eventually passed through to SQLAlchemy's Table class `values` method, and so should accept either a list of `dict`s representing a list of rows, or a `dict` representing one row. revision: The revision of MetaData to use as the table definition for the insert.
def insert_into(self, table: Optional[str], data: Union[Dict, List] = None, revision=None):
    """Insert data into a given table.

    Args:
        table: The name of the table to insert data into
        data: The data to insert. This is eventually passed through to SQLAlchemy's
            Table class `values` method, and so should accept either a list of
            `dict`s representing a list of rows, or a `dict` representing one row.
        revision: The revision of MetaData to use as the table definition for the insert.
    """
    if data is None:
        return

    if revision is None:
        revision = self.current

    self.connection_executor.table_insert(
        revision=revision,
        tablename=table,
        data=data,
    )
(self, table: Optional[str], data: Union[Dict, List, NoneType] = None, revision=None)
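A sketch of seeding rows through insert_into inside a test; the 'account' table, its columns, and the test name are placeholders:

def test_seed_rows(alembic_runner):
    alembic_runner.migrate_up_to("head")
    # A single dict inserts one row; a list of dicts inserts several.
    alembic_runner.insert_into("account", {"id": 1, "name": "example"})
    alembic_runner.insert_into("account", [
        {"id": 2, "name": "second"},
        {"id": 3, "name": "third"},
    ])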
721,719
pytest_alembic.runner
managed_downgrade
Perform a downgrade, one migration at a time.
def managed_downgrade(self, dest_revision, *, current=None, return_current=True):
    """Perform a downgrade, one migration at a time."""
    if current is None:
        current = self.current

    for next_revision, current_revision in reversed(
        self.history.revision_window(dest_revision, current)
    ):
        if current_revision in (self.config.skip_revisions or {}):
            self.set_revision(next_revision)
        else:
            try:
                self.command_executor.downgrade(next_revision)
            except alembic.util.CommandError as e:
                if "not a valid downgrade target" in str(e):
                    pass
                else:
                    raise

    if return_current:
        current = self.current
        return current
    return None
(self, dest_revision, *, current=None, return_current=True)
721,720
pytest_alembic.runner
managed_upgrade
Perform an upgrade, one migration at a time, inserting static data at the given points.
def managed_upgrade(self, dest_revision, *, current=None, return_current=True):
    """Perform an upgrade, one migration at a time, inserting static data at the given points."""
    if current is None:
        current = self.current

    for current_revision, next_revision in self.history.revision_window(current, dest_revision):
        before_upgrade_data = self.revision_data.get_before(next_revision)
        self.insert_into(data=before_upgrade_data, revision=current_revision, table=None)

        if next_revision in (self.config.skip_revisions or {}):
            self.set_revision(next_revision)
        else:
            self.command_executor.upgrade(next_revision)

        at_upgrade_data = self.revision_data.get_at(next_revision)
        self.insert_into(data=at_upgrade_data, revision=next_revision, table=None)

    if return_current:
        current = self.current
        return current
    return None
(self, dest_revision, *, current=None, return_current=True)
721,721
pytest_alembic.runner
migrate_down_before
Migrate down to, but not including the given `revision`.
def migrate_down_before(self, revision):
    """Migrate down to, but not including the given `revision`."""
    next_revision = self.history.next_revision(revision)
    return self.migrate_down_to(next_revision)
(self, revision)
721,722
pytest_alembic.runner
migrate_down_one
Migrate down by exactly one revision.
def migrate_down_one(self):
    """Migrate down by exactly one revision."""
    current = self.current
    previous_revision = self.history.previous_revision(current)
    self.managed_downgrade(previous_revision, current=current)
    return previous_revision
(self)
721,723
pytest_alembic.runner
migrate_down_to
Migrate down to, and including the given `revision`.
def migrate_down_to(self, revision, *, return_current: bool = True):
    """Migrate down to, and including the given `revision`."""
    self.history.validate_revision(revision)
    self.managed_downgrade(revision, return_current=return_current)
    return revision
(self, revision, *, return_current: bool = True)
721,724
pytest_alembic.runner
migrate_up_before
Migrate up to, but not including the given `revision`.
def migrate_up_before(self, revision):
    """Migrate up to, but not including the given `revision`."""
    preceeding_revision = self.history.previous_revision(revision)
    return self.managed_upgrade(preceeding_revision)
(self, revision)
721,725
pytest_alembic.runner
migrate_up_one
Migrate up by exactly one revision.
def migrate_up_one(self):
    """Migrate up by exactly one revision."""
    current = self.current
    next_revision = self.history.next_revision(current)
    new_revision = self.managed_upgrade(next_revision, current=current)
    if current == new_revision:
        return None
    return new_revision
(self)
721,726
pytest_alembic.runner
migrate_up_to
Migrate up to, and including the given `revision`.
def migrate_up_to(self, revision, *, return_current: bool = True):
    """Migrate up to, and including the given `revision`."""
    return self.managed_upgrade(revision, return_current=return_current)
(self, revision, *, return_current: bool = True)
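The migrate_* methods compose naturally in data-migration tests. A hedged sketch, where 'abc123' is a placeholder revision id and the test name is illustrative:

def test_data_migration_around_revision(alembic_runner):
    alembic_runner.migrate_up_before("abc123")  # everything strictly below it
    # ...seed pre-migration rows here, e.g. via insert_into...
    alembic_runner.migrate_up_one()             # apply 'abc123' itself
    # ...assert the migrated shape of the data...
    alembic_runner.migrate_down_one()           # and step back off it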
721,727
pytest_alembic.runner
raw_command
Execute a raw alembic command.
def raw_command(self, *args, **kwargs):
    """Execute a raw alembic command."""
    return self.command_executor.run_command(*args, **kwargs)
(self, *args, **kwargs)
721,728
pytest_alembic.runner
refresh_history
Refresh the context's version of the alembic history. Note this is not done automatically to avoid the expensive reevaluation step which can make long histories take seconds longer to evaluate for each test.
def refresh_history(self) -> AlembicHistory:
    """Refresh the context's version of the alembic history.

    Note this is not done automatically to avoid the expensive reevaluation step
    which can make long histories take seconds longer to evaluate for each test.
    """
    script = self.command_executor.script
    script.revision_map = RevisionMap(script._load_revisions)  # noqa: SLF001
    self.history = AlembicHistory.parse(self.command_executor.script.revision_map)
    return self.history
(self) -> pytest_alembic.history.AlembicHistory
721,729
pytest_alembic.runner
roundtrip_next_revision
Upgrade, downgrade then upgrade. This is meant to ensure that the given revision is idempotent.
def roundtrip_next_revision(self):
    """Upgrade, downgrade then upgrade.

    This is meant to ensure that the given revision is idempotent.
    """
    next_revision = self.migrate_up_one()
    if next_revision:
        self.migrate_down_one()
        return self.migrate_up_one()
    return None
(self)
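Because roundtrip_next_revision returns None once there is no next revision (migrate_up_one returns None at head), it can drive a whole-history idempotency walk. A sketch, with an illustrative test name:

def test_each_revision_roundtrips(alembic_runner):
    # Upgrade/downgrade/upgrade every revision from base to head.
    while alembic_runner.roundtrip_next_revision():
        pass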
721,730
pytest_alembic.runner
set_revision
null
def set_revision(self, revision: str):
    self.command_executor.stamp(revision)
(self, revision: str)
721,731
pytest_alembic.runner
table_at_revision
Return a reference to a `sqlalchemy.Table` at the given revision. Args: name: The name of the table to produce a `sqlalchemy.Table` for. revision: The revision of the table to return. schema: The schema of the table.
def table_at_revision(self, name, *, revision=None, schema=None):
    """Return a reference to a `sqlalchemy.Table` at the given revision.

    Args:
        name: The name of the table to produce a `sqlalchemy.Table` for.
        revision: The revision of the table to return.
        schema: The schema of the table.
    """
    revision = revision or self.current
    return self.connection_executor.table(revision=revision, name=name, schema=schema)
(self, name, *, revision=None, schema=None)
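A sketch of reflecting and querying a table at the current revision; 'account' is a placeholder table name, and alembic_engine is pytest-alembic's standard engine fixture:

from sqlalchemy import select

def test_rows_after_upgrade(alembic_runner, alembic_engine):
    alembic_runner.migrate_up_to("head")
    account = alembic_runner.table_at_revision("account")
    with alembic_engine.connect() as conn:
        rows = conn.execute(select(account)).fetchall()
    # No rows were inserted in this sketch.
    assert rows == []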
721,733
pytest_alembic.plugin.fixtures
create_alembic_fixture
Create a new `alembic_runner`-like fixture. In many cases, this function should not be strictly necessary. You **can** generally rely solely on the :code:`--test-alembic` flag, automatic insertion of tests, and the :func:`alembic_runner` fixture. However this may be useful in some situations: - If you would generally prefer to avoid the :code:`--test-alembic` flag and automatic test insertion, this is the function for you! - If you have multiple alembic histories and therefore require more than one fixture, you will **minimally** need to use this for the 2nd history (if not both) Examples: >>> from pytest_alembic import tests >>> >>> alembic = create_alembic_fixture() >>> >>> def test_upgrade_head(alembic): ... tests.test_upgrade_head(alembic) >>> >>> def test_specific_migration(alembic): ... alembic.migrate_up_to('xxxxxxx') ... assert ... Config can also be supplied similarly to the :func:`alembic_config` fixture. >>> alembic = create_alembic_fixture({'file': 'migrations.ini'})
def create_alembic_fixture(raw_config=None):
    """Create a new `alembic_runner`-like fixture.

    In many cases, this function should not be strictly necessary. You **can** generally
    rely solely on the :code:`--test-alembic` flag, automatic insertion of tests,
    and the :func:`alembic_runner` fixture.

    However this may be useful in some situations:

    - If you would generally prefer to avoid the :code:`--test-alembic` flag and
      automatic test insertion, this is the function for you!
    - If you have multiple alembic histories and therefore require more than one
      fixture, you will **minimally** need to use this for the 2nd history (if not both)

    Examples:
        >>> from pytest_alembic import tests
        >>>
        >>> alembic = create_alembic_fixture()
        >>>
        >>> def test_upgrade_head(alembic):
        ...     tests.test_upgrade_head(alembic)
        >>>
        >>> def test_specific_migration(alembic):
        ...     alembic.migrate_up_to('xxxxxxx')
        ...     assert ...

        Config can also be supplied similarly to the :func:`alembic_config` fixture.

        >>> alembic = create_alembic_fixture({'file': 'migrations.ini'})
    """

    @pytest.fixture()
    def alembic_fixture(alembic_engine):
        config = Config.from_raw_config(raw_config)
        with pytest_alembic.runner(config=config, engine=alembic_engine) as runner:
            yield runner

    return alembic_fixture
(raw_config=None)
721,738
pytest_alembic.runner
runner
Manage the alembic execution context, in a given context. Yields: `MigrationContext` to the caller.
@contextlib.contextmanager
def runner(config: Config, engine=None):
    """Manage the alembic execution context, in a given context.

    Yields:
        `MigrationContext` to the caller.
    """
    command_executor = CommandExecutor.from_config(config)
    migration_context = MigrationContext.from_config(
        config,
        command_executor,
        ConnectionExecutor(engine),
    )
    command_executor.configure(connection=engine)
    yield migration_context
(config: pytest_alembic.config.Config, engine=None)
721,739
aiopg.connection
Connection
Low-level asynchronous interface for wrapped psycopg2 connection. The Connection instance encapsulates a database session. Provides support for creating asynchronous cursors.
class Connection: """Low-level asynchronous interface for wrapped psycopg2 connection. The Connection instance encapsulates a database session. Provides support for creating asynchronous cursors. """ _source_traceback = None def __init__( self, dsn: Optional[str], timeout: float, echo: bool = False, enable_json: bool = True, enable_hstore: bool = True, enable_uuid: bool = True, **kwargs: Any, ): self._enable_json = enable_json self._enable_hstore = enable_hstore self._enable_uuid = enable_uuid self._loop = get_running_loop() self._waiter: Optional[ "asyncio.Future[None]" ] = self._loop.create_future() kwargs["async_"] = kwargs.pop("async", True) kwargs.pop("loop", None) # backward compatibility self._conn = psycopg2.connect(dsn, **kwargs) self._dsn = self._conn.dsn assert self._conn.isexecuting(), "Is conn an async at all???" self._fileno: Optional[int] = self._conn.fileno() self._timeout = timeout self._last_usage = self._loop.time() self._writing = False self._echo = echo self._notifies = asyncio.Queue() # type: ignore self._notifies_proxy = ClosableQueue(self._notifies, self._loop) self._weakref = weakref.ref(self) self._loop.add_reader( self._fileno, self._ready, self._weakref # type: ignore ) if self._loop.get_debug(): self._source_traceback = traceback.extract_stack(sys._getframe(1)) @staticmethod def _ready(weak_self: "weakref.ref[Any]") -> None: self = cast(Connection, weak_self()) if self is None: return waiter = self._waiter try: state = self._conn.poll() while self._conn.notifies: notify = self._conn.notifies.pop(0) self._notifies.put_nowait(notify) except (psycopg2.Warning, psycopg2.Error) as exc: if self._fileno is not None: try: select.select([self._fileno], [], [], 0) except OSError as os_exc: if _is_bad_descriptor_error(os_exc): with contextlib.suppress(OSError): self._loop.remove_reader(self._fileno) # forget a bad file descriptor, don't try to # touch it self._fileno = None try: if self._writing: self._writing = False if self._fileno is not None: self._loop.remove_writer(self._fileno) except OSError as exc2: if exc2.errno != errno.EBADF: # EBADF is ok for closed file descriptor # chain exception otherwise exc2.__cause__ = exc exc = exc2 self._notifies_proxy.close(exc) if waiter is not None and not waiter.done(): waiter.set_exception(exc) else: if self._fileno is None: # connection closed if waiter is not None and not waiter.done(): waiter.set_exception( psycopg2.OperationalError("Connection closed") ) if state == psycopg2.extensions.POLL_OK: if self._writing: self._loop.remove_writer(self._fileno) # type: ignore self._writing = False if waiter is not None and not waiter.done(): waiter.set_result(None) elif state == psycopg2.extensions.POLL_READ: if self._writing: self._loop.remove_writer(self._fileno) # type: ignore self._writing = False elif state == psycopg2.extensions.POLL_WRITE: if not self._writing: self._loop.add_writer( self._fileno, self._ready, weak_self # type: ignore ) self._writing = True elif state == psycopg2.extensions.POLL_ERROR: self._fatal_error( "Fatal error on aiopg connection: " "POLL_ERROR from underlying .poll() call" ) else: self._fatal_error( f"Fatal error on aiopg connection: " f"unknown answer {state} from underlying " f".poll() call" ) def _fatal_error(self, message: str) -> None: # Should be called from exception handler only. 
self._loop.call_exception_handler( { "message": message, "connection": self, } ) self.close() if self._waiter and not self._waiter.done(): self._waiter.set_exception(psycopg2.OperationalError(message)) def _create_waiter(self, func_name: str) -> "asyncio.Future[None]": if self._waiter is not None: raise RuntimeError( f"{func_name}() called while another coroutine " f"is already waiting for incoming data" ) self._waiter = self._loop.create_future() return self._waiter async def _poll( self, waiter: "asyncio.Future[None]", timeout: float ) -> None: assert waiter is self._waiter, (waiter, self._waiter) self._ready(self._weakref) try: await asyncio.wait_for(self._waiter, timeout) except (asyncio.CancelledError, asyncio.TimeoutError) as exc: await asyncio.shield(self.close()) raise exc except psycopg2.extensions.QueryCanceledError as exc: self._loop.call_exception_handler( { "message": exc.pgerror, "exception": exc, "future": self._waiter, } ) raise asyncio.CancelledError finally: self._waiter = None def isexecuting(self) -> bool: return self._conn.isexecuting() # type: ignore def cursor( self, name: Optional[str] = None, cursor_factory: Any = None, scrollable: Optional[bool] = None, withhold: bool = False, timeout: Optional[float] = None, isolation_level: Optional[IsolationLevel] = None, ) -> _ContextManager[Cursor]: """A coroutine that returns a new cursor object using the connection. *cursor_factory* argument can be used to create non-standard cursors. The argument must be subclass of `psycopg2.extensions.cursor`. *name*, *scrollable* and *withhold* parameters are not supported by psycopg in asynchronous mode. """ self._last_usage = self._loop.time() coro = self._cursor( name=name, cursor_factory=cursor_factory, scrollable=scrollable, withhold=withhold, timeout=timeout, isolation_level=isolation_level, ) return _ContextManager[Cursor](coro, _close_cursor) async def _cursor( self, name: Optional[str] = None, cursor_factory: Any = None, scrollable: Optional[bool] = None, withhold: bool = False, timeout: Optional[float] = None, isolation_level: Optional[IsolationLevel] = None, ) -> Cursor: if timeout is None: timeout = self._timeout impl = await self._cursor_impl( name=name, cursor_factory=cursor_factory, scrollable=scrollable, withhold=withhold, ) cursor = Cursor(self, impl, timeout, self._echo, isolation_level) return cursor async def _cursor_impl( self, name: Optional[str] = None, cursor_factory: Any = None, scrollable: Optional[bool] = None, withhold: bool = False, ) -> Any: if cursor_factory is None: impl = self._conn.cursor( name=name, scrollable=scrollable, withhold=withhold ) else: impl = self._conn.cursor( name=name, cursor_factory=cursor_factory, scrollable=scrollable, withhold=withhold, ) return impl def _close(self) -> None: """Remove the connection from the event_loop and close it.""" # N.B. If connection contains uncommitted transaction the # transaction will be discarded if self._fileno is not None: self._loop.remove_reader(self._fileno) if self._writing: self._writing = False self._loop.remove_writer(self._fileno) self._conn.close() if not self._loop.is_closed(): if self._waiter is not None and not self._waiter.done(): self._waiter.set_exception( psycopg2.OperationalError("Connection closed") ) self._notifies_proxy.close( psycopg2.OperationalError("Connection closed") ) def close(self) -> "asyncio.Future[None]": self._close() return create_completed_future(self._loop) @property def closed(self) -> bool: """Connection status. 
Read-only attribute reporting whether the database connection is open (False) or closed (True). """ return self._conn.closed # type: ignore @property def raw(self) -> Any: """Underlying psycopg connection object, readonly""" return self._conn async def commit(self) -> None: raise psycopg2.ProgrammingError( "commit cannot be used in asynchronous mode" ) async def rollback(self) -> None: raise psycopg2.ProgrammingError( "rollback cannot be used in asynchronous mode" ) # TPC async def xid( self, format_id: int, gtrid: str, bqual: str ) -> Tuple[int, str, str]: return self._conn.xid(format_id, gtrid, bqual) # type: ignore async def tpc_begin(self, *args: Any, **kwargs: Any) -> None: raise psycopg2.ProgrammingError( "tpc_begin cannot be used in asynchronous mode" ) async def tpc_prepare(self) -> None: raise psycopg2.ProgrammingError( "tpc_prepare cannot be used in asynchronous mode" ) async def tpc_commit(self, *args: Any, **kwargs: Any) -> None: raise psycopg2.ProgrammingError( "tpc_commit cannot be used in asynchronous mode" ) async def tpc_rollback(self, *args: Any, **kwargs: Any) -> None: raise psycopg2.ProgrammingError( "tpc_rollback cannot be used in asynchronous mode" ) async def tpc_recover(self) -> None: raise psycopg2.ProgrammingError( "tpc_recover cannot be used in asynchronous mode" ) async def cancel(self) -> None: raise psycopg2.ProgrammingError( "cancel cannot be used in asynchronous mode" ) async def reset(self) -> None: raise psycopg2.ProgrammingError( "reset cannot be used in asynchronous mode" ) @property def dsn(self) -> Optional[str]: """DSN connection string. Read-only attribute representing dsn connection string used for connecting to PostgreSQL server. """ return self._dsn # type: ignore async def set_session(self, *args: Any, **kwargs: Any) -> None: raise psycopg2.ProgrammingError( "set_session cannot be used in asynchronous mode" ) @property def autocommit(self) -> bool: """Autocommit status""" return self._conn.autocommit # type: ignore @autocommit.setter def autocommit(self, val: bool) -> None: """Autocommit status""" self._conn.autocommit = val @property def isolation_level(self) -> int: """Transaction isolation level. The only allowed value is ISOLATION_LEVEL_READ_COMMITTED. """ return self._conn.isolation_level # type: ignore async def set_isolation_level(self, val: int) -> None: """Transaction isolation level. The only allowed value is ISOLATION_LEVEL_READ_COMMITTED.
""" self._conn.set_isolation_level(val) @property def encoding(self) -> str: """Client encoding for SQL operations.""" return self._conn.encoding # type: ignore async def set_client_encoding(self, val: str) -> None: self._conn.set_client_encoding(val) @property def notices(self) -> List[str]: """A list of all db messages sent to the client during the session.""" return self._conn.notices # type: ignore @property def cursor_factory(self) -> Any: """The default cursor factory used by .cursor().""" return self._conn.cursor_factory async def get_backend_pid(self) -> int: """Returns the PID of the backend server process.""" return self._conn.get_backend_pid() # type: ignore async def get_parameter_status(self, parameter: str) -> Optional[str]: """Look up a current parameter setting of the server.""" return self._conn.get_parameter_status(parameter) # type: ignore async def get_transaction_status(self) -> int: """Return the current session transaction status as an integer.""" return self._conn.get_transaction_status() # type: ignore @property def protocol_version(self) -> int: """A read-only integer representing protocol being used.""" return self._conn.protocol_version # type: ignore @property def server_version(self) -> int: """A read-only integer representing the backend version.""" return self._conn.server_version # type: ignore @property def status(self) -> int: """A read-only integer representing the status of the connection.""" return self._conn.status # type: ignore async def lobject(self, *args: Any, **kwargs: Any) -> None: raise psycopg2.ProgrammingError( "lobject cannot be used in asynchronous mode" ) @property def timeout(self) -> float: """Return default timeout for connection operations.""" return self._timeout @property def last_usage(self) -> float: """Return time() when connection was used.""" return self._last_usage @property def echo(self) -> bool: """Return echo mode status.""" return self._echo def __repr__(self) -> str: return ( f"<" f"{type(self).__module__}::{type(self).__name__} " f"isexecuting={self.isexecuting()}, " f"closed={self.closed}, " f"echo={self.echo}, " f">" ) def __del__(self) -> None: try: _conn = self._conn except AttributeError: return if _conn is not None and not _conn.closed: self.close() warnings.warn(f"Unclosed connection {self!r}", ResourceWarning) context = {"connection": self, "message": "Unclosed connection"} if self._source_traceback is not None: context["source_traceback"] = self._source_traceback self._loop.call_exception_handler(context) @property def notifies(self) -> ClosableQueue: """Return notification queue (an asyncio.Queue -like object).""" return self._notifies_proxy async def _get_oids(self) -> Tuple[Any, Any]: cursor = await self.cursor() rv0, rv1 = [], [] try: await cursor.execute( "SELECT t.oid, typarray " "FROM pg_type t JOIN pg_namespace ns ON typnamespace = ns.oid " "WHERE typname = 'hstore';" ) async for oids in cursor: if isinstance(oids, Mapping): rv0.append(oids["oid"]) rv1.append(oids["typarray"]) else: rv0.append(oids[0]) rv1.append(oids[1]) finally: cursor.close() return tuple(rv0), tuple(rv1) async def _connect(self) -> "Connection": try: await self._poll(self._waiter, self._timeout) # type: ignore except BaseException: await asyncio.shield(self.close()) raise if self._enable_json: psycopg2.extras.register_default_json(self._conn) if self._enable_uuid: psycopg2.extras.register_uuid(conn_or_curs=self._conn) if self._enable_hstore: oid, array_oid = await self._get_oids() psycopg2.extras.register_hstore( self._conn, 
oid=oid, array_oid=array_oid ) return self def __await__(self) -> Generator[Any, None, "Connection"]: return self._connect().__await__() async def __aenter__(self) -> "Connection": return self async def __aexit__( self, exc_type: Optional[Type[BaseException]], exc: Optional[BaseException], tb: Optional[TracebackType], ) -> None: await self.close()
(dsn: Optional[str], timeout: float, echo: bool = False, enable_json: bool = True, enable_hstore: bool = True, enable_uuid: bool = True, **kwargs: Any)
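A minimal usage sketch; aiopg.connect wraps this Connection class in an awaitable/async context manager, and the DSN below is a placeholder:

import asyncio

import aiopg

async def main():
    dsn = "dbname=test user=postgres host=127.0.0.1"  # placeholder DSN
    async with aiopg.connect(dsn) as conn:
        # cursor() returns an async context manager wrapping a Cursor.
        async with conn.cursor() as cur:
            await cur.execute("SELECT 1")
            row = await cur.fetchone()
            print(row)  # (1,)

asyncio.run(main())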